/*
 * Agere Systems Inc.
 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *   http://www.agere.com
 *
 *------------------------------------------------------------------------------
 *
 * et1310_tx.c - Routines used to perform data transmission.
 *
 *------------------------------------------------------------------------------
 *
 * SOFTWARE LICENSE
 *
 * This software is provided subject to the following terms and conditions,
 * which you should read carefully before using the software.  Using this
 * software indicates your acceptance of these terms and conditions.  If you do
 * not agree with these terms and conditions, do not use the software.
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *
 * Redistribution and use in source or binary forms, with or without
 * modifications, are permitted provided that the following conditions are met:
 *
 * . Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following Disclaimer as comments in the code as
 *    well as in the documentation and/or other materials provided with the
 *    distribution.
 *
 * . Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following Disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * . Neither the name of Agere Systems Inc. nor the names of the contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Disclaimer
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#include "et131x_version.h"
#include "et131x_defs.h"

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>

#include "et1310_phy.h"
#include "et131x_adapter.h"
#include "et1310_tx.h"
#include "et131x.h"

static inline void et131x_free_send_packet(struct et131x_adapter *etdev,
					   struct tcb *tcb);
static int et131x_send_packet(struct sk_buff *skb,
			      struct et131x_adapter *etdev);
static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb);

/**
 * et131x_tx_dma_memory_alloc
 * @adapter: pointer to our private adapter structure
 *
 * Returns 0 on success and errno on failure (as defined in errno.h).
 *
 * Allocates memory that will be visible both to the device and to the CPU.
 * The OS will pass us packets, pointers to which we will insert in the Tx
 * Descriptor queue. The device will read this queue to find the packets in
 * memory. The device will update the "status" in memory each time it
 * transmits a packet.
 */
int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
{
	int desc_size = 0;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	/* Allocate memory for the TCBs (Transmit Control Blocks) */
	adapter->tx_ring.tcb_ring =
		kcalloc(NUM_TCB, sizeof(struct tcb), GFP_ATOMIC | GFP_DMA);
	if (!adapter->tx_ring.tcb_ring) {
		dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n");
		return -ENOMEM;
	}

	/* Allocate enough memory for the Tx descriptor ring, and allocate
	 * some extra so that the ring can be aligned on a 4k boundary.
	 */
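	/* No explicit alignment adjustment is applied here; if one were
	 * needed, a minimal sketch (assuming the usual mask arithmetic)
	 * would be:
	 *
	 *	aligned_pa = (pa + 4096 - 1) & ~((dma_addr_t)4096 - 1);
	 *
	 * The "4096 - 1" slack requested below leaves room for such an
	 * adjustment.
	 */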
	desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) + 4096 - 1;
	tx_ring->tx_desc_ring =
	    (struct tx_desc *) pci_alloc_consistent(adapter->pdev, desc_size,
						    &tx_ring->tx_desc_ring_pa);
	if (!adapter->tx_ring.tx_desc_ring) {
		dev_err(&adapter->pdev->dev,
					"Cannot alloc memory for Tx Ring\n");
		return -ENOMEM;
	}

	/* Save physical address
	 *
	 * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here before
	 * storing the adjusted address.
	 */
	/* Allocate memory for the Tx status block */
	tx_ring->tx_status = pci_alloc_consistent(adapter->pdev,
						    sizeof(u32),
						    &tx_ring->tx_status_pa);
	if (!adapter->tx_ring.tx_status) {
		dev_err(&adapter->pdev->dev,
				  "Cannot alloc memory for Tx status block\n");
		return -ENOMEM;
	}
	return 0;
}

/**
 * et131x_tx_dma_memory_free - Free all memory allocated within this module
 * @adapter: pointer to our private adapter structure
 */
void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
{
	int desc_size = 0;

	if (adapter->tx_ring.tx_desc_ring) {
		/* Free memory relating to Tx rings here */
		desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX)
								+ 4096 - 1;
		pci_free_consistent(adapter->pdev,
				    desc_size,
				    adapter->tx_ring.tx_desc_ring,
				    adapter->tx_ring.tx_desc_ring_pa);
		adapter->tx_ring.tx_desc_ring = NULL;
	}

	/* Free memory for the Tx status block */
	if (adapter->tx_ring.tx_status) {
		pci_free_consistent(adapter->pdev,
				    sizeof(u32),
				    adapter->tx_ring.tx_status,
				    adapter->tx_ring.tx_status_pa);

		adapter->tx_ring.tx_status = NULL;
	}
	/* Free the memory for the tcb structures */
	kfree(adapter->tx_ring.tcb_ring);
}

/**
 * ConfigTxDmaRegs - Set up the tx dma section of the JAGCore.
 * @etdev: pointer to our private adapter structure
 *
 * Configure the transmit engine with the ring buffers we have created
 * and prepare it for use.
 */
void ConfigTxDmaRegs(struct et131x_adapter *etdev)
{
	struct txdma_regs __iomem *txdma = &etdev->regs->txdma;

	/* Load the hardware with the start of the transmit descriptor ring. */
	writel((u32) ((u64)etdev->tx_ring.tx_desc_ring_pa >> 32),
	       &txdma->pr_base_hi);
	writel((u32) etdev->tx_ring.tx_desc_ring_pa,
	       &txdma->pr_base_lo);

	/* Initialise the transmit DMA engine */
	writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);

	/* Load the completion writeback physical address */
	writel((u32)((u64)etdev->tx_ring.tx_status_pa >> 32),
						&txdma->dma_wb_base_hi);
	writel((u32)etdev->tx_ring.tx_status_pa, &txdma->dma_wb_base_lo);

	*etdev->tx_ring.tx_status = 0;

	writel(0, &txdma->service_request);
	etdev->tx_ring.send_idx = 0;
}

/**
 * et131x_tx_dma_disable - Stop Tx DMA on the ET1310
 * @etdev: pointer to our adapter structure
 */
void et131x_tx_dma_disable(struct et131x_adapter *etdev)
{
	/* Set up the transmit dma configuration register */
	writel(ET_TXDMA_CSR_HALT|ET_TXDMA_SNGL_EPKT,
					&etdev->regs->txdma.csr);
}

/**
 * et131x_tx_dma_enable - Re-start Tx DMA on the ET1310.
 * @etdev: pointer to our adapter structure
 *
 * Mainly used after a return to the D0 (full-power) state from a lower state.
 */
void et131x_tx_dma_enable(struct et131x_adapter *etdev)
{
	/* Set up the transmit dma configuration register for normal
	 * operation
	 */
	writel(ET_TXDMA_SNGL_EPKT|(PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
					&etdev->regs->txdma.csr);
}

/**
 * et131x_init_send - Initialize send data structures
 * @adapter: pointer to our private adapter structure
 */
void et131x_init_send(struct et131x_adapter *adapter)
{
	struct tcb *tcb;
	u32 ct;
	struct tx_ring *tx_ring;

	/* Setup some convenience pointers */
	tx_ring = &adapter->tx_ring;
	tcb = adapter->tx_ring.tcb_ring;

	tx_ring->tcb_qhead = tcb;

	memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);

	/* Go through and set up each TCB */
	for (ct = 0; ct++ < NUM_TCB; tcb++)
		/* Set the link pointer in HW TCB to the next TCB in the
		 * chain
		 */
		tcb->next = tcb + 1;

	/* Set the tail pointer */
	tcb--;
	tx_ring->tcb_qtail = tcb;
	tcb->next = NULL;
	/* Curr send queue should now be empty */
	tx_ring->send_head = NULL;
	tx_ring->send_tail = NULL;
}

/**
 * et131x_send_packets - This function is called by the OS to send packets
 * @skb: the packet(s) to send
 * @netdev: device on which to TX the above packet(s)
 *
 * Return 0 in almost all cases; non-zero value in extreme hard failure only
 */
int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
{
	int status = 0;
	struct et131x_adapter *etdev = NULL;

	etdev = netdev_priv(netdev);

	/* Send these packets
	 *
	 * NOTE: The Linux Tx entry point is only given one packet at a time
	 * to Tx, so the PacketCount and its associated array make no sense
	 * here
	 */

	/* TCB is not available */
	if (etdev->tx_ring.used >= NUM_TCB) {
		/* NOTE: If there's an error on send, no need to queue the
		 * packet under Linux; if we just send an error up to the
		 * netif layer, it will resend the skb to us.
		 */
		status = -ENOMEM;
	} else {
		/* We need to see if the link is up; if it's not, make the
		 * netif layer think we're good and drop the packet
		 */
		if ((etdev->Flags & fMP_ADAPTER_FAIL_SEND_MASK) ||
					!netif_carrier_ok(netdev)) {
			dev_kfree_skb_any(skb);
			skb = NULL;

			etdev->net_stats.tx_dropped++;
		} else {
			status = et131x_send_packet(skb, etdev);
			if (status != 0 && status != -ENOMEM) {
				/* On any other error, make netif think we're
				 * OK and drop the packet
				 */
				dev_kfree_skb_any(skb);
				skb = NULL;
				etdev->net_stats.tx_dropped++;
			}
		}
	}
	return status;
}

/**
 * et131x_send_packet - Do the work to send a packet
 * @skb: the packet(s) to send
 * @etdev: a pointer to the device's private adapter structure
 *
 * Return 0 in almost all cases; non-zero value in extreme hard failure only.
 *
 * Assumption: Send spinlock has been acquired
 */
static int et131x_send_packet(struct sk_buff *skb,
			      struct et131x_adapter *etdev)
{
	int status;
	struct tcb *tcb = NULL;
	u16 *shbufva;
	unsigned long flags;

	/* All packets must have at least a MAC address and a protocol type */
	if (skb->len < ETH_HLEN)
		return -EIO;

	/* Get a TCB for this packet */
	spin_lock_irqsave(&etdev->TCBReadyQLock, flags);

	tcb = etdev->tx_ring.tcb_qhead;

	if (tcb == NULL) {
		spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
		return -ENOMEM;
	}

	etdev->tx_ring.tcb_qhead = tcb->next;

	if (etdev->tx_ring.tcb_qhead == NULL)
		etdev->tx_ring.tcb_qtail = NULL;

	spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);

	tcb->skb = skb;

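	/* Peek at the destination address in the Ethernet header so that the
	 * completion path (et131x_free_send_packet) can credit the right
	 * broadcast/multicast/unicast counter for this packet.
	 */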
	if (skb->data != NULL && skb->len - skb->data_len >= 6) {
		shbufva = (u16 *) skb->data;

		if ((shbufva[0] == 0xffff) &&
		    (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
			tcb->flags |= fMP_DEST_BROAD;
		} else if ((shbufva[0] & 0x3) == 0x0001) {
			tcb->flags |= fMP_DEST_MULTI;
		}
	}

	tcb->next = NULL;

	/* Call the NIC specific send handler. */
	status = nic_send_packet(etdev, tcb);

	if (status != 0) {
		spin_lock_irqsave(&etdev->TCBReadyQLock, flags);

		if (etdev->tx_ring.tcb_qtail)
			etdev->tx_ring.tcb_qtail->next = tcb;
		else
			/* Apparently ready Q is empty. */
			etdev->tx_ring.tcb_qhead = tcb;

		etdev->tx_ring.tcb_qtail = tcb;
		spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
		return status;
	}
	WARN_ON(etdev->tx_ring.used > NUM_TCB);
	return 0;
}

/**
 * nic_send_packet - NIC specific send handler for version B silicon.
 * @etdev: pointer to our adapter
 * @tcb: pointer to struct tcb
 *
 * Returns 0 or errno.
 */
static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb)
{
	u32 i;
	struct tx_desc desc[24];	/* 24 x 16 bytes */
	u32 frag = 0;
	u32 thiscopy, remainder;
	struct sk_buff *skb = tcb->skb;
	u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
	struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
	unsigned long flags;

	/* Part of the optimizations of this send routine restrict us to
	 * sending 24 fragments at a pass.  In practice we should never see
	 * more than 5 fragments.
	 *
	 * NOTE: The older version of this function (below) can handle any
	 * number of fragments. If needed, we can call this function,
	 * although it is less efficient.
	 */
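	/* nr_frags counts the linear area plus every page fragment. In the
	 * worst case the linear area is split across two descriptors below,
	 * so capping nr_frags at 23 keeps the total within the 24-entry
	 * desc[] array.
	 */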
	if (nr_frags > 23)
		return -EIO;

	memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));

	for (i = 0; i < nr_frags; i++) {
		/* If there is something in this element, let's get a
		 * descriptor from the ring and get the necessary data
		 */
		if (i == 0) {
			/* If the fragments are smaller than a standard MTU,
			 * then map them to a single descriptor in the Tx
			 * Desc ring. However, if they're larger, as is
			 * possible with support for jumbo packets, then
			 * split them each across 2 descriptors.
			 *
			 * This will work until we determine why the hardware
			 * doesn't seem to like large fragments.
			 */
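			/* For example, a 3000-byte linear area takes the
			 * else-branch below and is mapped as two 1500-byte
			 * descriptors, while a 1200-byte linear area is
			 * mapped with a single descriptor.
			 */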
			if ((skb->len - skb->data_len) <= 1514) {
				desc[frag].addr_hi = 0;
				/* Low 16bits are length, high is vlan and
				   unused currently so zero */
				desc[frag].len_vlan =
					skb->len - skb->data_len;

				/* NOTE: Here, the dma_addr_t returned from
				 * pci_map_single() is implicitly cast as a
				 * u32. Although dma_addr_t can be
				 * 64-bit, the address returned by
				 * pci_map_single() is always 32-bit
				 * addressable (as defined by the pci/dma
				 * subsystem)
				 */
				desc[frag++].addr_lo =
				    pci_map_single(etdev->pdev,
						   skb->data,
						   skb->len -
						   skb->data_len,
						   PCI_DMA_TODEVICE);
			} else {
				desc[frag].addr_hi = 0;
				desc[frag].len_vlan =
				    (skb->len - skb->data_len) / 2;

				/* NOTE: Here, the dma_addr_t returned from
				 * pci_map_single() is implicitly cast as a
				 * u32. Although dma_addr_t can be
				 * 64-bit, the address returned by
				 * pci_map_single() is always 32-bit
				 * addressable (as defined by the pci/dma
				 * subsystem)
				 */
				desc[frag++].addr_lo =
				    pci_map_single(etdev->pdev,
						   skb->data,
						   ((skb->len -
						     skb->data_len) / 2),
						   PCI_DMA_TODEVICE);
				desc[frag].addr_hi = 0;

				desc[frag].len_vlan =
				    (skb->len - skb->data_len) / 2;

				/* NOTE: Here, the dma_addr_t returned from
				 * pci_map_single() is implicitly cast as a
				 * u32. Although dma_addr_t can be
				 * 64-bit, the address returned by
				 * pci_map_single() is always 32-bit
				 * addressable (as defined by the pci/dma
				 * subsystem)
				 */
				desc[frag++].addr_lo =
				    pci_map_single(etdev->pdev,
						   skb->data +
						   ((skb->len -
						     skb->data_len) / 2),
						   ((skb->len -
						     skb->data_len) / 2),
						   PCI_DMA_TODEVICE);
			}
		} else {
			desc[frag].addr_hi = 0;
			desc[frag].len_vlan =
					frags[i - 1].size;

			/* NOTE: Here, the dma_addr_t returned from
			 * pci_map_page() is implicitly cast as a u32.
			 * Although dma_addr_t can be 64-bit, the address
			 * returned by pci_map_page() is always 32-bit
			 * addressable (as defined by the pci/dma subsystem)
			 */
			desc[frag++].addr_lo =
			    pci_map_page(etdev->pdev,
					 frags[i - 1].page,
					 frags[i - 1].page_offset,
					 frags[i - 1].size,
					 PCI_DMA_TODEVICE);
		}
	}

	if (frag == 0)
		return -EIO;

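	/* Descriptor flag bits, as the values below are used: bit 0 marks the
	 * last descriptor of the packet, bit 1 the first, and bit 2 requests
	 * an interrupt when the descriptor completes, so 0x5 means "last +
	 * interrupt", 0x1 "last only" and 0x2 "first".
	 */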
	if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
		if (++etdev->tx_ring.since_irq == PARM_TX_NUM_BUFS_DEF) {
			/* Last element & Interrupt flag */
			desc[frag - 1].flags = 0x5;
			etdev->tx_ring.since_irq = 0;
		} else { /* Last element */
			desc[frag - 1].flags = 0x1;
		}
	} else
		desc[frag - 1].flags = 0x5;

	desc[0].flags |= 2;	/* First element flag */

	tcb->index_start = etdev->tx_ring.send_idx;
	tcb->stale = 0;

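	/* send_idx keeps the ring position in its low 10 bits (extracted with
	 * INDEX10()) plus a wrap toggle bit, ET_DMA10_WRAP, above them; this
	 * is a common scheme so that producer and consumer positions can
	 * still be compared after the ring wraps. Because of the wrap, the
	 * descriptor copy below may be split into two memcpy()s.
	 */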
	spin_lock_irqsave(&etdev->send_hw_lock, flags);

	thiscopy = NUM_DESC_PER_RING_TX -
				INDEX10(etdev->tx_ring.send_idx);

	if (thiscopy >= frag) {
		remainder = 0;
		thiscopy = frag;
	} else {
		remainder = frag - thiscopy;
	}

	memcpy(etdev->tx_ring.tx_desc_ring +
	       INDEX10(etdev->tx_ring.send_idx), desc,
	       sizeof(struct tx_desc) * thiscopy);

	add_10bit(&etdev->tx_ring.send_idx, thiscopy);

	if (INDEX10(etdev->tx_ring.send_idx) == 0 ||
		    INDEX10(etdev->tx_ring.send_idx) == NUM_DESC_PER_RING_TX) {
		etdev->tx_ring.send_idx &= ~ET_DMA10_MASK;
		etdev->tx_ring.send_idx ^= ET_DMA10_WRAP;
	}

	if (remainder) {
		memcpy(etdev->tx_ring.tx_desc_ring,
		       desc + thiscopy,
		       sizeof(struct tx_desc) * remainder);

		add_10bit(&etdev->tx_ring.send_idx, remainder);
	}

	if (INDEX10(etdev->tx_ring.send_idx) == 0) {
		if (etdev->tx_ring.send_idx)
			tcb->index = NUM_DESC_PER_RING_TX - 1;
		else
			tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1);
	} else
		tcb->index = etdev->tx_ring.send_idx - 1;

	spin_lock(&etdev->TCBSendQLock);

	if (etdev->tx_ring.send_tail)
		etdev->tx_ring.send_tail->next = tcb;
	else
		etdev->tx_ring.send_head = tcb;

	etdev->tx_ring.send_tail = tcb;

	WARN_ON(tcb->next != NULL);

	etdev->tx_ring.used++;

	spin_unlock(&etdev->TCBSendQLock);

	/* Write the new write pointer back to the device. */
	writel(etdev->tx_ring.send_idx,
	       &etdev->regs->txdma.service_request);

	/* For Gig only, we use Tx Interrupt coalescing.  Enable the software
	 * timer to wake us up if this packet isn't followed by N more.
	 */
	if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
		writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
		       &etdev->regs->global.watchdog_timer);
	}
	spin_unlock_irqrestore(&etdev->send_hw_lock, flags);

	return 0;
}

/**
 * et131x_free_send_packet - Recycle a struct tcb
 * @etdev: pointer to our adapter
 * @tcb: pointer to struct tcb
 *
 * Complete the packet if necessary
 * Assumption - Send spinlock has been acquired
 */
inline void et131x_free_send_packet(struct et131x_adapter *etdev,
						struct tcb *tcb)
{
	unsigned long flags;
	struct tx_desc *desc = NULL;
	struct net_device_stats *stats = &etdev->net_stats;

	if (tcb->flags & fMP_DEST_BROAD)
		atomic_inc(&etdev->Stats.brdcstxmt);
	else if (tcb->flags & fMP_DEST_MULTI)
		atomic_inc(&etdev->Stats.multixmt);
	else
		atomic_inc(&etdev->Stats.unixmt);

	if (tcb->skb) {
		stats->tx_bytes += tcb->skb->len;

		/* Iterate through the TX descriptors on the ring
		 * corresponding to this packet and unmap the fragments
		 * they point to
		 */
		do {
			desc = (struct tx_desc *)(etdev->tx_ring.tx_desc_ring +
						INDEX10(tcb->index_start));

			pci_unmap_single(etdev->pdev,
					 desc->addr_lo,
					 desc->len_vlan, PCI_DMA_TODEVICE);

			add_10bit(&tcb->index_start, 1);
			if (INDEX10(tcb->index_start) >=
							NUM_DESC_PER_RING_TX) {
				tcb->index_start &= ~ET_DMA10_MASK;
				tcb->index_start ^= ET_DMA10_WRAP;
			}
		} while (desc != (etdev->tx_ring.tx_desc_ring +
				INDEX10(tcb->index)));

		dev_kfree_skb_any(tcb->skb);
	}

	memset(tcb, 0, sizeof(struct tcb));

	/* Add the TCB to the Ready Q */
	spin_lock_irqsave(&etdev->TCBReadyQLock, flags);

	etdev->Stats.opackets++;

	if (etdev->tx_ring.tcb_qtail)
		etdev->tx_ring.tcb_qtail->next = tcb;
	else
		/* Apparently ready Q is empty. */
		etdev->tx_ring.tcb_qhead = tcb;

	etdev->tx_ring.tcb_qtail = tcb;

	spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
	WARN_ON(etdev->tx_ring.used < 0);
}

/**
 * et131x_free_busy_send_packets - Free and complete the stopped active sends
 * @etdev: pointer to our adapter
 *
 * Assumption - Send spinlock has been acquired
 */
void et131x_free_busy_send_packets(struct et131x_adapter *etdev)
{
	struct tcb *tcb;
	unsigned long flags;
	u32 freed = 0;

	/* Any packets being sent? Check the first TCB on the send list */
	spin_lock_irqsave(&etdev->TCBSendQLock, flags);

	tcb = etdev->tx_ring.send_head;

	while (tcb != NULL && freed < NUM_TCB) {
		struct tcb *next = tcb->next;

		etdev->tx_ring.send_head = next;

		if (next == NULL)
			etdev->tx_ring.send_tail = NULL;

		etdev->tx_ring.used--;

		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);

		freed++;
		et131x_free_send_packet(etdev, tcb);

		spin_lock_irqsave(&etdev->TCBSendQLock, flags);

		tcb = etdev->tx_ring.send_head;
	}

	WARN_ON(freed == NUM_TCB);

	spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);

	etdev->tx_ring.used = 0;
}

/**
 * et131x_handle_send_interrupt - Interrupt handler for sending processing
 * @etdev: pointer to our adapter
 *
 * Re-claim the send resources, complete sends and get more to send from
 * the send wait queue.
 *
 * Assumption - Send spinlock has been acquired
 */
void et131x_handle_send_interrupt(struct et131x_adapter *etdev)
{
	unsigned long flags;
	u32 serviced;
	struct tcb *tcb;
	u32 index;

	serviced = readl(&etdev->regs->txdma.NewServiceComplete);
	index = INDEX10(serviced);

	/* Has the ring wrapped?  Process any descriptors that do not have
	 * the same "wrap" indicator as the current completion indicator
	 */
	spin_lock_irqsave(&etdev->TCBSendQLock, flags);

	tcb = etdev->tx_ring.send_head;

	while (tcb &&
	       ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
	       index < INDEX10(tcb->index)) {
		etdev->tx_ring.used--;
		etdev->tx_ring.send_head = tcb->next;
		if (tcb->next == NULL)
			etdev->tx_ring.send_tail = NULL;

		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
		et131x_free_send_packet(etdev, tcb);
		spin_lock_irqsave(&etdev->TCBSendQLock, flags);

		/* Go to the next packet */
		tcb = etdev->tx_ring.send_head;
	}
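	/* Now free descriptors that carry the same wrap indicator as the
	 * completion pointer; they are complete once the completion index
	 * has moved past them.
	 */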
	while (tcb &&
	       !((serviced ^ tcb->index) & ET_DMA10_WRAP)
	       && index > (tcb->index & ET_DMA10_MASK)) {
		etdev->tx_ring.used--;
		etdev->tx_ring.send_head = tcb->next;
		if (tcb->next == NULL)
			etdev->tx_ring.send_tail = NULL;

		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
		et131x_free_send_packet(etdev, tcb);
		spin_lock_irqsave(&etdev->TCBSendQLock, flags);

		/* Go to the next packet */
		tcb = etdev->tx_ring.send_head;
	}

	/* Wake up the queue when we hit a low-water mark */
	if (etdev->tx_ring.used <= NUM_TCB / 3)
		netif_wake_queue(etdev->netdev);

	spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
}