1 /*****************************************************************************
2 * *
3 * File: sge.c *
4 * $Revision: 1.26 $ *
5 * $Date: 2005/06/21 18:29:48 $ *
6 * Description: *
7 * DMA engine. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, see <http://www.gnu.org/licenses/>. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39 #include "common.h"
40
41 #include <linux/types.h>
42 #include <linux/errno.h>
43 #include <linux/pci.h>
44 #include <linux/ktime.h>
45 #include <linux/netdevice.h>
46 #include <linux/etherdevice.h>
47 #include <linux/if_vlan.h>
48 #include <linux/skbuff.h>
49 #include <linux/mm.h>
50 #include <linux/tcp.h>
51 #include <linux/ip.h>
52 #include <linux/in.h>
53 #include <linux/if_arp.h>
54 #include <linux/slab.h>
55 #include <linux/prefetch.h>
56
57 #include "cpl5_cmd.h"
58 #include "sge.h"
59 #include "regs.h"
60 #include "espi.h"
61
62 /* This belongs in if_ether.h */
63 #define ETH_P_CPL5 0xf
64
65 #define SGE_CMDQ_N 2
66 #define SGE_FREELQ_N 2
67 #define SGE_CMDQ0_E_N 1024
68 #define SGE_CMDQ1_E_N 128
69 #define SGE_FREEL_SIZE 4096
70 #define SGE_JUMBO_FREEL_SIZE 512
71 #define SGE_FREEL_REFILL_THRESH 16
72 #define SGE_RESPQ_E_N 1024
73 #define SGE_INTRTIMER_NRES 1000
74 #define SGE_RX_SM_BUF_SIZE 1536
75 #define SGE_TX_DESC_MAX_PLEN 16384
76
77 #define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)
78
79 /*
80 * Period of the TX buffer reclaim timer. This timer does not need to run
81 * frequently as TX buffers are usually reclaimed by new TX packets.
82 */
83 #define TX_RECLAIM_PERIOD (HZ / 4)
84
85 #define M_CMD_LEN 0x7fffffff
86 #define V_CMD_LEN(v) (v)
87 #define G_CMD_LEN(v) ((v) & M_CMD_LEN)
88 #define V_CMD_GEN1(v) ((v) << 31)
89 #define V_CMD_GEN2(v) (v)
90 #define F_CMD_DATAVALID (1 << 1)
91 #define F_CMD_SOP (1 << 2)
92 #define V_CMD_EOP(v) ((v) << 3)
93
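/*
 * Illustrative note (an assumption based on how these macros are used
 * below, not an authoritative HW description): len_gen packs the DMA
 * length into bits 0-30 (M_CMD_LEN) with the generation bit in bit 31
 * (V_CMD_GEN1), while the flags/gen2 word carries DATAVALID, SOP and EOP
 * plus a second copy of the generation bit (V_CMD_GEN2), written last so
 * a fully updated descriptor can be recognized.
 */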
94 /*
95 * Command queue, receive buffer list, and response queue descriptors.
96 */
97 #if defined(__BIG_ENDIAN_BITFIELD)
98 struct cmdQ_e {
99 u32 addr_lo;
100 u32 len_gen;
101 u32 flags;
102 u32 addr_hi;
103 };
104
105 struct freelQ_e {
106 u32 addr_lo;
107 u32 len_gen;
108 u32 gen2;
109 u32 addr_hi;
110 };
111
112 struct respQ_e {
113 u32 Qsleeping : 4;
114 u32 Cmdq1CreditReturn : 5;
115 u32 Cmdq1DmaComplete : 5;
116 u32 Cmdq0CreditReturn : 5;
117 u32 Cmdq0DmaComplete : 5;
118 u32 FreelistQid : 2;
119 u32 CreditValid : 1;
120 u32 DataValid : 1;
121 u32 Offload : 1;
122 u32 Eop : 1;
123 u32 Sop : 1;
124 u32 GenerationBit : 1;
125 u32 BufferLength;
126 };
127 #elif defined(__LITTLE_ENDIAN_BITFIELD)
128 struct cmdQ_e {
129 u32 len_gen;
130 u32 addr_lo;
131 u32 addr_hi;
132 u32 flags;
133 };
134
135 struct freelQ_e {
136 u32 len_gen;
137 u32 addr_lo;
138 u32 addr_hi;
139 u32 gen2;
140 };
141
142 struct respQ_e {
143 u32 BufferLength;
144 u32 GenerationBit : 1;
145 u32 Sop : 1;
146 u32 Eop : 1;
147 u32 Offload : 1;
148 u32 DataValid : 1;
149 u32 CreditValid : 1;
150 u32 FreelistQid : 2;
151 u32 Cmdq0DmaComplete : 5;
152 u32 Cmdq0CreditReturn : 5;
153 u32 Cmdq1DmaComplete : 5;
154 u32 Cmdq1CreditReturn : 5;
155 u32 Qsleeping : 4;
156 };
157 #endif
158
159 /*
160 * SW Context Command and Freelist Queue Descriptors
161 */
162 struct cmdQ_ce {
163 struct sk_buff *skb;
164 DEFINE_DMA_UNMAP_ADDR(dma_addr);
165 DEFINE_DMA_UNMAP_LEN(dma_len);
166 };
167
168 struct freelQ_ce {
169 struct sk_buff *skb;
170 DEFINE_DMA_UNMAP_ADDR(dma_addr);
171 DEFINE_DMA_UNMAP_LEN(dma_len);
172 };
173
174 /*
175 * SW command, freelist and response rings
176 */
177 struct cmdQ {
178 unsigned long status; /* HW DMA fetch status */
179 unsigned int in_use; /* # of in-use command descriptors */
180 unsigned int size; /* # of descriptors */
181 unsigned int processed; /* total # of descs HW has processed */
182 unsigned int cleaned; /* total # of descs SW has reclaimed */
183 unsigned int stop_thres; /* SW TX queue suspend threshold */
184 u16 pidx; /* producer index (SW) */
185 u16 cidx; /* consumer index (HW) */
186 u8 genbit; /* current generation (=valid) bit */
187 u8 sop; /* is next entry start of packet? */
188 struct cmdQ_e *entries; /* HW command descriptor Q */
189 struct cmdQ_ce *centries; /* SW command context descriptor Q */
190 dma_addr_t dma_addr; /* DMA addr HW command descriptor Q */
191 spinlock_t lock; /* Lock to protect cmdQ enqueuing */
192 };
193
194 struct freelQ {
195 unsigned int credits; /* # of available RX buffers */
196 unsigned int size; /* free list capacity */
197 u16 pidx; /* producer index (SW) */
198 u16 cidx; /* consumer index (HW) */
199 u16 rx_buffer_size; /* Buffer size on this free list */
200 u16 dma_offset; /* DMA offset to align IP headers */
201 u16 recycleq_idx; /* skb recycle q to use */
202 u8 genbit; /* current generation (=valid) bit */
203 struct freelQ_e *entries; /* HW freelist descriptor Q */
204 struct freelQ_ce *centries; /* SW freelist context descriptor Q */
205 dma_addr_t dma_addr; /* DMA addr HW freelist descriptor Q */
206 };
207
208 struct respQ {
209 unsigned int credits; /* credits to be returned to SGE */
210 unsigned int size; /* # of response Q descriptors */
211 u16 cidx; /* consumer index (SW) */
212 u8 genbit; /* current generation(=valid) bit */
213 struct respQ_e *entries; /* HW response descriptor Q */
214 dma_addr_t dma_addr; /* DMA addr HW response descriptor Q */
215 };
216
217 /* Bit flags for cmdQ.status */
218 enum {
219 CMDQ_STAT_RUNNING = 1, /* fetch engine is running */
220 CMDQ_STAT_LAST_PKT_DB = 2 /* last packet rung the doorbell */
221 };
222
223 /* T204 TX SW scheduler */
224
225 /* Per T204 TX port */
226 struct sched_port {
227 unsigned int avail; /* available bits - quota */
228 unsigned int drain_bits_per_1024ns; /* drain rate */
229 unsigned int speed; /* drain rate, mbps */
230 unsigned int mtu; /* mtu size */
231 struct sk_buff_head skbq; /* pending skbs */
232 };
233
234 /* Per T204 device */
235 struct sched {
236 ktime_t last_updated; /* last time quotas were computed */
237 unsigned int max_avail; /* max bits to be sent to any port */
238 unsigned int port; /* port index (round robin ports) */
239 unsigned int num; /* num skbs in per port queues */
240 struct sched_port p[MAX_NPORTS];
241 struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */
242 struct sge *sge;
243 };
244
245 static void restart_sched(struct tasklet_struct *t);
246
247
248 /*
249 * Main SGE data structure
250 *
251 * Interrupts are handled by a single CPU and it is likely that on a MP system
252 * the application is migrated to another CPU. In that scenario, we try to
253  * separate the RX (in irq context) and TX state in order to decrease memory
254 * contention.
255 */
256 struct sge {
257 struct adapter *adapter; /* adapter backpointer */
258 struct net_device *netdev; /* netdevice backpointer */
259 struct freelQ freelQ[SGE_FREELQ_N]; /* buffer free lists */
260 struct respQ respQ; /* response Q */
261 unsigned long stopped_tx_queues; /* bitmap of suspended Tx queues */
262 unsigned int rx_pkt_pad; /* RX padding for L2 packets */
263 unsigned int jumbo_fl; /* jumbo freelist Q index */
264 unsigned int intrtimer_nres; /* no-resource interrupt timer */
265 unsigned int fixed_intrtimer;/* non-adaptive interrupt timer */
266 struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
267 struct timer_list espibug_timer;
268 unsigned long espibug_timeout;
269 struct sk_buff *espibug_skb[MAX_NPORTS];
270 u32 sge_control; /* shadow value of sge control reg */
271 struct sge_intr_counts stats;
272 struct sge_port_stats __percpu *port_stats[MAX_NPORTS];
273 struct sched *tx_sched;
274 struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
275 };
276
277 static const u8 ch_mac_addr[ETH_ALEN] = {
278 0x0, 0x7, 0x43, 0x0, 0x0, 0x0
279 };
280
281 /*
282 * stop tasklet and free all pending skb's
283 */
284 static void tx_sched_stop(struct sge *sge)
285 {
286 struct sched *s = sge->tx_sched;
287 int i;
288
289 tasklet_kill(&s->sched_tsk);
290
291 for (i = 0; i < MAX_NPORTS; i++)
292 		__skb_queue_purge(&s->p[i].skbq);
293 }
294
295 /*
296 * t1_sched_update_parms() is called when the MTU or link speed changes. It
297  * re-computes the scheduler parameters to cope with the change.
298 */
299 unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,
300 unsigned int mtu, unsigned int speed)
301 {
302 struct sched *s = sge->tx_sched;
303 struct sched_port *p = &s->p[port];
304 unsigned int max_avail_segs;
305
306 pr_debug("%s mtu=%d speed=%d\n", __func__, mtu, speed);
307 if (speed)
308 p->speed = speed;
309 if (mtu)
310 p->mtu = mtu;
311
312 if (speed || mtu) {
313 unsigned long long drain = 1024ULL * p->speed * (p->mtu - 40);
314 do_div(drain, (p->mtu + 50) * 1000);
315 p->drain_bits_per_1024ns = (unsigned int) drain;
316
317 if (p->speed < 1000)
318 p->drain_bits_per_1024ns =
319 90 * p->drain_bits_per_1024ns / 100;
320 }
321
322 if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) {
323 p->drain_bits_per_1024ns -= 16;
324 s->max_avail = max(4096U, p->mtu + 16 + 14 + 4);
325 max_avail_segs = max(1U, 4096 / (p->mtu - 40));
326 } else {
327 s->max_avail = 16384;
328 max_avail_segs = max(1U, 9000 / (p->mtu - 40));
329 }
330
331 pr_debug("t1_sched_update_parms: mtu %u speed %u max_avail %u "
332 "max_avail_segs %u drain_bits_per_1024ns %u\n", p->mtu,
333 p->speed, s->max_avail, max_avail_segs,
334 p->drain_bits_per_1024ns);
335
336 return max_avail_segs * (p->mtu - 40);
337 }
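/*
 * Worked example (illustrative only): for mtu = 1500 and speed = 1000,
 * drain = 1024 * 1000 * (1500 - 40) / ((1500 + 50) * 1000) ~= 964 bits
 * per 1024 ns, and on non-CHT204 boards the function returns
 * max(1U, 9000 / 1460) * 1460 = 8760.
 */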
338
339 #if 0
340
341 /*
342  * t1_sched_set_max_avail_bytes() tells the scheduler the maximum amount of
343 * data that can be pushed per port.
344 */
345 void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val)
346 {
347 struct sched *s = sge->tx_sched;
348 unsigned int i;
349
350 s->max_avail = val;
351 for (i = 0; i < MAX_NPORTS; i++)
352 t1_sched_update_parms(sge, i, 0, 0);
353 }
354
355 /*
356 * t1_sched_set_drain_bits_per_us() tells the scheduler at which rate a port
357 * is draining.
358 */
359 void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
360 unsigned int val)
361 {
362 struct sched *s = sge->tx_sched;
363 struct sched_port *p = &s->p[port];
364 p->drain_bits_per_1024ns = val * 1024 / 1000;
365 t1_sched_update_parms(sge, port, 0, 0);
366 }
367
368 #endif /* 0 */
369
370 /*
371 * tx_sched_init() allocates resources and does basic initialization.
372 */
373 static int tx_sched_init(struct sge *sge)
374 {
375 struct sched *s;
376 int i;
377
378 s = kzalloc(sizeof (struct sched), GFP_KERNEL);
379 if (!s)
380 return -ENOMEM;
381
382 pr_debug("tx_sched_init\n");
383 tasklet_setup(&s->sched_tsk, restart_sched);
384 s->sge = sge;
385 sge->tx_sched = s;
386
387 for (i = 0; i < MAX_NPORTS; i++) {
388 skb_queue_head_init(&s->p[i].skbq);
389 t1_sched_update_parms(sge, i, 1500, 1000);
390 }
391
392 return 0;
393 }
394
395 /*
396 * sched_update_avail() computes the delta since the last time it was called
397  * and updates the per port quota (number of bits that can be sent to any
398 * port).
399 */
400 static inline int sched_update_avail(struct sge *sge)
401 {
402 struct sched *s = sge->tx_sched;
403 ktime_t now = ktime_get();
404 unsigned int i;
405 long long delta_time_ns;
406
407 delta_time_ns = ktime_to_ns(ktime_sub(now, s->last_updated));
408
409 pr_debug("sched_update_avail delta=%lld\n", delta_time_ns);
410 if (delta_time_ns < 15000)
411 return 0;
412
413 for (i = 0; i < MAX_NPORTS; i++) {
414 struct sched_port *p = &s->p[i];
415 unsigned int delta_avail;
416
417 delta_avail = (p->drain_bits_per_1024ns * delta_time_ns) >> 13;
418 p->avail = min(p->avail + delta_avail, s->max_avail);
419 }
420
421 s->last_updated = now;
422
423 return 1;
424 }
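/*
 * Illustrative example: with drain_bits_per_1024ns ~= 964 (1Gb/s, 1500
 * MTU, see t1_sched_update_parms() above) and 50000 ns since the last
 * update, delta_avail = (964 * 50000) >> 13 ~= 5883; the ">> 13"
 * combines the per-1024 ns scaling (/1024) with a further divide by 8.
 */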
425
426 /*
427 * sched_skb() is called from two different places. In the tx path, any
428 * packet generating load on an output port will call sched_skb()
429 * (skb != NULL). In addition, sched_skb() is called from the irq/soft irq
430 * context (skb == NULL).
431 * The scheduler only returns a skb (which will then be sent) if the
432 * length of the skb is <= the current quota of the output port.
433 */
434 static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
435 unsigned int credits)
436 {
437 struct sched *s = sge->tx_sched;
438 struct sk_buff_head *skbq;
439 unsigned int i, len, update = 1;
440
441 pr_debug("sched_skb %p\n", skb);
442 if (!skb) {
443 if (!s->num)
444 return NULL;
445 } else {
446 skbq = &s->p[skb->dev->if_port].skbq;
447 __skb_queue_tail(skbq, skb);
448 s->num++;
449 skb = NULL;
450 }
451
452 if (credits < MAX_SKB_FRAGS + 1)
453 goto out;
454
455 again:
456 for (i = 0; i < MAX_NPORTS; i++) {
457 s->port = (s->port + 1) & (MAX_NPORTS - 1);
458 skbq = &s->p[s->port].skbq;
459
460 skb = skb_peek(skbq);
461
462 if (!skb)
463 continue;
464
465 len = skb->len;
466 if (len <= s->p[s->port].avail) {
467 s->p[s->port].avail -= len;
468 s->num--;
469 __skb_unlink(skb, skbq);
470 goto out;
471 }
472 skb = NULL;
473 }
474
475 if (update-- && sched_update_avail(sge))
476 goto again;
477
478 out:
479 /* If there are more pending skbs, we use the hardware to schedule us
480 * again.
481 */
482 if (s->num && !skb) {
483 struct cmdQ *q = &sge->cmdQ[0];
484 clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
485 if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
486 set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
487 writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
488 }
489 }
490 pr_debug("sched_skb ret %p\n", skb);
491
492 return skb;
493 }
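/*
 * Descriptive note on the scan above: ports are visited round-robin
 * starting after the last serviced port, and the head skb of the first
 * port whose quota (avail) covers skb->len is dequeued.  If no port
 * qualifies, the quotas are topped up once via sched_update_avail() and
 * the scan is retried before giving up.
 */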
494
495 /*
496 * PIO to indicate that memory mapped Q contains valid descriptor(s).
497 */
498 static inline void doorbell_pio(struct adapter *adapter, u32 val)
499 {
500 wmb();
501 writel(val, adapter->regs + A_SG_DOORBELL);
502 }
503
504 /*
505 * Frees all RX buffers on the freelist Q. The caller must make sure that
506 * the SGE is turned off before calling this function.
507 */
508 static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
509 {
510 unsigned int cidx = q->cidx;
511
512 while (q->credits--) {
513 struct freelQ_ce *ce = &q->centries[cidx];
514
515 dma_unmap_single(&pdev->dev, dma_unmap_addr(ce, dma_addr),
516 dma_unmap_len(ce, dma_len), DMA_FROM_DEVICE);
517 dev_kfree_skb(ce->skb);
518 ce->skb = NULL;
519 if (++cidx == q->size)
520 cidx = 0;
521 }
522 }
523
524 /*
525 * Free RX free list and response queue resources.
526 */
527 static void free_rx_resources(struct sge *sge)
528 {
529 struct pci_dev *pdev = sge->adapter->pdev;
530 unsigned int size, i;
531
532 if (sge->respQ.entries) {
533 size = sizeof(struct respQ_e) * sge->respQ.size;
534 dma_free_coherent(&pdev->dev, size, sge->respQ.entries,
535 sge->respQ.dma_addr);
536 }
537
538 for (i = 0; i < SGE_FREELQ_N; i++) {
539 struct freelQ *q = &sge->freelQ[i];
540
541 if (q->centries) {
542 free_freelQ_buffers(pdev, q);
543 kfree(q->centries);
544 }
545 if (q->entries) {
546 size = sizeof(struct freelQ_e) * q->size;
547 dma_free_coherent(&pdev->dev, size, q->entries,
548 q->dma_addr);
549 }
550 }
551 }
552
553 /*
554 * Allocates basic RX resources, consisting of memory mapped freelist Qs and a
555 * response queue.
556 */
557 static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
558 {
559 struct pci_dev *pdev = sge->adapter->pdev;
560 unsigned int size, i;
561
562 for (i = 0; i < SGE_FREELQ_N; i++) {
563 struct freelQ *q = &sge->freelQ[i];
564
565 q->genbit = 1;
566 q->size = p->freelQ_size[i];
567 q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
568 size = sizeof(struct freelQ_e) * q->size;
569 q->entries = dma_alloc_coherent(&pdev->dev, size,
570 &q->dma_addr, GFP_KERNEL);
571 if (!q->entries)
572 goto err_no_mem;
573
574 size = sizeof(struct freelQ_ce) * q->size;
575 q->centries = kzalloc(size, GFP_KERNEL);
576 if (!q->centries)
577 goto err_no_mem;
578 }
579
580 /*
581 * Calculate the buffer sizes for the two free lists. FL0 accommodates
582 * regular sized Ethernet frames, FL1 is sized not to exceed 16K,
583 * including all the sk_buff overhead.
584 *
585 * Note: For T2 FL0 and FL1 are reversed.
586 */
587 sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
588 sizeof(struct cpl_rx_data) +
589 sge->freelQ[!sge->jumbo_fl].dma_offset;
590
591 size = (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
592
593 sge->freelQ[sge->jumbo_fl].rx_buffer_size = size;
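/*
 * Example of the resulting sizes (illustrative, assuming rx_pkt_pad == 0
 * so dma_offset == NET_IP_ALIGN): the small free list uses buffers of
 * 1536 + sizeof(struct cpl_rx_data) + NET_IP_ALIGN bytes, while the jumbo
 * free list uses 16KB minus the skb_shared_info overhead.
 */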
594
595 /*
596 * Setup which skb recycle Q should be used when recycling buffers from
597 * each free list.
598 */
599 sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0;
600 sge->freelQ[sge->jumbo_fl].recycleq_idx = 1;
601
602 sge->respQ.genbit = 1;
603 sge->respQ.size = SGE_RESPQ_E_N;
604 sge->respQ.credits = 0;
605 size = sizeof(struct respQ_e) * sge->respQ.size;
606 sge->respQ.entries =
607 dma_alloc_coherent(&pdev->dev, size, &sge->respQ.dma_addr,
608 GFP_KERNEL);
609 if (!sge->respQ.entries)
610 goto err_no_mem;
611 return 0;
612
613 err_no_mem:
614 free_rx_resources(sge);
615 return -ENOMEM;
616 }
617
618 /*
619 * Reclaims n TX descriptors and frees the buffers associated with them.
620 */
621 static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
622 {
623 struct cmdQ_ce *ce;
624 struct pci_dev *pdev = sge->adapter->pdev;
625 unsigned int cidx = q->cidx;
626
627 q->in_use -= n;
628 ce = &q->centries[cidx];
629 while (n--) {
630 if (likely(dma_unmap_len(ce, dma_len))) {
631 dma_unmap_single(&pdev->dev,
632 dma_unmap_addr(ce, dma_addr),
633 dma_unmap_len(ce, dma_len),
634 DMA_TO_DEVICE);
635 if (q->sop)
636 q->sop = 0;
637 }
638 if (ce->skb) {
639 dev_kfree_skb_any(ce->skb);
640 q->sop = 1;
641 }
642 ce++;
643 if (++cidx == q->size) {
644 cidx = 0;
645 ce = q->centries;
646 }
647 }
648 q->cidx = cidx;
649 }
650
651 /*
652 * Free TX resources.
653 *
654 * Assumes that SGE is stopped and all interrupts are disabled.
655 */
656 static void free_tx_resources(struct sge *sge)
657 {
658 struct pci_dev *pdev = sge->adapter->pdev;
659 unsigned int size, i;
660
661 for (i = 0; i < SGE_CMDQ_N; i++) {
662 struct cmdQ *q = &sge->cmdQ[i];
663
664 if (q->centries) {
665 if (q->in_use)
666 free_cmdQ_buffers(sge, q, q->in_use);
667 kfree(q->centries);
668 }
669 if (q->entries) {
670 size = sizeof(struct cmdQ_e) * q->size;
671 dma_free_coherent(&pdev->dev, size, q->entries,
672 q->dma_addr);
673 }
674 }
675 }
676
677 /*
678 * Allocates basic TX resources, consisting of memory mapped command Qs.
679 */
680 static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
681 {
682 struct pci_dev *pdev = sge->adapter->pdev;
683 unsigned int size, i;
684
685 for (i = 0; i < SGE_CMDQ_N; i++) {
686 struct cmdQ *q = &sge->cmdQ[i];
687
688 q->genbit = 1;
689 q->sop = 1;
690 q->size = p->cmdQ_size[i];
691 q->in_use = 0;
692 q->status = 0;
693 q->processed = q->cleaned = 0;
694 q->stop_thres = 0;
695 spin_lock_init(&q->lock);
696 size = sizeof(struct cmdQ_e) * q->size;
697 q->entries = dma_alloc_coherent(&pdev->dev, size,
698 &q->dma_addr, GFP_KERNEL);
699 if (!q->entries)
700 goto err_no_mem;
701
702 size = sizeof(struct cmdQ_ce) * q->size;
703 q->centries = kzalloc(size, GFP_KERNEL);
704 if (!q->centries)
705 goto err_no_mem;
706 }
707
708 /*
709 * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE
710 * only. For queue 0 set the stop threshold so we can handle one more
711 * packet from each port, plus reserve an additional 24 entries for
712 * Ethernet packets only. Queue 1 never suspends nor do we reserve
713 * space for Ethernet packets.
714 */
715 sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
716 (MAX_SKB_FRAGS + 1);
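/*
 * Example (illustrative): with 2 ports and a typical MAX_SKB_FRAGS of 17,
 * the stop threshold computed above is 2 * 18 = 36 descriptors.
 */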
717 return 0;
718
719 err_no_mem:
720 free_tx_resources(sge);
721 return -ENOMEM;
722 }
723
724 static inline void setup_ring_params(struct adapter *adapter, u64 addr,
725 u32 size, int base_reg_lo,
726 int base_reg_hi, int size_reg)
727 {
728 writel((u32)addr, adapter->regs + base_reg_lo);
729 writel(addr >> 32, adapter->regs + base_reg_hi);
730 writel(size, adapter->regs + size_reg);
731 }
732
733 /*
734 * Enable/disable VLAN acceleration.
735 */
736 void t1_vlan_mode(struct adapter *adapter, netdev_features_t features)
737 {
738 struct sge *sge = adapter->sge;
739
740 if (features & NETIF_F_HW_VLAN_CTAG_RX)
741 sge->sge_control |= F_VLAN_XTRACT;
742 else
743 sge->sge_control &= ~F_VLAN_XTRACT;
744 if (adapter->open_device_map) {
745 writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
746 readl(adapter->regs + A_SG_CONTROL); /* flush */
747 }
748 }
749
750 /*
751  * Programs the various SGE registers. The engine is not yet enabled at this
752  * point, but sge->sge_control is set up and ready to go.
753 */
754 static void configure_sge(struct sge *sge, struct sge_params *p)
755 {
756 struct adapter *ap = sge->adapter;
757
758 writel(0, ap->regs + A_SG_CONTROL);
759 setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
760 A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
761 setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
762 A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
763 setup_ring_params(ap, sge->freelQ[0].dma_addr,
764 sge->freelQ[0].size, A_SG_FL0BASELWR,
765 A_SG_FL0BASEUPR, A_SG_FL0SIZE);
766 setup_ring_params(ap, sge->freelQ[1].dma_addr,
767 sge->freelQ[1].size, A_SG_FL1BASELWR,
768 A_SG_FL1BASEUPR, A_SG_FL1SIZE);
769
770 /* The threshold comparison uses <. */
771 writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD);
772
773 setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
774 A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
775 writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);
776
777 sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
778 F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
779 V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
780 V_RX_PKT_OFFSET(sge->rx_pkt_pad);
781
782 #if defined(__BIG_ENDIAN_BITFIELD)
783 sge->sge_control |= F_ENABLE_BIG_ENDIAN;
784 #endif
785
786 /* Initialize no-resource timer */
787 sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);
788
789 t1_sge_set_coalesce_params(sge, p);
790 }
791
792 /*
793 * Return the payload capacity of the jumbo free-list buffers.
794 */
795 static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
796 {
797 return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
798 sge->freelQ[sge->jumbo_fl].dma_offset -
799 sizeof(struct cpl_rx_data);
800 }
801
802 /*
803 * Frees all SGE related resources and the sge structure itself
804 */
805 void t1_sge_destroy(struct sge *sge)
806 {
807 int i;
808
809 for_each_port(sge->adapter, i)
810 free_percpu(sge->port_stats[i]);
811
812 kfree(sge->tx_sched);
813 free_tx_resources(sge);
814 free_rx_resources(sge);
815 kfree(sge);
816 }
817
818 /*
819 * Allocates new RX buffers on the freelist Q (and tracks them on the freelist
820 * context Q) until the Q is full or alloc_skb fails.
821 *
822 * It is possible that the generation bits already match, indicating that the
823 * buffer is already valid and nothing needs to be done. This happens when we
824 * copied a received buffer into a new sk_buff during the interrupt processing.
825 *
826 * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad),
827 * we specify a RX_OFFSET in order to make sure that the IP header is 4B
828 * aligned.
829 */
830 static void refill_free_list(struct sge *sge, struct freelQ *q)
831 {
832 struct pci_dev *pdev = sge->adapter->pdev;
833 struct freelQ_ce *ce = &q->centries[q->pidx];
834 struct freelQ_e *e = &q->entries[q->pidx];
835 unsigned int dma_len = q->rx_buffer_size - q->dma_offset;
836
837 while (q->credits < q->size) {
838 struct sk_buff *skb;
839 dma_addr_t mapping;
840
841 skb = dev_alloc_skb(q->rx_buffer_size);
842 if (!skb)
843 break;
844
845 skb_reserve(skb, q->dma_offset);
846 mapping = dma_map_single(&pdev->dev, skb->data, dma_len,
847 DMA_FROM_DEVICE);
848 skb_reserve(skb, sge->rx_pkt_pad);
849
850 ce->skb = skb;
851 dma_unmap_addr_set(ce, dma_addr, mapping);
852 dma_unmap_len_set(ce, dma_len, dma_len);
853 e->addr_lo = (u32)mapping;
854 e->addr_hi = (u64)mapping >> 32;
855 e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
856 wmb();
857 e->gen2 = V_CMD_GEN2(q->genbit);
858
859 e++;
860 ce++;
861 if (++q->pidx == q->size) {
862 q->pidx = 0;
863 q->genbit ^= 1;
864 ce = q->centries;
865 e = q->entries;
866 }
867 q->credits++;
868 }
869 }
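/*
 * Descriptive note on the two skb_reserve() calls above: the dma_offset
 * reserve happens before dma_map_single() so the buffer the SGE writes
 * into leaves the IP header 4-byte aligned, whereas the rx_pkt_pad
 * reserve only advances skb->data past padding the hardware itself adds.
 */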
870
871 /*
872 * Calls refill_free_list for both free lists. If we cannot fill at least 1/4
873 * of both rings, we go into 'few interrupt mode' in order to give the system
874 * time to free up resources.
875 */
876 static void freelQs_empty(struct sge *sge)
877 {
878 struct adapter *adapter = sge->adapter;
879 u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE);
880 u32 irqholdoff_reg;
881
882 refill_free_list(sge, &sge->freelQ[0]);
883 refill_free_list(sge, &sge->freelQ[1]);
884
885 if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) &&
886 sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) {
887 irq_reg |= F_FL_EXHAUSTED;
888 irqholdoff_reg = sge->fixed_intrtimer;
889 } else {
890 /* Clear the F_FL_EXHAUSTED interrupts for now */
891 irq_reg &= ~F_FL_EXHAUSTED;
892 irqholdoff_reg = sge->intrtimer_nres;
893 }
894 writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER);
895 writel(irq_reg, adapter->regs + A_SG_INT_ENABLE);
896
897 /* We reenable the Qs to force a freelist GTS interrupt later */
898 doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE);
899 }
900
901 #define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA)
902 #define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
903 #define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \
904 F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
905
906 /*
907 * Disable SGE Interrupts
908 */
909 void t1_sge_intr_disable(struct sge *sge)
910 {
911 u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
912
913 writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
914 writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
915 }
916
917 /*
918 * Enable SGE interrupts.
919 */
920 void t1_sge_intr_enable(struct sge *sge)
921 {
922 u32 en = SGE_INT_ENABLE;
923 u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
924
925 if (sge->adapter->port[0].dev->hw_features & NETIF_F_TSO)
926 en &= ~F_PACKET_TOO_BIG;
927 writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
928 writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
929 }
930
931 /*
932 * Clear SGE interrupts.
933 */
934 void t1_sge_intr_clear(struct sge *sge)
935 {
936 writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
937 writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
938 }
939
940 /*
941 * SGE 'Error' interrupt handler
942 */
943 bool t1_sge_intr_error_handler(struct sge *sge)
944 {
945 struct adapter *adapter = sge->adapter;
946 u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);
947 bool wake = false;
948
949 if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
950 cause &= ~F_PACKET_TOO_BIG;
951 if (cause & F_RESPQ_EXHAUSTED)
952 sge->stats.respQ_empty++;
953 if (cause & F_RESPQ_OVERFLOW) {
954 sge->stats.respQ_overflow++;
955 pr_alert("%s: SGE response queue overflow\n",
956 adapter->name);
957 }
958 if (cause & F_FL_EXHAUSTED) {
959 sge->stats.freelistQ_empty++;
960 freelQs_empty(sge);
961 }
962 if (cause & F_PACKET_TOO_BIG) {
963 sge->stats.pkt_too_big++;
964 pr_alert("%s: SGE max packet size exceeded\n",
965 adapter->name);
966 }
967 if (cause & F_PACKET_MISMATCH) {
968 sge->stats.pkt_mismatch++;
969 pr_alert("%s: SGE packet mismatch\n", adapter->name);
970 }
971 if (cause & SGE_INT_FATAL) {
972 t1_interrupts_disable(adapter);
973 adapter->pending_thread_intr |= F_PL_INTR_SGE_ERR;
974 wake = true;
975 }
976
977 writel(cause, adapter->regs + A_SG_INT_CAUSE);
978 return wake;
979 }
980
981 const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge)
982 {
983 return &sge->stats;
984 }
985
986 void t1_sge_get_port_stats(const struct sge *sge, int port,
987 struct sge_port_stats *ss)
988 {
989 int cpu;
990
991 memset(ss, 0, sizeof(*ss));
992 for_each_possible_cpu(cpu) {
993 struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu);
994
995 ss->rx_cso_good += st->rx_cso_good;
996 ss->tx_cso += st->tx_cso;
997 ss->tx_tso += st->tx_tso;
998 ss->tx_need_hdrroom += st->tx_need_hdrroom;
999 ss->vlan_xtract += st->vlan_xtract;
1000 ss->vlan_insert += st->vlan_insert;
1001 }
1002 }
1003
1004 /**
1005 * recycle_fl_buf - recycle a free list buffer
1006 * @fl: the free list
1007 * @idx: index of buffer to recycle
1008 *
1009 * Recycles the specified buffer on the given free list by adding it at
1010 * the next available slot on the list.
1011 */
1012 static void recycle_fl_buf(struct freelQ *fl, int idx)
1013 {
1014 struct freelQ_e *from = &fl->entries[idx];
1015 struct freelQ_e *to = &fl->entries[fl->pidx];
1016
1017 fl->centries[fl->pidx] = fl->centries[idx];
1018 to->addr_lo = from->addr_lo;
1019 to->addr_hi = from->addr_hi;
1020 to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit);
1021 wmb();
1022 to->gen2 = V_CMD_GEN2(fl->genbit);
1023 fl->credits++;
1024
1025 if (++fl->pidx == fl->size) {
1026 fl->pidx = 0;
1027 fl->genbit ^= 1;
1028 }
1029 }
1030
1031 static int copybreak __read_mostly = 256;
1032 module_param(copybreak, int, 0);
1033 MODULE_PARM_DESC(copybreak, "Receive copy threshold");
1034
1035 /**
1036 * get_packet - return the next ingress packet buffer
1037 * @adapter: the adapter that received the packet
1038 * @fl: the SGE free list holding the packet
1039 * @len: the actual packet length, excluding any SGE padding
1040 *
1041 * Get the next packet from a free list and complete setup of the
1042 * sk_buff. If the packet is small we make a copy and recycle the
1043  *	original buffer, otherwise we use the original buffer itself. If the
1044  *	packet is too big to copy, or the small-packet copy cannot be
1045  *	allocated, and the free list is nearly out of buffers, the packet
1046  *	is dropped and its buffer is recycled instead of being passed up
1047  *	the stack.
1048 */
1049 static inline struct sk_buff *get_packet(struct adapter *adapter,
1050 struct freelQ *fl, unsigned int len)
1051 {
1052 const struct freelQ_ce *ce = &fl->centries[fl->cidx];
1053 struct pci_dev *pdev = adapter->pdev;
1054 struct sk_buff *skb;
1055
1056 if (len < copybreak) {
1057 skb = napi_alloc_skb(&adapter->napi, len);
1058 if (!skb)
1059 goto use_orig_buf;
1060
1061 skb_put(skb, len);
1062 dma_sync_single_for_cpu(&pdev->dev,
1063 dma_unmap_addr(ce, dma_addr),
1064 dma_unmap_len(ce, dma_len),
1065 DMA_FROM_DEVICE);
1066 skb_copy_from_linear_data(ce->skb, skb->data, len);
1067 dma_sync_single_for_device(&pdev->dev,
1068 dma_unmap_addr(ce, dma_addr),
1069 dma_unmap_len(ce, dma_len),
1070 DMA_FROM_DEVICE);
1071 recycle_fl_buf(fl, fl->cidx);
1072 return skb;
1073 }
1074
1075 use_orig_buf:
1076 if (fl->credits < 2) {
1077 recycle_fl_buf(fl, fl->cidx);
1078 return NULL;
1079 }
1080
1081 dma_unmap_single(&pdev->dev, dma_unmap_addr(ce, dma_addr),
1082 dma_unmap_len(ce, dma_len), DMA_FROM_DEVICE);
1083 skb = ce->skb;
1084 prefetch(skb->data);
1085
1086 skb_put(skb, len);
1087 return skb;
1088 }
1089
1090 /**
1091 * unexpected_offload - handle an unexpected offload packet
1092 * @adapter: the adapter
1093 * @fl: the free list that received the packet
1094 *
1095 * Called when we receive an unexpected offload packet (e.g., the TOE
1096 * function is disabled or the card is a NIC). Prints a message and
1097 * recycles the buffer.
1098 */
1099 static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
1100 {
1101 struct freelQ_ce *ce = &fl->centries[fl->cidx];
1102 struct sk_buff *skb = ce->skb;
1103
1104 dma_sync_single_for_cpu(&adapter->pdev->dev,
1105 dma_unmap_addr(ce, dma_addr),
1106 dma_unmap_len(ce, dma_len), DMA_FROM_DEVICE);
1107 pr_err("%s: unexpected offload packet, cmd %u\n",
1108 adapter->name, *skb->data);
1109 recycle_fl_buf(fl, fl->cidx);
1110 }
1111
1112 /*
1113 * T1/T2 SGE limits the maximum DMA size per TX descriptor to
1114 * SGE_TX_DESC_MAX_PLEN (16KB). If the PAGE_SIZE is larger than 16KB, the
1115 * stack might send more than SGE_TX_DESC_MAX_PLEN in a contiguous manner.
1116 * Note that the *_large_page_tx_descs stuff will be optimized out when
1117 * PAGE_SIZE <= SGE_TX_DESC_MAX_PLEN.
1118 *
1119  * compute_large_page_tx_descs() computes how many additional descriptors are
1120 * required to break down the stack's request.
1121 */
1122 static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
1123 {
1124 unsigned int count = 0;
1125
1126 if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
1127 unsigned int nfrags = skb_shinfo(skb)->nr_frags;
1128 unsigned int i, len = skb_headlen(skb);
1129 while (len > SGE_TX_DESC_MAX_PLEN) {
1130 count++;
1131 len -= SGE_TX_DESC_MAX_PLEN;
1132 }
1133 for (i = 0; nfrags--; i++) {
1134 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1135 len = skb_frag_size(frag);
1136 while (len > SGE_TX_DESC_MAX_PLEN) {
1137 count++;
1138 len -= SGE_TX_DESC_MAX_PLEN;
1139 }
1140 }
1141 }
1142 return count;
1143 }
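/*
 * Worked example (only meaningful when PAGE_SIZE > SGE_TX_DESC_MAX_PLEN):
 * a 40960-byte contiguous region needs two additional descriptors, since
 * it splits as 16384 + 16384 + 8192 and the final 8192-byte piece is
 * covered by the descriptor the caller accounts for anyway.
 */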
1144
1145 /*
1146 * Write a cmdQ entry.
1147 *
1148 * Since this function writes the 'flags' field, it must not be used to
1149 * write the first cmdQ entry.
1150 */
1151 static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping,
1152 unsigned int len, unsigned int gen,
1153 unsigned int eop)
1154 {
1155 BUG_ON(len > SGE_TX_DESC_MAX_PLEN);
1156
1157 e->addr_lo = (u32)mapping;
1158 e->addr_hi = (u64)mapping >> 32;
1159 e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen);
1160 e->flags = F_CMD_DATAVALID | V_CMD_EOP(eop) | V_CMD_GEN2(gen);
1161 }
1162
1163 /*
1164 * See comment for previous function.
1165 *
1166  * write_large_page_tx_descs() writes additional SGE tx descriptors if
1167 * *desc_len exceeds HW's capability.
1168 */
1169 static inline unsigned int write_large_page_tx_descs(unsigned int pidx,
1170 struct cmdQ_e **e,
1171 struct cmdQ_ce **ce,
1172 unsigned int *gen,
1173 dma_addr_t *desc_mapping,
1174 unsigned int *desc_len,
1175 unsigned int nfrags,
1176 struct cmdQ *q)
1177 {
1178 if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
1179 struct cmdQ_e *e1 = *e;
1180 struct cmdQ_ce *ce1 = *ce;
1181
1182 while (*desc_len > SGE_TX_DESC_MAX_PLEN) {
1183 *desc_len -= SGE_TX_DESC_MAX_PLEN;
1184 write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN,
1185 *gen, nfrags == 0 && *desc_len == 0);
1186 ce1->skb = NULL;
1187 dma_unmap_len_set(ce1, dma_len, 0);
1188 *desc_mapping += SGE_TX_DESC_MAX_PLEN;
1189 if (*desc_len) {
1190 ce1++;
1191 e1++;
1192 if (++pidx == q->size) {
1193 pidx = 0;
1194 *gen ^= 1;
1195 ce1 = q->centries;
1196 e1 = q->entries;
1197 }
1198 }
1199 }
1200 *e = e1;
1201 *ce = ce1;
1202 }
1203 return pidx;
1204 }
1205
1206 /*
1207 * Write the command descriptors to transmit the given skb starting at
1208 * descriptor pidx with the given generation.
1209 */
1210 static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
1211 unsigned int pidx, unsigned int gen,
1212 struct cmdQ *q)
1213 {
1214 dma_addr_t mapping, desc_mapping;
1215 struct cmdQ_e *e, *e1;
1216 struct cmdQ_ce *ce;
1217 unsigned int i, flags, first_desc_len, desc_len,
1218 nfrags = skb_shinfo(skb)->nr_frags;
1219
1220 e = e1 = &q->entries[pidx];
1221 ce = &q->centries[pidx];
1222
1223 mapping = dma_map_single(&adapter->pdev->dev, skb->data,
1224 skb_headlen(skb), DMA_TO_DEVICE);
1225
1226 desc_mapping = mapping;
1227 desc_len = skb_headlen(skb);
1228
1229 flags = F_CMD_DATAVALID | F_CMD_SOP |
1230 V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) |
1231 V_CMD_GEN2(gen);
1232 first_desc_len = (desc_len <= SGE_TX_DESC_MAX_PLEN) ?
1233 desc_len : SGE_TX_DESC_MAX_PLEN;
1234 e->addr_lo = (u32)desc_mapping;
1235 e->addr_hi = (u64)desc_mapping >> 32;
1236 e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen);
1237 ce->skb = NULL;
1238 dma_unmap_len_set(ce, dma_len, 0);
1239
1240 if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN &&
1241 desc_len > SGE_TX_DESC_MAX_PLEN) {
1242 desc_mapping += first_desc_len;
1243 desc_len -= first_desc_len;
1244 e1++;
1245 ce++;
1246 if (++pidx == q->size) {
1247 pidx = 0;
1248 gen ^= 1;
1249 e1 = q->entries;
1250 ce = q->centries;
1251 }
1252 pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
1253 &desc_mapping, &desc_len,
1254 nfrags, q);
1255
1256 if (likely(desc_len))
1257 write_tx_desc(e1, desc_mapping, desc_len, gen,
1258 nfrags == 0);
1259 }
1260
1261 ce->skb = NULL;
1262 dma_unmap_addr_set(ce, dma_addr, mapping);
1263 dma_unmap_len_set(ce, dma_len, skb_headlen(skb));
1264
1265 for (i = 0; nfrags--; i++) {
1266 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1267 e1++;
1268 ce++;
1269 if (++pidx == q->size) {
1270 pidx = 0;
1271 gen ^= 1;
1272 e1 = q->entries;
1273 ce = q->centries;
1274 }
1275
1276 mapping = skb_frag_dma_map(&adapter->pdev->dev, frag, 0,
1277 skb_frag_size(frag), DMA_TO_DEVICE);
1278 desc_mapping = mapping;
1279 desc_len = skb_frag_size(frag);
1280
1281 pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
1282 &desc_mapping, &desc_len,
1283 nfrags, q);
1284 if (likely(desc_len))
1285 write_tx_desc(e1, desc_mapping, desc_len, gen,
1286 nfrags == 0);
1287 ce->skb = NULL;
1288 dma_unmap_addr_set(ce, dma_addr, mapping);
1289 dma_unmap_len_set(ce, dma_len, skb_frag_size(frag));
1290 }
1291 ce->skb = skb;
1292 wmb();
1293 e->flags = flags;
1294 }
1295
1296 /*
1297 * Clean up completed Tx buffers.
1298 */
1299 static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
1300 {
1301 unsigned int reclaim = q->processed - q->cleaned;
1302
1303 if (reclaim) {
1304 pr_debug("reclaim_completed_tx processed:%d cleaned:%d\n",
1305 q->processed, q->cleaned);
1306 free_cmdQ_buffers(sge, q, reclaim);
1307 q->cleaned += reclaim;
1308 }
1309 }
1310
1311 /*
1312 * Called from tasklet. Checks the scheduler for any
1313 * pending skbs that can be sent.
1314 */
1315 static void restart_sched(struct tasklet_struct *t)
1316 {
1317 struct sched *s = from_tasklet(s, t, sched_tsk);
1318 struct sge *sge = s->sge;
1319 struct adapter *adapter = sge->adapter;
1320 struct cmdQ *q = &sge->cmdQ[0];
1321 struct sk_buff *skb;
1322 unsigned int credits, queued_skb = 0;
1323
1324 spin_lock(&q->lock);
1325 reclaim_completed_tx(sge, q);
1326
1327 credits = q->size - q->in_use;
1328 pr_debug("restart_sched credits=%d\n", credits);
1329 while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
1330 unsigned int genbit, pidx, count;
1331 count = 1 + skb_shinfo(skb)->nr_frags;
1332 count += compute_large_page_tx_descs(skb);
1333 q->in_use += count;
1334 genbit = q->genbit;
1335 pidx = q->pidx;
1336 q->pidx += count;
1337 if (q->pidx >= q->size) {
1338 q->pidx -= q->size;
1339 q->genbit ^= 1;
1340 }
1341 write_tx_descs(adapter, skb, pidx, genbit, q);
1342 credits = q->size - q->in_use;
1343 queued_skb = 1;
1344 }
1345
1346 if (queued_skb) {
1347 clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
1348 if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
1349 set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
1350 writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
1351 }
1352 }
1353 spin_unlock(&q->lock);
1354 }
1355
1356 /**
1357 * sge_rx - process an ingress ethernet packet
1358 * @sge: the sge structure
1359 * @fl: the free list that contains the packet buffer
1360 * @len: the packet length
1361 *
1362 * Process an ingress ethernet packet and deliver it to the stack.
1363 */
1364 static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
1365 {
1366 struct sk_buff *skb;
1367 const struct cpl_rx_pkt *p;
1368 struct adapter *adapter = sge->adapter;
1369 struct sge_port_stats *st;
1370 struct net_device *dev;
1371
1372 skb = get_packet(adapter, fl, len - sge->rx_pkt_pad);
1373 if (unlikely(!skb)) {
1374 sge->stats.rx_drops++;
1375 return;
1376 }
1377
1378 p = (const struct cpl_rx_pkt *) skb->data;
1379 if (p->iff >= adapter->params.nports) {
1380 kfree_skb(skb);
1381 return;
1382 }
1383 __skb_pull(skb, sizeof(*p));
1384
1385 st = this_cpu_ptr(sge->port_stats[p->iff]);
1386 dev = adapter->port[p->iff].dev;
1387
1388 skb->protocol = eth_type_trans(skb, dev);
1389 if ((dev->features & NETIF_F_RXCSUM) && p->csum == 0xffff &&
1390 skb->protocol == htons(ETH_P_IP) &&
1391 (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
1392 ++st->rx_cso_good;
1393 skb->ip_summed = CHECKSUM_UNNECESSARY;
1394 } else
1395 skb_checksum_none_assert(skb);
1396
1397 if (p->vlan_valid) {
1398 st->vlan_xtract++;
1399 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan));
1400 }
1401 netif_receive_skb(skb);
1402 }
1403
1404 /*
1405 * Returns true if a command queue has enough available descriptors that
1406 * we can resume Tx operation after temporarily disabling its packet queue.
1407 */
1408 static inline int enough_free_Tx_descs(const struct cmdQ *q)
1409 {
1410 unsigned int r = q->processed - q->cleaned;
1411
1412 return q->in_use - r < (q->size >> 1);
1413 }
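/*
 * Example (illustrative): with the default cmdQ0 size of 1024 entries,
 * Tx is considered resumable once fewer than 512 descriptors are still
 * outstanding (in_use minus those processed but not yet reclaimed).
 */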
1414
1415 /*
1416 * Called when sufficient space has become available in the SGE command queues
1417 * after the Tx packet schedulers have been suspended to restart the Tx path.
1418 */
1419 static void restart_tx_queues(struct sge *sge)
1420 {
1421 struct adapter *adap = sge->adapter;
1422 int i;
1423
1424 if (!enough_free_Tx_descs(&sge->cmdQ[0]))
1425 return;
1426
1427 for_each_port(adap, i) {
1428 struct net_device *nd = adap->port[i].dev;
1429
1430 if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) &&
1431 netif_running(nd)) {
1432 sge->stats.cmdQ_restarted[2]++;
1433 netif_wake_queue(nd);
1434 }
1435 }
1436 }
1437
1438 /*
1439 * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0
1440 * information.
1441 */
1442 static unsigned int update_tx_info(struct adapter *adapter,
1443 unsigned int flags,
1444 unsigned int pr0)
1445 {
1446 struct sge *sge = adapter->sge;
1447 struct cmdQ *cmdq = &sge->cmdQ[0];
1448
1449 cmdq->processed += pr0;
1450 if (flags & (F_FL0_ENABLE | F_FL1_ENABLE)) {
1451 freelQs_empty(sge);
1452 flags &= ~(F_FL0_ENABLE | F_FL1_ENABLE);
1453 }
1454 if (flags & F_CMDQ0_ENABLE) {
1455 clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);
1456
1457 if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
1458 !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
1459 set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
1460 writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
1461 }
1462 if (sge->tx_sched)
1463 tasklet_hi_schedule(&sge->tx_sched->sched_tsk);
1464
1465 flags &= ~F_CMDQ0_ENABLE;
1466 }
1467
1468 if (unlikely(sge->stopped_tx_queues != 0))
1469 restart_tx_queues(sge);
1470
1471 return flags;
1472 }
1473
1474 /*
1475 * Process SGE responses, up to the supplied budget. Returns the number of
1476 * responses processed. A negative budget is effectively unlimited.
1477 */
1478 static int process_responses(struct adapter *adapter, int budget)
1479 {
1480 struct sge *sge = adapter->sge;
1481 struct respQ *q = &sge->respQ;
1482 struct respQ_e *e = &q->entries[q->cidx];
1483 int done = 0;
1484 unsigned int flags = 0;
1485 unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
1486
1487 while (done < budget && e->GenerationBit == q->genbit) {
1488 flags |= e->Qsleeping;
1489
1490 cmdq_processed[0] += e->Cmdq0CreditReturn;
1491 cmdq_processed[1] += e->Cmdq1CreditReturn;
1492
1493 /* We batch updates to the TX side to avoid cacheline
1494 * ping-pong of TX state information on MP where the sender
1495 * might run on a different CPU than this function...
1496 */
1497 if (unlikely((flags & F_CMDQ0_ENABLE) || cmdq_processed[0] > 64)) {
1498 flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1499 cmdq_processed[0] = 0;
1500 }
1501
1502 if (unlikely(cmdq_processed[1] > 16)) {
1503 sge->cmdQ[1].processed += cmdq_processed[1];
1504 cmdq_processed[1] = 0;
1505 }
1506
1507 if (likely(e->DataValid)) {
1508 struct freelQ *fl = &sge->freelQ[e->FreelistQid];
1509
1510 BUG_ON(!e->Sop || !e->Eop);
1511 if (unlikely(e->Offload))
1512 unexpected_offload(adapter, fl);
1513 else
1514 sge_rx(sge, fl, e->BufferLength);
1515
1516 ++done;
1517
1518 /*
1519 * Note: this depends on each packet consuming a
1520 * single free-list buffer; cf. the BUG above.
1521 */
1522 if (++fl->cidx == fl->size)
1523 fl->cidx = 0;
1524 prefetch(fl->centries[fl->cidx].skb);
1525
1526 if (unlikely(--fl->credits <
1527 fl->size - SGE_FREEL_REFILL_THRESH))
1528 refill_free_list(sge, fl);
1529 } else
1530 sge->stats.pure_rsps++;
1531
1532 e++;
1533 if (unlikely(++q->cidx == q->size)) {
1534 q->cidx = 0;
1535 q->genbit ^= 1;
1536 e = q->entries;
1537 }
1538 prefetch(e);
1539
1540 if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
1541 writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
1542 q->credits = 0;
1543 }
1544 }
1545
1546 flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1547 sge->cmdQ[1].processed += cmdq_processed[1];
1548
1549 return done;
1550 }
1551
1552 static inline int responses_pending(const struct adapter *adapter)
1553 {
1554 const struct respQ *Q = &adapter->sge->respQ;
1555 const struct respQ_e *e = &Q->entries[Q->cidx];
1556
1557 return e->GenerationBit == Q->genbit;
1558 }
1559
1560 /*
1561 * A simpler version of process_responses() that handles only pure (i.e.,
1562  * non data-carrying) responses. Such responses are too light-weight to justify
1563 * calling a softirq when using NAPI, so we handle them specially in hard
1564  * interrupt context. The function starts at the response queue's current
1565  * entry, which the caller must ensure is a valid pure response. Returns 1 if it
1566 * encounters a valid data-carrying response, 0 otherwise.
1567 */
1568 static int process_pure_responses(struct adapter *adapter)
1569 {
1570 struct sge *sge = adapter->sge;
1571 struct respQ *q = &sge->respQ;
1572 struct respQ_e *e = &q->entries[q->cidx];
1573 const struct freelQ *fl = &sge->freelQ[e->FreelistQid];
1574 unsigned int flags = 0;
1575 unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
1576
1577 prefetch(fl->centries[fl->cidx].skb);
1578 if (e->DataValid)
1579 return 1;
1580
1581 do {
1582 flags |= e->Qsleeping;
1583
1584 cmdq_processed[0] += e->Cmdq0CreditReturn;
1585 cmdq_processed[1] += e->Cmdq1CreditReturn;
1586
1587 e++;
1588 if (unlikely(++q->cidx == q->size)) {
1589 q->cidx = 0;
1590 q->genbit ^= 1;
1591 e = q->entries;
1592 }
1593 prefetch(e);
1594
1595 if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
1596 writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
1597 q->credits = 0;
1598 }
1599 sge->stats.pure_rsps++;
1600 } while (e->GenerationBit == q->genbit && !e->DataValid);
1601
1602 flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1603 sge->cmdQ[1].processed += cmdq_processed[1];
1604
1605 return e->GenerationBit == q->genbit;
1606 }
1607
1608 /*
1609 * Handler for new data events when using NAPI. This does not need any locking
1610 * or protection from interrupts as data interrupts are off at this point and
1611 * other adapter interrupts do not interfere.
1612 */
1613 int t1_poll(struct napi_struct *napi, int budget)
1614 {
1615 struct adapter *adapter = container_of(napi, struct adapter, napi);
1616 int work_done = process_responses(adapter, budget);
1617
1618 if (likely(work_done < budget)) {
1619 napi_complete_done(napi, work_done);
1620 writel(adapter->sge->respQ.cidx,
1621 adapter->regs + A_SG_SLEEPING);
1622 }
1623 return work_done;
1624 }
1625
1626 irqreturn_t t1_interrupt_thread(int irq, void *data)
1627 {
1628 struct adapter *adapter = data;
1629 u32 pending_thread_intr;
1630
1631 spin_lock_irq(&adapter->async_lock);
1632 pending_thread_intr = adapter->pending_thread_intr;
1633 adapter->pending_thread_intr = 0;
1634 spin_unlock_irq(&adapter->async_lock);
1635
1636 if (!pending_thread_intr)
1637 return IRQ_NONE;
1638
1639 if (pending_thread_intr & F_PL_INTR_EXT)
1640 t1_elmer0_ext_intr_handler(adapter);
1641
1642 /* This error is fatal, interrupts remain off */
1643 if (pending_thread_intr & F_PL_INTR_SGE_ERR) {
1644 pr_alert("%s: encountered fatal error, operation suspended\n",
1645 adapter->name);
1646 t1_sge_stop(adapter->sge);
1647 return IRQ_HANDLED;
1648 }
1649
1650 spin_lock_irq(&adapter->async_lock);
1651 adapter->slow_intr_mask |= F_PL_INTR_EXT;
1652
1653 writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
1654 writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
1655 adapter->regs + A_PL_ENABLE);
1656 spin_unlock_irq(&adapter->async_lock);
1657
1658 return IRQ_HANDLED;
1659 }
1660
1661 irqreturn_t t1_interrupt(int irq, void *data)
1662 {
1663 struct adapter *adapter = data;
1664 struct sge *sge = adapter->sge;
1665 irqreturn_t handled;
1666
1667 if (likely(responses_pending(adapter))) {
1668 writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
1669
1670 if (napi_schedule_prep(&adapter->napi)) {
1671 if (process_pure_responses(adapter))
1672 __napi_schedule(&adapter->napi);
1673 else {
1674 /* no data, no NAPI needed */
1675 writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
1676 /* undo schedule_prep */
1677 napi_enable(&adapter->napi);
1678 }
1679 }
1680 return IRQ_HANDLED;
1681 }
1682
1683 spin_lock(&adapter->async_lock);
1684 handled = t1_slow_intr_handler(adapter);
1685 spin_unlock(&adapter->async_lock);
1686
1687 if (handled == IRQ_NONE)
1688 sge->stats.unhandled_irqs++;
1689
1690 return handled;
1691 }
1692
1693 /*
1694 * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
1695 *
1696 * The code figures out how many entries the sk_buff will require in the
1697 * cmdQ and updates the cmdQ data structure with the state once the enqueue
1698 * has complete. Then, it doesn't access the global structure anymore, but
1699 * uses the corresponding fields on the stack. In conjunction with a spinlock
1700 * around that code, we can make the function reentrant without holding the
1701 * lock when we actually enqueue (which might be expensive, especially on
1702 * architectures with IO MMUs).
1703 *
1704 * This runs with softirqs disabled.
1705 */
1706 static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
1707 unsigned int qid, struct net_device *dev)
1708 {
1709 struct sge *sge = adapter->sge;
1710 struct cmdQ *q = &sge->cmdQ[qid];
1711 unsigned int credits, pidx, genbit, count, use_sched_skb = 0;
1712
1713 spin_lock(&q->lock);
1714
1715 reclaim_completed_tx(sge, q);
1716
1717 pidx = q->pidx;
1718 credits = q->size - q->in_use;
1719 count = 1 + skb_shinfo(skb)->nr_frags;
1720 count += compute_large_page_tx_descs(skb);
1721
1722 /* Ethernet packet */
1723 if (unlikely(credits < count)) {
1724 if (!netif_queue_stopped(dev)) {
1725 netif_stop_queue(dev);
1726 set_bit(dev->if_port, &sge->stopped_tx_queues);
1727 sge->stats.cmdQ_full[2]++;
1728 pr_err("%s: Tx ring full while queue awake!\n",
1729 adapter->name);
1730 }
1731 spin_unlock(&q->lock);
1732 return NETDEV_TX_BUSY;
1733 }
1734
1735 if (unlikely(credits - count < q->stop_thres)) {
1736 netif_stop_queue(dev);
1737 set_bit(dev->if_port, &sge->stopped_tx_queues);
1738 sge->stats.cmdQ_full[2]++;
1739 }
1740
1741 /* T204 cmdQ0 skbs that are destined for a certain port have to go
1742 * through the scheduler.
1743 */
1744 if (sge->tx_sched && !qid && skb->dev) {
1745 use_sched:
1746 use_sched_skb = 1;
1747 /* Note that the scheduler might return a different skb than
1748 * the one passed in.
1749 */
1750 skb = sched_skb(sge, skb, credits);
1751 if (!skb) {
1752 spin_unlock(&q->lock);
1753 return NETDEV_TX_OK;
1754 }
1755 pidx = q->pidx;
1756 count = 1 + skb_shinfo(skb)->nr_frags;
1757 count += compute_large_page_tx_descs(skb);
1758 }
1759
1760 q->in_use += count;
1761 genbit = q->genbit;
1762 pidx = q->pidx;
1763 q->pidx += count;
1764 if (q->pidx >= q->size) {
1765 q->pidx -= q->size;
1766 q->genbit ^= 1;
1767 }
1768 spin_unlock(&q->lock);
1769
1770 write_tx_descs(adapter, skb, pidx, genbit, q);
1771
1772 /*
1773 * We always ring the doorbell for cmdQ1. For cmdQ0, we only ring
1774 * the doorbell if the Q is asleep. There is a natural race, where
1775 * the hardware is going to sleep just after we checked, however,
1776 * then the interrupt handler will detect the outstanding TX packet
1777 * and ring the doorbell for us.
1778 */
1779 if (qid)
1780 doorbell_pio(adapter, F_CMDQ1_ENABLE);
1781 else {
1782 clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
1783 if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
1784 set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
1785 writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
1786 }
1787 }
1788
1789 if (use_sched_skb) {
1790 if (spin_trylock(&q->lock)) {
1791 credits = q->size - q->in_use;
1792 skb = NULL;
1793 goto use_sched;
1794 }
1795 }
1796 return NETDEV_TX_OK;
1797 }
1798
1799 #define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))
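/*
 * Example (illustrative): MK_ETH_TYPE_MSS(CPL_ETH_II, 1460) keeps the MSS
 * in the low 14 bits and places the Ethernet framing type in the bits
 * above them.
 */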
1800
1801 /*
1802 * eth_hdr_len - return the length of an Ethernet header
1803 * @data: pointer to the start of the Ethernet header
1804 *
1805 * Returns the length of an Ethernet header, including optional VLAN tag.
1806 */
1807 static inline int eth_hdr_len(const void *data)
1808 {
1809 const struct ethhdr *e = data;
1810
1811 return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN;
1812 }
1813
1814 /*
1815 * Adds the CPL header to the sk_buff and passes it to t1_sge_tx.
1816 */
1817 netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
1818 {
1819 struct adapter *adapter = dev->ml_priv;
1820 struct sge *sge = adapter->sge;
1821 struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]);
1822 struct cpl_tx_pkt *cpl;
1823 struct sk_buff *orig_skb = skb;
1824 int ret;
1825
1826 if (skb->protocol == htons(ETH_P_CPL5))
1827 goto send;
1828
1829 /*
1830 * We are using a non-standard hard_header_len.
1831 * Allocate more header room in the rare cases it is not big enough.
1832 */
1833 if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
1834 skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso));
1835 ++st->tx_need_hdrroom;
1836 dev_kfree_skb_any(orig_skb);
1837 if (!skb)
1838 return NETDEV_TX_OK;
1839 }
1840
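	/*
	 * GSO packets get the larger LSO CPL header, which carries the IP and
	 * TCP header lengths and the MSS so the hardware can segment the
	 * payload itself.
	 */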
1841 if (skb_shinfo(skb)->gso_size) {
1842 int eth_type;
1843 struct cpl_tx_pkt_lso *hdr;
1844
1845 ++st->tx_tso;
1846
1847 eth_type = skb_network_offset(skb) == ETH_HLEN ?
1848 CPL_ETH_II : CPL_ETH_II_VLAN;
1849
1850 hdr = skb_push(skb, sizeof(*hdr));
1851 hdr->opcode = CPL_TX_PKT_LSO;
1852 hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
1853 hdr->ip_hdr_words = ip_hdr(skb)->ihl;
1854 hdr->tcp_hdr_words = tcp_hdr(skb)->doff;
1855 hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
1856 skb_shinfo(skb)->gso_size));
1857 hdr->len = htonl(skb->len - sizeof(*hdr));
1858 cpl = (struct cpl_tx_pkt *)hdr;
1859 } else {
1860 /*
1861 		 * Packets shorter than ETH_HLEN can break the MAC, so drop them
1862 		 * early. We may also get oversized packets because some parts of
1863 		 * the kernel do not handle our unusual hard_header_len correctly;
1864 		 * drop those too.
1865 */
1866 if (unlikely(skb->len < ETH_HLEN ||
1867 skb->len > dev->mtu + eth_hdr_len(skb->data))) {
1868 			netdev_dbg(dev, "packet size %d hdr %d mtu %d\n",
1869 skb->len, eth_hdr_len(skb->data), dev->mtu);
1870 dev_kfree_skb_any(skb);
1871 return NETDEV_TX_OK;
1872 }
1873
1874 if (skb->ip_summed == CHECKSUM_PARTIAL &&
1875 ip_hdr(skb)->protocol == IPPROTO_UDP) {
1876 if (unlikely(skb_checksum_help(skb))) {
1877 netdev_dbg(dev, "unable to do udp checksum\n");
1878 dev_kfree_skb_any(skb);
1879 return NETDEV_TX_OK;
1880 }
1881 }
1882
1883 		/* Stash an outgoing ARP request (typically a gratuitous ARP); it
1884 		 * is replayed later to flush out stuck ESPI packets.
1885 */
1886 if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) {
1887 if (skb->protocol == htons(ETH_P_ARP) &&
1888 arp_hdr(skb)->ar_op == htons(ARPOP_REQUEST)) {
1889 adapter->sge->espibug_skb[dev->if_port] = skb;
1890 /* We want to re-use this skb later. We
1891 * simply bump the reference count and it
1892 * will not be freed...
1893 */
1894 skb = skb_get(skb);
1895 }
1896 }
1897
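		/*
		 * Prepend the plain CPL_TX_PKT header: IP checksums are always
		 * computed in software, while L4 checksum offload follows the
		 * skb's ip_summed setting.
		 */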
1898 cpl = __skb_push(skb, sizeof(*cpl));
1899 cpl->opcode = CPL_TX_PKT;
1900 cpl->ip_csum_dis = 1; /* SW calculates IP csum */
1901 cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1;
1902 /* the length field isn't used so don't bother setting it */
1903
1904 st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL);
1905 }
1906 cpl->iff = dev->if_port;
1907
1908 if (skb_vlan_tag_present(skb)) {
1909 cpl->vlan_valid = 1;
1910 cpl->vlan = htons(skb_vlan_tag_get(skb));
1911 st->vlan_insert++;
1912 } else
1913 cpl->vlan_valid = 0;
1914
1915 send:
1916 ret = t1_sge_tx(skb, adapter, 0, dev);
1917
1918 	/* If the transmit path was busy and we had reallocated the skb due to
1919 	 * the headroom limit, silently discard it to avoid a leak.
1920 */
1921 if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) {
1922 dev_kfree_skb_any(skb);
1923 ret = NETDEV_TX_OK;
1924 }
1925 return ret;
1926 }
1927
1928 /*
1929 * Callback for the Tx buffer reclaim timer. Runs with softirqs disabled.
1930 */
1931 static void sge_tx_reclaim_cb(struct timer_list *t)
1932 {
1933 int i;
1934 struct sge *sge = from_timer(sge, t, tx_reclaim_timer);
1935
1936 for (i = 0; i < SGE_CMDQ_N; ++i) {
1937 struct cmdQ *q = &sge->cmdQ[i];
1938
1939 if (!spin_trylock(&q->lock))
1940 continue;
1941
1942 reclaim_completed_tx(sge, q);
1943 if (i == 0 && q->in_use) { /* flush pending credits */
1944 writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
1945 }
1946 spin_unlock(&q->lock);
1947 }
1948 mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
1949 }
1950
1951 /*
1952 * Propagate changes of the SGE coalescing parameters to the HW.
1953 */
1954 int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
1955 {
1956 sge->fixed_intrtimer = p->rx_coalesce_usecs *
1957 core_ticks_per_usec(sge->adapter);
1958 writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
1959 return 0;
1960 }
1961
1962 /*
1963 * Allocates both RX and TX resources and configures the SGE. However,
1964 * the hardware is not enabled yet.
1965 */
1966 int t1_sge_configure(struct sge *sge, struct sge_params *p)
1967 {
1968 if (alloc_rx_resources(sge, p))
1969 return -ENOMEM;
1970 if (alloc_tx_resources(sge, p)) {
1971 free_rx_resources(sge);
1972 return -ENOMEM;
1973 }
1974 configure_sge(sge, p);
1975
1976 /*
1977 * Now that we have sized the free lists calculate the payload
1978 * capacity of the large buffers. Other parts of the driver use
1979 * this to set the max offload coalescing size so that RX packets
1980 * do not overflow our large buffers.
1981 */
1982 p->large_buf_capacity = jumbo_payload_capacity(sge);
1983 return 0;
1984 }
1985
1986 /*
1987 * Disables the DMA engine.
1988 */
1989 void t1_sge_stop(struct sge *sge)
1990 {
1991 int i;
1992 writel(0, sge->adapter->regs + A_SG_CONTROL);
1993 readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
1994
1995 if (is_T2(sge->adapter))
1996 del_timer_sync(&sge->espibug_timer);
1997
1998 del_timer_sync(&sge->tx_reclaim_timer);
1999 if (sge->tx_sched)
2000 tx_sched_stop(sge);
2001
2002 for (i = 0; i < MAX_NPORTS; i++)
2003 kfree_skb(sge->espibug_skb[i]);
2004 }
2005
2006 /*
2007 * Enables the DMA engine.
2008 */
2009 void t1_sge_start(struct sge *sge)
2010 {
2011 refill_free_list(sge, &sge->freelQ[0]);
2012 refill_free_list(sge, &sge->freelQ[1]);
2013
2014 writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
2015 doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
2016 readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
2017
2018 mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2019
2020 if (is_T2(sge->adapter))
2021 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
2022 }
2023
2024 /*
2025  * Callback for the T2 ESPI 'stuck packet feature' workaround (T204 variant).
2026 */
2027 static void espibug_workaround_t204(struct timer_list *t)
2028 {
2029 struct sge *sge = from_timer(sge, t, espibug_timer);
2030 struct adapter *adapter = sge->adapter;
2031 unsigned int nports = adapter->params.nports;
2032 u32 seop[MAX_NPORTS];
2033
2034 if (adapter->open_device_map & PORT_MASK) {
2035 int i;
2036
2037 if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0)
2038 return;
2039
2040 for (i = 0; i < nports; i++) {
2041 struct sk_buff *skb = sge->espibug_skb[i];
2042
2043 if (!netif_running(adapter->port[i].dev) ||
2044 netif_queue_stopped(adapter->port[i].dev) ||
2045 !seop[i] || ((seop[i] & 0xfff) != 0) || !skb)
2046 continue;
2047
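			/*
			 * Patch the stashed skb so its payload carries the
			 * driver's ch_mac_addr pattern just past the CPL header
			 * and near the tail; cb[0] marks it as already patched.
			 */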
2048 if (!skb->cb[0]) {
2049 skb_copy_to_linear_data_offset(skb,
2050 sizeof(struct cpl_tx_pkt),
2051 ch_mac_addr,
2052 ETH_ALEN);
2053 skb_copy_to_linear_data_offset(skb,
2054 skb->len - 10,
2055 ch_mac_addr,
2056 ETH_ALEN);
2057 skb->cb[0] = 0xff;
2058 }
2059
2060 /* bump the reference count to avoid freeing of
2061 * the skb once the DMA has completed.
2062 */
2063 skb = skb_get(skb);
2064 t1_sge_tx(skb, adapter, 0, adapter->port[i].dev);
2065 }
2066 }
2067 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
2068 }
2069
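/*
 * Single-port variant of the ESPI 'stuck packet' workaround: if the ESPI
 * monitor appears to indicate a stall, the stashed ARP skb is retransmitted
 * to flush the pipeline.
 */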
2070 static void espibug_workaround(struct timer_list *t)
2071 {
2072 struct sge *sge = from_timer(sge, t, espibug_timer);
2073 struct adapter *adapter = sge->adapter;
2074
2075 if (netif_running(adapter->port[0].dev)) {
2076 struct sk_buff *skb = sge->espibug_skb[0];
2077 u32 seop = t1_espi_get_mon(adapter, 0x930, 0);
2078
2079 if ((seop & 0xfff0fff) == 0xfff && skb) {
2080 if (!skb->cb[0]) {
2081 skb_copy_to_linear_data_offset(skb,
2082 sizeof(struct cpl_tx_pkt),
2083 ch_mac_addr,
2084 ETH_ALEN);
2085 skb_copy_to_linear_data_offset(skb,
2086 skb->len - 10,
2087 ch_mac_addr,
2088 ETH_ALEN);
2089 skb->cb[0] = 0xff;
2090 }
2091
2092 /* bump the reference count to avoid freeing of the
2093 * skb once the DMA has completed.
2094 */
2095 skb = skb_get(skb);
2096 t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
2097 }
2098 }
2099 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
2100 }
2101
2102 /*
2103 * Creates a t1_sge structure and returns suggested resource parameters.
2104 */
2105 struct sge *t1_sge_create(struct adapter *adapter, struct sge_params *p)
2106 {
2107 struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL);
2108 int i;
2109
2110 if (!sge)
2111 return NULL;
2112
2113 sge->adapter = adapter;
2114 sge->netdev = adapter->port[0].dev;
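	/* T1B gets no RX packet padding and uses free list 1 for jumbo buffers */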
2115 sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
2116 sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
2117
2118 for_each_port(adapter, i) {
2119 sge->port_stats[i] = alloc_percpu(struct sge_port_stats);
2120 if (!sge->port_stats[i])
2121 goto nomem_port;
2122 }
2123
2124 timer_setup(&sge->tx_reclaim_timer, sge_tx_reclaim_cb, 0);
2125
2126 if (is_T2(sge->adapter)) {
2127 timer_setup(&sge->espibug_timer,
2128 adapter->params.nports > 1 ? espibug_workaround_t204 : espibug_workaround,
2129 0);
2130
2131 if (adapter->params.nports > 1)
2132 tx_sched_init(sge);
2133
2134 sge->espibug_timeout = 1;
2135 /* for T204, every 10ms */
2136 if (adapter->params.nports > 1)
2137 sge->espibug_timeout = HZ/100;
2138 }
2139
2140
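	/* Suggested ring sizes; freelQ index !jumbo_fl holds the regular-MTU buffers */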
2141 p->cmdQ_size[0] = SGE_CMDQ0_E_N;
2142 p->cmdQ_size[1] = SGE_CMDQ1_E_N;
2143 p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
2144 p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
2145 if (sge->tx_sched) {
2146 if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204)
2147 p->rx_coalesce_usecs = 15;
2148 else
2149 p->rx_coalesce_usecs = 50;
2150 } else
2151 p->rx_coalesce_usecs = 50;
2152
2153 p->coalesce_enable = 0;
2154 p->sample_interval_usecs = 0;
2155
2156 return sge;
2157 nomem_port:
2158 while (i >= 0) {
2159 free_percpu(sge->port_stats[i]);
2160 --i;
2161 }
2162 kfree(sge);
2163 return NULL;
2164
2165 }
2166