1 /*
2 * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32 #include <linux/skbuff.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/slab.h>
40 #include <linux/prefetch.h>
41 #include <net/arp.h>
42 #include "common.h"
43 #include "regs.h"
44 #include "sge_defs.h"
45 #include "t3_cpl.h"
46 #include "firmware_exports.h"
47 #include "cxgb3_offload.h"
48
49 #define USE_GTS 0
50
51 #define SGE_RX_SM_BUF_SIZE 1536
52
53 #define SGE_RX_COPY_THRES 256
54 #define SGE_RX_PULL_LEN 128
55
56 #define SGE_PG_RSVD SMP_CACHE_BYTES
57 /*
58 * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
59 * It must be a divisor of PAGE_SIZE. If set to 0 FL0 will use sk_buffs
60 * directly.
61 */
62 #define FL0_PG_CHUNK_SIZE 2048
63 #define FL0_PG_ORDER 0
64 #define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER)
65 #define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
66 #define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
67 #define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER)
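/*
 * Illustrative sizing (a sketch, not normative): with 4KB pages, FL0 carves
 * each order-0 page into two 2048-byte chunks, while FL1 allocates order-1
 * (8KB) pages and hands out one 8192-byte chunk per allocation.  With 64KB
 * pages, FL1 instead uses order-0 pages split into 16384-byte chunks.
 */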
68
69 #define SGE_RX_DROP_THRES 16
70 #define RX_RECLAIM_PERIOD (HZ/4)
71
72 /*
73 * Max number of Rx buffers we replenish at a time.
74 */
75 #define MAX_RX_REFILL 16U
76 /*
77 * Period of the Tx buffer reclaim timer. This timer does not need to run
78 * frequently as Tx buffers are usually reclaimed by new Tx packets.
79 */
80 #define TX_RECLAIM_PERIOD (HZ / 4)
81 #define TX_RECLAIM_TIMER_CHUNK 64U
82 #define TX_RECLAIM_CHUNK 16U
83
84 /* WR size in bytes */
85 #define WR_LEN (WR_FLITS * 8)
86
87 /*
88 * Types of Tx queues in each queue set. Order here matters, do not change.
89 */
90 enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
91
92 /* Values for sge_txq.flags */
93 enum {
94 TXQ_RUNNING = 1 << 0, /* fetch engine is running */
95 TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
96 };
97
98 struct tx_desc {
99 __be64 flit[TX_DESC_FLITS];
100 };
101
102 struct rx_desc {
103 __be32 addr_lo;
104 __be32 len_gen;
105 __be32 gen2;
106 __be32 addr_hi;
107 };
108
109 struct tx_sw_desc { /* SW state per Tx descriptor */
110 struct sk_buff *skb;
111 u8 eop; /* set if last descriptor for packet */
112 u8 addr_idx; /* buffer index of first SGL entry in descriptor */
113 u8 fragidx; /* first page fragment associated with descriptor */
114 s8 sflit; /* start flit of first SGL entry in descriptor */
115 };
116
117 struct rx_sw_desc { /* SW state per Rx descriptor */
118 union {
119 struct sk_buff *skb;
120 struct fl_pg_chunk pg_chunk;
121 };
122 DEFINE_DMA_UNMAP_ADDR(dma_addr);
123 };
124
125 struct rsp_desc { /* response queue descriptor */
126 struct rss_header rss_hdr;
127 __be32 flags;
128 __be32 len_cq;
129 u8 imm_data[47];
130 u8 intr_gen;
131 };
132
133 /*
134 * Holds unmapping information for Tx packets that need deferred unmapping.
135 * This structure lives at skb->head and must be allocated by callers.
136 */
137 struct deferred_unmap_info {
138 struct pci_dev *pdev;
139 dma_addr_t addr[MAX_SKB_FRAGS + 1];
140 };
141
142 /*
143 * Maps a number of flits to the number of Tx descriptors that can hold them.
144 * The formula is
145 *
146 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
147 *
148 * HW allows up to 4 descriptors to be combined into a WR.
149 */
150 static u8 flit_desc_map[] = {
151 0,
152 #if SGE_NUM_GENBITS == 1
153 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
154 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
155 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
156 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
157 #elif SGE_NUM_GENBITS == 2
158 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
159 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
160 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
161 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
162 #else
163 # error "SGE_NUM_GENBITS must be 1 or 2"
164 #endif
165 };
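/*
 * Worked example of the table above (a sketch, assuming WR_FLITS == 15 as in
 * the 2-generation-bit case): desc = 1 + (flits - 2) / (WR_FLITS - 1), so
 * 15 flits -> 1 + 13/14 = 1 descriptor, 16 flits -> 1 + 14/14 = 2, and
 * 30 flits -> 1 + 28/14 = 3, matching the SGE_NUM_GENBITS == 2 entries.
 */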
166
167 static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
168 {
169 return container_of(q, struct sge_qset, fl[qidx]);
170 }
171
172 static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
173 {
174 return container_of(q, struct sge_qset, rspq);
175 }
176
177 static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
178 {
179 return container_of(q, struct sge_qset, txq[qidx]);
180 }
181
182 /**
183 * refill_rspq - replenish an SGE response queue
184 * @adapter: the adapter
185 * @q: the response queue to replenish
186 * @credits: how many new responses to make available
187 *
188 * Replenishes a response queue by making the supplied number of responses
189 * available to HW.
190 */
191 static inline void refill_rspq(struct adapter *adapter,
192 const struct sge_rspq *q, unsigned int credits)
193 {
194 rmb();
195 t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
196 V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
197 }
198
199 /**
200 * need_skb_unmap - does the platform need unmapping of sk_buffs?
201 *
202 * Returns true if the platform needs sk_buff unmapping. The compiler
203 * optimizes away the unnecessary unmapping code when this returns false.
204 */
205 static inline int need_skb_unmap(void)
206 {
207 #ifdef CONFIG_NEED_DMA_MAP_STATE
208 return 1;
209 #else
210 return 0;
211 #endif
212 }
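/*
 * Illustrative use (see free_tx_desc() below): because the result is a
 * compile-time constant, a guard such as
 *
 *	if (need_skb_unmap())
 *		unmap_skb(skb, q, cidx, pdev);
 *
 * is removed entirely by the compiler on platforms that do not define
 * CONFIG_NEED_DMA_MAP_STATE.
 */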
213
214 /**
215 * unmap_skb - unmap a packet main body and its page fragments
216 * @skb: the packet
217 * @q: the Tx queue containing Tx descriptors for the packet
218 * @cidx: index of Tx descriptor
219 * @pdev: the PCI device
220 *
221 * Unmap the main body of an sk_buff and its page fragments, if any.
222 * Because of the fairly complicated structure of our SGLs and the desire
223 * to conserve space for metadata, the information necessary to unmap an
224 * sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
225 * descriptors (the physical addresses of the various data buffers), and
226 * the SW descriptor state (assorted indices). The send functions
227 * initialize the indices for the first packet descriptor so we can unmap
228 * the buffers held in the first Tx descriptor here, and we have enough
229 * information at this point to set the state for the next Tx descriptor.
230 *
231 * Note that it is possible to clean up the first descriptor of a packet
232 * before the send routines have written the next descriptors, but this
233 * race does not cause any problem. We just end up writing the unmapping
234 * info for the descriptor first.
235 */
236 static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
237 unsigned int cidx, struct pci_dev *pdev)
238 {
239 const struct sg_ent *sgp;
240 struct tx_sw_desc *d = &q->sdesc[cidx];
241 int nfrags, frag_idx, curflit, j = d->addr_idx;
242
243 sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
244 frag_idx = d->fragidx;
245
246 if (frag_idx == 0 && skb_headlen(skb)) {
247 pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]),
248 skb_headlen(skb), PCI_DMA_TODEVICE);
249 j = 1;
250 }
251
252 curflit = d->sflit + 1 + j;
253 nfrags = skb_shinfo(skb)->nr_frags;
254
255 while (frag_idx < nfrags && curflit < WR_FLITS) {
256 pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
257 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]),
258 PCI_DMA_TODEVICE);
259 j ^= 1;
260 if (j == 0) {
261 sgp++;
262 curflit++;
263 }
264 curflit++;
265 frag_idx++;
266 }
267
268 if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
269 d = cidx + 1 == q->size ? q->sdesc : d + 1;
270 d->fragidx = frag_idx;
271 d->addr_idx = j;
272 d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
273 }
274 }
275
276 /**
277 * free_tx_desc - reclaims Tx descriptors and their buffers
278 * @adapter: the adapter
279 * @q: the Tx queue to reclaim descriptors from
280 * @n: the number of descriptors to reclaim
281 *
282 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
283 * Tx buffers. Called with the Tx queue lock held.
284 */
285 static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
286 unsigned int n)
287 {
288 struct tx_sw_desc *d;
289 struct pci_dev *pdev = adapter->pdev;
290 unsigned int cidx = q->cidx;
291
292 const int need_unmap = need_skb_unmap() &&
293 q->cntxt_id >= FW_TUNNEL_SGEEC_START;
294
295 d = &q->sdesc[cidx];
296 while (n--) {
297 if (d->skb) { /* an SGL is present */
298 if (need_unmap)
299 unmap_skb(d->skb, q, cidx, pdev);
300 if (d->eop) {
301 kfree_skb(d->skb);
302 d->skb = NULL;
303 }
304 }
305 ++d;
306 if (++cidx == q->size) {
307 cidx = 0;
308 d = q->sdesc;
309 }
310 }
311 q->cidx = cidx;
312 }
313
314 /**
315 * reclaim_completed_tx - reclaims completed Tx descriptors
316 * @adapter: the adapter
317 * @q: the Tx queue to reclaim completed descriptors from
318 * @chunk: maximum number of descriptors to reclaim
319 *
320 * Reclaims Tx descriptors that the SGE has indicated it has processed,
321 * and frees the associated buffers if possible. Called with the Tx
322 * queue's lock held.
323 */
324 static inline unsigned int reclaim_completed_tx(struct adapter *adapter,
325 struct sge_txq *q,
326 unsigned int chunk)
327 {
328 unsigned int reclaim = q->processed - q->cleaned;
329
330 reclaim = min(chunk, reclaim);
331 if (reclaim) {
332 free_tx_desc(adapter, q, reclaim);
333 q->cleaned += reclaim;
334 q->in_use -= reclaim;
335 }
336 return q->processed - q->cleaned;
337 }
338
339 /**
340 * should_restart_tx - are there enough resources to restart a Tx queue?
341 * @q: the Tx queue
342 *
343 * Checks if there are enough descriptors to restart a suspended Tx queue.
344 */
345 static inline int should_restart_tx(const struct sge_txq *q)
346 {
347 unsigned int r = q->processed - q->cleaned;
348
349 return q->in_use - r < (q->size >> 1);
350 }
351
352 static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
353 struct rx_sw_desc *d)
354 {
355 if (q->use_pages && d->pg_chunk.page) {
356 (*d->pg_chunk.p_cnt)--;
357 if (!*d->pg_chunk.p_cnt)
358 pci_unmap_page(pdev,
359 d->pg_chunk.mapping,
360 q->alloc_size, PCI_DMA_FROMDEVICE);
361
362 put_page(d->pg_chunk.page);
363 d->pg_chunk.page = NULL;
364 } else {
365 pci_unmap_single(pdev, dma_unmap_addr(d, dma_addr),
366 q->buf_size, PCI_DMA_FROMDEVICE);
367 kfree_skb(d->skb);
368 d->skb = NULL;
369 }
370 }
371
372 /**
373 * free_rx_bufs - free the Rx buffers on an SGE free list
374 * @pdev: the PCI device associated with the adapter
375 * @rxq: the SGE free list to clean up
376 *
377 * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
378 * this queue should be stopped before calling this function.
379 */
380 static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
381 {
382 unsigned int cidx = q->cidx;
383
384 while (q->credits--) {
385 struct rx_sw_desc *d = &q->sdesc[cidx];
386
387
388 clear_rx_desc(pdev, q, d);
389 if (++cidx == q->size)
390 cidx = 0;
391 }
392
393 if (q->pg_chunk.page) {
394 __free_pages(q->pg_chunk.page, q->order);
395 q->pg_chunk.page = NULL;
396 }
397 }
398
399 /**
400 * add_one_rx_buf - add a packet buffer to a free-buffer list
401 * @va: buffer start VA
402 * @len: the buffer length
403 * @d: the HW Rx descriptor to write
404 * @sd: the SW Rx descriptor to write
405 * @gen: the generation bit value
406 * @pdev: the PCI device associated with the adapter
407 *
408 * Add a buffer of the given length to the supplied HW and SW Rx
409 * descriptors.
410 */
411 static inline int add_one_rx_buf(void *va, unsigned int len,
412 struct rx_desc *d, struct rx_sw_desc *sd,
413 unsigned int gen, struct pci_dev *pdev)
414 {
415 dma_addr_t mapping;
416
417 mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
418 if (unlikely(pci_dma_mapping_error(pdev, mapping)))
419 return -ENOMEM;
420
421 dma_unmap_addr_set(sd, dma_addr, mapping);
422
423 d->addr_lo = cpu_to_be32(mapping);
424 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
425 wmb();
426 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
427 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
428 return 0;
429 }
430
431 static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d,
432 unsigned int gen)
433 {
434 d->addr_lo = cpu_to_be32(mapping);
435 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
436 wmb();
437 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
438 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
439 return 0;
440 }
441
442 static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
443 struct rx_sw_desc *sd, gfp_t gfp,
444 unsigned int order)
445 {
446 if (!q->pg_chunk.page) {
447 dma_addr_t mapping;
448
449 q->pg_chunk.page = alloc_pages(gfp, order);
450 if (unlikely(!q->pg_chunk.page))
451 return -ENOMEM;
452 q->pg_chunk.va = page_address(q->pg_chunk.page);
453 q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) -
454 SGE_PG_RSVD;
455 q->pg_chunk.offset = 0;
456 mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
457 0, q->alloc_size, PCI_DMA_FROMDEVICE);
458 q->pg_chunk.mapping = mapping;
459 }
460 sd->pg_chunk = q->pg_chunk;
461
462 prefetch(sd->pg_chunk.p_cnt);
463
464 q->pg_chunk.offset += q->buf_size;
465 if (q->pg_chunk.offset == (PAGE_SIZE << order))
466 q->pg_chunk.page = NULL;
467 else {
468 q->pg_chunk.va += q->buf_size;
469 get_page(q->pg_chunk.page);
470 }
471
472 if (sd->pg_chunk.offset == 0)
473 *sd->pg_chunk.p_cnt = 1;
474 else
475 *sd->pg_chunk.p_cnt += 1;
476
477 return 0;
478 }
479
480 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
481 {
482 if (q->pend_cred >= q->credits / 4) {
483 q->pend_cred = 0;
484 wmb();
485 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
486 }
487 }
488
489 /**
490 * refill_fl - refill an SGE free-buffer list
491 * @adapter: the adapter
492 * @q: the free-list to refill
493 * @n: the number of new buffers to allocate
494 * @gfp: the gfp flags for allocating new buffers
495 *
496 * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
497 * allocated with the supplied gfp flags. The caller must ensure that
498 * @n does not exceed the queue's capacity.
499 */
500 static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
501 {
502 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
503 struct rx_desc *d = &q->desc[q->pidx];
504 unsigned int count = 0;
505
506 while (n--) {
507 dma_addr_t mapping;
508 int err;
509
510 if (q->use_pages) {
511 if (unlikely(alloc_pg_chunk(adap, q, sd, gfp,
512 q->order))) {
513 nomem: q->alloc_failed++;
514 break;
515 }
516 mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
517 dma_unmap_addr_set(sd, dma_addr, mapping);
518
519 add_one_rx_chunk(mapping, d, q->gen);
520 pci_dma_sync_single_for_device(adap->pdev, mapping,
521 q->buf_size - SGE_PG_RSVD,
522 PCI_DMA_FROMDEVICE);
523 } else {
524 void *buf_start;
525
526 struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
527 if (!skb)
528 goto nomem;
529
530 sd->skb = skb;
531 buf_start = skb->data;
532 err = add_one_rx_buf(buf_start, q->buf_size, d, sd,
533 q->gen, adap->pdev);
534 if (unlikely(err)) {
535 clear_rx_desc(adap->pdev, q, sd);
536 break;
537 }
538 }
539
540 d++;
541 sd++;
542 if (++q->pidx == q->size) {
543 q->pidx = 0;
544 q->gen ^= 1;
545 sd = q->sdesc;
546 d = q->desc;
547 }
548 count++;
549 }
550
551 q->credits += count;
552 q->pend_cred += count;
553 ring_fl_db(adap, q);
554
555 return count;
556 }
557
558 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
559 {
560 refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits),
561 GFP_ATOMIC | __GFP_COMP);
562 }
563
564 /**
565 * recycle_rx_buf - recycle a receive buffer
566 * @adapter: the adapter
567 * @q: the SGE free list
568 * @idx: index of buffer to recycle
569 *
570 * Recycles the specified buffer on the given free list by adding it at
571 * the next available slot on the list.
572 */
573 static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
574 unsigned int idx)
575 {
576 struct rx_desc *from = &q->desc[idx];
577 struct rx_desc *to = &q->desc[q->pidx];
578
579 q->sdesc[q->pidx] = q->sdesc[idx];
580 to->addr_lo = from->addr_lo; /* already big endian */
581 to->addr_hi = from->addr_hi; /* likewise */
582 wmb();
583 to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
584 to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
585
586 if (++q->pidx == q->size) {
587 q->pidx = 0;
588 q->gen ^= 1;
589 }
590
591 q->credits++;
592 q->pend_cred++;
593 ring_fl_db(adap, q);
594 }
595
596 /**
597 * alloc_ring - allocate resources for an SGE descriptor ring
598 * @pdev: the PCI device
599 * @nelem: the number of descriptors
600 * @elem_size: the size of each descriptor
601 * @sw_size: the size of the SW state associated with each ring element
602 * @phys: the physical address of the allocated ring
603 * @metadata: address of the array holding the SW state for the ring
604 *
605 * Allocates resources for an SGE descriptor ring, such as Tx queues,
606 * free buffer lists, or response queues. Each SGE ring requires
607 * space for its HW descriptors plus, optionally, space for the SW state
608 * associated with each HW entry (the metadata). The function returns
609 * three values: the virtual address for the HW ring (the return value
610 * of the function), the physical address of the HW ring, and the address
611 * of the SW ring.
612 */
613 static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
614 size_t sw_size, dma_addr_t * phys, void *metadata)
615 {
616 size_t len = nelem * elem_size;
617 void *s = NULL;
618 void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
619
620 if (!p)
621 return NULL;
622 if (sw_size && metadata) {
623 s = kcalloc(nelem, sw_size, GFP_KERNEL);
624
625 if (!s) {
626 dma_free_coherent(&pdev->dev, len, p, *phys);
627 return NULL;
628 }
629 *(void **)metadata = s;
630 }
631 memset(p, 0, len);
632 return p;
633 }
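/*
 * Illustrative call (a sketch, not a verbatim quote of a later caller):
 * allocating a free-list ring of q->size HW descriptors together with its
 * per-entry SW state might look like
 *
 *	q->desc = alloc_ring(adap->pdev, q->size, sizeof(struct rx_desc),
 *			     sizeof(struct rx_sw_desc), &q->phys_addr,
 *			     &q->sdesc);
 *
 * where @metadata receives the kcalloc()ed SW ring through the void ** cast.
 */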
634
635 /**
636 * t3_reset_qset - reset a sge qset
637 * @q: the queue set
638 *
639 * Reset the qset structure.
640 * The NAPI structure is preserved in the event of
641 * the qset's reincarnation, for example during EEH recovery.
642 */
643 static void t3_reset_qset(struct sge_qset *q)
644 {
645 if (q->adap &&
646 !(q->adap->flags & NAPI_INIT)) {
647 memset(q, 0, sizeof(*q));
648 return;
649 }
650
651 q->adap = NULL;
652 memset(&q->rspq, 0, sizeof(q->rspq));
653 memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
654 memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
655 q->txq_stopped = 0;
656 q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
657 q->rx_reclaim_timer.function = NULL;
658 q->nomem = 0;
659 napi_free_frags(&q->napi);
660 }
661
662
663 /**
664 * free_qset - free the resources of an SGE queue set
665 * @adapter: the adapter owning the queue set
666 * @q: the queue set
667 *
668 * Release the HW and SW resources associated with an SGE queue set, such
669 * as HW contexts, packet buffers, and descriptor rings. Traffic to the
670 * queue set must be quiesced prior to calling this.
671 */
672 static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
673 {
674 int i;
675 struct pci_dev *pdev = adapter->pdev;
676
677 for (i = 0; i < SGE_RXQ_PER_SET; ++i)
678 if (q->fl[i].desc) {
679 spin_lock_irq(&adapter->sge.reg_lock);
680 t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
681 spin_unlock_irq(&adapter->sge.reg_lock);
682 free_rx_bufs(pdev, &q->fl[i]);
683 kfree(q->fl[i].sdesc);
684 dma_free_coherent(&pdev->dev,
685 q->fl[i].size *
686 sizeof(struct rx_desc), q->fl[i].desc,
687 q->fl[i].phys_addr);
688 }
689
690 for (i = 0; i < SGE_TXQ_PER_SET; ++i)
691 if (q->txq[i].desc) {
692 spin_lock_irq(&adapter->sge.reg_lock);
693 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
694 spin_unlock_irq(&adapter->sge.reg_lock);
695 if (q->txq[i].sdesc) {
696 free_tx_desc(adapter, &q->txq[i],
697 q->txq[i].in_use);
698 kfree(q->txq[i].sdesc);
699 }
700 dma_free_coherent(&pdev->dev,
701 q->txq[i].size *
702 sizeof(struct tx_desc),
703 q->txq[i].desc, q->txq[i].phys_addr);
704 __skb_queue_purge(&q->txq[i].sendq);
705 }
706
707 if (q->rspq.desc) {
708 spin_lock_irq(&adapter->sge.reg_lock);
709 t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
710 spin_unlock_irq(&adapter->sge.reg_lock);
711 dma_free_coherent(&pdev->dev,
712 q->rspq.size * sizeof(struct rsp_desc),
713 q->rspq.desc, q->rspq.phys_addr);
714 }
715
716 t3_reset_qset(q);
717 }
718
719 /**
720 * init_qset_cntxt - initialize an SGE queue set context info
721 * @qs: the queue set
722 * @id: the queue set id
723 *
724 * Initializes the TIDs and context ids for the queues of a queue set.
725 */
726 static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
727 {
728 qs->rspq.cntxt_id = id;
729 qs->fl[0].cntxt_id = 2 * id;
730 qs->fl[1].cntxt_id = 2 * id + 1;
731 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
732 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
733 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
734 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
735 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
736 }
737
738 /**
739 * sgl_len - calculates the size of an SGL of the given capacity
740 * @n: the number of SGL entries
741 *
742 * Calculates the number of flits needed for a scatter/gather list that
743 * can hold the given number of entries.
744 */
745 static inline unsigned int sgl_len(unsigned int n)
746 {
747 /* alternatively: 3 * (n / 2) + 2 * (n & 1) */
748 return (3 * n) / 2 + (n & 1);
749 }
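/*
 * Example: each pair of SGL entries packs into 3 flits (one flit holding the
 * two 32-bit lengths plus two 64-bit address flits), so sgl_len(1) = 2,
 * sgl_len(2) = 3, sgl_len(3) = 5 and sgl_len(4) = 6 flits.
 */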
750
751 /**
752 * flits_to_desc - returns the num of Tx descriptors for the given flits
753 * @n: the number of flits
754 *
755 * Calculates the number of Tx descriptors needed for the supplied number
756 * of flits.
757 */
758 static inline unsigned int flits_to_desc(unsigned int n)
759 {
760 BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
761 return flit_desc_map[n];
762 }
763
764 /**
765 * get_packet - return the next ingress packet buffer from a free list
766 * @adap: the adapter that received the packet
767 * @fl: the SGE free list holding the packet
768 * @len: the packet length including any SGE padding
769 * @drop_thres: # of remaining buffers before we start dropping packets
770 *
771 * Get the next packet from a free list and complete setup of the
772 * sk_buff. If the packet is small we make a copy and recycle the
773 * original buffer, otherwise we use the original buffer itself. If a
774 * positive drop threshold is supplied packets are dropped and their
775 * buffers recycled if (a) the number of remaining buffers is under the
776 * threshold and the packet is too big to copy, or (b) the packet should
777 * be copied but there is no memory for the copy.
778 */
779 static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
780 unsigned int len, unsigned int drop_thres)
781 {
782 struct sk_buff *skb = NULL;
783 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
784
785 prefetch(sd->skb->data);
786 fl->credits--;
787
788 if (len <= SGE_RX_COPY_THRES) {
789 skb = alloc_skb(len, GFP_ATOMIC);
790 if (likely(skb != NULL)) {
791 __skb_put(skb, len);
792 pci_dma_sync_single_for_cpu(adap->pdev,
793 dma_unmap_addr(sd, dma_addr), len,
794 PCI_DMA_FROMDEVICE);
795 memcpy(skb->data, sd->skb->data, len);
796 pci_dma_sync_single_for_device(adap->pdev,
797 dma_unmap_addr(sd, dma_addr), len,
798 PCI_DMA_FROMDEVICE);
799 } else if (!drop_thres)
800 goto use_orig_buf;
801 recycle:
802 recycle_rx_buf(adap, fl, fl->cidx);
803 return skb;
804 }
805
806 if (unlikely(fl->credits < drop_thres) &&
807 refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1),
808 GFP_ATOMIC | __GFP_COMP) == 0)
809 goto recycle;
810
811 use_orig_buf:
812 pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr),
813 fl->buf_size, PCI_DMA_FROMDEVICE);
814 skb = sd->skb;
815 skb_put(skb, len);
816 __refill_fl(adap, fl);
817 return skb;
818 }
819
820 /**
821 * get_packet_pg - return the next ingress packet buffer from a free list
822 * @adap: the adapter that received the packet
823 * @fl: the SGE free list holding the packet
824 * @len: the packet length including any SGE padding
825 * @drop_thres: # of remaining buffers before we start dropping packets
826 *
827 * Get the next packet from a free list populated with page chunks.
828 * If the packet is small we make a copy and recycle the original buffer,
829 * otherwise we attach the original buffer as a page fragment to a fresh
830 * sk_buff. If a positive drop threshold is supplied packets are dropped
831 * and their buffers recycled if (a) the number of remaining buffers is
832 * under the threshold and the packet is too big to copy, or (b) there's
833 * no system memory.
834 *
835 * Note: this function is similar to @get_packet but deals with Rx buffers
836 * that are page chunks rather than sk_buffs.
837 */
838 static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
839 struct sge_rspq *q, unsigned int len,
840 unsigned int drop_thres)
841 {
842 struct sk_buff *newskb, *skb;
843 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
844
845 dma_addr_t dma_addr = dma_unmap_addr(sd, dma_addr);
846
847 newskb = skb = q->pg_skb;
848 if (!skb && (len <= SGE_RX_COPY_THRES)) {
849 newskb = alloc_skb(len, GFP_ATOMIC);
850 if (likely(newskb != NULL)) {
851 __skb_put(newskb, len);
852 pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
853 PCI_DMA_FROMDEVICE);
854 memcpy(newskb->data, sd->pg_chunk.va, len);
855 pci_dma_sync_single_for_device(adap->pdev, dma_addr,
856 len,
857 PCI_DMA_FROMDEVICE);
858 } else if (!drop_thres)
859 return NULL;
860 recycle:
861 fl->credits--;
862 recycle_rx_buf(adap, fl, fl->cidx);
863 q->rx_recycle_buf++;
864 return newskb;
865 }
866
867 if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
868 goto recycle;
869
870 prefetch(sd->pg_chunk.p_cnt);
871
872 if (!skb)
873 newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
874
875 if (unlikely(!newskb)) {
876 if (!drop_thres)
877 return NULL;
878 goto recycle;
879 }
880
881 pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
882 PCI_DMA_FROMDEVICE);
883 (*sd->pg_chunk.p_cnt)--;
884 if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
885 pci_unmap_page(adap->pdev,
886 sd->pg_chunk.mapping,
887 fl->alloc_size,
888 PCI_DMA_FROMDEVICE);
889 if (!skb) {
890 __skb_put(newskb, SGE_RX_PULL_LEN);
891 memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
892 skb_fill_page_desc(newskb, 0, sd->pg_chunk.page,
893 sd->pg_chunk.offset + SGE_RX_PULL_LEN,
894 len - SGE_RX_PULL_LEN);
895 newskb->len = len;
896 newskb->data_len = len - SGE_RX_PULL_LEN;
897 newskb->truesize += newskb->data_len;
898 } else {
899 skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags,
900 sd->pg_chunk.page,
901 sd->pg_chunk.offset, len);
902 newskb->len += len;
903 newskb->data_len += len;
904 newskb->truesize += len;
905 }
906
907 fl->credits--;
908 /*
909 * We do not refill FLs here, we let the caller do it to overlap a
910 * prefetch.
911 */
912 return newskb;
913 }
914
915 /**
916 * get_imm_packet - return the next ingress packet buffer from a response
917 * @resp: the response descriptor containing the packet data
918 *
919 * Return a packet containing the immediate data of the given response.
920 */
921 static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
922 {
923 struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);
924
925 if (skb) {
926 __skb_put(skb, IMMED_PKT_SIZE);
927 skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
928 }
929 return skb;
930 }
931
932 /**
933 * calc_tx_descs - calculate the number of Tx descriptors for a packet
934 * @skb: the packet
935 *
936 * Returns the number of Tx descriptors needed for the given Ethernet
937 * packet. Ethernet packets require addition of WR and CPL headers.
938 */
939 static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
940 {
941 unsigned int flits;
942
943 if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
944 return 1;
945
946 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
947 if (skb_shinfo(skb)->gso_size)
948 flits++;
949 return flits_to_desc(flits);
950 }
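/*
 * Worked example (illustrative): a TSO packet with 3 page fragments plus
 * linear data needs sgl_len(3 + 1) = 6 SGL flits, 2 header flits and 1 LSO
 * flit, i.e. flits_to_desc(9), which the table above maps to a single
 * descriptor for either generation-bit setting.
 */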
951
952 /**
953 * make_sgl - populate a scatter/gather list for a packet
954 * @skb: the packet
955 * @sgp: the SGL to populate
956 * @start: start address of skb main body data to include in the SGL
957 * @len: length of skb main body data to include in the SGL
958 * @pdev: the PCI device
959 *
960 * Generates a scatter/gather list for the buffers that make up a packet
961 * and returns the SGL size in 8-byte words. The caller must size the SGL
962 * appropriately.
963 */
964 static inline unsigned int make_sgl(const struct sk_buff *skb,
965 struct sg_ent *sgp, unsigned char *start,
966 unsigned int len, struct pci_dev *pdev)
967 {
968 dma_addr_t mapping;
969 unsigned int i, j = 0, nfrags;
970
971 if (len) {
972 mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
973 sgp->len[0] = cpu_to_be32(len);
974 sgp->addr[0] = cpu_to_be64(mapping);
975 j = 1;
976 }
977
978 nfrags = skb_shinfo(skb)->nr_frags;
979 for (i = 0; i < nfrags; i++) {
980 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
981
982 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
983 DMA_TO_DEVICE);
984 sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
985 sgp->addr[j] = cpu_to_be64(mapping);
986 j ^= 1;
987 if (j == 0)
988 ++sgp;
989 }
990 if (j)
991 sgp->len[j] = 0;
992 return ((nfrags + (len != 0)) * 3) / 2 + j;
993 }
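/*
 * Example (illustrative): a packet with linear data and 2 page fragments
 * produces 3 SGL entries, so make_sgl() returns (3 * 3) / 2 + 1 = 5 flits,
 * agreeing with sgl_len(3) above; the odd trailing length slot is zeroed.
 */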
994
995 /**
996 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
997 * @adap: the adapter
998 * @q: the Tx queue
999 *
1000 * Ring the doorbell if a Tx queue is asleep. There is a natural race
1001 * where the HW goes to sleep just after we check; however, the
1002 * interrupt handler will then detect the outstanding TX packet
1003 * and ring the doorbell for us.
1004 *
1005 * When GTS is disabled we unconditionally ring the doorbell.
1006 */
1007 static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
1008 {
1009 #if USE_GTS
1010 clear_bit(TXQ_LAST_PKT_DB, &q->flags);
1011 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
1012 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1013 t3_write_reg(adap, A_SG_KDOORBELL,
1014 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1015 }
1016 #else
1017 wmb(); /* write descriptors before telling HW */
1018 t3_write_reg(adap, A_SG_KDOORBELL,
1019 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1020 #endif
1021 }
1022
1023 static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
1024 {
1025 #if SGE_NUM_GENBITS == 2
1026 d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
1027 #endif
1028 }
1029
1030 /**
1031 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
1032 * @ndesc: number of Tx descriptors spanned by the SGL
1033 * @skb: the packet corresponding to the WR
1034 * @d: first Tx descriptor to be written
1035 * @pidx: index of above descriptors
1036 * @q: the SGE Tx queue
1037 * @sgl: the SGL
1038 * @flits: number of flits to the start of the SGL in the first descriptor
1039 * @sgl_flits: the SGL size in flits
1040 * @gen: the Tx descriptor generation
1041 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
1042 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
1043 *
1044 * Write a work request header and an associated SGL. If the SGL is
1045 * small enough to fit into one Tx descriptor it has already been written
1046 * and we just need to write the WR header. Otherwise we distribute the
1047 * SGL across the number of descriptors it spans.
1048 */
1049 static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
1050 struct tx_desc *d, unsigned int pidx,
1051 const struct sge_txq *q,
1052 const struct sg_ent *sgl,
1053 unsigned int flits, unsigned int sgl_flits,
1054 unsigned int gen, __be32 wr_hi,
1055 __be32 wr_lo)
1056 {
1057 struct work_request_hdr *wrp = (struct work_request_hdr *)d;
1058 struct tx_sw_desc *sd = &q->sdesc[pidx];
1059
1060 sd->skb = skb;
1061 if (need_skb_unmap()) {
1062 sd->fragidx = 0;
1063 sd->addr_idx = 0;
1064 sd->sflit = flits;
1065 }
1066
1067 if (likely(ndesc == 1)) {
1068 sd->eop = 1;
1069 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
1070 V_WR_SGLSFLT(flits)) | wr_hi;
1071 wmb();
1072 wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
1073 V_WR_GEN(gen)) | wr_lo;
1074 wr_gen2(d, gen);
1075 } else {
1076 unsigned int ogen = gen;
1077 const u64 *fp = (const u64 *)sgl;
1078 struct work_request_hdr *wp = wrp;
1079
1080 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
1081 V_WR_SGLSFLT(flits)) | wr_hi;
1082
1083 while (sgl_flits) {
1084 unsigned int avail = WR_FLITS - flits;
1085
1086 if (avail > sgl_flits)
1087 avail = sgl_flits;
1088 memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
1089 sgl_flits -= avail;
1090 ndesc--;
1091 if (!sgl_flits)
1092 break;
1093
1094 fp += avail;
1095 d++;
1096 sd->eop = 0;
1097 sd++;
1098 if (++pidx == q->size) {
1099 pidx = 0;
1100 gen ^= 1;
1101 d = q->desc;
1102 sd = q->sdesc;
1103 }
1104
1105 sd->skb = skb;
1106 wrp = (struct work_request_hdr *)d;
1107 wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
1108 V_WR_SGLSFLT(1)) | wr_hi;
1109 wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
1110 sgl_flits + 1)) |
1111 V_WR_GEN(gen)) | wr_lo;
1112 wr_gen2(d, gen);
1113 flits = 1;
1114 }
1115 sd->eop = 1;
1116 wrp->wr_hi |= htonl(F_WR_EOP);
1117 wmb();
1118 wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
1119 wr_gen2((struct tx_desc *)wp, ogen);
1120 WARN_ON(ndesc != 0);
1121 }
1122 }
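/*
 * Worked example (a sketch, assuming WR_FLITS == 15): a WR with 3 header
 * flits and a 20-flit SGL spans flits_to_desc(23) = 2 descriptors.  The
 * first descriptor carries the header plus 12 SGL flits; the second starts
 * with a 1-flit WR header and carries the remaining 8 SGL flits.
 */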
1123
1124 /**
1125 * write_tx_pkt_wr - write a TX_PKT work request
1126 * @adap: the adapter
1127 * @skb: the packet to send
1128 * @pi: the egress interface
1129 * @pidx: index of the first Tx descriptor to write
1130 * @gen: the generation value to use
1131 * @q: the Tx queue
1132 * @ndesc: number of descriptors the packet will occupy
1133 * @compl: the value of the COMPL bit to use
1134 *
1135 * Generate a TX_PKT work request to send the supplied packet.
1136 */
1137 static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
1138 const struct port_info *pi,
1139 unsigned int pidx, unsigned int gen,
1140 struct sge_txq *q, unsigned int ndesc,
1141 unsigned int compl)
1142 {
1143 unsigned int flits, sgl_flits, cntrl, tso_info;
1144 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1145 struct tx_desc *d = &q->desc[pidx];
1146 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
1147
1148 cpl->len = htonl(skb->len);
1149 cntrl = V_TXPKT_INTF(pi->port_id);
1150
1151 if (vlan_tx_tag_present(skb))
1152 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
1153
1154 tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
1155 if (tso_info) {
1156 int eth_type;
1157 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
1158
1159 d->flit[2] = 0;
1160 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
1161 hdr->cntrl = htonl(cntrl);
1162 eth_type = skb_network_offset(skb) == ETH_HLEN ?
1163 CPL_ETH_II : CPL_ETH_II_VLAN;
1164 tso_info |= V_LSO_ETH_TYPE(eth_type) |
1165 V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
1166 V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
1167 hdr->lso_info = htonl(tso_info);
1168 flits = 3;
1169 } else {
1170 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1171 cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */
1172 cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
1173 cpl->cntrl = htonl(cntrl);
1174
1175 if (skb->len <= WR_LEN - sizeof(*cpl)) {
1176 q->sdesc[pidx].skb = NULL;
1177 if (!skb->data_len)
1178 skb_copy_from_linear_data(skb, &d->flit[2],
1179 skb->len);
1180 else
1181 skb_copy_bits(skb, 0, &d->flit[2], skb->len);
1182
1183 flits = (skb->len + 7) / 8 + 2;
1184 cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
1185 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
1186 | F_WR_SOP | F_WR_EOP | compl);
1187 wmb();
1188 cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
1189 V_WR_TID(q->token));
1190 wr_gen2(d, gen);
1191 kfree_skb(skb);
1192 return;
1193 }
1194
1195 flits = 2;
1196 }
1197
1198 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1199 sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
1200
1201 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
1202 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
1203 htonl(V_WR_TID(q->token)));
1204 }
1205
1206 static inline void t3_stop_tx_queue(struct netdev_queue *txq,
1207 struct sge_qset *qs, struct sge_txq *q)
1208 {
1209 netif_tx_stop_queue(txq);
1210 set_bit(TXQ_ETH, &qs->txq_stopped);
1211 q->stops++;
1212 }
1213
1214 /**
1215 * t3_eth_xmit - add a packet to the Ethernet Tx queue
1216 * @skb: the packet
1217 * @dev: the egress net device
1218 *
1219 * Add a packet to an SGE Tx queue. Runs with softirqs disabled.
1220 */
1221 netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1222 {
1223 int qidx;
1224 unsigned int ndesc, pidx, credits, gen, compl;
1225 const struct port_info *pi = netdev_priv(dev);
1226 struct adapter *adap = pi->adapter;
1227 struct netdev_queue *txq;
1228 struct sge_qset *qs;
1229 struct sge_txq *q;
1230
1231 /*
1232 * The chip min packet length is 9 octets but play safe and reject
1233 * anything shorter than an Ethernet header.
1234 */
1235 if (unlikely(skb->len < ETH_HLEN)) {
1236 dev_kfree_skb(skb);
1237 return NETDEV_TX_OK;
1238 }
1239
1240 qidx = skb_get_queue_mapping(skb);
1241 qs = &pi->qs[qidx];
1242 q = &qs->txq[TXQ_ETH];
1243 txq = netdev_get_tx_queue(dev, qidx);
1244
1245 reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1246
1247 credits = q->size - q->in_use;
1248 ndesc = calc_tx_descs(skb);
1249
1250 if (unlikely(credits < ndesc)) {
1251 t3_stop_tx_queue(txq, qs, q);
1252 dev_err(&adap->pdev->dev,
1253 "%s: Tx ring %u full while queue awake!\n",
1254 dev->name, q->cntxt_id & 7);
1255 return NETDEV_TX_BUSY;
1256 }
1257
1258 q->in_use += ndesc;
1259 if (unlikely(credits - ndesc < q->stop_thres)) {
1260 t3_stop_tx_queue(txq, qs, q);
1261
1262 if (should_restart_tx(q) &&
1263 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1264 q->restarts++;
1265 netif_tx_start_queue(txq);
1266 }
1267 }
1268
1269 gen = q->gen;
1270 q->unacked += ndesc;
1271 compl = (q->unacked & 8) << (S_WR_COMPL - 3);
1272 q->unacked &= 7;
1273 pidx = q->pidx;
1274 q->pidx += ndesc;
1275 if (q->pidx >= q->size) {
1276 q->pidx -= q->size;
1277 q->gen ^= 1;
1278 }
1279
1280 /* update port statistics */
1281 if (skb->ip_summed == CHECKSUM_COMPLETE)
1282 qs->port_stats[SGE_PSTAT_TX_CSUM]++;
1283 if (skb_shinfo(skb)->gso_size)
1284 qs->port_stats[SGE_PSTAT_TSO]++;
1285 if (vlan_tx_tag_present(skb))
1286 qs->port_stats[SGE_PSTAT_VLANINS]++;
1287
1288 /*
1289 * We do not use Tx completion interrupts to free DMAd Tx packets.
1290 * This is good for performance but means that we rely on new Tx
1291 * packets arriving to run the destructors of completed packets,
1292 * which open up space in their sockets' send queues. Sometimes
1293 * we do not get such new packets causing Tx to stall. A single
1294 * UDP transmitter is a good example of this situation. We have
1295 * a clean up timer that periodically reclaims completed packets
1296 * but it doesn't run often enough (nor do we want it to) to prevent
1297 * lengthy stalls. A solution to this problem is to run the
1298 * destructor early, after the packet is queued but before it's DMAd.
1299 * A cons is that we lie to socket memory accounting, but the amount
1300 * of extra memory is reasonable (limited by the number of Tx
1301 * descriptors), the packets do actually get freed quickly by new
1302 * packets almost always, and for protocols like TCP that wait for
1303 * acks to really free up the data the extra memory is even less.
1304 * On the positive side we run the destructors on the sending CPU
1305 * rather than on a potentially different completing CPU, usually a
1306 * good thing. We also run them without holding our Tx queue lock,
1307 * unlike what reclaim_completed_tx() would otherwise do.
1308 *
1309 * Run the destructor before telling the DMA engine about the packet
1310 * to make sure it doesn't complete and get freed prematurely.
1311 */
1312 if (likely(!skb_shared(skb)))
1313 skb_orphan(skb);
1314
1315 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
1316 check_ring_tx_db(adap, q);
1317 return NETDEV_TX_OK;
1318 }
1319
1320 /**
1321 * write_imm - write a packet into a Tx descriptor as immediate data
1322 * @d: the Tx descriptor to write
1323 * @skb: the packet
1324 * @len: the length of packet data to write as immediate data
1325 * @gen: the generation bit value to write
1326 *
1327 * Writes a packet as immediate data into a Tx descriptor. The packet
1328 * contains a work request at its beginning. We must write the packet
1329 * carefully so the SGE doesn't read it accidentally before it's written
1330 * in its entirety.
1331 */
1332 static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
1333 unsigned int len, unsigned int gen)
1334 {
1335 struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
1336 struct work_request_hdr *to = (struct work_request_hdr *)d;
1337
1338 if (likely(!skb->data_len))
1339 memcpy(&to[1], &from[1], len - sizeof(*from));
1340 else
1341 skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));
1342
1343 to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
1344 V_WR_BCNTLFLT(len & 7));
1345 wmb();
1346 to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
1347 V_WR_LEN((len + 7) / 8));
1348 wr_gen2(d, gen);
1349 kfree_skb(skb);
1350 }
1351
1352 /**
1353 * check_desc_avail - check descriptor availability on a send queue
1354 * @adap: the adapter
1355 * @q: the send queue
1356 * @skb: the packet needing the descriptors
1357 * @ndesc: the number of Tx descriptors needed
1358 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1359 *
1360 * Checks if the requested number of Tx descriptors is available on an
1361 * SGE send queue. If the queue is already suspended or not enough
1362 * descriptors are available the packet is queued for later transmission.
1363 * Must be called with the Tx queue locked.
1364 *
1365 * Returns 0 if enough descriptors are available, 1 if there aren't
1366 * enough descriptors and the packet has been queued, and 2 if the caller
1367 * needs to retry because there weren't enough descriptors at the
1368 * beginning of the call but some freed up in the mean time.
1369 */
1370 static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
1371 struct sk_buff *skb, unsigned int ndesc,
1372 unsigned int qid)
1373 {
1374 if (unlikely(!skb_queue_empty(&q->sendq))) {
1375 addq_exit:__skb_queue_tail(&q->sendq, skb);
1376 return 1;
1377 }
1378 if (unlikely(q->size - q->in_use < ndesc)) {
1379 struct sge_qset *qs = txq_to_qset(q, qid);
1380
1381 set_bit(qid, &qs->txq_stopped);
1382 smp_mb__after_clear_bit();
1383
1384 if (should_restart_tx(q) &&
1385 test_and_clear_bit(qid, &qs->txq_stopped))
1386 return 2;
1387
1388 q->stops++;
1389 goto addq_exit;
1390 }
1391 return 0;
1392 }
1393
1394 /**
1395 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1396 * @q: the SGE control Tx queue
1397 *
1398 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1399 * that send only immediate data (presently just the control queues) and
1400 * thus do not have any sk_buffs to release.
1401 */
1402 static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1403 {
1404 unsigned int reclaim = q->processed - q->cleaned;
1405
1406 q->in_use -= reclaim;
1407 q->cleaned += reclaim;
1408 }
1409
1410 static inline int immediate(const struct sk_buff *skb)
1411 {
1412 return skb->len <= WR_LEN;
1413 }
1414
1415 /**
1416 * ctrl_xmit - send a packet through an SGE control Tx queue
1417 * @adap: the adapter
1418 * @q: the control queue
1419 * @skb: the packet
1420 *
1421 * Send a packet through an SGE control Tx queue. Packets sent through
1422 * a control queue must fit entirely as immediate data in a single Tx
1423 * descriptor and have no page fragments.
1424 */
1425 static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
1426 struct sk_buff *skb)
1427 {
1428 int ret;
1429 struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
1430
1431 if (unlikely(!immediate(skb))) {
1432 WARN_ON(1);
1433 dev_kfree_skb(skb);
1434 return NET_XMIT_SUCCESS;
1435 }
1436
1437 wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
1438 wrp->wr_lo = htonl(V_WR_TID(q->token));
1439
1440 spin_lock(&q->lock);
1441 again:reclaim_completed_tx_imm(q);
1442
1443 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
1444 if (unlikely(ret)) {
1445 if (ret == 1) {
1446 spin_unlock(&q->lock);
1447 return NET_XMIT_CN;
1448 }
1449 goto again;
1450 }
1451
1452 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1453
1454 q->in_use++;
1455 if (++q->pidx >= q->size) {
1456 q->pidx = 0;
1457 q->gen ^= 1;
1458 }
1459 spin_unlock(&q->lock);
1460 wmb();
1461 t3_write_reg(adap, A_SG_KDOORBELL,
1462 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1463 return NET_XMIT_SUCCESS;
1464 }
1465
1466 /**
1467 * restart_ctrlq - restart a suspended control queue
1468 * @qs: the queue set containing the control queue
1469 *
1470 * Resumes transmission on a suspended Tx control queue.
1471 */
1472 static void restart_ctrlq(unsigned long data)
1473 {
1474 struct sk_buff *skb;
1475 struct sge_qset *qs = (struct sge_qset *)data;
1476 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1477
1478 spin_lock(&q->lock);
1479 again:reclaim_completed_tx_imm(q);
1480
1481 while (q->in_use < q->size &&
1482 (skb = __skb_dequeue(&q->sendq)) != NULL) {
1483
1484 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1485
1486 if (++q->pidx >= q->size) {
1487 q->pidx = 0;
1488 q->gen ^= 1;
1489 }
1490 q->in_use++;
1491 }
1492
1493 if (!skb_queue_empty(&q->sendq)) {
1494 set_bit(TXQ_CTRL, &qs->txq_stopped);
1495 smp_mb__after_clear_bit();
1496
1497 if (should_restart_tx(q) &&
1498 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1499 goto again;
1500 q->stops++;
1501 }
1502
1503 spin_unlock(&q->lock);
1504 wmb();
1505 t3_write_reg(qs->adap, A_SG_KDOORBELL,
1506 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1507 }
1508
1509 /*
1510 * Send a management message through control queue 0
1511 */
1512 int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1513 {
1514 int ret;
1515 local_bh_disable();
1516 ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
1517 local_bh_enable();
1518
1519 return ret;
1520 }
1521
1522 /**
1523 * deferred_unmap_destructor - unmap a packet when it is freed
1524 * @skb: the packet
1525 *
1526 * This is the packet destructor used for Tx packets that need to remain
1527 * mapped until they are freed rather than until their Tx descriptors are
1528 * freed.
1529 */
1530 static void deferred_unmap_destructor(struct sk_buff *skb)
1531 {
1532 int i;
1533 const dma_addr_t *p;
1534 const struct skb_shared_info *si;
1535 const struct deferred_unmap_info *dui;
1536
1537 dui = (struct deferred_unmap_info *)skb->head;
1538 p = dui->addr;
1539
1540 if (skb->tail - skb->transport_header)
1541 pci_unmap_single(dui->pdev, *p++,
1542 skb->tail - skb->transport_header,
1543 PCI_DMA_TODEVICE);
1544
1545 si = skb_shinfo(skb);
1546 for (i = 0; i < si->nr_frags; i++)
1547 pci_unmap_page(dui->pdev, *p++, skb_frag_size(&si->frags[i]),
1548 PCI_DMA_TODEVICE);
1549 }
1550
1551 static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
1552 const struct sg_ent *sgl, int sgl_flits)
1553 {
1554 dma_addr_t *p;
1555 struct deferred_unmap_info *dui;
1556
1557 dui = (struct deferred_unmap_info *)skb->head;
1558 dui->pdev = pdev;
1559 for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
1560 *p++ = be64_to_cpu(sgl->addr[0]);
1561 *p++ = be64_to_cpu(sgl->addr[1]);
1562 }
1563 if (sgl_flits)
1564 *p = be64_to_cpu(sgl->addr[0]);
1565 }
1566
1567 /**
1568 * write_ofld_wr - write an offload work request
1569 * @adap: the adapter
1570 * @skb: the packet to send
1571 * @q: the Tx queue
1572 * @pidx: index of the first Tx descriptor to write
1573 * @gen: the generation value to use
1574 * @ndesc: number of descriptors the packet will occupy
1575 *
1576 * Write an offload work request to send the supplied packet. The packet
1577 * data already carry the work request with most fields populated.
1578 */
1579 static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1580 struct sge_txq *q, unsigned int pidx,
1581 unsigned int gen, unsigned int ndesc)
1582 {
1583 unsigned int sgl_flits, flits;
1584 struct work_request_hdr *from;
1585 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1586 struct tx_desc *d = &q->desc[pidx];
1587
1588 if (immediate(skb)) {
1589 q->sdesc[pidx].skb = NULL;
1590 write_imm(d, skb, skb->len, gen);
1591 return;
1592 }
1593
1594 /* Only TX_DATA builds SGLs */
1595
1596 from = (struct work_request_hdr *)skb->data;
1597 memcpy(&d->flit[1], &from[1],
1598 skb_transport_offset(skb) - sizeof(*from));
1599
1600 flits = skb_transport_offset(skb) / 8;
1601 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1602 sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
1603 skb->tail - skb->transport_header,
1604 adap->pdev);
1605 if (need_skb_unmap()) {
1606 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
1607 skb->destructor = deferred_unmap_destructor;
1608 }
1609
1610 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1611 gen, from->wr_hi, from->wr_lo);
1612 }
1613
1614 /**
1615 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1616 * @skb: the packet
1617 *
1618 * Returns the number of Tx descriptors needed for the given offload
1619 * packet. These packets are already fully constructed.
1620 */
1621 static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1622 {
1623 unsigned int flits, cnt;
1624
1625 if (skb->len <= WR_LEN)
1626 return 1; /* packet fits as immediate data */
1627
1628 flits = skb_transport_offset(skb) / 8; /* headers */
1629 cnt = skb_shinfo(skb)->nr_frags;
1630 if (skb->tail != skb->transport_header)
1631 cnt++;
1632 return flits_to_desc(flits + sgl_len(cnt));
1633 }
1634
1635 /**
1636 * ofld_xmit - send a packet through an offload queue
1637 * @adap: the adapter
1638 * @q: the Tx offload queue
1639 * @skb: the packet
1640 *
1641 * Send an offload packet through an SGE offload queue.
1642 */
1643 static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1644 struct sk_buff *skb)
1645 {
1646 int ret;
1647 unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1648
1649 spin_lock(&q->lock);
1650 again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1651
1652 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1653 if (unlikely(ret)) {
1654 if (ret == 1) {
1655 skb->priority = ndesc; /* save for restart */
1656 spin_unlock(&q->lock);
1657 return NET_XMIT_CN;
1658 }
1659 goto again;
1660 }
1661
1662 gen = q->gen;
1663 q->in_use += ndesc;
1664 pidx = q->pidx;
1665 q->pidx += ndesc;
1666 if (q->pidx >= q->size) {
1667 q->pidx -= q->size;
1668 q->gen ^= 1;
1669 }
1670 spin_unlock(&q->lock);
1671
1672 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1673 check_ring_tx_db(adap, q);
1674 return NET_XMIT_SUCCESS;
1675 }
1676
1677 /**
1678 * restart_offloadq - restart a suspended offload queue
1679 * @qs: the queue set containing the offload queue
1680 *
1681 * Resumes transmission on a suspended Tx offload queue.
1682 */
1683 static void restart_offloadq(unsigned long data)
1684 {
1685 struct sk_buff *skb;
1686 struct sge_qset *qs = (struct sge_qset *)data;
1687 struct sge_txq *q = &qs->txq[TXQ_OFLD];
1688 const struct port_info *pi = netdev_priv(qs->netdev);
1689 struct adapter *adap = pi->adapter;
1690
1691 spin_lock(&q->lock);
1692 again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1693
1694 while ((skb = skb_peek(&q->sendq)) != NULL) {
1695 unsigned int gen, pidx;
1696 unsigned int ndesc = skb->priority;
1697
1698 if (unlikely(q->size - q->in_use < ndesc)) {
1699 set_bit(TXQ_OFLD, &qs->txq_stopped);
1700 smp_mb__after_clear_bit();
1701
1702 if (should_restart_tx(q) &&
1703 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1704 goto again;
1705 q->stops++;
1706 break;
1707 }
1708
1709 gen = q->gen;
1710 q->in_use += ndesc;
1711 pidx = q->pidx;
1712 q->pidx += ndesc;
1713 if (q->pidx >= q->size) {
1714 q->pidx -= q->size;
1715 q->gen ^= 1;
1716 }
1717 __skb_unlink(skb, &q->sendq);
1718 spin_unlock(&q->lock);
1719
1720 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1721 spin_lock(&q->lock);
1722 }
1723 spin_unlock(&q->lock);
1724
1725 #if USE_GTS
1726 set_bit(TXQ_RUNNING, &q->flags);
1727 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1728 #endif
1729 wmb();
1730 t3_write_reg(adap, A_SG_KDOORBELL,
1731 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1732 }
1733
1734 /**
1735 * queue_set - return the queue set a packet should use
1736 * @skb: the packet
1737 *
1738 * Maps a packet to the SGE queue set it should use. The desired queue
1739 * set is carried in bits 1-3 in the packet's priority.
1740 */
1741 static inline int queue_set(const struct sk_buff *skb)
1742 {
1743 return skb->priority >> 1;
1744 }
1745
1746 /**
1747 * is_ctrl_pkt - return whether an offload packet is a control packet
1748 * @skb: the packet
1749 *
1750 * Determines whether an offload packet should use an OFLD or a CTRL
1751 * Tx queue. This is indicated by bit 0 in the packet's priority.
1752 */
1753 static inline int is_ctrl_pkt(const struct sk_buff *skb)
1754 {
1755 return skb->priority & 1;
1756 }
1757
1758 /**
1759 * t3_offload_tx - send an offload packet
1760 * @tdev: the offload device to send to
1761 * @skb: the packet
1762 *
1763 * Sends an offload packet. We use the packet priority to select the
1764 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1765 * should be sent as regular or control, bits 1-3 select the queue set.
1766 */
1767 int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1768 {
1769 struct adapter *adap = tdev2adap(tdev);
1770 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1771
1772 if (unlikely(is_ctrl_pkt(skb)))
1773 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1774
1775 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1776 }
1777
1778 /**
1779 * offload_enqueue - add an offload packet to an SGE offload receive queue
1780 * @q: the SGE response queue
1781 * @skb: the packet
1782 *
1783 * Add a new offload packet to an SGE response queue's offload packet
1784 * queue. If the packet is the first on the queue it schedules the RX
1785 * softirq to process the queue.
1786 */
1787 static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1788 {
1789 int was_empty = skb_queue_empty(&q->rx_queue);
1790
1791 __skb_queue_tail(&q->rx_queue, skb);
1792
1793 if (was_empty) {
1794 struct sge_qset *qs = rspq_to_qset(q);
1795
1796 napi_schedule(&qs->napi);
1797 }
1798 }
1799
1800 /**
1801 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1802 * @tdev: the offload device that will be receiving the packets
1803 * @q: the SGE response queue that assembled the bundle
1804 * @skbs: the partial bundle
1805 * @n: the number of packets in the bundle
1806 *
1807 * Delivers a (partial) bundle of Rx offload packets to an offload device.
1808 */
1809 static inline void deliver_partial_bundle(struct t3cdev *tdev,
1810 struct sge_rspq *q,
1811 struct sk_buff *skbs[], int n)
1812 {
1813 if (n) {
1814 q->offload_bundles++;
1815 tdev->recv(tdev, skbs, n);
1816 }
1817 }
1818
1819 /**
1820 * ofld_poll - NAPI handler for offload packets in interrupt mode
1821 * @napi: the napi instance
1822 * @budget: polling budget
1823 *
1824 * The NAPI handler for offload packets when a response queue is serviced
1825 * by the hard interrupt handler, i.e., when it's operating in non-polling
1826 * mode. Creates small packet batches and sends them through the offload
1827 * receive handler. Batches need to be of modest size as we do prefetches
1828 * on the packets in each.
1829 */
1830 static int ofld_poll(struct napi_struct *napi, int budget)
1831 {
1832 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
1833 struct sge_rspq *q = &qs->rspq;
1834 struct adapter *adapter = qs->adap;
1835 int work_done = 0;
1836
1837 while (work_done < budget) {
1838 struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
1839 struct sk_buff_head queue;
1840 int ngathered;
1841
1842 spin_lock_irq(&q->lock);
1843 __skb_queue_head_init(&queue);
1844 skb_queue_splice_init(&q->rx_queue, &queue);
1845 if (skb_queue_empty(&queue)) {
1846 napi_complete(napi);
1847 spin_unlock_irq(&q->lock);
1848 return work_done;
1849 }
1850 spin_unlock_irq(&q->lock);
1851
1852 ngathered = 0;
1853 skb_queue_walk_safe(&queue, skb, tmp) {
1854 if (work_done >= budget)
1855 break;
1856 work_done++;
1857
1858 __skb_unlink(skb, &queue);
1859 prefetch(skb->data);
1860 skbs[ngathered] = skb;
1861 if (++ngathered == RX_BUNDLE_SIZE) {
1862 q->offload_bundles++;
1863 adapter->tdev.recv(&adapter->tdev, skbs,
1864 ngathered);
1865 ngathered = 0;
1866 }
1867 }
1868 if (!skb_queue_empty(&queue)) {
1869 /* splice remaining packets back onto Rx queue */
1870 spin_lock_irq(&q->lock);
1871 skb_queue_splice(&queue, &q->rx_queue);
1872 spin_unlock_irq(&q->lock);
1873 }
1874 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1875 }
1876
1877 return work_done;
1878 }
1879
1880 /**
1881 * rx_offload - process a received offload packet
1882 * @tdev: the offload device receiving the packet
1883 * @rq: the response queue that received the packet
1884 * @skb: the packet
1885 * @rx_gather: a gather list of packets if we are building a bundle
1886 * @gather_idx: index of the next available slot in the bundle
1887 *
1888 * Process an ingress offload packet and add it to the offload ingress
1889 * queue. Returns the index of the next available slot in the bundle.
1890 */
1891 static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1892 struct sk_buff *skb, struct sk_buff *rx_gather[],
1893 unsigned int gather_idx)
1894 {
1895 skb_reset_mac_header(skb);
1896 skb_reset_network_header(skb);
1897 skb_reset_transport_header(skb);
1898
1899 if (rq->polling) {
1900 rx_gather[gather_idx++] = skb;
1901 if (gather_idx == RX_BUNDLE_SIZE) {
1902 tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1903 gather_idx = 0;
1904 rq->offload_bundles++;
1905 }
1906 } else
1907 offload_enqueue(rq, skb);
1908
1909 return gather_idx;
1910 }
1911
1912 /**
1913 * restart_tx - check whether to restart suspended Tx queues
1914 * @qs: the queue set to resume
1915 *
1916 * Restarts suspended Tx queues of an SGE queue set if they have enough
1917 * free resources to resume operation.
1918 */
1919 static void restart_tx(struct sge_qset *qs)
1920 {
1921 if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1922 should_restart_tx(&qs->txq[TXQ_ETH]) &&
1923 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1924 qs->txq[TXQ_ETH].restarts++;
1925 if (netif_running(qs->netdev))
1926 netif_tx_wake_queue(qs->tx_q);
1927 }
1928
1929 if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1930 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1931 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1932 qs->txq[TXQ_OFLD].restarts++;
1933 tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
1934 }
1935 if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
1936 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
1937 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
1938 qs->txq[TXQ_CTRL].restarts++;
1939 tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
1940 }
1941 }
1942
1943 /**
1944 * cxgb3_arp_process - process an ARP request probing a private IP address
1945 * @pi: the port information
1946 * @skb: the skbuff containing the ARP request
1947 *
1948 * Check if the ARP request is probing the private IP address
1949 * dedicated to iSCSI, generate an ARP reply if so.
1950 */
1951 static void cxgb3_arp_process(struct port_info *pi, struct sk_buff *skb)
1952 {
1953 struct net_device *dev = skb->dev;
1954 struct arphdr *arp;
1955 unsigned char *arp_ptr;
1956 unsigned char *sha;
1957 __be32 sip, tip;
1958
1959 if (!dev)
1960 return;
1961
1962 skb_reset_network_header(skb);
1963 arp = arp_hdr(skb);
1964
1965 if (arp->ar_op != htons(ARPOP_REQUEST))
1966 return;
1967
1968 arp_ptr = (unsigned char *)(arp + 1);
1969 sha = arp_ptr;
1970 arp_ptr += dev->addr_len;
1971 memcpy(&sip, arp_ptr, sizeof(sip));
1972 arp_ptr += sizeof(sip);
1973 arp_ptr += dev->addr_len;
1974 memcpy(&tip, arp_ptr, sizeof(tip));
1975
1976 if (tip != pi->iscsi_ipv4addr)
1977 return;
1978
1979 arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
1980 pi->iscsic.mac_addr, sha);
1981
1982 }
1983
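/* Returns true if the packet is an ARP frame. */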
1984 static inline int is_arp(struct sk_buff *skb)
1985 {
1986 return skb->protocol == htons(ETH_P_ARP);
1987 }
1988
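/*
 * Handle an iSCSI provisioning packet: answer ARP probes for the dedicated
 * iSCSI IP address and hand everything else to the iSCSI receive callback,
 * if one is registered.
 */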
1989 static void cxgb3_process_iscsi_prov_pack(struct port_info *pi,
1990 struct sk_buff *skb)
1991 {
1992 if (is_arp(skb)) {
1993 cxgb3_arp_process(pi, skb);
1994 return;
1995 }
1996
1997 if (pi->iscsic.recv)
1998 pi->iscsic.recv(pi, skb);
1999
2000 }
2001
2002 /**
2003 * rx_eth - process an ingress ethernet packet
2004 * @adap: the adapter
2005 * @rq: the response queue that received the packet
2006 * @skb: the packet
2007 * @pad: amount of padding at the start of the buffer
2008 *
2009 * Process an ingress ethernet packet and deliver it to the stack.
2010 * The padding is 2 if the packet was delivered in an Rx buffer and 0
2011 * if it was immediate data in a response.
2012 */
2013 static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
2014 struct sk_buff *skb, int pad, int lro)
2015 {
2016 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
2017 struct sge_qset *qs = rspq_to_qset(rq);
2018 struct port_info *pi;
2019
2020 skb_pull(skb, sizeof(*p) + pad);
2021 skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
2022 pi = netdev_priv(skb->dev);
2023 if ((skb->dev->features & NETIF_F_RXCSUM) && p->csum_valid &&
2024 p->csum == htons(0xffff) && !p->fragment) {
2025 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2026 skb->ip_summed = CHECKSUM_UNNECESSARY;
2027 } else
2028 skb_checksum_none_assert(skb);
2029 skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
2030
2031 if (p->vlan_valid) {
2032 qs->port_stats[SGE_PSTAT_VLANEX]++;
2033 __vlan_hwaccel_put_tag(skb, ntohs(p->vlan));
2034 }
2035 if (rq->polling) {
2036 if (lro)
2037 napi_gro_receive(&qs->napi, skb);
2038 else {
2039 if (unlikely(pi->iscsic.flags))
2040 cxgb3_process_iscsi_prov_pack(pi, skb);
2041 netif_receive_skb(skb);
2042 }
2043 } else
2044 netif_rx(skb);
2045 }
2046
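/*
 * Returns true if the RSS hash type indicates a 4-tuple (TCP) hash, i.e. the
 * packet is a candidate for LRO/GRO.
 */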
2047 static inline int is_eth_tcp(u32 rss)
2048 {
2049 return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
2050 }
2051
2052 /**
2053 * lro_add_page - add a page chunk to an LRO session
2054 * @adap: the adapter
2055 * @qs: the associated queue set
2056 * @fl: the free list containing the page chunk to add
2057 * @len: packet length
2058 * @complete: Indicates the last fragment of a frame
2059 *
2060 * Add a received packet contained in a page chunk to an existing LRO
2061 * session.
2062 */
2063 static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2064 struct sge_fl *fl, int len, int complete)
2065 {
2066 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
2067 struct port_info *pi = netdev_priv(qs->netdev);
2068 struct sk_buff *skb = NULL;
2069 struct cpl_rx_pkt *cpl;
2070 struct skb_frag_struct *rx_frag;
2071 int nr_frags;
2072 int offset = 0;
2073
2074 if (!qs->nomem) {
2075 skb = napi_get_frags(&qs->napi);
2076 qs->nomem = !skb;
2077 }
2078
2079 fl->credits--;
2080
2081 pci_dma_sync_single_for_cpu(adap->pdev,
2082 dma_unmap_addr(sd, dma_addr),
2083 fl->buf_size - SGE_PG_RSVD,
2084 PCI_DMA_FROMDEVICE);
2085
2086 (*sd->pg_chunk.p_cnt)--;
2087 if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
2088 pci_unmap_page(adap->pdev,
2089 sd->pg_chunk.mapping,
2090 fl->alloc_size,
2091 PCI_DMA_FROMDEVICE);
2092
2093 if (!skb) {
2094 put_page(sd->pg_chunk.page);
2095 if (complete)
2096 qs->nomem = 0;
2097 return;
2098 }
2099
2100 rx_frag = skb_shinfo(skb)->frags;
2101 nr_frags = skb_shinfo(skb)->nr_frags;
2102
2103 if (!nr_frags) {
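		/* First fragment: payload starts after the 2-byte pad and CPL header. */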
2104 offset = 2 + sizeof(struct cpl_rx_pkt);
2105 cpl = qs->lro_va = sd->pg_chunk.va + 2;
2106
2107 if ((qs->netdev->features & NETIF_F_RXCSUM) &&
2108 cpl->csum_valid && cpl->csum == htons(0xffff)) {
2109 skb->ip_summed = CHECKSUM_UNNECESSARY;
2110 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2111 } else
2112 skb->ip_summed = CHECKSUM_NONE;
2113 } else
2114 cpl = qs->lro_va;
2115
2116 len -= offset;
2117
2118 rx_frag += nr_frags;
2119 __skb_frag_set_page(rx_frag, sd->pg_chunk.page);
2120 rx_frag->page_offset = sd->pg_chunk.offset + offset;
2121 skb_frag_size_set(rx_frag, len);
2122
2123 skb->len += len;
2124 skb->data_len += len;
2125 skb->truesize += len;
2126 skb_shinfo(skb)->nr_frags++;
2127
2128 if (!complete)
2129 return;
2130
2131 skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
2132
2133 if (cpl->vlan_valid)
2134 __vlan_hwaccel_put_tag(skb, ntohs(cpl->vlan));
2135 napi_gro_frags(&qs->napi);
2136 }
2137
2138 /**
2139 * handle_rsp_cntrl_info - handles control information in a response
2140 * @qs: the queue set corresponding to the response
2141 * @flags: the response control flags
2142 *
2143 * Handles the control information of an SGE response, such as GTS
2144 * indications and completion credits for the queue set's Tx queues.
2145 * HW coalesces credits, we don't do any extra SW coalescing.
2146 */
2147 static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
2148 {
2149 unsigned int credits;
2150
2151 #if USE_GTS
2152 if (flags & F_RSPD_TXQ0_GTS)
2153 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2154 #endif
2155
2156 credits = G_RSPD_TXQ0_CR(flags);
2157 if (credits)
2158 qs->txq[TXQ_ETH].processed += credits;
2159
2160 credits = G_RSPD_TXQ2_CR(flags);
2161 if (credits)
2162 qs->txq[TXQ_CTRL].processed += credits;
2163
2164 # if USE_GTS
2165 if (flags & F_RSPD_TXQ1_GTS)
2166 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2167 # endif
2168 credits = G_RSPD_TXQ1_CR(flags);
2169 if (credits)
2170 qs->txq[TXQ_OFLD].processed += credits;
2171 }
2172
2173 /**
2174 * check_ring_db - check if we need to ring any doorbells
2175 * @adapter: the adapter
2176 * @qs: the queue set whose Tx queues are to be examined
2177 * @sleeping: indicates which Tx queue sent GTS
2178 *
2179 * Checks if some of a queue set's Tx queues need to ring their doorbells
2180 * to resume transmission after idling while they still have unprocessed
2181 * descriptors.
2182 */
2183 static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
2184 unsigned int sleeping)
2185 {
2186 if (sleeping & F_RSPD_TXQ0_GTS) {
2187 struct sge_txq *txq = &qs->txq[TXQ_ETH];
2188
2189 if (txq->cleaned + txq->in_use != txq->processed &&
2190 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2191 set_bit(TXQ_RUNNING, &txq->flags);
2192 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2193 V_EGRCNTX(txq->cntxt_id));
2194 }
2195 }
2196
2197 if (sleeping & F_RSPD_TXQ1_GTS) {
2198 struct sge_txq *txq = &qs->txq[TXQ_OFLD];
2199
2200 if (txq->cleaned + txq->in_use != txq->processed &&
2201 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2202 set_bit(TXQ_RUNNING, &txq->flags);
2203 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2204 V_EGRCNTX(txq->cntxt_id));
2205 }
2206 }
2207 }
2208
2209 /**
2210 * is_new_response - check if a response is newly written
2211 * @r: the response descriptor
2212 * @q: the response queue
2213 *
2214 * Returns true if a response descriptor contains a yet unprocessed
2215 * response.
2216 */
2217 static inline int is_new_response(const struct rsp_desc *r,
2218 const struct sge_rspq *q)
2219 {
2220 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
2221 }
2222
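/* Reset the state used to track a packet being assembled from page chunks. */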
2223 static inline void clear_rspq_bufstate(struct sge_rspq * const q)
2224 {
2225 q->pg_skb = NULL;
2226 q->rx_recycle_buf = 0;
2227 }
2228
2229 #define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
2230 #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
2231 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
2232 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
2233 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
2234
2235 /* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
2236 #define NOMEM_INTR_DELAY 2500
2237
2238 /**
2239 * process_responses - process responses from an SGE response queue
2240 * @adap: the adapter
2241 * @qs: the queue set to which the response queue belongs
2242 * @budget: how many responses can be processed in this round
2243 *
2244 * Process responses from an SGE response queue up to the supplied budget.
2245 * Responses include received packets as well as credits and other events
2246 * for the queues that belong to the response queue's queue set.
2247 * A negative budget is effectively unlimited.
2248 *
2249 * Additionally choose the interrupt holdoff time for the next interrupt
2250 * on this queue. If the system is under memory shortage use a fairly
2251 * long delay to help recovery.
2252 */
2253 static int process_responses(struct adapter *adap, struct sge_qset *qs,
2254 int budget)
2255 {
2256 struct sge_rspq *q = &qs->rspq;
2257 struct rsp_desc *r = &q->desc[q->cidx];
2258 int budget_left = budget;
2259 unsigned int sleeping = 0;
2260 struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
2261 int ngathered = 0;
2262
2263 q->next_holdoff = q->holdoff_tmr;
2264
2265 while (likely(budget_left && is_new_response(r, q))) {
2266 int packet_complete, eth, ethpad = 2;
2267 int lro = !!(qs->netdev->features & NETIF_F_GRO);
2268 struct sk_buff *skb = NULL;
2269 u32 len, flags;
2270 __be32 rss_hi, rss_lo;
2271
2272 rmb();
2273 eth = r->rss_hdr.opcode == CPL_RX_PKT;
2274 rss_hi = *(const __be32 *)r;
2275 rss_lo = r->rss_hdr.rss_hash_val;
2276 flags = ntohl(r->flags);
2277
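		/*
		 * A response carries either an async notification, immediate
		 * data, or the length of a packet in a free-list buffer;
		 * anything else is a pure credit/GTS response.
		 */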
2278 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
2279 skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
2280 if (!skb)
2281 goto no_mem;
2282
2283 memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
2284 skb->data[0] = CPL_ASYNC_NOTIF;
2285 rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
2286 q->async_notif++;
2287 } else if (flags & F_RSPD_IMM_DATA_VALID) {
2288 skb = get_imm_packet(r);
2289 if (unlikely(!skb)) {
2290 no_mem:
2291 q->next_holdoff = NOMEM_INTR_DELAY;
2292 q->nomem++;
2293 /* consume one credit since we tried */
2294 budget_left--;
2295 break;
2296 }
2297 q->imm_data++;
2298 ethpad = 0;
2299 } else if ((len = ntohl(r->len_cq)) != 0) {
2300 struct sge_fl *fl;
2301
2302 lro &= eth && is_eth_tcp(rss_hi);
2303
2304 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2305 if (fl->use_pages) {
2306 void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
2307
2308 prefetch(addr);
2309 #if L1_CACHE_BYTES < 128
2310 prefetch(addr + L1_CACHE_BYTES);
2311 #endif
2312 __refill_fl(adap, fl);
2313 if (lro > 0) {
2314 lro_add_page(adap, qs, fl,
2315 G_RSPD_LEN(len),
2316 flags & F_RSPD_EOP);
2317 goto next_fl;
2318 }
2319
2320 skb = get_packet_pg(adap, fl, q,
2321 G_RSPD_LEN(len),
2322 eth ?
2323 SGE_RX_DROP_THRES : 0);
2324 q->pg_skb = skb;
2325 } else
2326 skb = get_packet(adap, fl, G_RSPD_LEN(len),
2327 eth ? SGE_RX_DROP_THRES : 0);
2328 if (unlikely(!skb)) {
2329 if (!eth)
2330 goto no_mem;
2331 q->rx_drops++;
2332 } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
2333 __skb_pull(skb, 2);
2334 next_fl:
2335 if (++fl->cidx == fl->size)
2336 fl->cidx = 0;
2337 } else
2338 q->pure_rsps++;
2339
2340 if (flags & RSPD_CTRL_MASK) {
2341 sleeping |= flags & RSPD_GTS_MASK;
2342 handle_rsp_cntrl_info(qs, flags);
2343 }
2344
2345 r++;
2346 if (unlikely(++q->cidx == q->size)) {
2347 q->cidx = 0;
2348 q->gen ^= 1;
2349 r = q->desc;
2350 }
2351 prefetch(r);
2352
2353 if (++q->credits >= (q->size / 4)) {
2354 refill_rspq(adap, q, q->credits);
2355 q->credits = 0;
2356 }
2357
2358 packet_complete = flags &
2359 (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
2360 F_RSPD_ASYNC_NOTIF);
2361
2362 if (skb != NULL && packet_complete) {
2363 if (eth)
2364 rx_eth(adap, q, skb, ethpad, lro);
2365 else {
2366 q->offload_pkts++;
2367 /* Preserve the RSS info in csum & priority */
2368 skb->csum = rss_hi;
2369 skb->priority = rss_lo;
2370 ngathered = rx_offload(&adap->tdev, q, skb,
2371 offload_skbs,
2372 ngathered);
2373 }
2374
2375 if (flags & F_RSPD_EOP)
2376 clear_rspq_bufstate(q);
2377 }
2378 --budget_left;
2379 }
2380
2381 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
2382
2383 if (sleeping)
2384 check_ring_db(adap, qs, sleeping);
2385
2386 smp_mb(); /* commit Tx queue .processed updates */
2387 if (unlikely(qs->txq_stopped != 0))
2388 restart_tx(qs);
2389
2390 budget -= budget_left;
2391 return budget;
2392 }
2393
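/*
 * A pure response carries no data: no async notification, no immediate data,
 * and a zero length/free-list field.
 */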
2394 static inline int is_pure_response(const struct rsp_desc *r)
2395 {
2396 __be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
2397
2398 return (n | r->len_cq) == 0;
2399 }
2400
2401 /**
2402 * napi_rx_handler - the NAPI handler for Rx processing
2403 * @napi: the napi instance
2404 * @budget: how many packets we can process in this round
2405 *
2406 * Handler for new data events when using NAPI.
2407 */
2408 static int napi_rx_handler(struct napi_struct *napi, int budget)
2409 {
2410 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
2411 struct adapter *adap = qs->adap;
2412 int work_done = process_responses(adap, qs, budget);
2413
2414 if (likely(work_done < budget)) {
2415 napi_complete(napi);
2416
2417 /*
2418 * Because we don't atomically flush the following
2419 * write it is possible that in very rare cases it can
2420 * reach the device in a way that races with a new
2421 * response being written plus an error interrupt
2422 * causing the NAPI interrupt handler below to return
2423 * unhandled status to the OS. To protect against
2424 * this would require flushing the write and doing
2425 * both the write and the flush with interrupts off.
2426 * Way too expensive and unjustifiable given the
2427 * rarity of the race.
2428 *
2429 * The race cannot happen at all with MSI-X.
2430 */
2431 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
2432 V_NEWTIMER(qs->rspq.next_holdoff) |
2433 V_NEWINDEX(qs->rspq.cidx));
2434 }
2435 return work_done;
2436 }
2437
2438 /*
2439 * Returns true if the device is already scheduled for polling.
2440 */
2441 static inline int napi_is_scheduled(struct napi_struct *napi)
2442 {
2443 return test_bit(NAPI_STATE_SCHED, &napi->state);
2444 }
2445
2446 /**
2447 * process_pure_responses - process pure responses from a response queue
2448 * @adap: the adapter
2449 * @qs: the queue set owning the response queue
2450 * @r: the first pure response to process
2451 *
2452 * A simpler version of process_responses() that handles only pure (i.e.,
2453 * non data-carrying) responses. Such responses are too light-weight to
2454 * justify calling a softirq under NAPI, so we handle them specially in
2455 * the interrupt handler. The function is called with a pointer to a
2456 * response, which the caller must ensure is a valid pure response.
2457 *
2458 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
2459 */
2460 static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
2461 struct rsp_desc *r)
2462 {
2463 struct sge_rspq *q = &qs->rspq;
2464 unsigned int sleeping = 0;
2465
2466 do {
2467 u32 flags = ntohl(r->flags);
2468
2469 r++;
2470 if (unlikely(++q->cidx == q->size)) {
2471 q->cidx = 0;
2472 q->gen ^= 1;
2473 r = q->desc;
2474 }
2475 prefetch(r);
2476
2477 if (flags & RSPD_CTRL_MASK) {
2478 sleeping |= flags & RSPD_GTS_MASK;
2479 handle_rsp_cntrl_info(qs, flags);
2480 }
2481
2482 q->pure_rsps++;
2483 if (++q->credits >= (q->size / 4)) {
2484 refill_rspq(adap, q, q->credits);
2485 q->credits = 0;
2486 }
2487 if (!is_new_response(r, q))
2488 break;
2489 rmb();
2490 } while (is_pure_response(r));
2491
2492 if (sleeping)
2493 check_ring_db(adap, qs, sleeping);
2494
2495 smp_mb(); /* commit Tx queue .processed updates */
2496 if (unlikely(qs->txq_stopped != 0))
2497 restart_tx(qs);
2498
2499 return is_new_response(r, q);
2500 }
2501
2502 /**
2503 * handle_responses - decide what to do with new responses in NAPI mode
2504 * @adap: the adapter
2505 * @q: the response queue
2506 *
2507 * This is used by the NAPI interrupt handlers to decide what to do with
2508 * new SGE responses. If there are no new responses it returns -1. If
2509 * there are new responses and they are pure (i.e., non-data carrying)
2510 * it handles them straight in hard interrupt context as they are very
2511 * cheap and don't deliver any packets. Finally, if there are any data
2512 * signaling responses it schedules the NAPI handler. Returns 1 if it
2513 * schedules NAPI, 0 if all new responses were pure.
2514 *
2515 * The caller must ascertain NAPI is not already running.
2516 */
2517 static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2518 {
2519 struct sge_qset *qs = rspq_to_qset(q);
2520 struct rsp_desc *r = &q->desc[q->cidx];
2521
2522 if (!is_new_response(r, q))
2523 return -1;
2524 rmb();
2525 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2526 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2527 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
2528 return 0;
2529 }
2530 napi_schedule(&qs->napi);
2531 return 1;
2532 }
2533
2534 /*
2535 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2536 * (i.e., response queue serviced in hard interrupt).
2537 */
2538 static irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2539 {
2540 struct sge_qset *qs = cookie;
2541 struct adapter *adap = qs->adap;
2542 struct sge_rspq *q = &qs->rspq;
2543
2544 spin_lock(&q->lock);
2545 if (process_responses(adap, qs, -1) == 0)
2546 q->unhandled_irqs++;
2547 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2548 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2549 spin_unlock(&q->lock);
2550 return IRQ_HANDLED;
2551 }
2552
2553 /*
2554 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2555 * (i.e., response queue serviced by NAPI polling).
2556 */
2557 static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
2558 {
2559 struct sge_qset *qs = cookie;
2560 struct sge_rspq *q = &qs->rspq;
2561
2562 spin_lock(&q->lock);
2563
2564 if (handle_responses(qs->adap, q) < 0)
2565 q->unhandled_irqs++;
2566 spin_unlock(&q->lock);
2567 return IRQ_HANDLED;
2568 }
2569
2570 /*
2571 * The non-NAPI MSI interrupt handler. This needs to handle data events from
2572 * SGE response queues as well as error and other async events as they all use
2573 * the same MSI vector. We use one SGE response queue per port in this mode
2574 * and protect all response queues with queue 0's lock.
2575 */
2576 static irqreturn_t t3_intr_msi(int irq, void *cookie)
2577 {
2578 int new_packets = 0;
2579 struct adapter *adap = cookie;
2580 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2581
2582 spin_lock(&q->lock);
2583
2584 if (process_responses(adap, &adap->sge.qs[0], -1)) {
2585 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2586 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2587 new_packets = 1;
2588 }
2589
2590 if (adap->params.nports == 2 &&
2591 process_responses(adap, &adap->sge.qs[1], -1)) {
2592 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2593
2594 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2595 V_NEWTIMER(q1->next_holdoff) |
2596 V_NEWINDEX(q1->cidx));
2597 new_packets = 1;
2598 }
2599
2600 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2601 q->unhandled_irqs++;
2602
2603 spin_unlock(&q->lock);
2604 return IRQ_HANDLED;
2605 }
2606
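/*
 * Schedule NAPI for a response queue if it has new responses and NAPI is not
 * already running on it. Returns 1 if NAPI was scheduled, 0 otherwise.
 */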
2607 static int rspq_check_napi(struct sge_qset *qs)
2608 {
2609 struct sge_rspq *q = &qs->rspq;
2610
2611 if (!napi_is_scheduled(&qs->napi) &&
2612 is_new_response(&q->desc[q->cidx], q)) {
2613 napi_schedule(&qs->napi);
2614 return 1;
2615 }
2616 return 0;
2617 }
2618
2619 /*
2620 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2621 * by NAPI polling). Handles data events from SGE response queues as well as
2622 * error and other async events as they all use the same MSI vector. We use
2623 * one SGE response queue per port in this mode and protect all response
2624 * queues with queue 0's lock.
2625 */
2626 static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
2627 {
2628 int new_packets;
2629 struct adapter *adap = cookie;
2630 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2631
2632 spin_lock(&q->lock);
2633
2634 new_packets = rspq_check_napi(&adap->sge.qs[0]);
2635 if (adap->params.nports == 2)
2636 new_packets += rspq_check_napi(&adap->sge.qs[1]);
2637 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2638 q->unhandled_irqs++;
2639
2640 spin_unlock(&q->lock);
2641 return IRQ_HANDLED;
2642 }
2643
2644 /*
2645 * A helper function that processes responses and issues GTS.
2646 */
2647 static inline int process_responses_gts(struct adapter *adap,
2648 struct sge_rspq *rq)
2649 {
2650 int work;
2651
2652 work = process_responses(adap, rspq_to_qset(rq), -1);
2653 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2654 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2655 return work;
2656 }
2657
2658 /*
2659 * The legacy INTx interrupt handler. This needs to handle data events from
2660 * SGE response queues as well as error and other async events as they all use
2661 * the same interrupt pin. We use one SGE response queue per port in this mode
2662 * and protect all response queues with queue 0's lock.
2663 */
2664 static irqreturn_t t3_intr(int irq, void *cookie)
2665 {
2666 int work_done, w0, w1;
2667 struct adapter *adap = cookie;
2668 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2669 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2670
2671 spin_lock(&q0->lock);
2672
2673 w0 = is_new_response(&q0->desc[q0->cidx], q0);
2674 w1 = adap->params.nports == 2 &&
2675 is_new_response(&q1->desc[q1->cidx], q1);
2676
2677 if (likely(w0 | w1)) {
2678 t3_write_reg(adap, A_PL_CLI, 0);
2679 t3_read_reg(adap, A_PL_CLI); /* flush */
2680
2681 if (likely(w0))
2682 process_responses_gts(adap, q0);
2683
2684 if (w1)
2685 process_responses_gts(adap, q1);
2686
2687 work_done = w0 | w1;
2688 } else
2689 work_done = t3_slow_intr_handler(adap);
2690
2691 spin_unlock(&q0->lock);
2692 return IRQ_RETVAL(work_done != 0);
2693 }
2694
2695 /*
2696 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2697 * Handles data events from SGE response queues as well as error and other
2698 * async events as they all use the same interrupt pin. We use one SGE
2699 * response queue per port in this mode and protect all response queues with
2700 * queue 0's lock.
2701 */
2702 static irqreturn_t t3b_intr(int irq, void *cookie)
2703 {
2704 u32 map;
2705 struct adapter *adap = cookie;
2706 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2707
2708 t3_write_reg(adap, A_PL_CLI, 0);
2709 map = t3_read_reg(adap, A_SG_DATA_INTR);
2710
2711 if (unlikely(!map)) /* shared interrupt, most likely */
2712 return IRQ_NONE;
2713
2714 spin_lock(&q0->lock);
2715
2716 if (unlikely(map & F_ERRINTR))
2717 t3_slow_intr_handler(adap);
2718
2719 if (likely(map & 1))
2720 process_responses_gts(adap, q0);
2721
2722 if (map & 2)
2723 process_responses_gts(adap, &adap->sge.qs[1].rspq);
2724
2725 spin_unlock(&q0->lock);
2726 return IRQ_HANDLED;
2727 }
2728
2729 /*
2730 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2731 * Handles data events from SGE response queues as well as error and other
2732 * async events as they all use the same interrupt pin. We use one SGE
2733 * response queue per port in this mode and protect all response queues with
2734 * queue 0's lock.
2735 */
2736 static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2737 {
2738 u32 map;
2739 struct adapter *adap = cookie;
2740 struct sge_qset *qs0 = &adap->sge.qs[0];
2741 struct sge_rspq *q0 = &qs0->rspq;
2742
2743 t3_write_reg(adap, A_PL_CLI, 0);
2744 map = t3_read_reg(adap, A_SG_DATA_INTR);
2745
2746 if (unlikely(!map)) /* shared interrupt, most likely */
2747 return IRQ_NONE;
2748
2749 spin_lock(&q0->lock);
2750
2751 if (unlikely(map & F_ERRINTR))
2752 t3_slow_intr_handler(adap);
2753
2754 if (likely(map & 1))
2755 napi_schedule(&qs0->napi);
2756
2757 if (map & 2)
2758 napi_schedule(&adap->sge.qs[1].napi);
2759
2760 spin_unlock(&q0->lock);
2761 return IRQ_HANDLED;
2762 }
2763
2764 /**
2765 * t3_intr_handler - select the top-level interrupt handler
2766 * @adap: the adapter
2767 * @polling: whether using NAPI to service response queues
2768 *
2769 * Selects the top-level interrupt handler based on the type of interrupts
2770 * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the
2771 * response queues.
2772 */
2773 irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
2774 {
2775 if (adap->flags & USING_MSIX)
2776 return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2777 if (adap->flags & USING_MSI)
2778 return polling ? t3_intr_msi_napi : t3_intr_msi;
2779 if (adap->params.rev > 0)
2780 return polling ? t3b_intr_napi : t3b_intr;
2781 return t3_intr;
2782 }
2783
2784 #define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
2785 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
2786 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
2787 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
2788 F_HIRCQPARITYERROR)
2789 #define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
2790 #define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
2791 F_RSPQDISABLED)
2792
2793 /**
2794 * t3_sge_err_intr_handler - SGE async event interrupt handler
2795 * @adapter: the adapter
2796 *
2797 * Interrupt handler for SGE asynchronous (non-data) events.
2798 */
2799 void t3_sge_err_intr_handler(struct adapter *adapter)
2800 {
2801 unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE) &
2802 ~F_FLEMPTY;
2803
2804 if (status & SGE_PARERR)
2805 CH_ALERT(adapter, "SGE parity error (0x%x)\n",
2806 status & SGE_PARERR);
2807 if (status & SGE_FRAMINGERR)
2808 CH_ALERT(adapter, "SGE framing error (0x%x)\n",
2809 status & SGE_FRAMINGERR);
2810
2811 if (status & F_RSPQCREDITOVERFOW)
2812 CH_ALERT(adapter, "SGE response queue credit overflow\n");
2813
2814 if (status & F_RSPQDISABLED) {
2815 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2816
2817 CH_ALERT(adapter,
2818 "packet delivered to disabled response queue "
2819 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2820 }
2821
2822 if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
2823 queue_work(cxgb3_wq, &adapter->db_drop_task);
2824
2825 if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL))
2826 queue_work(cxgb3_wq, &adapter->db_full_task);
2827
2828 if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY))
2829 queue_work(cxgb3_wq, &adapter->db_empty_task);
2830
2831 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
2832 if (status & SGE_FATALERR)
2833 t3_fatal_err(adapter);
2834 }
2835
2836 /**
2837 * sge_timer_tx - perform periodic maintenance of an SGE qset
2838 * @data: the SGE queue set to maintain
2839 *
2840 * Runs periodically from a timer to perform maintenance of an SGE queue
2841 * set:
2842 *
2843 * Cleans up any completed Tx descriptors that may still be pending.
2844 * Normal descriptor cleanup happens when new packets are added to a Tx
2845 * queue so this timer is relatively infrequent and does any cleanup only
2846 * if the Tx queue has not seen any new packets in a while. We make a
2847 * best effort attempt to reclaim descriptors, in that we don't wait
2848 * around if we cannot get a queue's lock (which most likely is because
2849 * someone else is queueing new packets and so will also handle the clean
2850 * up). Since control queues use immediate data exclusively we don't
2851 * bother cleaning them up here.
2852 *
2853 */
2854 static void sge_timer_tx(unsigned long data)
2855 {
2856 struct sge_qset *qs = (struct sge_qset *)data;
2857 struct port_info *pi = netdev_priv(qs->netdev);
2858 struct adapter *adap = pi->adapter;
2859 unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
2860 unsigned long next_period;
2861
2862 if (__netif_tx_trylock(qs->tx_q)) {
2863 tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
2864 TX_RECLAIM_TIMER_CHUNK);
2865 __netif_tx_unlock(qs->tx_q);
2866 }
2867
2868 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2869 tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
2870 TX_RECLAIM_TIMER_CHUNK);
2871 spin_unlock(&qs->txq[TXQ_OFLD].lock);
2872 }
2873
2874 next_period = TX_RECLAIM_PERIOD >>
2875 (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
2876 TX_RECLAIM_TIMER_CHUNK);
2877 mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
2878 }
2879
2880 /*
2881 * sge_timer_rx - perform periodic maintenance of an SGE qset
2882 * @data: the SGE queue set to maintain
2883 *
2884 * a) Replenishes Rx queues that have run out due to memory shortage.
2885 * Normally new Rx buffers are added when existing ones are consumed but
2886 * when out of memory a queue can become empty. We try to add only a few
2887 * buffers here; the queue will be replenished fully as these new buffers
2888 * are used up once the memory shortage has subsided.
2889 *
2890 * b) Return coalesced response queue credits in case a response queue is
2891 * starved.
2892 *
2893 */
2894 static void sge_timer_rx(unsigned long data)
2895 {
2896 spinlock_t *lock;
2897 struct sge_qset *qs = (struct sge_qset *)data;
2898 struct port_info *pi = netdev_priv(qs->netdev);
2899 struct adapter *adap = pi->adapter;
2900 u32 status;
2901
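	/*
	 * Rev 0 boards protect all response queues with queue 0's lock (see
	 * the INTx/MSI handlers above), so take that lock here as well.
	 */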
2902 lock = adap->params.rev > 0 ?
2903 &qs->rspq.lock : &adap->sge.qs[0].rspq.lock;
2904
2905 if (!spin_trylock_irq(lock))
2906 goto out;
2907
2908 if (napi_is_scheduled(&qs->napi))
2909 goto unlock;
2910
2911 if (adap->params.rev < 4) {
2912 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
2913
2914 if (status & (1 << qs->rspq.cntxt_id)) {
2915 qs->rspq.starved++;
2916 if (qs->rspq.credits) {
2917 qs->rspq.credits--;
2918 refill_rspq(adap, &qs->rspq, 1);
2919 qs->rspq.restarted++;
2920 t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
2921 1 << qs->rspq.cntxt_id);
2922 }
2923 }
2924 }
2925
2926 if (qs->fl[0].credits < qs->fl[0].size)
2927 __refill_fl(adap, &qs->fl[0]);
2928 if (qs->fl[1].credits < qs->fl[1].size)
2929 __refill_fl(adap, &qs->fl[1]);
2930
2931 unlock:
2932 spin_unlock_irq(lock);
2933 out:
2934 mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
2935 }
2936
2937 /**
2938 * t3_update_qset_coalesce - update coalescing settings for a queue set
2939 * @qs: the SGE queue set
2940 * @p: new queue set parameters
2941 *
2942 * Update the coalescing settings for an SGE queue set. Nothing is done
2943 * if the queue set is not initialized yet.
2944 */
2945 void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
2946 {
2947 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
2948 qs->rspq.polling = p->polling;
2949 qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
2950 }
2951
2952 /**
2953 * t3_sge_alloc_qset - initialize an SGE queue set
2954 * @adapter: the adapter
2955 * @id: the queue set id
2956 * @nports: how many Ethernet ports will be using this queue set
2957 * @irq_vec_idx: the IRQ vector index for response queue interrupts
2958 * @p: configuration parameters for this queue set
2959 * @ntxq: number of Tx queues for the queue set
2960 * @dev: net device associated with this queue set
2961 * @netdevq: net device TX queue associated with this queue set
2962 *
2963 * Allocate resources and initialize an SGE queue set. A queue set
2964 * comprises a response queue, two Rx free-buffer queues, and up to 3
2965 * Tx queues. The Tx queues are assigned roles in the order Ethernet
2966 * queue, offload queue, and control queue.
2967 */
2968 int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2969 int irq_vec_idx, const struct qset_params *p,
2970 int ntxq, struct net_device *dev,
2971 struct netdev_queue *netdevq)
2972 {
2973 int i, avail, ret = -ENOMEM;
2974 struct sge_qset *q = &adapter->sge.qs[id];
2975
2976 init_qset_cntxt(q, id);
2977 setup_timer(&q->tx_reclaim_timer, sge_timer_tx, (unsigned long)q);
2978 setup_timer(&q->rx_reclaim_timer, sge_timer_rx, (unsigned long)q);
2979
2980 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
2981 sizeof(struct rx_desc),
2982 sizeof(struct rx_sw_desc),
2983 &q->fl[0].phys_addr, &q->fl[0].sdesc);
2984 if (!q->fl[0].desc)
2985 goto err;
2986
2987 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
2988 sizeof(struct rx_desc),
2989 sizeof(struct rx_sw_desc),
2990 &q->fl[1].phys_addr, &q->fl[1].sdesc);
2991 if (!q->fl[1].desc)
2992 goto err;
2993
2994 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
2995 sizeof(struct rsp_desc), 0,
2996 &q->rspq.phys_addr, NULL);
2997 if (!q->rspq.desc)
2998 goto err;
2999
3000 for (i = 0; i < ntxq; ++i) {
3001 /*
3002 * The control queue always uses immediate data so does not
3003 * need to keep track of any sk_buffs.
3004 */
3005 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
3006
3007 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
3008 sizeof(struct tx_desc), sz,
3009 &q->txq[i].phys_addr,
3010 &q->txq[i].sdesc);
3011 if (!q->txq[i].desc)
3012 goto err;
3013
3014 q->txq[i].gen = 1;
3015 q->txq[i].size = p->txq_size[i];
3016 spin_lock_init(&q->txq[i].lock);
3017 skb_queue_head_init(&q->txq[i].sendq);
3018 }
3019
3020 tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
3021 (unsigned long)q);
3022 tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
3023 (unsigned long)q);
3024
3025 q->fl[0].gen = q->fl[1].gen = 1;
3026 q->fl[0].size = p->fl_size;
3027 q->fl[1].size = p->jumbo_size;
3028
3029 q->rspq.gen = 1;
3030 q->rspq.size = p->rspq_size;
3031 spin_lock_init(&q->rspq.lock);
3032 skb_queue_head_init(&q->rspq.rx_queue);
3033
3034 q->txq[TXQ_ETH].stop_thres = nports *
3035 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
3036
3037 #if FL0_PG_CHUNK_SIZE > 0
3038 q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
3039 #else
3040 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
3041 #endif
3042 #if FL1_PG_CHUNK_SIZE > 0
3043 q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
3044 #else
3045 q->fl[1].buf_size = is_offload(adapter) ?
3046 (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
3047 MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
3048 #endif
3049
3050 q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
3051 q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
3052 q->fl[0].order = FL0_PG_ORDER;
3053 q->fl[1].order = FL1_PG_ORDER;
3054 q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE;
3055 q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE;
3056
3057 spin_lock_irq(&adapter->sge.reg_lock);
3058
3059 /* FL threshold comparison uses < */
3060 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
3061 q->rspq.phys_addr, q->rspq.size,
3062 q->fl[0].buf_size - SGE_PG_RSVD, 1, 0);
3063 if (ret)
3064 goto err_unlock;
3065
3066 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
3067 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
3068 q->fl[i].phys_addr, q->fl[i].size,
3069 q->fl[i].buf_size - SGE_PG_RSVD,
3070 p->cong_thres, 1, 0);
3071 if (ret)
3072 goto err_unlock;
3073 }
3074
3075 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
3076 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
3077 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
3078 1, 0);
3079 if (ret)
3080 goto err_unlock;
3081
3082 if (ntxq > 1) {
3083 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
3084 USE_GTS, SGE_CNTXT_OFLD, id,
3085 q->txq[TXQ_OFLD].phys_addr,
3086 q->txq[TXQ_OFLD].size, 0, 1, 0);
3087 if (ret)
3088 goto err_unlock;
3089 }
3090
3091 if (ntxq > 2) {
3092 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
3093 SGE_CNTXT_CTRL, id,
3094 q->txq[TXQ_CTRL].phys_addr,
3095 q->txq[TXQ_CTRL].size,
3096 q->txq[TXQ_CTRL].token, 1, 0);
3097 if (ret)
3098 goto err_unlock;
3099 }
3100
3101 spin_unlock_irq(&adapter->sge.reg_lock);
3102
3103 q->adap = adapter;
3104 q->netdev = dev;
3105 q->tx_q = netdevq;
3106 t3_update_qset_coalesce(q, p);
3107
3108 avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
3109 GFP_KERNEL | __GFP_COMP);
3110 if (!avail) {
3111 CH_ALERT(adapter, "free list queue 0 initialization failed\n");
3112 goto err;
3113 }
3114 if (avail < q->fl[0].size)
3115 CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
3116 avail);
3117
3118 avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
3119 GFP_KERNEL | __GFP_COMP);
3120 if (avail < q->fl[1].size)
3121 CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
3122 avail);
3123 refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
3124
3125 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
3126 V_NEWTIMER(q->rspq.holdoff_tmr));
3127
3128 return 0;
3129
3130 err_unlock:
3131 spin_unlock_irq(&adapter->sge.reg_lock);
3132 err:
3133 t3_free_qset(adapter, q);
3134 return ret;
3135 }
3136
3137 /**
3138 * t3_start_sge_timers - start SGE timer callbacks
3139 * @adap: the adapter
3140 *
3141 * Starts the timer callbacks of each SGE queue set
3142 */
3143 void t3_start_sge_timers(struct adapter *adap)
3144 {
3145 int i;
3146
3147 for (i = 0; i < SGE_QSETS; ++i) {
3148 struct sge_qset *q = &adap->sge.qs[i];
3149
3150 if (q->tx_reclaim_timer.function)
3151 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
3152
3153 if (q->rx_reclaim_timer.function)
3154 mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
3155 }
3156 }
3157
3158 /**
3159 * t3_stop_sge_timers - stop SGE timer callbacks
3160 * @adap: the adapter
3161 *
3162 * Stops the timer callbacks of each SGE queue set
3163 */
3164 void t3_stop_sge_timers(struct adapter *adap)
3165 {
3166 int i;
3167
3168 for (i = 0; i < SGE_QSETS; ++i) {
3169 struct sge_qset *q = &adap->sge.qs[i];
3170
3171 if (q->tx_reclaim_timer.function)
3172 del_timer_sync(&q->tx_reclaim_timer);
3173 if (q->rx_reclaim_timer.function)
3174 del_timer_sync(&q->rx_reclaim_timer);
3175 }
3176 }
3177
3178 /**
3179 * t3_free_sge_resources - free SGE resources
3180 * @adap: the adapter
3181 *
3182 * Frees resources used by the SGE queue sets.
3183 */
3184 void t3_free_sge_resources(struct adapter *adap)
3185 {
3186 int i;
3187
3188 for (i = 0; i < SGE_QSETS; ++i)
3189 t3_free_qset(adap, &adap->sge.qs[i]);
3190 }
3191
3192 /**
3193 * t3_sge_start - enable SGE
3194 * @adap: the adapter
3195 *
3196 * Enables the SGE for DMAs. This is the last step in starting packet
3197 * transfers.
3198 */
3199 void t3_sge_start(struct adapter *adap)
3200 {
3201 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
3202 }
3203
3204 /**
3205 * t3_sge_stop - disable SGE operation
3206 * @adap: the adapter
3207 *
3208 * Disables the DMA engine. This can be called in emergencies (e.g.,
3209 * from error interrupts) or from normal process context. In the latter
3210 * case it also disables any pending queue restart tasklets. Note that
3211 * if it is called in interrupt context it cannot disable the restart
3212 * tasklets as it cannot wait, however the tasklets will have no effect
3213 * since the doorbells are disabled and the driver will call this again
3214 * later from process context, at which time the tasklets will be stopped
3215 * if they are still running.
3216 */
3217 void t3_sge_stop(struct adapter *adap)
3218 {
3219 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
3220 if (!in_interrupt()) {
3221 int i;
3222
3223 for (i = 0; i < SGE_QSETS; ++i) {
3224 struct sge_qset *qs = &adap->sge.qs[i];
3225
3226 tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
3227 tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
3228 }
3229 }
3230 }
3231
3232 /**
3233 * t3_sge_init - initialize SGE
3234 * @adap: the adapter
3235 * @p: the SGE parameters
3236 *
3237 * Performs SGE initialization needed every time after a chip reset.
3238 * We do not initialize any of the queue sets here, instead the driver
3239 * top-level must request those individually. We also do not enable DMA
3240 * here, that should be done after the queues have been set up.
3241 */
3242 void t3_sge_init(struct adapter *adap, struct sge_params *p)
3243 {
3244 unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
3245
3246 ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
3247 F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
3248 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
3249 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
3250 #if SGE_NUM_GENBITS == 1
3251 ctrl |= F_EGRGENCTRL;
3252 #endif
3253 if (adap->params.rev > 0) {
3254 if (!(adap->flags & (USING_MSIX | USING_MSI)))
3255 ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
3256 }
3257 t3_write_reg(adap, A_SG_CONTROL, ctrl);
3258 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
3259 V_LORCQDRBTHRSH(512));
3260 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
3261 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
3262 V_TIMEOUT(200 * core_ticks_per_usec(adap)));
3263 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
3264 adap->params.rev < T3_REV_C ? 1000 : 500);
3265 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
3266 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
3267 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
3268 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
3269 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
3270 }
3271
3272 /**
3273 * t3_sge_prep - one-time SGE initialization
3274 * @adap: the associated adapter
3275 * @p: SGE parameters
3276 *
3277 * Performs one-time initialization of SGE SW state. Includes determining
3278 * defaults for the assorted SGE parameters, which admins can change until
3279 * they are used to initialize the SGE.
3280 */
3281 void t3_sge_prep(struct adapter *adap, struct sge_params *p)
3282 {
3283 int i;
3284
3285 p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
3286 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3287
3288 for (i = 0; i < SGE_QSETS; ++i) {
3289 struct qset_params *q = p->qset + i;
3290
3291 q->polling = adap->params.rev > 0;
3292 q->coalesce_usecs = 5;
3293 q->rspq_size = 1024;
3294 q->fl_size = 1024;
3295 q->jumbo_size = 512;
3296 q->txq_size[TXQ_ETH] = 1024;
3297 q->txq_size[TXQ_OFLD] = 1024;
3298 q->txq_size[TXQ_CTRL] = 256;
3299 q->cong_thres = 0;
3300 }
3301
3302 spin_lock_init(&adap->sge.reg_lock);
3303 }
3304