1 /*
2  * This file is part of the Chelsio T4 Ethernet driver for Linux.
3  *
4  * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #include <linux/skbuff.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/if_vlan.h>
39 #include <linux/ip.h>
40 #include <linux/dma-mapping.h>
41 #include <linux/jiffies.h>
42 #include <net/ipv6.h>
43 #include <net/tcp.h>
44 #include "cxgb4.h"
45 #include "t4_regs.h"
46 #include "t4_msg.h"
47 #include "t4fw_api.h"
48 
49 /*
50  * Rx buffer size.  We use largish buffers if possible but settle for single
51  * pages under memory shortage.
52  */
53 #if PAGE_SHIFT >= 16
54 # define FL_PG_ORDER 0
55 #else
56 # define FL_PG_ORDER (16 - PAGE_SHIFT)
57 #endif
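
/*
 * Illustrative values (a sketch, assuming common page sizes): with 4KB
 * pages (PAGE_SHIFT == 12) FL_PG_ORDER is 4, so "large" free-list buffers
 * are 64KB compound pages; with 64KB pages FL_PG_ORDER is 0 and a single
 * page is already large enough.
 */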
58 
59 /* RX_PULL_LEN should be <= RX_COPY_THRES */
60 #define RX_COPY_THRES    256
61 #define RX_PULL_LEN      128
62 
63 /*
64  * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
65  * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
66  */
67 #define RX_PKT_SKB_LEN   512
68 
69 /* Ethernet header padding prepended to RX_PKTs */
70 #define RX_PKT_PAD 2
71 
72 /*
73  * Max number of Tx descriptors we clean up at a time.  Should be modest as
74  * freeing skbs isn't cheap and it happens while holding locks.  We just need
75  * to free packets faster than they arrive; we will eventually catch up and
76  * keep the amortized cost reasonable.  Must be >= 2 * TXQ_STOP_THRES.
77  */
78 #define MAX_TX_RECLAIM 16
79 
80 /*
81  * Max number of Rx buffers we replenish at a time.  Again keep this modest,
82  * allocating buffers isn't cheap either.
83  */
84 #define MAX_RX_REFILL 16U
85 
86 /*
87  * Period of the Rx queue check timer.  This timer is infrequent as it has
88  * something to do only when the system experiences severe memory shortage.
89  */
90 #define RX_QCHECK_PERIOD (HZ / 2)
91 
92 /*
93  * Period of the Tx queue check timer.
94  */
95 #define TX_QCHECK_PERIOD (HZ / 2)
96 
97 /*
98  * Max number of Tx descriptors to be reclaimed by the Tx timer.
99  */
100 #define MAX_TIMER_TX_RECLAIM 100
101 
102 /*
103  * Timer index used when backing off due to memory shortage.
104  */
105 #define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
106 
107 /*
108  * An FL with <= FL_STARVE_THRES buffers is starving and a periodic timer will
109  * attempt to refill it.
110  */
111 #define FL_STARVE_THRES 4
112 
113 /*
114  * Suspend an Ethernet Tx queue with fewer available descriptors than this.
115  * This is the same as calc_tx_descs() for a TSO packet with
116  * nr_frags == MAX_SKB_FRAGS.
117  */
118 #define ETHTXQ_STOP_THRES \
119 	(1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
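
/*
 * Worked example (illustrative, assuming MAX_SKB_FRAGS == 17 with 4KB
 * pages): 1 + DIV_ROUND_UP((3 * 17) / 2 + (17 & 1), 8) = 1 + 4 = 5
 * descriptors.  This matches calc_tx_descs() below for a TSO packet with
 * 17 fragments: sgl_len(18) + 6 header flits = 34 flits, i.e. 5 descriptors.
 */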
120 
121 /*
122  * Suspension threshold for non-Ethernet Tx queues.  We require enough room
123  * for a full sized WR.
124  */
125 #define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))
126 
127 /*
128  * Max Tx descriptor space we allow for an Ethernet packet to be inlined
129  * into a WR.
130  */
131 #define MAX_IMM_TX_PKT_LEN 128
132 
133 /*
134  * Max size of a WR sent through a control Tx queue.
135  */
136 #define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
137 
138 enum {
139 	/* packet alignment in FL buffers */
140 	FL_ALIGN = L1_CACHE_BYTES < 32 ? 32 : L1_CACHE_BYTES,
141 	/* egress status entry size */
142 	STAT_LEN = L1_CACHE_BYTES > 64 ? 128 : 64
143 };
144 
145 struct tx_sw_desc {                /* SW state per Tx descriptor */
146 	struct sk_buff *skb;
147 	struct ulptx_sgl *sgl;
148 };
149 
150 struct rx_sw_desc {                /* SW state per Rx descriptor */
151 	struct page *page;
152 	dma_addr_t dma_addr;
153 };
154 
155 /*
156  * The low bits of rx_sw_desc.dma_addr have special meaning.
157  */
158 enum {
159 	RX_LARGE_BUF    = 1 << 0, /* buffer is larger than PAGE_SIZE */
160 	RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */
161 };
162 
163 static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
164 {
165 	return d->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF);
166 }
167 
168 static inline bool is_buf_mapped(const struct rx_sw_desc *d)
169 {
170 	return !(d->dma_addr & RX_UNMAPPED_BUF);
171 }
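
/*
 * Note: free-list buffers are page aligned, so the two low bits of the bus
 * address are always zero and can carry the RX_LARGE_BUF/RX_UNMAPPED_BUF
 * flags; get_buf_addr() above simply masks them back out.
 */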
172 
173 /**
174  *	txq_avail - return the number of available slots in a Tx queue
175  *	@q: the Tx queue
176  *
177  *	Returns the number of descriptors in a Tx queue available to write new
178  *	packets.
179  */
180 static inline unsigned int txq_avail(const struct sge_txq *q)
181 {
182 	return q->size - 1 - q->in_use;
183 }
184 
185 /**
186  *	fl_cap - return the capacity of a free-buffer list
187  *	@fl: the FL
188  *
189  *	Returns the capacity of a free-buffer list.  The capacity is less than
190  *	the size because one descriptor needs to be left unpopulated, otherwise
191  *	HW will think the FL is empty.
192  */
193 static inline unsigned int fl_cap(const struct sge_fl *fl)
194 {
195 	return fl->size - 8;   /* 1 descriptor = 8 buffers */
196 }
197 
198 static inline bool fl_starving(const struct sge_fl *fl)
199 {
200 	return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
201 }
202 
203 static int map_skb(struct device *dev, const struct sk_buff *skb,
204 		   dma_addr_t *addr)
205 {
206 	const skb_frag_t *fp, *end;
207 	const struct skb_shared_info *si;
208 
209 	*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
210 	if (dma_mapping_error(dev, *addr))
211 		goto out_err;
212 
213 	si = skb_shinfo(skb);
214 	end = &si->frags[si->nr_frags];
215 
216 	for (fp = si->frags; fp < end; fp++) {
217 		*++addr = dma_map_page(dev, fp->page, fp->page_offset, fp->size,
218 				       DMA_TO_DEVICE);
219 		if (dma_mapping_error(dev, *addr))
220 			goto unwind;
221 	}
222 	return 0;
223 
224 unwind:
225 	while (fp-- > si->frags)
226 		dma_unmap_page(dev, *--addr, fp->size, DMA_TO_DEVICE);
227 
228 	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
229 out_err:
230 	return -ENOMEM;
231 }
232 
233 #ifdef CONFIG_NEED_DMA_MAP_STATE
234 static void unmap_skb(struct device *dev, const struct sk_buff *skb,
235 		      const dma_addr_t *addr)
236 {
237 	const skb_frag_t *fp, *end;
238 	const struct skb_shared_info *si;
239 
240 	dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);
241 
242 	si = skb_shinfo(skb);
243 	end = &si->frags[si->nr_frags];
244 	for (fp = si->frags; fp < end; fp++)
245 		dma_unmap_page(dev, *addr++, fp->size, DMA_TO_DEVICE);
246 }
247 
248 /**
249  *	deferred_unmap_destructor - unmap a packet when it is freed
250  *	@skb: the packet
251  *
252  *	This is the packet destructor used for Tx packets that need to remain
253  *	mapped until they are freed rather than until their Tx descriptors are
254  *	freed.
255  */
256 static void deferred_unmap_destructor(struct sk_buff *skb)
257 {
258 	unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
259 }
260 #endif
261 
262 static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
263 		      const struct ulptx_sgl *sgl, const struct sge_txq *q)
264 {
265 	const struct ulptx_sge_pair *p;
266 	unsigned int nfrags = skb_shinfo(skb)->nr_frags;
267 
268 	if (likely(skb_headlen(skb)))
269 		dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
270 				 DMA_TO_DEVICE);
271 	else {
272 		dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
273 			       DMA_TO_DEVICE);
274 		nfrags--;
275 	}
276 
277 	/*
278 	 * the complexity below is because of the possibility of a wrap-around
279 	 * in the middle of an SGL
280 	 */
281 	for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
282 		if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
283 unmap:			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
284 				       ntohl(p->len[0]), DMA_TO_DEVICE);
285 			dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
286 				       ntohl(p->len[1]), DMA_TO_DEVICE);
287 			p++;
288 		} else if ((u8 *)p == (u8 *)q->stat) {
289 			p = (const struct ulptx_sge_pair *)q->desc;
290 			goto unmap;
291 		} else if ((u8 *)p + 8 == (u8 *)q->stat) {
292 			const __be64 *addr = (const __be64 *)q->desc;
293 
294 			dma_unmap_page(dev, be64_to_cpu(addr[0]),
295 				       ntohl(p->len[0]), DMA_TO_DEVICE);
296 			dma_unmap_page(dev, be64_to_cpu(addr[1]),
297 				       ntohl(p->len[1]), DMA_TO_DEVICE);
298 			p = (const struct ulptx_sge_pair *)&addr[2];
299 		} else {
300 			const __be64 *addr = (const __be64 *)q->desc;
301 
302 			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
303 				       ntohl(p->len[0]), DMA_TO_DEVICE);
304 			dma_unmap_page(dev, be64_to_cpu(addr[0]),
305 				       ntohl(p->len[1]), DMA_TO_DEVICE);
306 			p = (const struct ulptx_sge_pair *)&addr[1];
307 		}
308 	}
309 	if (nfrags) {
310 		__be64 addr;
311 
312 		if ((u8 *)p == (u8 *)q->stat)
313 			p = (const struct ulptx_sge_pair *)q->desc;
314 		addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
315 						       *(const __be64 *)q->desc;
316 		dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
317 			       DMA_TO_DEVICE);
318 	}
319 }
320 
321 /**
322  *	free_tx_desc - reclaims Tx descriptors and their buffers
323  *	@adapter: the adapter
324  *	@q: the Tx queue to reclaim descriptors from
325  *	@n: the number of descriptors to reclaim
326  *	@unmap: whether the buffers should be unmapped for DMA
327  *
328  *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
329  *	Tx buffers.  Called with the Tx queue lock held.
330  */
331 static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
332 			 unsigned int n, bool unmap)
333 {
334 	struct tx_sw_desc *d;
335 	unsigned int cidx = q->cidx;
336 	struct device *dev = adap->pdev_dev;
337 
338 	d = &q->sdesc[cidx];
339 	while (n--) {
340 		if (d->skb) {                       /* an SGL is present */
341 			if (unmap)
342 				unmap_sgl(dev, d->skb, d->sgl, q);
343 			kfree_skb(d->skb);
344 			d->skb = NULL;
345 		}
346 		++d;
347 		if (++cidx == q->size) {
348 			cidx = 0;
349 			d = q->sdesc;
350 		}
351 	}
352 	q->cidx = cidx;
353 }
354 
355 /*
356  * Return the number of reclaimable descriptors in a Tx queue.
357  */
358 static inline int reclaimable(const struct sge_txq *q)
359 {
360 	int hw_cidx = ntohs(q->stat->cidx);
361 	hw_cidx -= q->cidx;
362 	return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
363 }
364 
365 /**
366  *	reclaim_completed_tx - reclaims completed Tx descriptors
367  *	@adap: the adapter
368  *	@q: the Tx queue to reclaim completed descriptors from
369  *	@unmap: whether the buffers should be unmapped for DMA
370  *
371  *	Reclaims Tx descriptors that the SGE has indicated it has processed,
372  *	and frees the associated buffers if possible.  Called with the Tx
373  *	queue locked.
374  */
375 static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
376 					bool unmap)
377 {
378 	int avail = reclaimable(q);
379 
380 	if (avail) {
381 		/*
382 		 * Limit the amount of clean up work we do at a time to keep
383 		 * the Tx lock hold time O(1).
384 		 */
385 		if (avail > MAX_TX_RECLAIM)
386 			avail = MAX_TX_RECLAIM;
387 
388 		free_tx_desc(adap, q, avail, unmap);
389 		q->in_use -= avail;
390 	}
391 }
392 
393 static inline int get_buf_size(const struct rx_sw_desc *d)
394 {
395 #if FL_PG_ORDER > 0
396 	return (d->dma_addr & RX_LARGE_BUF) ? (PAGE_SIZE << FL_PG_ORDER) :
397 					      PAGE_SIZE;
398 #else
399 	return PAGE_SIZE;
400 #endif
401 }
402 
403 /**
404  *	free_rx_bufs - free the Rx buffers on an SGE free list
405  *	@adap: the adapter
406  *	@q: the SGE free list to free buffers from
407  *	@n: how many buffers to free
408  *
409  *	Release the next @n buffers on an SGE free-buffer Rx queue.   The
410  *	buffers must be made inaccessible to HW before calling this function.
411  */
412 static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
413 {
414 	while (n--) {
415 		struct rx_sw_desc *d = &q->sdesc[q->cidx];
416 
417 		if (is_buf_mapped(d))
418 			dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
419 				       get_buf_size(d), PCI_DMA_FROMDEVICE);
420 		put_page(d->page);
421 		d->page = NULL;
422 		if (++q->cidx == q->size)
423 			q->cidx = 0;
424 		q->avail--;
425 	}
426 }
427 
428 /**
429  *	unmap_rx_buf - unmap the current Rx buffer on an SGE free list
430  *	@adap: the adapter
431  *	@q: the SGE free list
432  *
433  *	Unmap the current buffer on an SGE free-buffer Rx queue.   The
434  *	buffer must be made inaccessible to HW before calling this function.
435  *
436  *	This is similar to @free_rx_bufs above but does not free the buffer.
437  *	Do note that the FL still loses any further access to the buffer.
438  */
439 static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
440 {
441 	struct rx_sw_desc *d = &q->sdesc[q->cidx];
442 
443 	if (is_buf_mapped(d))
444 		dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
445 			       get_buf_size(d), PCI_DMA_FROMDEVICE);
446 	d->page = NULL;
447 	if (++q->cidx == q->size)
448 		q->cidx = 0;
449 	q->avail--;
450 }
451 
452 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
453 {
454 	if (q->pend_cred >= 8) {
455 		wmb();
456 		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO |
457 			     QID(q->cntxt_id) | PIDX(q->pend_cred / 8));
458 		q->pend_cred &= 7;
459 	}
460 }
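
/*
 * Example of the credit granularity above (illustrative): free-list
 * credits are given to HW in units of 8 buffers, so with pend_cred == 19
 * the doorbell posts PIDX(2), i.e. 16 buffers, and the remaining 3 stay
 * pending until more buffers are added.
 */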
461 
462 static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
463 				  dma_addr_t mapping)
464 {
465 	sd->page = pg;
466 	sd->dma_addr = mapping;      /* includes size low bits */
467 }
468 
469 /**
470  *	refill_fl - refill an SGE Rx buffer ring
471  *	@adap: the adapter
472  *	@q: the ring to refill
473  *	@n: the number of new buffers to allocate
474  *	@gfp: the gfp flags for the allocations
475  *
476  *	(Re)populate an SGE free-buffer queue with up to @n new packet buffers,
477  *	allocated with the supplied gfp flags.  The caller must ensure that
478  *	@n does not exceed the queue's capacity.  If the queue is afterwards
479  *	found critically low, it is marked as starving in the bitmap of starving FLs.
480  *
481  *	Returns the number of buffers allocated.
482  */
483 static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
484 			      gfp_t gfp)
485 {
486 	struct page *pg;
487 	dma_addr_t mapping;
488 	unsigned int cred = q->avail;
489 	__be64 *d = &q->desc[q->pidx];
490 	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
491 
492 	gfp |= __GFP_NOWARN;         /* failures are expected */
493 
494 #if FL_PG_ORDER > 0
495 	/*
496 	 * Prefer large buffers
497 	 */
498 	while (n) {
499 		pg = alloc_pages(gfp | __GFP_COMP, FL_PG_ORDER);
500 		if (unlikely(!pg)) {
501 			q->large_alloc_failed++;
502 			break;       /* fall back to single pages */
503 		}
504 
505 		mapping = dma_map_page(adap->pdev_dev, pg, 0,
506 				       PAGE_SIZE << FL_PG_ORDER,
507 				       PCI_DMA_FROMDEVICE);
508 		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
509 			__free_pages(pg, FL_PG_ORDER);
510 			goto out;   /* do not try small pages for this error */
511 		}
512 		mapping |= RX_LARGE_BUF;
513 		*d++ = cpu_to_be64(mapping);
514 
515 		set_rx_sw_desc(sd, pg, mapping);
516 		sd++;
517 
518 		q->avail++;
519 		if (++q->pidx == q->size) {
520 			q->pidx = 0;
521 			sd = q->sdesc;
522 			d = q->desc;
523 		}
524 		n--;
525 	}
526 #endif
527 
528 	while (n--) {
529 		pg = __netdev_alloc_page(adap->port[0], gfp);
530 		if (unlikely(!pg)) {
531 			q->alloc_failed++;
532 			break;
533 		}
534 
535 		mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
536 				       PCI_DMA_FROMDEVICE);
537 		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
538 			netdev_free_page(adap->port[0], pg);
539 			goto out;
540 		}
541 		*d++ = cpu_to_be64(mapping);
542 
543 		set_rx_sw_desc(sd, pg, mapping);
544 		sd++;
545 
546 		q->avail++;
547 		if (++q->pidx == q->size) {
548 			q->pidx = 0;
549 			sd = q->sdesc;
550 			d = q->desc;
551 		}
552 	}
553 
554 out:	cred = q->avail - cred;
555 	q->pend_cred += cred;
556 	ring_fl_db(adap, q);
557 
558 	if (unlikely(fl_starving(q))) {
559 		smp_wmb();
560 		set_bit(q->cntxt_id - adap->sge.egr_start,
561 			adap->sge.starving_fl);
562 	}
563 
564 	return cred;
565 }
566 
567 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
568 {
569 	refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
570 		  GFP_ATOMIC);
571 }
572 
573 /**
574  *	alloc_ring - allocate resources for an SGE descriptor ring
575  *	@dev: the PCI device's core device
576  *	@nelem: the number of descriptors
577  *	@elem_size: the size of each descriptor
578  *	@sw_size: the size of the SW state associated with each ring element
579  *	@phys: the physical address of the allocated ring
580  *	@metadata: address of the array holding the SW state for the ring
581  *	@stat_size: extra space in HW ring for status information
582  *	@node: preferred node for memory allocations
583  *
584  *	Allocates resources for an SGE descriptor ring, such as Tx queues,
585  *	free buffer lists, or response queues.  Each SGE ring requires
586  *	space for its HW descriptors plus, optionally, space for the SW state
587  *	associated with each HW entry (the metadata).  The function returns
588  *	three values: the virtual address for the HW ring (the return value
589  *	of the function), the bus address of the HW ring, and the address
590  *	of the SW ring.
591  */
592 static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
593 			size_t sw_size, dma_addr_t *phys, void *metadata,
594 			size_t stat_size, int node)
595 {
596 	size_t len = nelem * elem_size + stat_size;
597 	void *s = NULL;
598 	void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
599 
600 	if (!p)
601 		return NULL;
602 	if (sw_size) {
603 		s = kzalloc_node(nelem * sw_size, GFP_KERNEL, node);
604 
605 		if (!s) {
606 			dma_free_coherent(dev, len, p, *phys);
607 			return NULL;
608 		}
609 	}
610 	if (metadata)
611 		*(void **)metadata = s;
612 	memset(p, 0, len);
613 	return p;
614 }
615 
616 /**
617  *	sgl_len - calculates the size of an SGL of the given capacity
618  *	@n: the number of SGL entries
619  *
620  *	Calculates the number of flits needed for a scatter/gather list that
621  *	can hold the given number of entries.
622  */
623 static inline unsigned int sgl_len(unsigned int n)
624 {
625 	n--;
626 	return (3 * n) / 2 + (n & 1) + 2;
627 }
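
/*
 * Worked example (illustrative): the first SGE takes 2 flits (cmd_nsge and
 * len0 plus addr0) and each further pair of SGEs takes 3 flits (two 32-bit
 * lengths in one flit plus two 64-bit addresses), so
 * sgl_len(3) = (3 * 2) / 2 + (2 & 1) + 2 = 5 flits.
 */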
628 
629 /**
630  *	flits_to_desc - returns the num of Tx descriptors for the given flits
631  *	@n: the number of flits
632  *
633  *	Returns the number of Tx descriptors needed for the supplied number
634  *	of flits.
635  */
636 static inline unsigned int flits_to_desc(unsigned int n)
637 {
638 	BUG_ON(n > SGE_MAX_WR_LEN / 8);
639 	return DIV_ROUND_UP(n, 8);
640 }
641 
642 /**
643  *	is_eth_imm - can an Ethernet packet be sent as immediate data?
644  *	@skb: the packet
645  *
646  *	Returns whether an Ethernet packet is small enough to fit as
647  *	immediate data.
648  */
649 static inline int is_eth_imm(const struct sk_buff *skb)
650 {
651 	return skb->len <= MAX_IMM_TX_PKT_LEN - sizeof(struct cpl_tx_pkt);
652 }
653 
654 /**
655  *	calc_tx_flits - calculate the number of flits for a packet Tx WR
656  *	@skb: the packet
657  *
658  *	Returns the number of flits needed for a Tx WR for the given Ethernet
659  *	packet, including the needed WR and CPL headers.
660  */
661 static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
662 {
663 	unsigned int flits;
664 
665 	if (is_eth_imm(skb))
666 		return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), 8);
667 
668 	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4;
669 	if (skb_shinfo(skb)->gso_size)
670 		flits += 2;
671 	return flits;
672 }
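
/*
 * Example (illustrative): a non-TSO packet too long to be inlined, with a
 * linear head and two page fragments, needs sgl_len(3) + 4 = 9 flits for
 * the WR, CPL and SGL, i.e. flits_to_desc(9) = 2 Tx descriptors.
 */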
673 
674 /**
675  *	calc_tx_descs - calculate the number of Tx descriptors for a packet
676  *	@skb: the packet
677  *
678  *	Returns the number of Tx descriptors needed for the given Ethernet
679  *	packet, including the needed WR and CPL headers.
680  */
681 static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
682 {
683 	return flits_to_desc(calc_tx_flits(skb));
684 }
685 
686 /**
687  *	write_sgl - populate a scatter/gather list for a packet
688  *	@skb: the packet
689  *	@q: the Tx queue we are writing into
690  *	@sgl: starting location for writing the SGL
691  *	@end: points right after the end of the SGL
692  *	@start: start offset into skb main-body data to include in the SGL
693  *	@addr: the list of bus addresses for the SGL elements
694  *
695  *	Generates a gather list for the buffers that make up a packet.
696  *	The caller must provide adequate space for the SGL that will be written.
697  *	The SGL includes all of the packet's page fragments and the data in its
698  *	main body except for the first @start bytes.  @sgl must be 16-byte
699  *	aligned and within a Tx descriptor with available space.  @end points
700  *	right after the end of the SGL but does not account for any potential
701  *	wrap around, i.e., @end > @sgl.
702  */
703 static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
704 		      struct ulptx_sgl *sgl, u64 *end, unsigned int start,
705 		      const dma_addr_t *addr)
706 {
707 	unsigned int i, len;
708 	struct ulptx_sge_pair *to;
709 	const struct skb_shared_info *si = skb_shinfo(skb);
710 	unsigned int nfrags = si->nr_frags;
711 	struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
712 
713 	len = skb_headlen(skb) - start;
714 	if (likely(len)) {
715 		sgl->len0 = htonl(len);
716 		sgl->addr0 = cpu_to_be64(addr[0] + start);
717 		nfrags++;
718 	} else {
719 		sgl->len0 = htonl(si->frags[0].size);
720 		sgl->addr0 = cpu_to_be64(addr[1]);
721 	}
722 
723 	sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags));
724 	if (likely(--nfrags == 0))
725 		return;
726 	/*
727 	 * Most of the complexity below deals with the possibility we hit the
728 	 * end of the queue in the middle of writing the SGL.  For this case
729 	 * only we create the SGL in a temporary buffer and then copy it.
730 	 */
731 	to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
732 
733 	for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
734 		to->len[0] = cpu_to_be32(si->frags[i].size);
735 		to->len[1] = cpu_to_be32(si->frags[++i].size);
736 		to->addr[0] = cpu_to_be64(addr[i]);
737 		to->addr[1] = cpu_to_be64(addr[++i]);
738 	}
739 	if (nfrags) {
740 		to->len[0] = cpu_to_be32(si->frags[i].size);
741 		to->len[1] = cpu_to_be32(0);
742 		to->addr[0] = cpu_to_be64(addr[i + 1]);
743 	}
744 	if (unlikely((u8 *)end > (u8 *)q->stat)) {
745 		unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
746 
747 		if (likely(part0))
748 			memcpy(sgl->sge, buf, part0);
749 		part1 = (u8 *)end - (u8 *)q->stat;
750 		memcpy(q->desc, (u8 *)buf + part0, part1);
751 		end = (void *)q->desc + part1;
752 	}
753 	if ((uintptr_t)end & 8)           /* 0-pad to multiple of 16 */
754 		*(u64 *)end = 0;
755 }
756 
757 /**
758  *	ring_tx_db - check and potentially ring a Tx queue's doorbell
759  *	@adap: the adapter
760  *	@q: the Tx queue
761  *	@n: number of new descriptors to give to HW
762  *
763  *	Ring the doorbell for a Tx queue.
764  */
765 static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
766 {
767 	wmb();            /* write descriptors before telling HW */
768 	t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
769 		     QID(q->cntxt_id) | PIDX(n));
770 }
771 
772 /**
773  *	inline_tx_skb - inline a packet's data into Tx descriptors
774  *	@skb: the packet
775  *	@q: the Tx queue where the packet will be inlined
776  *	@pos: starting position in the Tx queue where to inline the packet
777  *
778  *	Inline a packet's contents directly into Tx descriptors, starting at
779  *	the given position within the Tx DMA ring.
780  *	Most of the complexity of this operation is dealing with wrap arounds
781  *	in the middle of the packet we want to inline.
782  */
783 static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
784 			  void *pos)
785 {
786 	u64 *p;
787 	int left = (void *)q->stat - pos;
788 
789 	if (likely(skb->len <= left)) {
790 		if (likely(!skb->data_len))
791 			skb_copy_from_linear_data(skb, pos, skb->len);
792 		else
793 			skb_copy_bits(skb, 0, pos, skb->len);
794 		pos += skb->len;
795 	} else {
796 		skb_copy_bits(skb, 0, pos, left);
797 		skb_copy_bits(skb, left, q->desc, skb->len - left);
798 		pos = (void *)q->desc + (skb->len - left);
799 	}
800 
801 	/* 0-pad to multiple of 16 */
802 	p = PTR_ALIGN(pos, 8);
803 	if ((uintptr_t)p & 8)
804 		*p = 0;
805 }
806 
807 /*
808  * Figure out what HW csum a packet wants and return the appropriate control
809  * bits.
810  */
811 static u64 hwcsum(const struct sk_buff *skb)
812 {
813 	int csum_type;
814 	const struct iphdr *iph = ip_hdr(skb);
815 
816 	if (iph->version == 4) {
817 		if (iph->protocol == IPPROTO_TCP)
818 			csum_type = TX_CSUM_TCPIP;
819 		else if (iph->protocol == IPPROTO_UDP)
820 			csum_type = TX_CSUM_UDPIP;
821 		else {
822 nocsum:			/*
823 			 * unknown protocol, disable HW csum
824 			 * and hope a bad packet is detected
825 			 */
826 			return TXPKT_L4CSUM_DIS;
827 		}
828 	} else {
829 		/*
830 		 * this doesn't work with extension headers
831 		 */
832 		const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
833 
834 		if (ip6h->nexthdr == IPPROTO_TCP)
835 			csum_type = TX_CSUM_TCPIP6;
836 		else if (ip6h->nexthdr == IPPROTO_UDP)
837 			csum_type = TX_CSUM_UDPIP6;
838 		else
839 			goto nocsum;
840 	}
841 
842 	if (likely(csum_type >= TX_CSUM_TCPIP))
843 		return TXPKT_CSUM_TYPE(csum_type) |
844 			TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
845 			TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
846 	else {
847 		int start = skb_transport_offset(skb);
848 
849 		return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) |
850 			TXPKT_CSUM_LOC(start + skb->csum_offset);
851 	}
852 }
853 
854 static void eth_txq_stop(struct sge_eth_txq *q)
855 {
856 	netif_tx_stop_queue(q->txq);
857 	q->q.stops++;
858 }
859 
860 static inline void txq_advance(struct sge_txq *q, unsigned int n)
861 {
862 	q->in_use += n;
863 	q->pidx += n;
864 	if (q->pidx >= q->size)
865 		q->pidx -= q->size;
866 }
867 
868 /**
869  *	t4_eth_xmit - add a packet to an Ethernet Tx queue
870  *	@skb: the packet
871  *	@dev: the egress net device
872  *
873  *	Add a packet to an SGE Ethernet Tx queue.  Runs with softirqs disabled.
874  */
875 netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
876 {
877 	u32 wr_mid;
878 	u64 cntrl, *end;
879 	int qidx, credits;
880 	unsigned int flits, ndesc;
881 	struct adapter *adap;
882 	struct sge_eth_txq *q;
883 	const struct port_info *pi;
884 	struct fw_eth_tx_pkt_wr *wr;
885 	struct cpl_tx_pkt_core *cpl;
886 	const struct skb_shared_info *ssi;
887 	dma_addr_t addr[MAX_SKB_FRAGS + 1];
888 
889 	/*
890 	 * The chip's minimum packet length is 10 octets, but we play it safe
891 	 * and reject anything shorter than an Ethernet header.
892 	 */
893 	if (unlikely(skb->len < ETH_HLEN)) {
894 out_free:	dev_kfree_skb(skb);
895 		return NETDEV_TX_OK;
896 	}
897 
898 	pi = netdev_priv(dev);
899 	adap = pi->adapter;
900 	qidx = skb_get_queue_mapping(skb);
901 	q = &adap->sge.ethtxq[qidx + pi->first_qset];
902 
903 	reclaim_completed_tx(adap, &q->q, true);
904 
905 	flits = calc_tx_flits(skb);
906 	ndesc = flits_to_desc(flits);
907 	credits = txq_avail(&q->q) - ndesc;
908 
909 	if (unlikely(credits < 0)) {
910 		eth_txq_stop(q);
911 		dev_err(adap->pdev_dev,
912 			"%s: Tx ring %u full while queue awake!\n",
913 			dev->name, qidx);
914 		return NETDEV_TX_BUSY;
915 	}
916 
917 	if (!is_eth_imm(skb) &&
918 	    unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
919 		q->mapping_err++;
920 		goto out_free;
921 	}
922 
923 	wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2));
924 	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
925 		eth_txq_stop(q);
926 		wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ;
927 	}
928 
929 	wr = (void *)&q->q.desc[q->q.pidx];
930 	wr->equiq_to_len16 = htonl(wr_mid);
931 	wr->r3 = cpu_to_be64(0);
932 	end = (u64 *)wr + flits;
933 
934 	ssi = skb_shinfo(skb);
935 	if (ssi->gso_size) {
936 		struct cpl_tx_pkt_lso *lso = (void *)wr;
937 		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
938 		int l3hdr_len = skb_network_header_len(skb);
939 		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
940 
941 		wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
942 				       FW_WR_IMMDLEN(sizeof(*lso)));
943 		lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
944 					LSO_FIRST_SLICE | LSO_LAST_SLICE |
945 					LSO_IPV6(v6) |
946 					LSO_ETHHDR_LEN(eth_xtra_len / 4) |
947 					LSO_IPHDR_LEN(l3hdr_len / 4) |
948 					LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
949 		lso->c.ipid_ofst = htons(0);
950 		lso->c.mss = htons(ssi->gso_size);
951 		lso->c.seqno_offset = htonl(0);
952 		lso->c.len = htonl(skb->len);
953 		cpl = (void *)(lso + 1);
954 		cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
955 			TXPKT_IPHDR_LEN(l3hdr_len) |
956 			TXPKT_ETHHDR_LEN(eth_xtra_len);
957 		q->tso++;
958 		q->tx_cso += ssi->gso_segs;
959 	} else {
960 		int len;
961 
962 		len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
963 		wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
964 				       FW_WR_IMMDLEN(len));
965 		cpl = (void *)(wr + 1);
966 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
967 			cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
968 			q->tx_cso++;
969 		} else
970 			cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
971 	}
972 
973 	if (vlan_tx_tag_present(skb)) {
974 		q->vlan_ins++;
975 		cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb));
976 	}
977 
978 	cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
979 			   TXPKT_INTF(pi->tx_chan) | TXPKT_PF(adap->fn));
980 	cpl->pack = htons(0);
981 	cpl->len = htons(skb->len);
982 	cpl->ctrl1 = cpu_to_be64(cntrl);
983 
984 	if (is_eth_imm(skb)) {
985 		inline_tx_skb(skb, &q->q, cpl + 1);
986 		dev_kfree_skb(skb);
987 	} else {
988 		int last_desc;
989 
990 		write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
991 			  addr);
992 		skb_orphan(skb);
993 
994 		last_desc = q->q.pidx + ndesc - 1;
995 		if (last_desc >= q->q.size)
996 			last_desc -= q->q.size;
997 		q->q.sdesc[last_desc].skb = skb;
998 		q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
999 	}
1000 
1001 	txq_advance(&q->q, ndesc);
1002 
1003 	ring_tx_db(adap, &q->q, ndesc);
1004 	return NETDEV_TX_OK;
1005 }
1006 
1007 /**
1008  *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1009  *	@q: the SGE control Tx queue
1010  *
1011  *	This is a variant of reclaim_completed_tx() that is used for Tx queues
1012  *	that send only immediate data (presently just the control queues) and
1013  *	thus do not have any sk_buffs to release.
1014  */
1015 static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1016 {
1017 	int hw_cidx = ntohs(q->stat->cidx);
1018 	int reclaim = hw_cidx - q->cidx;
1019 
1020 	if (reclaim < 0)
1021 		reclaim += q->size;
1022 
1023 	q->in_use -= reclaim;
1024 	q->cidx = hw_cidx;
1025 }
1026 
1027 /**
1028  *	is_imm - check whether a packet can be sent as immediate data
1029  *	@skb: the packet
1030  *
1031  *	Returns true if a packet can be sent as a WR with immediate data.
1032  */
1033 static inline int is_imm(const struct sk_buff *skb)
1034 {
1035 	return skb->len <= MAX_CTRL_WR_LEN;
1036 }
1037 
1038 /**
1039  *	ctrlq_check_stop - check if a control queue is full and should stop
1040  *	@q: the queue
1041  *	@wr: most recent WR written to the queue
1042  *
1043  *	Check if a control queue has become full and should be stopped.
1044  *	We clean up control queue descriptors very lazily, only when we run out.
1045  *	If the queue is still full after reclaiming any completed descriptors,
1046  *	we suspend it and have the last WR wake it up.
1047  */
1048 static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
1049 {
1050 	reclaim_completed_tx_imm(&q->q);
1051 	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
1052 		wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
1053 		q->q.stops++;
1054 		q->full = 1;
1055 	}
1056 }
1057 
1058 /**
1059  *	ctrl_xmit - send a packet through an SGE control Tx queue
1060  *	@q: the control queue
1061  *	@skb: the packet
1062  *
1063  *	Send a packet through an SGE control Tx queue.  Packets sent through
1064  *	a control queue must fit entirely as immediate data.
1065  */
1066 static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
1067 {
1068 	unsigned int ndesc;
1069 	struct fw_wr_hdr *wr;
1070 
1071 	if (unlikely(!is_imm(skb))) {
1072 		WARN_ON(1);
1073 		dev_kfree_skb(skb);
1074 		return NET_XMIT_DROP;
1075 	}
1076 
1077 	ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
1078 	spin_lock(&q->sendq.lock);
1079 
1080 	if (unlikely(q->full)) {
1081 		skb->priority = ndesc;                  /* save for restart */
1082 		__skb_queue_tail(&q->sendq, skb);
1083 		spin_unlock(&q->sendq.lock);
1084 		return NET_XMIT_CN;
1085 	}
1086 
1087 	wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
1088 	inline_tx_skb(skb, &q->q, wr);
1089 
1090 	txq_advance(&q->q, ndesc);
1091 	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
1092 		ctrlq_check_stop(q, wr);
1093 
1094 	ring_tx_db(q->adap, &q->q, ndesc);
1095 	spin_unlock(&q->sendq.lock);
1096 
1097 	kfree_skb(skb);
1098 	return NET_XMIT_SUCCESS;
1099 }
1100 
1101 /**
1102  *	restart_ctrlq - restart a suspended control queue
1103  *	@data: the control queue to restart
1104  *
1105  *	Resumes transmission on a suspended Tx control queue.
1106  */
1107 static void restart_ctrlq(unsigned long data)
1108 {
1109 	struct sk_buff *skb;
1110 	unsigned int written = 0;
1111 	struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;
1112 
1113 	spin_lock(&q->sendq.lock);
1114 	reclaim_completed_tx_imm(&q->q);
1115 	BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES);  /* q should be empty */
1116 
1117 	while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
1118 		struct fw_wr_hdr *wr;
1119 		unsigned int ndesc = skb->priority;     /* previously saved */
1120 
1121 		/*
1122 		 * Write descriptors and free skbs outside the lock to limit
1123 		 * wait times.  q->full is still set so new skbs will be queued.
1124 		 */
1125 		spin_unlock(&q->sendq.lock);
1126 
1127 		wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
1128 		inline_tx_skb(skb, &q->q, wr);
1129 		kfree_skb(skb);
1130 
1131 		written += ndesc;
1132 		txq_advance(&q->q, ndesc);
1133 		if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
1134 			unsigned long old = q->q.stops;
1135 
1136 			ctrlq_check_stop(q, wr);
1137 			if (q->q.stops != old) {          /* suspended anew */
1138 				spin_lock(&q->sendq.lock);
1139 				goto ringdb;
1140 			}
1141 		}
1142 		if (written > 16) {
1143 			ring_tx_db(q->adap, &q->q, written);
1144 			written = 0;
1145 		}
1146 		spin_lock(&q->sendq.lock);
1147 	}
1148 	q->full = 0;
1149 ringdb: if (written)
1150 		ring_tx_db(q->adap, &q->q, written);
1151 	spin_unlock(&q->sendq.lock);
1152 }
1153 
1154 /**
1155  *	t4_mgmt_tx - send a management message
1156  *	@adap: the adapter
1157  *	@skb: the packet containing the management message
1158  *
1159  *	Send a management message through control queue 0.
1160  */
1161 int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1162 {
1163 	int ret;
1164 
1165 	local_bh_disable();
1166 	ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
1167 	local_bh_enable();
1168 	return ret;
1169 }
1170 
1171 /**
1172  *	is_ofld_imm - check whether a packet can be sent as immediate data
1173  *	@skb: the packet
1174  *
1175  *	Returns true if a packet can be sent as an offload WR with immediate
1176  *	data.  We currently use the same limit as for Ethernet packets.
1177  */
1178 static inline int is_ofld_imm(const struct sk_buff *skb)
1179 {
1180 	return skb->len <= MAX_IMM_TX_PKT_LEN;
1181 }
1182 
1183 /**
1184  *	calc_tx_flits_ofld - calculate # of flits for an offload packet
1185  *	@skb: the packet
1186  *
1187  *	Returns the number of flits needed for the given offload packet.
1188  *	These packets are already fully constructed and no additional headers
1189  *	will be added.
1190  */
1191 static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
1192 {
1193 	unsigned int flits, cnt;
1194 
1195 	if (is_ofld_imm(skb))
1196 		return DIV_ROUND_UP(skb->len, 8);
1197 
1198 	flits = skb_transport_offset(skb) / 8U;   /* headers */
1199 	cnt = skb_shinfo(skb)->nr_frags;
1200 	if (skb->tail != skb->transport_header)
1201 		cnt++;
1202 	return flits + sgl_len(cnt);
1203 }
1204 
1205 /**
1206  *	txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
1207  *	@adap: the adapter
1208  *	@q: the queue to stop
1209  *
1210  *	Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
1211  *	inability to map packets.  A periodic timer attempts to restart
1212  *	queues so marked.
1213  */
1214 static void txq_stop_maperr(struct sge_ofld_txq *q)
1215 {
1216 	q->mapping_err++;
1217 	q->q.stops++;
1218 	set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
1219 		q->adap->sge.txq_maperr);
1220 }
1221 
1222 /**
1223  *	ofldtxq_stop - stop an offload Tx queue that has become full
1224  *	@q: the queue to stop
1225  *	@skb: the packet causing the queue to become full
1226  *
1227  *	Stops an offload Tx queue that has become full and modifies the packet
1228  *	being written to request a wakeup.
1229  */
1230 static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb)
1231 {
1232 	struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data;
1233 
1234 	wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
1235 	q->q.stops++;
1236 	q->full = 1;
1237 }
1238 
1239 /**
1240  *	service_ofldq - restart a suspended offload queue
1241  *	@q: the offload queue
1242  *
1243  *	Services an offload Tx queue by moving packets from its packet queue
1244  *	to the HW Tx ring.  The function starts and ends with the queue locked.
1245  */
1246 static void service_ofldq(struct sge_ofld_txq *q)
1247 {
1248 	u64 *pos;
1249 	int credits;
1250 	struct sk_buff *skb;
1251 	unsigned int written = 0;
1252 	unsigned int flits, ndesc;
1253 
1254 	while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
1255 		/*
1256 		 * We drop the lock but leave skb on sendq, thus retaining
1257 		 * exclusive access to the state of the queue.
1258 		 */
1259 		spin_unlock(&q->sendq.lock);
1260 
1261 		reclaim_completed_tx(q->adap, &q->q, false);
1262 
1263 		flits = skb->priority;                /* previously saved */
1264 		ndesc = flits_to_desc(flits);
1265 		credits = txq_avail(&q->q) - ndesc;
1266 		BUG_ON(credits < 0);
1267 		if (unlikely(credits < TXQ_STOP_THRES))
1268 			ofldtxq_stop(q, skb);
1269 
1270 		pos = (u64 *)&q->q.desc[q->q.pidx];
1271 		if (is_ofld_imm(skb))
1272 			inline_tx_skb(skb, &q->q, pos);
1273 		else if (map_skb(q->adap->pdev_dev, skb,
1274 				 (dma_addr_t *)skb->head)) {
1275 			txq_stop_maperr(q);
1276 			spin_lock(&q->sendq.lock);
1277 			break;
1278 		} else {
1279 			int last_desc, hdr_len = skb_transport_offset(skb);
1280 
1281 			memcpy(pos, skb->data, hdr_len);
1282 			write_sgl(skb, &q->q, (void *)pos + hdr_len,
1283 				  pos + flits, hdr_len,
1284 				  (dma_addr_t *)skb->head);
1285 #ifdef CONFIG_NEED_DMA_MAP_STATE
1286 			skb->dev = q->adap->port[0];
1287 			skb->destructor = deferred_unmap_destructor;
1288 #endif
1289 			last_desc = q->q.pidx + ndesc - 1;
1290 			if (last_desc >= q->q.size)
1291 				last_desc -= q->q.size;
1292 			q->q.sdesc[last_desc].skb = skb;
1293 		}
1294 
1295 		txq_advance(&q->q, ndesc);
1296 		written += ndesc;
1297 		if (unlikely(written > 32)) {
1298 			ring_tx_db(q->adap, &q->q, written);
1299 			written = 0;
1300 		}
1301 
1302 		spin_lock(&q->sendq.lock);
1303 		__skb_unlink(skb, &q->sendq);
1304 		if (is_ofld_imm(skb))
1305 			kfree_skb(skb);
1306 	}
1307 	if (likely(written))
1308 		ring_tx_db(q->adap, &q->q, written);
1309 }
1310 
1311 /**
1312  *	ofld_xmit - send a packet through an offload queue
1313  *	@q: the Tx offload queue
1314  *	@skb: the packet
1315  *
1316  *	Send an offload packet through an SGE offload queue.
1317  */
1318 static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb)
1319 {
1320 	skb->priority = calc_tx_flits_ofld(skb);       /* save for restart */
1321 	spin_lock(&q->sendq.lock);
1322 	__skb_queue_tail(&q->sendq, skb);
1323 	if (q->sendq.qlen == 1)
1324 		service_ofldq(q);
1325 	spin_unlock(&q->sendq.lock);
1326 	return NET_XMIT_SUCCESS;
1327 }
1328 
1329 /**
1330  *	restart_ofldq - restart a suspended offload queue
1331  *	@data: the offload queue to restart
1332  *
1333  *	Resumes transmission on a suspended Tx offload queue.
1334  */
1335 static void restart_ofldq(unsigned long data)
1336 {
1337 	struct sge_ofld_txq *q = (struct sge_ofld_txq *)data;
1338 
1339 	spin_lock(&q->sendq.lock);
1340 	q->full = 0;            /* the queue actually is completely empty now */
1341 	service_ofldq(q);
1342 	spin_unlock(&q->sendq.lock);
1343 }
1344 
1345 /**
1346  *	skb_txq - return the Tx queue an offload packet should use
1347  *	@skb: the packet
1348  *
1349  *	Returns the Tx queue an offload packet should use as indicated by bits
1350  *	1-15 in the packet's queue_mapping.
1351  */
1352 static inline unsigned int skb_txq(const struct sk_buff *skb)
1353 {
1354 	return skb->queue_mapping >> 1;
1355 }
1356 
1357 /**
1358  *	is_ctrl_pkt - return whether an offload packet is a control packet
1359  *	@skb: the packet
1360  *
1361  *	Returns whether an offload packet should use an OFLD or a CTRL
1362  *	Tx queue as indicated by bit 0 in the packet's queue_mapping.
1363  */
1364 static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
1365 {
1366 	return skb->queue_mapping & 1;
1367 }
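
/*
 * Encoding example (illustrative): queue_mapping == (3 << 1) | 0 sends an
 * skb to OFLD Tx queue 3, while (0 << 1) | 1 sends it to control queue 0;
 * skb_txq() and is_ctrl_pkt() above decode these two fields.
 */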
1368 
1369 static inline int ofld_send(struct adapter *adap, struct sk_buff *skb)
1370 {
1371 	unsigned int idx = skb_txq(skb);
1372 
1373 	if (unlikely(is_ctrl_pkt(skb)))
1374 		return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
1375 	return ofld_xmit(&adap->sge.ofldtxq[idx], skb);
1376 }
1377 
1378 /**
1379  *	t4_ofld_send - send an offload packet
1380  *	@adap: the adapter
1381  *	@skb: the packet
1382  *
1383  *	Sends an offload packet.  We use the packet queue_mapping to select the
1384  *	appropriate Tx queue as follows: bit 0 indicates whether the packet
1385  *	should be sent as regular or control, bits 1-15 select the queue.
1386  */
1387 int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
1388 {
1389 	int ret;
1390 
1391 	local_bh_disable();
1392 	ret = ofld_send(adap, skb);
1393 	local_bh_enable();
1394 	return ret;
1395 }
1396 
1397 /**
1398  *	cxgb4_ofld_send - send an offload packet
1399  *	@dev: the net device
1400  *	@skb: the packet
1401  *
1402  *	Sends an offload packet.  This is an exported version of @t4_ofld_send,
1403  *	intended for ULDs.
1404  */
1405 int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
1406 {
1407 	return t4_ofld_send(netdev2adap(dev), skb);
1408 }
1409 EXPORT_SYMBOL(cxgb4_ofld_send);
1410 
1411 static inline void copy_frags(struct skb_shared_info *ssi,
1412 			      const struct pkt_gl *gl, unsigned int offset)
1413 {
1414 	unsigned int n;
1415 
1416 	/* usually there's just one frag */
1417 	ssi->frags[0].page = gl->frags[0].page;
1418 	ssi->frags[0].page_offset = gl->frags[0].page_offset + offset;
1419 	ssi->frags[0].size = gl->frags[0].size - offset;
1420 	ssi->nr_frags = gl->nfrags;
1421 	n = gl->nfrags - 1;
1422 	if (n)
1423 		memcpy(&ssi->frags[1], &gl->frags[1], n * sizeof(skb_frag_t));
1424 
1425 	/* get a reference to the last page, we don't own it */
1426 	get_page(gl->frags[n].page);
1427 }
1428 
1429 /**
1430  *	cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
1431  *	@gl: the gather list
1432  *	@skb_len: size of sk_buff main body if it carries fragments
1433  *	@pull_len: amount of data to move to the sk_buff's main body
1434  *
1435  *	Builds an sk_buff from the given packet gather list.  Returns the
1436  *	sk_buff or %NULL if sk_buff allocation failed.
1437  */
1438 struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
1439 				   unsigned int skb_len, unsigned int pull_len)
1440 {
1441 	struct sk_buff *skb;
1442 
1443 	/*
1444 	 * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer
1445 	 * size, which is expected since buffers are at least PAGE_SIZEd.
1446 	 * In this case packets up to RX_COPY_THRES have only one fragment.
1447 	 */
1448 	if (gl->tot_len <= RX_COPY_THRES) {
1449 		skb = dev_alloc_skb(gl->tot_len);
1450 		if (unlikely(!skb))
1451 			goto out;
1452 		__skb_put(skb, gl->tot_len);
1453 		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
1454 	} else {
1455 		skb = dev_alloc_skb(skb_len);
1456 		if (unlikely(!skb))
1457 			goto out;
1458 		__skb_put(skb, pull_len);
1459 		skb_copy_to_linear_data(skb, gl->va, pull_len);
1460 
1461 		copy_frags(skb_shinfo(skb), gl, pull_len);
1462 		skb->len = gl->tot_len;
1463 		skb->data_len = skb->len - pull_len;
1464 		skb->truesize += skb->data_len;
1465 	}
1466 out:	return skb;
1467 }
1468 EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
1469 
1470 /**
1471  *	t4_pktgl_free - free a packet gather list
1472  *	@gl: the gather list
1473  *
1474  *	Releases the pages of a packet gather list.  We do not own the last
1475  *	page on the list and do not free it.
1476  */
1477 static void t4_pktgl_free(const struct pkt_gl *gl)
1478 {
1479 	int n;
1480 	const skb_frag_t *p;
1481 
1482 	for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
1483 		put_page(p->page);
1484 }
1485 
1486 /*
1487  * Process an MPS trace packet.  Give it an unused protocol number so it won't
1488  * be delivered to anyone and send it to the stack for capture.
1489  */
1490 static noinline int handle_trace_pkt(struct adapter *adap,
1491 				     const struct pkt_gl *gl)
1492 {
1493 	struct sk_buff *skb;
1494 	struct cpl_trace_pkt *p;
1495 
1496 	skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
1497 	if (unlikely(!skb)) {
1498 		t4_pktgl_free(gl);
1499 		return 0;
1500 	}
1501 
1502 	p = (struct cpl_trace_pkt *)skb->data;
1503 	__skb_pull(skb, sizeof(*p));
1504 	skb_reset_mac_header(skb);
1505 	skb->protocol = htons(0xffff);
1506 	skb->dev = adap->port[0];
1507 	netif_receive_skb(skb);
1508 	return 0;
1509 }
1510 
1511 static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1512 		   const struct cpl_rx_pkt *pkt)
1513 {
1514 	int ret;
1515 	struct sk_buff *skb;
1516 
1517 	skb = napi_get_frags(&rxq->rspq.napi);
1518 	if (unlikely(!skb)) {
1519 		t4_pktgl_free(gl);
1520 		rxq->stats.rx_drops++;
1521 		return;
1522 	}
1523 
1524 	copy_frags(skb_shinfo(skb), gl, RX_PKT_PAD);
1525 	skb->len = gl->tot_len - RX_PKT_PAD;
1526 	skb->data_len = skb->len;
1527 	skb->truesize += skb->data_len;
1528 	skb->ip_summed = CHECKSUM_UNNECESSARY;
1529 	skb_record_rx_queue(skb, rxq->rspq.idx);
1530 	if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
1531 		skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
1532 
1533 	if (unlikely(pkt->vlan_ex)) {
1534 		__vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
1535 		rxq->stats.vlan_ex++;
1536 	}
1537 	ret = napi_gro_frags(&rxq->rspq.napi);
1538 	if (ret == GRO_HELD)
1539 		rxq->stats.lro_pkts++;
1540 	else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
1541 		rxq->stats.lro_merged++;
1542 	rxq->stats.pkts++;
1543 	rxq->stats.rx_cso++;
1544 }
1545 
1546 /**
1547  *	t4_ethrx_handler - process an ingress ethernet packet
1548  *	@q: the response queue that received the packet
1549  *	@rsp: the response queue descriptor holding the RX_PKT message
1550  *	@si: the gather list of packet fragments
1551  *
1552  *	Process an ingress ethernet packet and deliver it to the stack.
1553  */
1554 int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1555 		     const struct pkt_gl *si)
1556 {
1557 	bool csum_ok;
1558 	struct sk_buff *skb;
1559 	struct port_info *pi;
1560 	const struct cpl_rx_pkt *pkt;
1561 	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
1562 
1563 	if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT))
1564 		return handle_trace_pkt(q->adap, si);
1565 
1566 	pkt = (const struct cpl_rx_pkt *)rsp;
1567 	csum_ok = pkt->csum_calc && !pkt->err_vec;
1568 	if ((pkt->l2info & htonl(RXF_TCP)) &&
1569 	    (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
1570 		do_gro(rxq, si, pkt);
1571 		return 0;
1572 	}
1573 
1574 	skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
1575 	if (unlikely(!skb)) {
1576 		t4_pktgl_free(si);
1577 		rxq->stats.rx_drops++;
1578 		return 0;
1579 	}
1580 
1581 	__skb_pull(skb, RX_PKT_PAD);      /* remove ethernet header padding */
1582 	skb->protocol = eth_type_trans(skb, q->netdev);
1583 	skb_record_rx_queue(skb, q->idx);
1584 	if (skb->dev->features & NETIF_F_RXHASH)
1585 		skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
1586 
1587 	pi = netdev_priv(skb->dev);
1588 	rxq->stats.pkts++;
1589 
1590 	if (csum_ok && (pi->rx_offload & RX_CSO) &&
1591 	    (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
1592 		if (!pkt->ip_frag) {
1593 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1594 			rxq->stats.rx_cso++;
1595 		} else if (pkt->l2info & htonl(RXF_IP)) {
1596 			__sum16 c = (__force __sum16)pkt->csum;
1597 			skb->csum = csum_unfold(c);
1598 			skb->ip_summed = CHECKSUM_COMPLETE;
1599 			rxq->stats.rx_cso++;
1600 		}
1601 	} else
1602 		skb_checksum_none_assert(skb);
1603 
1604 	if (unlikely(pkt->vlan_ex)) {
1605 		__vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
1606 		rxq->stats.vlan_ex++;
1607 	}
1608 	netif_receive_skb(skb);
1609 	return 0;
1610 }
1611 
1612 /**
1613  *	restore_rx_bufs - put back a packet's Rx buffers
1614  *	@si: the packet gather list
1615  *	@q: the SGE free list
1616  *	@frags: number of FL buffers to restore
1617  *
1618  *	Puts back on an FL the Rx buffers associated with @si.  The buffers
1619  *	have already been unmapped and are left unmapped; we mark them so to
1620  *	prevent further unmapping attempts.
1621  *
1622  *	This function undoes a series of @unmap_rx_buf calls when we find out
1623  *	that the current packet can't be processed right away after all and we
1624  *	need to come back to it later.  This is a very rare event and there's
1625  *	no effort to make this particularly efficient.
1626  */
1627 static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
1628 			    int frags)
1629 {
1630 	struct rx_sw_desc *d;
1631 
1632 	while (frags--) {
1633 		if (q->cidx == 0)
1634 			q->cidx = q->size - 1;
1635 		else
1636 			q->cidx--;
1637 		d = &q->sdesc[q->cidx];
1638 		d->page = si->frags[frags].page;
1639 		d->dma_addr |= RX_UNMAPPED_BUF;
1640 		q->avail++;
1641 	}
1642 }
1643 
1644 /**
1645  *	is_new_response - check if a response is newly written
1646  *	@r: the response descriptor
1647  *	@q: the response queue
1648  *
1649  *	Returns true if a response descriptor contains a yet unprocessed
1650  *	response.
1651  */
1652 static inline bool is_new_response(const struct rsp_ctrl *r,
1653 				   const struct sge_rspq *q)
1654 {
1655 	return RSPD_GEN(r->type_gen) == q->gen;
1656 }
1657 
1658 /**
1659  *	rspq_next - advance to the next entry in a response queue
1660  *	@q: the queue
1661  *
1662  *	Updates the state of a response queue to advance it to the next entry.
1663  */
1664 static inline void rspq_next(struct sge_rspq *q)
1665 {
1666 	q->cur_desc = (void *)q->cur_desc + q->iqe_len;
1667 	if (unlikely(++q->cidx == q->size)) {
1668 		q->cidx = 0;
1669 		q->gen ^= 1;
1670 		q->cur_desc = q->desc;
1671 	}
1672 }
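
/*
 * Note on the generation bit: q->gen flips each time cidx wraps, so
 * is_new_response() above can distinguish a descriptor newly written by HW
 * in the current pass from a stale one left over from the previous pass
 * through the ring.
 */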
1673 
1674 /**
1675  *	process_responses - process responses from an SGE response queue
1676  *	@q: the ingress queue to process
1677  *	@budget: how many responses can be processed in this round
1678  *
1679  *	Process responses from an SGE response queue up to the supplied budget.
1680  *	Responses include received packets as well as control messages from FW
1681  *	or HW.
1682  *
1683  *	Additionally choose the interrupt holdoff time for the next interrupt
1684  *	on this queue.  If the system is under memory shortage, use a fairly
1685  *	long delay to help recovery.
1686  */
1687 static int process_responses(struct sge_rspq *q, int budget)
1688 {
1689 	int ret, rsp_type;
1690 	int budget_left = budget;
1691 	const struct rsp_ctrl *rc;
1692 	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
1693 
1694 	while (likely(budget_left)) {
1695 		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
1696 		if (!is_new_response(rc, q))
1697 			break;
1698 
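		/*
		 * Make sure the response body is read only after the
		 * generation-bit check above; the descriptor may still be
		 * landing in memory when the gen bit first becomes visible.
		 */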
1699 		rmb();
1700 		rsp_type = RSPD_TYPE(rc->type_gen);
1701 		if (likely(rsp_type == RSP_TYPE_FLBUF)) {
1702 			skb_frag_t *fp;
1703 			struct pkt_gl si;
1704 			const struct rx_sw_desc *rsd;
1705 			u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
1706 
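			/*
			 * RSPD_NEWBUF means the packet starts in a fresh FL
			 * buffer, so any partially consumed buffer we were
			 * packing into can be returned to the FL now.
			 */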
1707 			if (len & RSPD_NEWBUF) {
1708 				if (likely(q->offset > 0)) {
1709 					free_rx_bufs(q->adap, &rxq->fl, 1);
1710 					q->offset = 0;
1711 				}
1712 				len = RSPD_LEN(len);
1713 			}
1714 			si.tot_len = len;
1715 
1716 			/* gather packet fragments */
1717 			for (frags = 0, fp = si.frags; ; frags++, fp++) {
1718 				rsd = &rxq->fl.sdesc[rxq->fl.cidx];
1719 				bufsz = get_buf_size(rsd);
1720 				fp->page = rsd->page;
1721 				fp->page_offset = q->offset;
1722 				fp->size = min(bufsz, len);
1723 				len -= fp->size;
1724 				if (!len)
1725 					break;
1726 				unmap_rx_buf(q->adap, &rxq->fl);
1727 			}
1728 
1729 			/*
1730 			 * Last buffer remains mapped so explicitly make it
1731 			 * coherent for CPU access.
1732 			 */
1733 			dma_sync_single_for_cpu(q->adap->pdev_dev,
1734 						get_buf_addr(rsd),
1735 						fp->size, DMA_FROM_DEVICE);
1736 
1737 			si.va = page_address(si.frags[0].page) +
1738 				si.frags[0].page_offset;
1739 			prefetch(si.va);
1740 
1741 			si.nfrags = frags + 1;
1742 			ret = q->handler(q, q->cur_desc, &si);
1743 			if (likely(ret == 0))
1744 				q->offset += ALIGN(fp->size, FL_ALIGN);
1745 			else
1746 				restore_rx_bufs(&si, &rxq->fl, frags);
1747 		} else if (likely(rsp_type == RSP_TYPE_CPL)) {
1748 			ret = q->handler(q, q->cur_desc, NULL);
1749 		} else {
1750 			ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
1751 		}
1752 
1753 		if (unlikely(ret)) {
1754 			/* couldn't process descriptor, back off for recovery */
1755 			q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX);
1756 			break;
1757 		}
1758 
1759 		rspq_next(q);
1760 		budget_left--;
1761 	}
1762 
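	/*
	 * Replenish the free list if this queue has one (offset >= 0) and at
	 * least 16 buffers' worth of slack has built up.
	 */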
1763 	if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16)
1764 		__refill_fl(q->adap, &rxq->fl);
1765 	return budget - budget_left;
1766 }
1767 
1768 /**
1769  *	napi_rx_handler - the NAPI handler for Rx processing
1770  *	@napi: the napi instance
1771  *	@budget: how many packets we can process in this round
1772  *
1773  *	Handler for new data events when using NAPI.  This does not need any
1774  *	locking or protection from interrupts as data interrupts are off at
1775  *	this point and other adapter interrupts do not interfere (the latter
1776  *	is not a concern at all with MSI-X, as non-data interrupts then have
1777  *	a separate handler).
1778  */
1779 static int napi_rx_handler(struct napi_struct *napi, int budget)
1780 {
1781 	unsigned int params;
1782 	struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
1783 	int work_done = process_responses(q, budget);
1784 
1785 	if (likely(work_done < budget)) {
1786 		napi_complete(napi);
1787 		params = q->next_intr_params;
1788 		q->next_intr_params = q->intr_params;
1789 	} else
1790 		params = QINTR_TIMER_IDX(7);
1791 
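	/*
	 * Return the consumed credits to the hardware and re-arm the
	 * interrupt with the holdoff parameters chosen above.
	 */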
1792 	t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS), CIDXINC(work_done) |
1793 		     INGRESSQID((u32)q->cntxt_id) | SEINTARM(params));
1794 	return work_done;
1795 }
1796 
1797 /*
1798  * The MSI-X interrupt handler for an SGE response queue.
1799  */
1800 irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
1801 {
1802 	struct sge_rspq *q = cookie;
1803 
1804 	napi_schedule(&q->napi);
1805 	return IRQ_HANDLED;
1806 }
1807 
1808 /*
1809  * Process the indirect interrupt entries in the interrupt queue and kick off
1810  * NAPI for each queue that has generated an entry.
1811  */
1812 static unsigned int process_intrq(struct adapter *adap)
1813 {
1814 	unsigned int credits;
1815 	const struct rsp_ctrl *rc;
1816 	struct sge_rspq *q = &adap->sge.intrq;
1817 
1818 	spin_lock(&adap->sge.intrq_lock);
1819 	for (credits = 0; ; credits++) {
1820 		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
1821 		if (!is_new_response(rc, q))
1822 			break;
1823 
1824 		rmb();
1825 		if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
1826 			unsigned int qid = ntohl(rc->pldbuflen_qid);
1827 
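			/*
			 * The queue id in the entry is absolute; convert it
			 * to an index into our ingress queue map.
			 */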
1828 			qid -= adap->sge.ingr_start;
1829 			napi_schedule(&adap->sge.ingr_map[qid]->napi);
1830 		}
1831 
1832 		rspq_next(q);
1833 	}
1834 
1835 	t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), CIDXINC(credits) |
1836 		     INGRESSQID(q->cntxt_id) | SEINTARM(q->intr_params));
1837 	spin_unlock(&adap->sge.intrq_lock);
1838 	return credits;
1839 }
1840 
1841 /*
1842  * The MSI interrupt handler.  Handles data events from SGE response queues
1843  * as well as error and other async events, as they all use the same MSI vector.
1844  */
1845 static irqreturn_t t4_intr_msi(int irq, void *cookie)
1846 {
1847 	struct adapter *adap = cookie;
1848 
1849 	t4_slow_intr_handler(adap);
1850 	process_intrq(adap);
1851 	return IRQ_HANDLED;
1852 }
1853 
1854 /*
1855  * Interrupt handler for legacy INTx interrupts.
1856  * Handles data events from SGE response queues as well as error and other
1857  * async events, as they all use the same interrupt line.
1858  */
1859 static irqreturn_t t4_intr_intx(int irq, void *cookie)
1860 {
1861 	struct adapter *adap = cookie;
1862 
1863 	t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI), 0);
1864 	if (t4_slow_intr_handler(adap) | process_intrq(adap))
1865 		return IRQ_HANDLED;
1866 	return IRQ_NONE;             /* probably shared interrupt */
1867 }
1868 
1869 /**
1870  *	t4_intr_handler - select the top-level interrupt handler
1871  *	@adap: the adapter
1872  *
1873  *	Selects the top-level interrupt handler based on the type of interrupts
1874  *	(MSI-X, MSI, or INTx).
1875  */
1876 irq_handler_t t4_intr_handler(struct adapter *adap)
1877 {
1878 	if (adap->flags & USING_MSIX)
1879 		return t4_sge_intr_msix;
1880 	if (adap->flags & USING_MSI)
1881 		return t4_intr_msi;
1882 	return t4_intr_intx;
1883 }
1884 
1885 static void sge_rx_timer_cb(unsigned long data)
1886 {
1887 	unsigned long m;
1888 	unsigned int i, cnt[2];
1889 	struct adapter *adap = (struct adapter *)data;
1890 	struct sge *s = &adap->sge;
1891 
1892 	for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++)
1893 		for (m = s->starving_fl[i]; m; m &= m - 1) {
1894 			struct sge_eth_rxq *rxq;
1895 			unsigned int id = __ffs(m) + i * BITS_PER_LONG;
1896 			struct sge_fl *fl = s->egr_map[id];
1897 
1898 			clear_bit(id, s->starving_fl);
1899 			smp_mb__after_clear_bit();
1900 
1901 			if (fl_starving(fl)) {
1902 				rxq = container_of(fl, struct sge_eth_rxq, fl);
1903 				if (napi_reschedule(&rxq->rspq.napi))
1904 					fl->starving++;
1905 				else
1906 					set_bit(id, s->starving_fl);
1907 			}
1908 		}
1909 
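	/*
	 * Read the two SGE IDMA activity counters through the debug
	 * interface.  These appear to count how long each IDMA channel has
	 * been idle, in core clock ticks, so values beyond starve_thres
	 * (roughly one second's worth of ticks) suggest a stalled channel.
	 */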
1910 	t4_write_reg(adap, SGE_DEBUG_INDEX, 13);
1911 	cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
1912 	cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
1913 
1914 	for (i = 0; i < 2; i++)
1915 		if (cnt[i] >= s->starve_thres) {
1916 			if (s->idma_state[i] || cnt[i] == 0xffffffff)
1917 				continue;
1918 			s->idma_state[i] = 1;
1919 			t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
1920 			m = t4_read_reg(adap, SGE_DEBUG_DATA_LOW) >> (i * 16);
1921 			dev_warn(adap->pdev_dev,
1922 				 "SGE idma%u starvation detected for "
1923 				 "queue %lu\n", i, m & 0xffff);
1924 		} else if (s->idma_state[i])
1925 			s->idma_state[i] = 0;
1926 
1927 	mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
1928 }
1929 
1930 static void sge_tx_timer_cb(unsigned long data)
1931 {
1932 	unsigned long m;
1933 	unsigned int i, budget;
1934 	struct adapter *adap = (struct adapter *)data;
1935 	struct sge *s = &adap->sge;
1936 
1937 	for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++)
1938 		for (m = s->txq_maperr[i]; m; m &= m - 1) {
1939 			unsigned long id = __ffs(m) + i * BITS_PER_LONG;
1940 			struct sge_ofld_txq *txq = s->egr_map[id];
1941 
1942 			clear_bit(id, s->txq_maperr);
1943 			tasklet_schedule(&txq->qresume_tsk);
1944 		}
1945 
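	/*
	 * Walk the Ethernet Tx queues round-robin, starting where the last
	 * run left off, and reclaim completed descriptors from any queue
	 * that has been quiet for at least 1/100th of a second, up to a
	 * total of MAX_TIMER_TX_RECLAIM descriptors per run.
	 */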
1946 	budget = MAX_TIMER_TX_RECLAIM;
1947 	i = s->ethtxq_rover;
1948 	do {
1949 		struct sge_eth_txq *q = &s->ethtxq[i];
1950 
1951 		if (q->q.in_use &&
1952 		    time_after_eq(jiffies, q->txq->trans_start + HZ / 100) &&
1953 		    __netif_tx_trylock(q->txq)) {
1954 			int avail = reclaimable(&q->q);
1955 
1956 			if (avail) {
1957 				if (avail > budget)
1958 					avail = budget;
1959 
1960 				free_tx_desc(adap, &q->q, avail, true);
1961 				q->q.in_use -= avail;
1962 				budget -= avail;
1963 			}
1964 			__netif_tx_unlock(q->txq);
1965 		}
1966 
1967 		if (++i >= s->ethqsets)
1968 			i = 0;
1969 	} while (budget && i != s->ethtxq_rover);
1970 	s->ethtxq_rover = i;
1971 	mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
1972 }
1973 
1974 int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
1975 		     struct net_device *dev, int intr_idx,
1976 		     struct sge_fl *fl, rspq_handler_t hnd)
1977 {
1978 	int ret, flsz = 0;
1979 	struct fw_iq_cmd c;
1980 	struct port_info *pi = netdev_priv(dev);
1981 
1982 	/* Size needs to be a multiple of 16, including the status entry. */
1983 	iq->size = roundup(iq->size, 16);
1984 
1985 	iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
1986 			      &iq->phys_addr, NULL, 0, NUMA_NO_NODE);
1987 	if (!iq->desc)
1988 		return -ENOMEM;
1989 
1990 	memset(&c, 0, sizeof(c));
1991 	c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
1992 			    FW_CMD_WRITE | FW_CMD_EXEC |
1993 			    FW_IQ_CMD_PFN(adap->fn) | FW_IQ_CMD_VFN(0));
1994 	c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) |
1995 				 FW_LEN16(c));
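	/*
	 * A negative intr_idx means interrupts for this queue are forwarded
	 * to another ingress queue (the indirect interrupt queue) rather than
	 * using a dedicated MSI-X vector; it is encoded as -intr_idx - 1.
	 */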
1996 	c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
1997 		FW_IQ_CMD_IQASYNCH(fwevtq) | FW_IQ_CMD_VIID(pi->viid) |
1998 		FW_IQ_CMD_IQANDST(intr_idx < 0) | FW_IQ_CMD_IQANUD(1) |
1999 		FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
2000 							-intr_idx - 1));
2001 	c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
2002 		FW_IQ_CMD_IQGTSMODE |
2003 		FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
2004 		FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
2005 	c.iqsize = htons(iq->size);
2006 	c.iqaddr = cpu_to_be64(iq->phys_addr);
2007 
2008 	if (fl) {
2009 		fl->size = roundup(fl->size, 8);
2010 		fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
2011 				      sizeof(struct rx_sw_desc), &fl->addr,
2012 				      &fl->sdesc, STAT_LEN, NUMA_NO_NODE);
2013 		if (!fl->desc)
2014 			goto fl_nomem;
2015 
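		/*
		 * The FL size handed to firmware is in units of eight 64-bit
		 * pointer entries (64 bytes) and includes the status page.
		 */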
2016 		flsz = fl->size / 8 + STAT_LEN / sizeof(struct tx_desc);
2017 		c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN |
2018 					    FW_IQ_CMD_FL0FETCHRO(1) |
2019 					    FW_IQ_CMD_FL0DATARO(1) |
2020 					    FW_IQ_CMD_FL0PADEN);
2021 		c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) |
2022 				FW_IQ_CMD_FL0FBMAX(3));
2023 		c.fl0size = htons(flsz);
2024 		c.fl0addr = cpu_to_be64(fl->addr);
2025 	}
2026 
2027 	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2028 	if (ret)
2029 		goto err;
2030 
2031 	netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
2032 	iq->cur_desc = iq->desc;
2033 	iq->cidx = 0;
2034 	iq->gen = 1;
2035 	iq->next_intr_params = iq->intr_params;
2036 	iq->cntxt_id = ntohs(c.iqid);
2037 	iq->abs_id = ntohs(c.physiqid);
2038 	iq->size--;                           /* subtract status entry */
2039 	iq->adap = adap;
2040 	iq->netdev = dev;
2041 	iq->handler = hnd;
2042 
2043 	/* set offset to -1 to distinguish ingress queues without FL */
2044 	iq->offset = fl ? 0 : -1;
2045 
2046 	adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;
2047 
2048 	if (fl) {
2049 		fl->cntxt_id = ntohs(c.fl0id);
2050 		fl->avail = fl->pend_cred = 0;
2051 		fl->pidx = fl->cidx = 0;
2052 		fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
2053 		adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
2054 		refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
2055 	}
2056 	return 0;
2057 
2058 fl_nomem:
2059 	ret = -ENOMEM;
2060 err:
2061 	if (iq->desc) {
2062 		dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
2063 				  iq->desc, iq->phys_addr);
2064 		iq->desc = NULL;
2065 	}
2066 	if (fl && fl->desc) {
2067 		kfree(fl->sdesc);
2068 		fl->sdesc = NULL;
2069 		dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
2070 				  fl->desc, fl->addr);
2071 		fl->desc = NULL;
2072 	}
2073 	return ret;
2074 }
2075 
2076 static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
2077 {
2078 	q->in_use = 0;
2079 	q->cidx = q->pidx = 0;
2080 	q->stops = q->restarts = 0;
2081 	q->stat = (void *)&q->desc[q->size];
2082 	q->cntxt_id = id;
2083 	adap->sge.egr_map[id - adap->sge.egr_start] = q;
2084 }
2085 
2086 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
2087 			 struct net_device *dev, struct netdev_queue *netdevq,
2088 			 unsigned int iqid)
2089 {
2090 	int ret, nentries;
2091 	struct fw_eq_eth_cmd c;
2092 	struct port_info *pi = netdev_priv(dev);
2093 
2094 	/* Add status entries */
2095 	nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
2096 
2097 	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
2098 			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
2099 			&txq->q.phys_addr, &txq->q.sdesc, STAT_LEN,
2100 			netdev_queue_numa_node_read(netdevq));
2101 	if (!txq->q.desc)
2102 		return -ENOMEM;
2103 
2104 	memset(&c, 0, sizeof(c));
2105 	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
2106 			    FW_CMD_WRITE | FW_CMD_EXEC |
2107 			    FW_EQ_ETH_CMD_PFN(adap->fn) | FW_EQ_ETH_CMD_VFN(0));
2108 	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC |
2109 				 FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
2110 	c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid));
2111 	c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) |
2112 				   FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
2113 				   FW_EQ_ETH_CMD_FETCHRO(1) |
2114 				   FW_EQ_ETH_CMD_IQID(iqid));
2115 	c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN(2) |
2116 				  FW_EQ_ETH_CMD_FBMAX(3) |
2117 				  FW_EQ_ETH_CMD_CIDXFTHRESH(5) |
2118 				  FW_EQ_ETH_CMD_EQSIZE(nentries));
2119 	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2120 
2121 	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2122 	if (ret) {
2123 		kfree(txq->q.sdesc);
2124 		txq->q.sdesc = NULL;
2125 		dma_free_coherent(adap->pdev_dev,
2126 				  nentries * sizeof(struct tx_desc),
2127 				  txq->q.desc, txq->q.phys_addr);
2128 		txq->q.desc = NULL;
2129 		return ret;
2130 	}
2131 
2132 	init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_GET(ntohl(c.eqid_pkd)));
2133 	txq->txq = netdevq;
2134 	txq->tso = txq->tx_cso = txq->vlan_ins = 0;
2135 	txq->mapping_err = 0;
2136 	return 0;
2137 }
2138 
2139 int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
2140 			  struct net_device *dev, unsigned int iqid,
2141 			  unsigned int cmplqid)
2142 {
2143 	int ret, nentries;
2144 	struct fw_eq_ctrl_cmd c;
2145 	struct port_info *pi = netdev_priv(dev);
2146 
2147 	/* Add status entries */
2148 	nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
2149 
2150 	txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
2151 				 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
2152 				 NULL, 0, NUMA_NO_NODE);
2153 	if (!txq->q.desc)
2154 		return -ENOMEM;
2155 
2156 	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
2157 			    FW_CMD_WRITE | FW_CMD_EXEC |
2158 			    FW_EQ_CTRL_CMD_PFN(adap->fn) |
2159 			    FW_EQ_CTRL_CMD_VFN(0));
2160 	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC |
2161 				 FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
2162 	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID(cmplqid));
2163 	c.physeqid_pkd = htonl(0);
2164 	c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE(2) |
2165 				   FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) |
2166 				   FW_EQ_CTRL_CMD_FETCHRO |
2167 				   FW_EQ_CTRL_CMD_IQID(iqid));
2168 	c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN(2) |
2169 				  FW_EQ_CTRL_CMD_FBMAX(3) |
2170 				  FW_EQ_CTRL_CMD_CIDXFTHRESH(5) |
2171 				  FW_EQ_CTRL_CMD_EQSIZE(nentries));
2172 	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2173 
2174 	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2175 	if (ret) {
2176 		dma_free_coherent(adap->pdev_dev,
2177 				  nentries * sizeof(struct tx_desc),
2178 				  txq->q.desc, txq->q.phys_addr);
2179 		txq->q.desc = NULL;
2180 		return ret;
2181 	}
2182 
2183 	init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_GET(ntohl(c.cmpliqid_eqid)));
2184 	txq->adap = adap;
2185 	skb_queue_head_init(&txq->sendq);
2186 	tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
2187 	txq->full = 0;
2188 	return 0;
2189 }
2190 
2191 int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
2192 			  struct net_device *dev, unsigned int iqid)
2193 {
2194 	int ret, nentries;
2195 	struct fw_eq_ofld_cmd c;
2196 	struct port_info *pi = netdev_priv(dev);
2197 
2198 	/* Add status entries */
2199 	nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
2200 
2201 	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
2202 			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
2203 			&txq->q.phys_addr, &txq->q.sdesc, STAT_LEN,
2204 			NUMA_NO_NODE);
2205 	if (!txq->q.desc)
2206 		return -ENOMEM;
2207 
2208 	memset(&c, 0, sizeof(c));
2209 	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
2210 			    FW_CMD_WRITE | FW_CMD_EXEC |
2211 			    FW_EQ_OFLD_CMD_PFN(adap->fn) |
2212 			    FW_EQ_OFLD_CMD_VFN(0));
2213 	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC |
2214 				 FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
2215 	c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) |
2216 				   FW_EQ_OFLD_CMD_PCIECHN(pi->tx_chan) |
2217 				   FW_EQ_OFLD_CMD_FETCHRO(1) |
2218 				   FW_EQ_OFLD_CMD_IQID(iqid));
2219 	c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN(2) |
2220 				  FW_EQ_OFLD_CMD_FBMAX(3) |
2221 				  FW_EQ_OFLD_CMD_CIDXFTHRESH(5) |
2222 				  FW_EQ_OFLD_CMD_EQSIZE(nentries));
2223 	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2224 
2225 	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2226 	if (ret) {
2227 		kfree(txq->q.sdesc);
2228 		txq->q.sdesc = NULL;
2229 		dma_free_coherent(adap->pdev_dev,
2230 				  nentries * sizeof(struct tx_desc),
2231 				  txq->q.desc, txq->q.phys_addr);
2232 		txq->q.desc = NULL;
2233 		return ret;
2234 	}
2235 
2236 	init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_GET(ntohl(c.eqid_pkd)));
2237 	txq->adap = adap;
2238 	skb_queue_head_init(&txq->sendq);
2239 	tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
2240 	txq->full = 0;
2241 	txq->mapping_err = 0;
2242 	return 0;
2243 }
2244 
2245 static void free_txq(struct adapter *adap, struct sge_txq *q)
2246 {
2247 	dma_free_coherent(adap->pdev_dev,
2248 			  q->size * sizeof(struct tx_desc) + STAT_LEN,
2249 			  q->desc, q->phys_addr);
2250 	q->cntxt_id = 0;
2251 	q->sdesc = NULL;
2252 	q->desc = NULL;
2253 }
2254 
2255 static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
2256 			 struct sge_fl *fl)
2257 {
2258 	unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
2259 
2260 	adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
2261 	t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP,
2262 		   rq->cntxt_id, fl_id, 0xffff);
2263 	dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
2264 			  rq->desc, rq->phys_addr);
2265 	netif_napi_del(&rq->napi);
2266 	rq->netdev = NULL;
2267 	rq->cntxt_id = rq->abs_id = 0;
2268 	rq->desc = NULL;
2269 
2270 	if (fl) {
2271 		free_rx_bufs(adap, fl, fl->avail);
2272 		dma_free_coherent(adap->pdev_dev, fl->size * 8 + STAT_LEN,
2273 				  fl->desc, fl->addr);
2274 		kfree(fl->sdesc);
2275 		fl->sdesc = NULL;
2276 		fl->cntxt_id = 0;
2277 		fl->desc = NULL;
2278 	}
2279 }
2280 
2281 /**
2282  *	t4_free_sge_resources - free SGE resources
2283  *	@adap: the adapter
2284  *
2285  *	Frees resources used by the SGE queue sets.
2286  */
2287 void t4_free_sge_resources(struct adapter *adap)
2288 {
2289 	int i;
2290 	struct sge_eth_rxq *eq = adap->sge.ethrxq;
2291 	struct sge_eth_txq *etq = adap->sge.ethtxq;
2292 	struct sge_ofld_rxq *oq = adap->sge.ofldrxq;
2293 
2294 	/* clean up Ethernet Tx/Rx queues */
2295 	for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
2296 		if (eq->rspq.desc)
2297 			free_rspq_fl(adap, &eq->rspq, &eq->fl);
2298 		if (etq->q.desc) {
2299 			t4_eth_eq_free(adap, adap->fn, adap->fn, 0,
2300 				       etq->q.cntxt_id);
2301 			free_tx_desc(adap, &etq->q, etq->q.in_use, true);
2302 			kfree(etq->q.sdesc);
2303 			free_txq(adap, &etq->q);
2304 		}
2305 	}
2306 
2307 	/* clean up RDMA and iSCSI Rx queues */
2308 	for (i = 0; i < adap->sge.ofldqsets; i++, oq++) {
2309 		if (oq->rspq.desc)
2310 			free_rspq_fl(adap, &oq->rspq, &oq->fl);
2311 	}
2312 	for (i = 0, oq = adap->sge.rdmarxq; i < adap->sge.rdmaqs; i++, oq++) {
2313 		if (oq->rspq.desc)
2314 			free_rspq_fl(adap, &oq->rspq, &oq->fl);
2315 	}
2316 
2317 	/* clean up offload Tx queues */
2318 	for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
2319 		struct sge_ofld_txq *q = &adap->sge.ofldtxq[i];
2320 
2321 		if (q->q.desc) {
2322 			tasklet_kill(&q->qresume_tsk);
2323 			t4_ofld_eq_free(adap, adap->fn, adap->fn, 0,
2324 					q->q.cntxt_id);
2325 			free_tx_desc(adap, &q->q, q->q.in_use, false);
2326 			kfree(q->q.sdesc);
2327 			__skb_queue_purge(&q->sendq);
2328 			free_txq(adap, &q->q);
2329 		}
2330 	}
2331 
2332 	/* clean up control Tx queues */
2333 	for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
2334 		struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
2335 
2336 		if (cq->q.desc) {
2337 			tasklet_kill(&cq->qresume_tsk);
2338 			t4_ctrl_eq_free(adap, adap->fn, adap->fn, 0,
2339 					cq->q.cntxt_id);
2340 			__skb_queue_purge(&cq->sendq);
2341 			free_txq(adap, &cq->q);
2342 		}
2343 	}
2344 
2345 	if (adap->sge.fw_evtq.desc)
2346 		free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
2347 
2348 	if (adap->sge.intrq.desc)
2349 		free_rspq_fl(adap, &adap->sge.intrq, NULL);
2350 
2351 	/* clear the reverse egress queue map */
2352 	memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map));
2353 }
2354 
2355 void t4_sge_start(struct adapter *adap)
2356 {
2357 	adap->sge.ethtxq_rover = 0;
2358 	mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
2359 	mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
2360 }
2361 
2362 /**
2363  *	t4_sge_stop - disable SGE operation
2364  *	@adap: the adapter
2365  *
2366  *	Stop tasklets and timers associated with the DMA engine.  Note that
2367  *	this is effective only if measures have been taken to disable any HW
2368  *	events that may restart them.
2369  */
2370 void t4_sge_stop(struct adapter *adap)
2371 {
2372 	int i;
2373 	struct sge *s = &adap->sge;
2374 
2375 	if (in_interrupt())  /* actions below require waiting */
2376 		return;
2377 
2378 	if (s->rx_timer.function)
2379 		del_timer_sync(&s->rx_timer);
2380 	if (s->tx_timer.function)
2381 		del_timer_sync(&s->tx_timer);
2382 
2383 	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) {
2384 		struct sge_ofld_txq *q = &s->ofldtxq[i];
2385 
2386 		if (q->q.desc)
2387 			tasklet_kill(&q->qresume_tsk);
2388 	}
2389 	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
2390 		struct sge_ctrl_txq *cq = &s->ctrlq[i];
2391 
2392 		if (cq->q.desc)
2393 			tasklet_kill(&cq->qresume_tsk);
2394 	}
2395 }
2396 
2397 /**
2398  *	t4_sge_init - initialize SGE
2399  *	@adap: the adapter
2400  *
2401  *	Performs SGE initialization needed every time after a chip reset.
2402  *	We do not initialize any of the queues here; instead the driver's
2403  *	top level must request them individually.
2404  */
2405 void t4_sge_init(struct adapter *adap)
2406 {
2407 	unsigned int i, v;
2408 	struct sge *s = &adap->sge;
2409 	unsigned int fl_align_log = ilog2(FL_ALIGN);
2410 
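	/*
	 * The ingress padding boundary is programmed as log2(boundary) - 5,
	 * i.e. a 32-byte boundary is encoded as 0.
	 */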
2411 	t4_set_reg_field(adap, SGE_CONTROL, PKTSHIFT_MASK |
2412 			 INGPADBOUNDARY_MASK | EGRSTATUSPAGESIZE,
2413 			 INGPADBOUNDARY(fl_align_log - 5) | PKTSHIFT(2) |
2414 			 RXPKTCPLMODE |
2415 			 (STAT_LEN == 128 ? EGRSTATUSPAGESIZE : 0));
2416 
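	/*
	 * Each PCI function gets a 4-bit field in SGE_HOST_PAGE_SIZE holding
	 * log2(page size) - 10; program the same host page size for all.
	 */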
2417 	for (i = v = 0; i < 32; i += 4)
2418 		v |= (PAGE_SHIFT - 10) << i;
2419 	t4_write_reg(adap, SGE_HOST_PAGE_SIZE, v);
2420 	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, PAGE_SIZE);
2421 #if FL_PG_ORDER > 0
2422 	t4_write_reg(adap, SGE_FL_BUFFER_SIZE1, PAGE_SIZE << FL_PG_ORDER);
2423 #endif
2424 	t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD,
2425 		     THRESHOLD_0(s->counter_val[0]) |
2426 		     THRESHOLD_1(s->counter_val[1]) |
2427 		     THRESHOLD_2(s->counter_val[2]) |
2428 		     THRESHOLD_3(s->counter_val[3]));
2429 	t4_write_reg(adap, SGE_TIMER_VALUE_0_AND_1,
2430 		     TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) |
2431 		     TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1])));
2432 	t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3,
2433 		     TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[2])) |
2434 		     TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[3])));
2435 	t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5,
2436 		     TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[4])) |
2437 		     TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[5])));
2438 	setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
2439 	setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
2440 	s->starve_thres = core_ticks_per_usec(adap) * 1000000;  /* 1 s */
2441 	s->idma_state[0] = s->idma_state[1] = 0;
2442 	spin_lock_init(&s->intrq_lock);
2443 }
2444