// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>

#include "ionic.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"

static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}

static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}

static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
{
	return netdev_get_tx_queue(q->lif->netdev, q->index);
}

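/* Allocate a full page for an Rx buffer and map it for DMA.  On success
 * the page, its DMA address and a zero page_offset are recorded in
 * buf_info; on failure the relevant error counter is bumped and a
 * negative errno is returned.
 */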
static int ionic_rx_page_alloc(struct ionic_queue *q,
			       struct ionic_buf_info *buf_info)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_rx_stats *stats;
	struct device *dev;
	struct page *page;

	dev = q->dev;
	stats = q_to_rx_stats(q);

	if (unlikely(!buf_info)) {
		net_err_ratelimited("%s: %s invalid buf_info in alloc\n",
				    netdev->name, q->name);
		return -EINVAL;
	}

	page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
	if (unlikely(!page)) {
		net_err_ratelimited("%s: %s page alloc failed\n",
				    netdev->name, q->name);
		stats->alloc_err++;
		return -ENOMEM;
	}

	buf_info->dma_addr = dma_map_page(dev, page, 0,
					  IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, buf_info->dma_addr))) {
		__free_pages(page, 0);
		net_err_ratelimited("%s: %s dma map failed\n",
				    netdev->name, q->name);
		stats->dma_map_err++;
		return -EIO;
	}

	buf_info->page = page;
	buf_info->page_offset = 0;

	return 0;
}

static void ionic_rx_page_free(struct ionic_queue *q,
			       struct ionic_buf_info *buf_info)
{
	struct net_device *netdev = q->lif->netdev;
	struct device *dev = q->dev;

	if (unlikely(!buf_info)) {
		net_err_ratelimited("%s: %s invalid buf_info in free\n",
				    netdev->name, q->name);
		return;
	}

	if (!buf_info->page)
		return;

	dma_unmap_page(dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
	__free_pages(buf_info->page, 0);
	buf_info->page = NULL;
}

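/* Decide whether an Rx page can be reused for another buffer.  Pages from
 * pfmemalloc reserves or remote NUMA nodes are never reused.  Otherwise
 * the offset is advanced by the used length rounded up to
 * IONIC_PAGE_SPLIT_SZ and, if room remains in the page, an extra page
 * reference is taken so the ring keeps its own hold alongside the skb's.
 */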
static bool ionic_rx_buf_recycle(struct ionic_queue *q,
				 struct ionic_buf_info *buf_info, u32 used)
{
	u32 size;

	/* don't re-use pages allocated in low-mem condition */
	if (page_is_pfmemalloc(buf_info->page))
		return false;

	/* don't re-use buffers from non-local numa nodes */
	if (page_to_nid(buf_info->page) != numa_mem_id())
		return false;

	size = ALIGN(used, IONIC_PAGE_SPLIT_SZ);
	buf_info->page_offset += size;
	if (buf_info->page_offset >= IONIC_PAGE_SIZE)
		return false;

	get_page(buf_info->page);

	return true;
}

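/* Build a frag-based skb from the buffers of one Rx descriptor.  Each
 * buffer's used portion is synced for the CPU and attached to a
 * napi_get_frags() skb; buffers that can't be recycled are unmapped and
 * their page pointer cleared so the fill path allocates fresh ones.
 */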
static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
				      struct ionic_desc_info *desc_info,
				      struct ionic_rxq_comp *comp)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_buf_info *buf_info;
	struct ionic_rx_stats *stats;
	struct device *dev = q->dev;
	struct sk_buff *skb;
	unsigned int i;
	u16 frag_len;
	u16 len;

	stats = q_to_rx_stats(q);

	buf_info = &desc_info->bufs[0];
	len = le16_to_cpu(comp->len);

	prefetchw(buf_info->page);

	skb = napi_get_frags(&q_to_qcq(q)->napi);
	if (unlikely(!skb)) {
		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
				     netdev->name, q->name);
		stats->alloc_err++;
		return NULL;
	}

	i = comp->num_sg_elems + 1;
	do {
		if (unlikely(!buf_info->page)) {
			dev_kfree_skb(skb);
			return NULL;
		}

		frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset);
		len -= frag_len;

		dma_sync_single_for_cpu(dev,
					buf_info->dma_addr + buf_info->page_offset,
					frag_len, DMA_FROM_DEVICE);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				buf_info->page, buf_info->page_offset, frag_len,
				IONIC_PAGE_SIZE);

		if (!ionic_rx_buf_recycle(q, buf_info, frag_len)) {
			dma_unmap_page(dev, buf_info->dma_addr,
				       IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
			buf_info->page = NULL;
		}

		buf_info++;

		i--;
	} while (i > 0);

	return skb;
}

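/* Small-packet path: copy the received data into a freshly allocated
 * linear skb instead of attaching page frags, leaving the Rx buffer
 * mapped in place for reuse.  Used when the completion length is at or
 * below lif->rx_copybreak.
 */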
static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
					  struct ionic_desc_info *desc_info,
					  struct ionic_rxq_comp *comp)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_buf_info *buf_info;
	struct ionic_rx_stats *stats;
	struct device *dev = q->dev;
	struct sk_buff *skb;
	u16 len;

	stats = q_to_rx_stats(q);

	buf_info = &desc_info->bufs[0];
	len = le16_to_cpu(comp->len);

	skb = napi_alloc_skb(&q_to_qcq(q)->napi, len);
	if (unlikely(!skb)) {
		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
				     netdev->name, q->name);
		stats->alloc_err++;
		return NULL;
	}

	if (unlikely(!buf_info->page)) {
		dev_kfree_skb(skb);
		return NULL;
	}

	dma_sync_single_for_cpu(dev, buf_info->dma_addr + buf_info->page_offset,
				len, DMA_FROM_DEVICE);
	skb_copy_to_linear_data(skb, page_address(buf_info->page) + buf_info->page_offset, len);
	dma_sync_single_for_device(dev, buf_info->dma_addr + buf_info->page_offset,
				   len, DMA_FROM_DEVICE);

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, q->lif->netdev);

	return skb;
}

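/* Per-completion Rx processing: build the skb (copybreak or frags), then
 * fill in RSS hash, checksum, VLAN tag and, on hwstamp queues, the
 * hardware timestamp before handing the packet to GRO.
 */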
static void ionic_rx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_qcq *qcq = q_to_qcq(q);
	struct ionic_rx_stats *stats;
	struct ionic_rxq_comp *comp;
	struct sk_buff *skb;

	comp = cq_info->cq_desc + qcq->cq.desc_size - sizeof(*comp);

	stats = q_to_rx_stats(q);

	if (comp->status) {
		stats->dropped++;
		return;
	}

	stats->pkts++;
	stats->bytes += le16_to_cpu(comp->len);

	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
		skb = ionic_rx_copybreak(q, desc_info, comp);
	else
		skb = ionic_rx_frags(q, desc_info, comp);

	if (unlikely(!skb)) {
		stats->dropped++;
		return;
	}

	skb_record_rx_queue(skb, q->index);

	if (likely(netdev->features & NETIF_F_RXHASH)) {
		switch (comp->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
		case IONIC_PKT_TYPE_IPV4:
		case IONIC_PKT_TYPE_IPV6:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L3);
			break;
		case IONIC_PKT_TYPE_IPV4_TCP:
		case IONIC_PKT_TYPE_IPV6_TCP:
		case IONIC_PKT_TYPE_IPV4_UDP:
		case IONIC_PKT_TYPE_IPV6_UDP:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L4);
			break;
		}
	}

	if (likely(netdev->features & NETIF_F_RXCSUM) &&
	    (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC)) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
		stats->csum_complete++;
	} else {
		stats->csum_none++;
	}

	if (unlikely((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)))
		stats->csum_error++;

	if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)) {
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       le16_to_cpu(comp->vlan_tci));
		stats->vlan_stripped++;
	}

	if (unlikely(q->features & IONIC_RXQ_F_HWSTAMP)) {
		__le64 *cq_desc_hwstamp;
		u64 hwstamp;

		cq_desc_hwstamp =
			cq_info->cq_desc +
			qcq->cq.desc_size -
			sizeof(struct ionic_rxq_comp) -
			IONIC_HWSTAMP_CQ_NEGOFFSET;

		hwstamp = le64_to_cpu(*cq_desc_hwstamp);

		if (hwstamp != IONIC_HWSTAMP_INVALID) {
			skb_hwtstamps(skb)->hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);
			stats->hwstamp_valid++;
		} else {
			stats->hwstamp_invalid++;
		}
	}

	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
		napi_gro_receive(&qcq->napi, skb);
	else
		napi_gro_frags(&qcq->napi);
}

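/* Service one Rx completion.  Returns false if the completion color
 * doesn't match the done color, the queue is empty, or the completion
 * index doesn't line up with the queue tail; otherwise cleans exactly
 * one descriptor and advances the tail.
 */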
bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;
	struct ionic_rxq_comp *comp;

	comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);

	if (!color_match(comp->pkt_type_color, cq->done_color))
		return false;

	/* check for empty queue */
	if (q->tail_idx == q->head_idx)
		return false;

	if (q->tail_idx != le16_to_cpu(comp->comp_index))
		return false;

	desc_info = &q->info[q->tail_idx];
	q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);

	/* clean the related q entry, only one per qc completion */
	ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg);

	desc_info->cb = NULL;
	desc_info->cb_arg = NULL;

	return true;
}

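/* Refill the Rx ring: for each free descriptor slot, reuse or allocate
 * page buffers large enough for an MTU-sized frame, splitting the frame
 * across the main descriptor and SG elements as needed, then ring the
 * doorbell once for the whole batch.
 */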
void ionic_rx_fill(struct ionic_queue *q)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_desc_info *desc_info;
	struct ionic_rxq_sg_desc *sg_desc;
	struct ionic_rxq_sg_elem *sg_elem;
	struct ionic_buf_info *buf_info;
	struct ionic_rxq_desc *desc;
	unsigned int remain_len;
	unsigned int frag_len;
	unsigned int nfrags;
	unsigned int i, j;
	unsigned int len;

	len = netdev->mtu + ETH_HLEN + VLAN_HLEN;

	for (i = ionic_q_space_avail(q); i; i--) {
		nfrags = 0;
		remain_len = len;
		desc_info = &q->info[q->head_idx];
		desc = desc_info->desc;
		buf_info = &desc_info->bufs[0];

		if (!buf_info->page) { /* alloc a new buffer? */
			if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
				desc->addr = 0;
				desc->len = 0;
				return;
			}
		}

		/* fill main descriptor - buf[0] */
		desc->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
		frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset);
		desc->len = cpu_to_le16(frag_len);
		remain_len -= frag_len;
		buf_info++;
		nfrags++;

		/* fill sg descriptors - buf[1..n] */
		sg_desc = desc_info->sg_desc;
		for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++) {
			sg_elem = &sg_desc->elems[j];
			if (!buf_info->page) { /* alloc a new sg buffer? */
				if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
					sg_elem->addr = 0;
					sg_elem->len = 0;
					return;
				}
			}

			sg_elem->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
			frag_len = min_t(u16, remain_len, IONIC_PAGE_SIZE - buf_info->page_offset);
			sg_elem->len = cpu_to_le16(frag_len);
			remain_len -= frag_len;
			buf_info++;
			nfrags++;
		}

		/* clear end sg element as a sentinel */
		if (j < q->max_sg_elems) {
			sg_elem = &sg_desc->elems[j];
			memset(sg_elem, 0, sizeof(*sg_elem));
		}

		desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
					      IONIC_RXQ_DESC_OPCODE_SIMPLE;
		desc_info->nbufs = nfrags;

		ionic_rxq_post(q, false, ionic_rx_clean, NULL);
	}

	ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
			 q->dbval | q->head_idx);
}

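/* Free every posted Rx buffer and reset the ring indices. */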
void ionic_rx_empty(struct ionic_queue *q)
{
	struct ionic_desc_info *desc_info;
	struct ionic_buf_info *buf_info;
	unsigned int i, j;

	for (i = 0; i < q->num_descs; i++) {
		desc_info = &q->info[i];
		for (j = 0; j < IONIC_RX_MAX_SG_ELEMS + 1; j++) {
			buf_info = &desc_info->bufs[j];
			if (buf_info->page)
				ionic_rx_page_free(q, buf_info);
		}

		desc_info->nbufs = 0;
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	}

	q->head_idx = 0;
	q->tail_idx = 0;
}

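/* Feed this queue pair's latest packet/byte counts into net_dim so it
 * can adjust the interrupt coalescing, but only when dynamic coalescing
 * (dim_coal_hw) is in use.
 */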
static void ionic_dim_update(struct ionic_qcq *qcq, int napi_mode)
{
	struct dim_sample dim_sample;
	struct ionic_lif *lif;
	unsigned int qi;
	u64 pkts, bytes;

	if (!qcq->intr.dim_coal_hw)
		return;

	lif = qcq->q.lif;
	qi = qcq->cq.bound_q->index;

	switch (napi_mode) {
	case IONIC_LIF_F_TX_DIM_INTR:
		pkts = lif->txqstats[qi].pkts;
		bytes = lif->txqstats[qi].bytes;
		break;
	case IONIC_LIF_F_RX_DIM_INTR:
		pkts = lif->rxqstats[qi].pkts;
		bytes = lif->rxqstats[qi].bytes;
		break;
	default:
		pkts = lif->txqstats[qi].pkts + lif->rxqstats[qi].pkts;
		bytes = lif->txqstats[qi].bytes + lif->rxqstats[qi].bytes;
		break;
	}

	dim_update_sample(qcq->cq.bound_intr->rearm_count,
			  pkts, bytes, &dim_sample);

	net_dim(&qcq->dim, dim_sample);
}

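/* Tx-only NAPI handler: service up to budget Tx completions, then
 * complete NAPI, update DIM, and unmask the interrupt while returning
 * the consumed credits.
 */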
int ionic_tx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *cq = napi_to_cq(napi);
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	u32 work_done = 0;
	u32 flags = 0;

	lif = cq->bound_q->lif;
	idev = &lif->ionic->idev;

	work_done = ionic_cq_service(cq, budget,
				     ionic_tx_service, NULL, NULL);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		ionic_dim_update(qcq, IONIC_LIF_F_TX_DIM_INTR);
		flags |= IONIC_INTR_CRED_UNMASK;
		cq->bound_intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl,
				   cq->bound_intr->index,
				   work_done, flags);
	}

	return work_done;
}

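/* Rx-only NAPI handler: service Rx completions, refill the ring once
 * enough free slots have opened up, and re-arm the interrupt when the
 * budget was not exhausted.
 */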
int ionic_rx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *cq = napi_to_cq(napi);
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	u16 rx_fill_threshold;
	u32 work_done = 0;
	u32 flags = 0;

	lif = cq->bound_q->lif;
	idev = &lif->ionic->idev;

	work_done = ionic_cq_service(cq, budget,
				     ionic_rx_service, NULL, NULL);

	rx_fill_threshold = min_t(u16, IONIC_RX_FILL_THRESHOLD,
				  cq->num_descs / IONIC_RX_FILL_DIV);
	if (work_done && ionic_q_space_avail(cq->bound_q) >= rx_fill_threshold)
		ionic_rx_fill(cq->bound_q);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR);
		flags |= IONIC_INTR_CRED_UNMASK;
		cq->bound_intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl,
				   cq->bound_intr->index,
				   work_done, flags);
	}

	return work_done;
}

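/* Combined NAPI handler for queue pairs that share an interrupt: service
 * the Tx completion queue with a fixed budget, then Rx with the NAPI
 * budget, refill Rx if enough slots opened up, and re-arm the interrupt
 * once Rx work drops below budget.
 */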
int ionic_txrx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *rxcq = napi_to_cq(napi);
	unsigned int qi = rxcq->bound_q->index;
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	struct ionic_cq *txcq;
	u16 rx_fill_threshold;
	u32 rx_work_done = 0;
	u32 tx_work_done = 0;
	u32 flags = 0;

	lif = rxcq->bound_q->lif;
	idev = &lif->ionic->idev;
	txcq = &lif->txqcqs[qi]->cq;

	tx_work_done = ionic_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT,
					ionic_tx_service, NULL, NULL);

	rx_work_done = ionic_cq_service(rxcq, budget,
					ionic_rx_service, NULL, NULL);

	rx_fill_threshold = min_t(u16, IONIC_RX_FILL_THRESHOLD,
				  rxcq->num_descs / IONIC_RX_FILL_DIV);
	if (rx_work_done && ionic_q_space_avail(rxcq->bound_q) >= rx_fill_threshold)
		ionic_rx_fill(rxcq->bound_q);

	if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
		ionic_dim_update(qcq, 0);
		flags |= IONIC_INTR_CRED_UNMASK;
		rxcq->bound_intr->rearm_count++;
	}

	if (rx_work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index,
				   tx_work_done + rx_work_done, flags);
	}

	return rx_work_done;
}

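/* DMA mapping helpers for the Tx path: map a linear buffer or an skb
 * frag for device access, counting dma_map_err on a mapping failure.
 */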
static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
				      void *data, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->dev;
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA single map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
		return 0;
	}
	return dma_addr;
}

static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
				    const skb_frag_t *frag,
				    size_t offset, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->dev;
	dma_addr_t dma_addr;

	dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
	}
	return dma_addr;
}

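/* DMA-map the skb head and all of its frags, recording each mapping in
 * desc_info->bufs.  On a frag mapping failure, every mapping made so far
 * is unwound and -EIO is returned.
 */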
static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
			    struct ionic_desc_info *desc_info)
{
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->dev;
	dma_addr_t dma_addr;
	unsigned int nfrags;
	skb_frag_t *frag;
	int frag_idx;

	dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
	if (dma_mapping_error(dev, dma_addr)) {
		stats->dma_map_err++;
		return -EIO;
	}
	buf_info->dma_addr = dma_addr;
	buf_info->len = skb_headlen(skb);
	buf_info++;

	frag = skb_shinfo(skb)->frags;
	nfrags = skb_shinfo(skb)->nr_frags;
	for (frag_idx = 0; frag_idx < nfrags; frag_idx++, frag++) {
		dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
		if (dma_mapping_error(dev, dma_addr)) {
			stats->dma_map_err++;
			goto dma_fail;
		}
		buf_info->dma_addr = dma_addr;
		buf_info->len = skb_frag_size(frag);
		buf_info++;
	}

	desc_info->nbufs = 1 + nfrags;

	return 0;

dma_fail:
	/* unwind the frag mappings and the head mapping */
	while (frag_idx > 0) {
		frag_idx--;
		buf_info--;
		dma_unmap_page(dev, buf_info->dma_addr,
			       buf_info->len, DMA_TO_DEVICE);
	}
	/* the head mapping always lives in bufs[0] */
	dma_unmap_single(dev, desc_info->bufs[0].dma_addr,
			 desc_info->bufs[0].len, DMA_TO_DEVICE);
	return -EIO;
}

static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
				     struct ionic_desc_info *desc_info)
{
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct device *dev = q->dev;
	unsigned int i;

	if (!desc_info->nbufs)
		return;

	dma_unmap_single(dev, (dma_addr_t)buf_info->dma_addr,
			 buf_info->len, DMA_TO_DEVICE);
	buf_info++;
	for (i = 1; i < desc_info->nbufs; i++, buf_info++)
		dma_unmap_page(dev, (dma_addr_t)buf_info->dma_addr,
			       buf_info->len, DMA_TO_DEVICE);

	desc_info->nbufs = 0;
}

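/* Per-descriptor Tx completion: unmap the buffers, report a Tx hardware
 * timestamp if this is the hwstamp queue, wake the subqueue if it had
 * been stopped, and free the skb.
 */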
static void ionic_tx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_qcq *qcq = q_to_qcq(q);
	struct sk_buff *skb = cb_arg;
	u16 qi;

	ionic_tx_desc_unmap_bufs(q, desc_info);

	if (!skb)
		return;

	qi = skb_get_queue_mapping(skb);

	if (unlikely(q->features & IONIC_TXQ_F_HWSTAMP)) {
		if (cq_info) {
			struct skb_shared_hwtstamps hwts = {};
			__le64 *cq_desc_hwstamp;
			u64 hwstamp;

			cq_desc_hwstamp =
				cq_info->cq_desc +
				qcq->cq.desc_size -
				sizeof(struct ionic_txq_comp) -
				IONIC_HWSTAMP_CQ_NEGOFFSET;

			hwstamp = le64_to_cpu(*cq_desc_hwstamp);

			if (hwstamp != IONIC_HWSTAMP_INVALID) {
				hwts.hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);

				skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
				skb_tstamp_tx(skb, &hwts);

				stats->hwstamp_valid++;
			} else {
				stats->hwstamp_invalid++;
			}
		}

	} else if (unlikely(__netif_subqueue_stopped(q->lif->netdev, qi))) {
		netif_wake_subqueue(q->lif->netdev, qi);
	}

	desc_info->bytes = skb->len;
	stats->clean++;

	dev_consume_skb_any(skb);
}

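/* Service one Tx completion, which may cover several queue entries; each
 * entry up to and including comp_index is cleaned, and the completed
 * packet/byte totals are reported to the BQL layer for non-hwstamp
 * queues.
 */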
bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;
	struct ionic_txq_comp *comp;
	int bytes = 0;
	int pkts = 0;
	u16 index;

	comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);

	if (!color_match(comp->color, cq->done_color))
		return false;

	/* clean the related q entries, there could be
	 * several q entries completed for each cq completion
	 */
	do {
		desc_info = &q->info[q->tail_idx];
		desc_info->bytes = 0;
		index = q->tail_idx;
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
		ionic_tx_clean(q, desc_info, cq_info, desc_info->cb_arg);
		if (desc_info->cb_arg) {
			pkts++;
			bytes += desc_info->bytes;
		}
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	} while (index != le16_to_cpu(comp->comp_index));

	if (pkts && bytes && !unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
		netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);

	return true;
}

void ionic_tx_flush(struct ionic_cq *cq)
{
	struct ionic_dev *idev = &cq->lif->ionic->idev;
	u32 work_done;

	work_done = ionic_cq_service(cq, cq->num_descs,
				     ionic_tx_service, NULL, NULL);
	if (work_done)
		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
}

void ionic_tx_empty(struct ionic_queue *q)
{
	struct ionic_desc_info *desc_info;
	int bytes = 0;
	int pkts = 0;

	/* walk the not completed tx entries, if any */
	while (q->head_idx != q->tail_idx) {
		desc_info = &q->info[q->tail_idx];
		desc_info->bytes = 0;
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
		ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg);
		if (desc_info->cb_arg) {
			pkts++;
			bytes += desc_info->bytes;
		}
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	}

	if (pkts && bytes && !unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
		netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);
}

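/* Seed the inner TCP checksum of an encapsulated packet with the
 * pseudo-header sum (IP length zero) so the hardware can complete the
 * checksum for each TSO segment.
 */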
static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		inner_ip_hdr(skb)->check = 0;
		inner_tcp_hdr(skb)->check =
			~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
					   inner_ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		inner_tcp_hdr(skb)->check =
			~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
					 &inner_ipv6_hdr(skb)->daddr,
					 0, IPPROTO_TCP, 0);
	}

	return 0;
}

static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_v6_gso_csum_prep(skb);
	}

	return 0;
}

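/* Fill and post one TSO descriptor.  The first descriptor of the series
 * (start) carries the timestamp and BQL accounting and is the one posted
 * with the skb and completion callback; later descriptors are posted
 * without a callback.
 */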
static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
			      struct sk_buff *skb,
			      dma_addr_t addr, u8 nsge, u16 len,
			      unsigned int hdrlen, unsigned int mss,
			      bool outer_csum,
			      u16 vlan_tci, bool has_vlan,
			      bool start, bool done)
{
	u8 flags = 0;
	u64 cmd;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= outer_csum ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, flags, nsge, addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(len);
	desc->vlan_tci = cpu_to_le16(vlan_tci);
	desc->hdr_len = cpu_to_le16(hdrlen);
	desc->mss = cpu_to_le16(mss);

	if (start) {
		skb_tx_timestamp(skb);
		if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
			netdev_tx_sent_queue(q_to_ndq(q), skb->len);
		ionic_txq_post(q, false, ionic_tx_clean, skb);
	} else {
		ionic_txq_post(q, done, NULL, NULL);
	}
}

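/* TSO transmit path.  The skb is DMA-mapped once, the TCP pseudo-header
 * checksum is preseeded (inner headers for encapsulated frames), and the
 * payload is then walked mss bytes at a time, emitting one TSO descriptor
 * plus SG elements per segment with SOT/EOT flags on the first and last.
 */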
static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_desc_info *desc_info;
	struct ionic_buf_info *buf_info;
	struct ionic_txq_sg_elem *elem;
	struct ionic_txq_desc *desc;
	unsigned int chunk_len;
	unsigned int frag_rem;
	unsigned int tso_rem;
	unsigned int seg_rem;
	dma_addr_t desc_addr;
	dma_addr_t frag_addr;
	unsigned int hdrlen;
	unsigned int len;
	unsigned int mss;
	bool start, done;
	bool outer_csum;
	bool has_vlan;
	u16 desc_len;
	u8 desc_nsge;
	u16 vlan_tci;
	bool encap;
	int err;

	desc_info = &q->info[q->head_idx];
	buf_info = desc_info->bufs;

	if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
		return -EIO;

	len = skb->len;
	mss = skb_shinfo(skb)->gso_size;
	outer_csum = (skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM) ||
		     (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
	has_vlan = !!skb_vlan_tag_present(skb);
	vlan_tci = skb_vlan_tag_get(skb);
	encap = skb->encapsulation;

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero.  HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */

	if (encap)
		err = ionic_tx_tcp_inner_pseudo_csum(skb);
	else
		err = ionic_tx_tcp_pseudo_csum(skb);
	if (err) {
		/* clean up mapping from ionic_tx_map_skb */
		ionic_tx_desc_unmap_bufs(q, desc_info);
		return err;
	}

	if (encap)
		hdrlen = skb_inner_tcp_all_headers(skb);
	else
		hdrlen = skb_tcp_all_headers(skb);

	tso_rem = len;
	seg_rem = min(tso_rem, hdrlen + mss);

	frag_addr = 0;
	frag_rem = 0;

	start = true;

	while (tso_rem > 0) {
		desc = NULL;
		elem = NULL;
		desc_addr = 0;
		desc_len = 0;
		desc_nsge = 0;
		/* use fragments until we have enough to post a single descriptor */
		while (seg_rem > 0) {
			/* if the fragment is exhausted then move to the next one */
			if (frag_rem == 0) {
				/* grab the next fragment */
				frag_addr = buf_info->dma_addr;
				frag_rem = buf_info->len;
				buf_info++;
			}
			chunk_len = min(frag_rem, seg_rem);
			if (!desc) {
				/* fill main descriptor */
				desc = desc_info->txq_desc;
				elem = desc_info->txq_sg_desc->elems;
				desc_addr = frag_addr;
				desc_len = chunk_len;
			} else {
				/* fill sg descriptor */
				elem->addr = cpu_to_le64(frag_addr);
				elem->len = cpu_to_le16(chunk_len);
				elem++;
				desc_nsge++;
			}
			frag_addr += chunk_len;
			frag_rem -= chunk_len;
			tso_rem -= chunk_len;
			seg_rem -= chunk_len;
		}
		seg_rem = min(tso_rem, mss);
		done = (tso_rem == 0);
		/* post descriptor */
		ionic_tx_tso_post(q, desc, skb,
				  desc_addr, desc_nsge, desc_len,
				  hdrlen, mss, outer_csum, vlan_tci, has_vlan,
				  start, done);
		start = false;
		/* Buffer information is stored with the first tso descriptor */
		desc_info = &q->info[q->head_idx];
		desc_info->nbufs = 0;
	}

	stats->pkts += DIV_ROUND_UP(len - hdrlen, mss);
	stats->bytes += len;
	stats->tso++;
	stats->tso_bytes = len;

	return 0;
}

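/* Build a CSUM_PARTIAL Tx descriptor using the checksum start/offset
 * from the skb, inserting the VLAN tag if one is present.
 */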
static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
			       struct ionic_desc_info *desc_info)
{
	struct ionic_txq_desc *desc = desc_info->txq_desc;
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL,
				  flags, skb_shinfo(skb)->nr_frags,
				  buf_info->dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(buf_info->len);
	if (has_vlan) {
		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
		stats->vlan_inserted++;
	} else {
		desc->vlan_tci = 0;
	}
	desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
	desc->csum_offset = cpu_to_le16(skb->csum_offset);

	if (skb_csum_is_sctp(skb))
		stats->crc32_csum++;
	else
		stats->csum++;
}

static void ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
				  struct ionic_desc_info *desc_info)
{
	struct ionic_txq_desc *desc = desc_info->txq_desc;
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
				  flags, skb_shinfo(skb)->nr_frags,
				  buf_info->dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(buf_info->len);
	if (has_vlan) {
		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
		stats->vlan_inserted++;
	} else {
		desc->vlan_tci = 0;
	}
	desc->csum_start = 0;
	desc->csum_offset = 0;

	stats->csum_none++;
}

static void ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
			       struct ionic_desc_info *desc_info)
{
	struct ionic_txq_sg_desc *sg_desc = desc_info->txq_sg_desc;
	struct ionic_buf_info *buf_info = &desc_info->bufs[1];
	struct ionic_txq_sg_elem *elem = sg_desc->elems;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	unsigned int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, buf_info++, elem++) {
		elem->addr = cpu_to_le64(buf_info->dma_addr);
		elem->len = cpu_to_le16(buf_info->len);
	}

	stats->frags += skb_shinfo(skb)->nr_frags;
}

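/* Non-TSO transmit: map the skb, build a checksum-offload or no-checksum
 * descriptor plus SG elements for the frags, and post it, ringing the
 * doorbell only when no further xmit is queued behind this one.
 */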
static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_desc_info *desc_info = &q->info[q->head_idx];
	struct ionic_tx_stats *stats = q_to_tx_stats(q);

	if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
		return -EIO;

	/* set up the initial descriptor */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		ionic_tx_calc_csum(q, skb, desc_info);
	else
		ionic_tx_calc_no_csum(q, skb, desc_info);

	/* add frags */
	ionic_tx_skb_frags(q, skb, desc_info);

	skb_tx_timestamp(skb);
	stats->pkts++;
	stats->bytes += skb->len;

	if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
		netdev_tx_sent_queue(q_to_ndq(q), skb->len);
	ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);

	return 0;
}

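/* Estimate how many descriptors the skb needs: one per GSO segment for
 * TSO, otherwise one.  If the skb carries more frags than the queue has
 * SG elements per descriptor, linearize it first; a negative errno is
 * returned only if linearizing fails.
 */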
static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	int ndescs;
	int err;

	/* Each desc is mss long max, so a descriptor for each gso_seg */
	if (skb_is_gso(skb))
		ndescs = skb_shinfo(skb)->gso_segs;
	else
		ndescs = 1;

	/* If non-TSO, just need 1 desc and nr_frags sg elems */
	if (skb_shinfo(skb)->nr_frags <= q->max_sg_elems)
		return ndescs;

	/* Too many frags, so linearize */
	err = skb_linearize(skb);
	if (err)
		return err;

	stats->linearize++;

	return ndescs;
}

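/* Stop the subqueue if there isn't room for ndescs descriptors, then
 * re-check to close the race with ionic_tx_clean() freeing space and
 * wake the queue again if space appeared.  Returns nonzero if the queue
 * is left stopped.
 */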
static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
{
	int stopped = 0;

	if (unlikely(!ionic_q_has_space(q, ndescs))) {
		netif_stop_subqueue(q->lif->netdev, q->index);
		stopped = 1;

		/* Might race with ionic_tx_clean, check again */
		smp_rmb();
		if (ionic_q_has_space(q, ndescs)) {
			netif_wake_subqueue(q->lif->netdev, q->index);
			stopped = 0;
		}
	}

	return stopped;
}

static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb,
					    struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_queue *q = &lif->hwstamp_txq->q;
	int err, ndescs;

	/* Does not stop/start txq, because we post to a separate tx queue
	 * for timestamping, and if a packet can't be posted immediately to
	 * the timestamping queue, it is dropped.
	 */

	ndescs = ionic_tx_descs_needed(q, skb);
	if (unlikely(ndescs < 0))
		goto err_out_drop;

	if (unlikely(!ionic_q_has_space(q, ndescs)))
		goto err_out_drop;

	skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP;
	if (skb_is_gso(skb))
		err = ionic_tx_tso(q, skb);
	else
		err = ionic_tx(q, skb);

	if (err)
		goto err_out_drop;

	return NETDEV_TX_OK;

err_out_drop:
	q->drop++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

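/* Main transmit entry point.  Timestamp-requesting packets are diverted
 * to the dedicated hwstamp queue when one is configured; everything else
 * is sent on its mapped Tx queue via the TSO or normal path, and the
 * queue is stopped early when fewer than four free descriptors remain.
 */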
netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	u16 queue_index = skb_get_queue_mapping(skb);
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_queue *q;
	int ndescs;
	int err;

	if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state))) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		if (lif->hwstamp_txq && lif->phc->ts_config_tx_mode)
			return ionic_start_hwstamp_xmit(skb, netdev);

	if (unlikely(queue_index >= lif->nxqs))
		queue_index = 0;
	q = &lif->txqcqs[queue_index]->q;

	ndescs = ionic_tx_descs_needed(q, skb);
	if (ndescs < 0)
		goto err_out_drop;

	if (unlikely(ionic_maybe_stop_tx(q, ndescs)))
		return NETDEV_TX_BUSY;

	if (skb_is_gso(skb))
		err = ionic_tx_tso(q, skb);
	else
		err = ionic_tx(q, skb);

	if (err)
		goto err_out_drop;

	/* Stop the queue if there aren't descriptors for the next packet.
	 * Since our SG lists per descriptor take care of most of the possible
	 * fragmentation, we don't need to have many descriptors available.
	 */
	ionic_maybe_stop_tx(q, 4);

	return NETDEV_TX_OK;

err_out_drop:
	q->drop++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}