// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include <linux/prefetch.h>

#include "iavf.h"
#include "iavf_trace.h"
#include "iavf_prototype.h"

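/* Pack the DATA descriptor type plus the command, header offsets, buffer
 * size, and L2 tag fields into the little-endian cmd_type_offset_bsz
 * quadword of a Tx descriptor.
 */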
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(IAVF_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << IAVF_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << IAVF_TXD_QW1_L2TAG1_SHIFT));
}

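/* Default descriptor commands: end of packet plus report status (request a
 * completion writeback from hardware).
 */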
#define IAVF_TXD_CMD (IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_RS)

/**
 * iavf_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void iavf_unmap_and_free_tx_resource(struct iavf_ring *ring,
					    struct iavf_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		if (tx_buffer->tx_flags & IAVF_TX_FLAGS_FD_SB)
			kfree(tx_buffer->raw_buf);
		else
			dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}

	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

/**
 * iavf_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 **/
void iavf_clean_tx_ring(struct iavf_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_bi)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		iavf_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);

	bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * iavf_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void iavf_free_tx_resources(struct iavf_ring *tx_ring)
{
	iavf_clean_tx_ring(tx_ring);
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;

	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * iavf_get_tx_pending - how many Tx descriptors not processed
 * @ring: the ring of descriptors
 * @in_sw: is tx_pending being checked in SW or HW
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/
u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
{
	u32 head, tail;

	head = ring->next_to_clean;
	tail = readl(ring->tail);

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}

/**
 * iavf_detect_recover_hung - Function to detect and recover hung queues
 * @vsi: pointer to vsi struct with tx queues
 *
 * VSI has netdev and netdev has TX queues. This function checks each of
 * those TX queues and, if it appears hung, triggers recovery by issuing a
 * SW interrupt.
 **/
void iavf_detect_recover_hung(struct iavf_vsi *vsi)
{
	struct iavf_ring *tx_ring = NULL;
	struct net_device *netdev;
	unsigned int i;
	int packets;

	if (!vsi)
		return;

	if (test_bit(__IAVF_VSI_DOWN, vsi->state))
		return;

	netdev = vsi->netdev;
	if (!netdev)
		return;

	if (!netif_carrier_ok(netdev))
		return;

	for (i = 0; i < vsi->back->num_active_queues; i++) {
		tx_ring = &vsi->back->tx_rings[i];
		if (tx_ring && tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt_ctr would be negative if there was no
			 * pending work.
			 */
			packets = tx_ring->stats.packets & INT_MAX;
			if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
				iavf_force_wb(vsi, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to iavf_get_tx_pending()
			 */
			smp_rmb();
			tx_ring->tx_stats.prev_pkt_ctr =
				iavf_get_tx_pending(tx_ring, true) ? packets : -1;
		}
	}
}

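/* Kick a descriptor writeback once fewer than this many descriptors are
 * still waiting to be written back (see the WB_ON_ITR path below).
 */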
#define WB_STRIDE 4

/**
 * iavf_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool iavf_clean_tx_irq(struct iavf_vsi *vsi,
			      struct iavf_ring *tx_ring, int napi_budget)
{
	int i = tx_ring->next_to_clean;
	struct iavf_tx_buffer *tx_buf;
	struct iavf_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = IAVF_DEFAULT_IRQ_WORK;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = IAVF_TX_DESC(tx_ring, i);
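	/* Bias the index by -count so that i stays negative while walking the
	 * ring; it reaches zero exactly on wrap, which lets the wrap checks
	 * below test for !i instead of comparing against the ring count.
	 */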
	i -= tx_ring->count;

	do {
		struct iavf_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		iavf_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(IAVF_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_packets += tx_buf->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			iavf_trace(clean_tx_irq_unmap,
				   tx_ring, tx_desc, tx_buf);

			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_bi;
				tx_desc = IAVF_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = IAVF_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_packets += total_packets;

	if (tx_ring->flags & IAVF_TXR_FLAGS_WB_ON_ITR) {
		/* check to see if there are < 4 descriptors
		 * waiting to be written back, then kick the hardware to force
		 * them to be written back in case we stay in NAPI.
		 * In this mode on X722 we do not enable Interrupt.
		 */
		unsigned int j = iavf_get_tx_pending(tx_ring, false);

		if (budget &&
		    ((j / WB_STRIDE) == 0) && (j > 0) &&
		    !test_bit(__IAVF_VSI_DOWN, vsi->state) &&
		    (IAVF_DESC_UNUSED(tx_ring) != tx_ring->count))
			tx_ring->arm_wb = true;
	}

	/* notify netdev of completed buffers */
	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (IAVF_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__IAVF_VSI_DOWN, vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

/**
 * iavf_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to enable writeback
 *
 **/
static void iavf_enable_wb_on_itr(struct iavf_vsi *vsi,
				  struct iavf_q_vector *q_vector)
{
	u16 flags = q_vector->tx.ring[0].flags;
	u32 val;

	if (!(flags & IAVF_TXR_FLAGS_WB_ON_ITR))
		return;

	if (q_vector->arm_wb_state)
		return;

	val = IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
	      IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */

	wr32(&vsi->back->hw,
	     IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx), val);
	q_vector->arm_wb_state = true;
}

/**
 * iavf_force_wb - Issue SW Interrupt so HW does a wb
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 *
 **/
void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector)
{
	u32 val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
		  IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
		  IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
		  IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK
		  /* allow 00 to be written to the index */;

	wr32(&vsi->back->hw,
	     IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx),
	     val);
}

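/* Return true if @rc is the Rx ring container of @q_vector */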
static inline bool iavf_container_is_rx(struct iavf_q_vector *q_vector,
					struct iavf_ring_container *rc)
{
	return &q_vector->rx == rc;
}

#define IAVF_AIM_MULTIPLIER_100G 2560
#define IAVF_AIM_MULTIPLIER_50G 1280
#define IAVF_AIM_MULTIPLIER_40G 1024
#define IAVF_AIM_MULTIPLIER_20G 512
#define IAVF_AIM_MULTIPLIER_10G 256
#define IAVF_AIM_MULTIPLIER_1G 32

static unsigned int iavf_mbps_itr_multiplier(u32 speed_mbps)
{
	switch (speed_mbps) {
	case SPEED_100000:
		return IAVF_AIM_MULTIPLIER_100G;
	case SPEED_50000:
		return IAVF_AIM_MULTIPLIER_50G;
	case SPEED_40000:
		return IAVF_AIM_MULTIPLIER_40G;
	case SPEED_25000:
	case SPEED_20000:
		return IAVF_AIM_MULTIPLIER_20G;
	case SPEED_10000:
	default:
		return IAVF_AIM_MULTIPLIER_10G;
	case SPEED_1000:
	case SPEED_100:
		return IAVF_AIM_MULTIPLIER_1G;
	}
}

static unsigned int
iavf_virtchnl_itr_multiplier(enum virtchnl_link_speed speed_virtchnl)
{
	switch (speed_virtchnl) {
	case VIRTCHNL_LINK_SPEED_40GB:
		return IAVF_AIM_MULTIPLIER_40G;
	case VIRTCHNL_LINK_SPEED_25GB:
	case VIRTCHNL_LINK_SPEED_20GB:
		return IAVF_AIM_MULTIPLIER_20G;
	case VIRTCHNL_LINK_SPEED_10GB:
	default:
		return IAVF_AIM_MULTIPLIER_10G;
	case VIRTCHNL_LINK_SPEED_1GB:
	case VIRTCHNL_LINK_SPEED_100MB:
		return IAVF_AIM_MULTIPLIER_1G;
	}
}

static unsigned int iavf_itr_divisor(struct iavf_adapter *adapter)
{
	if (ADV_LINK_SUPPORT(adapter))
		return IAVF_ITR_ADAPTIVE_MIN_INC *
			iavf_mbps_itr_multiplier(adapter->link_speed_mbps);
	else
		return IAVF_ITR_ADAPTIVE_MIN_INC *
			iavf_virtchnl_itr_multiplier(adapter->link_speed);
}

/**
 * iavf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @rc: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt. The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern. Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void iavf_update_itr(struct iavf_q_vector *q_vector,
			    struct iavf_ring_container *rc)
{
	unsigned int avg_wire_size, packets, bytes, itr;
	unsigned long next_update = jiffies;

	/* If we don't have any rings just leave ourselves set for maximum
	 * possible latency so we take ourselves out of the equation.
	 */
	if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
		return;

	/* For Rx we want to push the delay up and default to low latency.
	 * For Tx we want to pull the delay down and default to high latency.
	 */
	itr = iavf_container_is_rx(q_vector, rc) ?
	      IAVF_ITR_ADAPTIVE_MIN_USECS | IAVF_ITR_ADAPTIVE_LATENCY :
	      IAVF_ITR_ADAPTIVE_MAX_USECS | IAVF_ITR_ADAPTIVE_LATENCY;

	/* If we didn't update within up to 1 - 2 jiffies we can assume
	 * that either packets are coming in so slow there hasn't been
	 * any work, or that there is so much work that NAPI is dealing
	 * with interrupt moderation and we don't need to do anything.
	 */
	if (time_after(next_update, rc->next_update))
		goto clear_counts;

	/* If itr_countdown is set it means we programmed an ITR within
	 * the last 4 interrupt cycles. This has a side effect of us
	 * potentially firing an early interrupt. In order to work around
	 * this we need to throw out any data received for a few
	 * interrupts following the update.
	 */
	if (q_vector->itr_countdown) {
		itr = rc->target_itr;
		goto clear_counts;
	}

	packets = rc->total_packets;
	bytes = rc->total_bytes;

	if (iavf_container_is_rx(q_vector, rc)) {
		/* If Rx saw 1 to 4 packets and fewer than 9000 bytes, assume
		 * there is insufficient data to use the bulk rate limiting
		 * approach unless Tx is already in bulk rate limiting. We
		 * are likely latency driven.
		 */
		if (packets && packets < 4 && bytes < 9000 &&
		    (q_vector->tx.target_itr & IAVF_ITR_ADAPTIVE_LATENCY)) {
			itr = IAVF_ITR_ADAPTIVE_LATENCY;
			goto adjust_by_size;
		}
	} else if (packets < 4) {
		/* If we have Tx and Rx ITR maxed and Tx ITR is running in
		 * bulk mode and we are receiving 4 or fewer packets just
		 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
		 * that the Rx can relax.
		 */
		if (rc->target_itr == IAVF_ITR_ADAPTIVE_MAX_USECS &&
		    (q_vector->rx.target_itr & IAVF_ITR_MASK) ==
		     IAVF_ITR_ADAPTIVE_MAX_USECS)
			goto clear_counts;
	} else if (packets > 32) {
		/* If we have processed over 32 packets in a single interrupt
		 * for Tx assume we need to switch over to "bulk" mode.
		 */
		rc->target_itr &= ~IAVF_ITR_ADAPTIVE_LATENCY;
	}

	/* We have no packets to actually measure against. This means
	 * either one of the other queues on this vector is active or
	 * we are a Tx queue doing TSO with too high of an interrupt rate.
	 *
	 * Between 4 and 56 we can assume that our current interrupt delay
	 * is only slightly too low. As such we should increase it by a small
	 * fixed amount.
	 */
	if (packets < 56) {
		itr = rc->target_itr + IAVF_ITR_ADAPTIVE_MIN_INC;
		if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {
			itr &= IAVF_ITR_ADAPTIVE_LATENCY;
			itr += IAVF_ITR_ADAPTIVE_MAX_USECS;
		}
		goto clear_counts;
	}

	if (packets <= 256) {
		itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
		itr &= IAVF_ITR_MASK;

		/* Between 56 and 112 is our "goldilocks" zone where we are
		 * working out "just right". Just report that our current
		 * ITR is good for us.
		 */
		if (packets <= 112)
			goto clear_counts;

		/* If packet count is 128 or greater we are likely looking
		 * at a slight overrun of the delay we want. Try halving
		 * our delay to see if that will cut the number of packets
		 * in half per interrupt.
		 */
		itr /= 2;
		itr &= IAVF_ITR_MASK;
		if (itr < IAVF_ITR_ADAPTIVE_MIN_USECS)
			itr = IAVF_ITR_ADAPTIVE_MIN_USECS;

		goto clear_counts;
	}

	/* The paths below assume we are dealing with a bulk ITR since
	 * number of packets is greater than 256. We are just going to have
	 * to compute a value and try to bring the count under control,
	 * though for smaller packet sizes there isn't much we can do as
	 * NAPI polling will likely be kicking in sooner rather than later.
	 */
	itr = IAVF_ITR_ADAPTIVE_BULK;

adjust_by_size:
	/* If packet counts are 256 or greater we can assume we have a gross
	 * overestimation of what the rate should be. Instead of trying to fine
	 * tune it just use the formula below to try and dial in an exact value
	 * given the current packet size of the frame.
	 */
	avg_wire_size = bytes / packets;

	/* The following is a crude approximation of:
	 *  wmem_default / (size + overhead) = desired_pkts_per_int
	 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
	 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
	 *
	 * Assuming wmem_default is 212992 and overhead is 640 bytes per
	 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
	 * formula down to
	 *
	 *	(170 * (size + 24)) / (size + 640) = ITR
	 *
	 * We first do some math on the packet size and then finally bitshift
	 * by 8 after rounding up. We also have to account for PCIe link speed
	 * difference as ITR scales based on this.
	 */
	if (avg_wire_size <= 60) {
		/* Start at 250k ints/sec */
		avg_wire_size = 4096;
	} else if (avg_wire_size <= 380) {
		/* 250K ints/sec to 60K ints/sec */
		avg_wire_size *= 40;
		avg_wire_size += 1696;
	} else if (avg_wire_size <= 1084) {
		/* 60K ints/sec to 36K ints/sec */
		avg_wire_size *= 15;
		avg_wire_size += 11452;
	} else if (avg_wire_size <= 1980) {
		/* 36K ints/sec to 30K ints/sec */
		avg_wire_size *= 5;
		avg_wire_size += 22420;
	} else {
		/* plateau at a limit of 30K ints/sec */
		avg_wire_size = 32256;
	}

	/* If we are in low latency mode halve our delay which doubles the
	 * rate to somewhere between 100K to 16K ints/sec
	 */
	if (itr & IAVF_ITR_ADAPTIVE_LATENCY)
		avg_wire_size /= 2;

	/* Resultant value is 256 times larger than it needs to be. This
	 * gives us room to adjust the value as needed to either increase
	 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
	 *
	 * Use addition as we have already recorded the new latency flag
	 * for the ITR value.
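	 *
	 * For example, on a 10 Gb link the divisor below works out to
	 * IAVF_ITR_ADAPTIVE_MIN_INC * IAVF_AIM_MULTIPLIER_10G; assuming
	 * IAVF_ITR_ADAPTIVE_MIN_INC is 2 (per iavf_txrx.h) that is 512,
	 * so avg_wire_size = 4096 adds DIV_ROUND_UP(4096, 512) * 2 = 16
	 * to the ITR.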
	 */
	itr += DIV_ROUND_UP(avg_wire_size,
			    iavf_itr_divisor(q_vector->adapter)) *
	       IAVF_ITR_ADAPTIVE_MIN_INC;

	if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {
		itr &= IAVF_ITR_ADAPTIVE_LATENCY;
		itr += IAVF_ITR_ADAPTIVE_MAX_USECS;
	}

clear_counts:
	/* write back value */
	rc->target_itr = itr;

	/* next update should occur within next jiffy */
	rc->next_update = next_update + 1;

	rc->total_bytes = 0;
	rc->total_packets = 0;
}

/**
 * iavf_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
 * Return 0 on success, negative on error
 **/
int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_bi);
	bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!tx_ring->tx_bi)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct iavf_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			 tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->tx_stats.prev_pkt_ctr = -1;
	return 0;

err:
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;
	return -ENOMEM;
}

/**
 * iavf_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/
void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_bi)
		return;

	if (rx_ring->skb) {
		dev_kfree_skb(rx_ring->skb);
		rx_ring->skb = NULL;
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct iavf_rx_buffer *rx_bi = &rx_ring->rx_bi[i];

		if (!rx_bi->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      rx_bi->dma,
					      rx_bi->page_offset,
					      rx_ring->rx_buf_len,
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
				     iavf_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE,
				     IAVF_RX_DMA_ATTR);

		__page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);

		rx_bi->page = NULL;
		rx_bi->page_offset = 0;
	}

	bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * iavf_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void iavf_free_rx_resources(struct iavf_ring *rx_ring)
{
	iavf_clean_rx_ring(rx_ring);
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;

	if (rx_ring->desc) {
		dma_free_coherent(rx_ring->dev, rx_ring->size,
				  rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * iavf_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int bi_size;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_bi);
	bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!rx_ring->rx_bi)
		goto err;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union iavf_32byte_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			 rx_ring->size);
		goto err;
	}

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
err:
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;
	return -ENOMEM;
}

/**
 * iavf_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
static inline void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * iavf_rx_offset - Return expected offset into page to access data
 * @rx_ring: Ring we are requesting offset of
 *
 * Returns the offset value for ring into the data buffer.
 */
static inline unsigned int iavf_rx_offset(struct iavf_ring *rx_ring)
{
	return ring_uses_build_skb(rx_ring) ? IAVF_SKB_PAD : 0;
}

/**
 * iavf_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buffer struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 **/
static bool iavf_alloc_mapped_page(struct iavf_ring *rx_ring,
				   struct iavf_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page)) {
		rx_ring->rx_stats.page_reuse_count++;
		return true;
	}

	/* alloc new page for storage */
	page = dev_alloc_pages(iavf_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 iavf_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE,
				 IAVF_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, iavf_rx_pg_order(rx_ring));
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = iavf_rx_offset(rx_ring);

	/* initialize pagecnt_bias to 1 representing we fully own page */
	bi->pagecnt_bias = 1;

	return true;
}

/**
 * iavf_receive_skb - Send a completed packet up the stack
 * @rx_ring: rx ring in play
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void iavf_receive_skb(struct iavf_ring *rx_ring,
			     struct sk_buff *skb, u16 vlan_tag)
{
	struct iavf_q_vector *q_vector = rx_ring->q_vector;

	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (vlan_tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
	else if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_STAG_RX) &&
		 vlan_tag & VLAN_VID_MASK)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag);

	napi_gro_receive(&q_vector->napi, skb);
}

/**
 * iavf_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail
 **/
bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
{
	u16 ntu = rx_ring->next_to_use;
	union iavf_rx_desc *rx_desc;
	struct iavf_rx_buffer *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	rx_desc = IAVF_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_bi[ntu];

	do {
		if (!iavf_alloc_mapped_page(rx_ring, bi))
			goto no_buffers;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = IAVF_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_bi;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.qword1.status_error_len = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		iavf_release_rx_desc(rx_ring, ntu);

	return false;

no_buffers:
	if (rx_ring->next_to_use != ntu)
		iavf_release_rx_desc(rx_ring, ntu);

	/* make sure to come back via polling to try again after
	 * allocation failure
	 */
	return true;
}

/**
 * iavf_rx_checksum - Indicate in skb if hw indicated a good cksum
 * @vsi: the VSI we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 **/
static inline void iavf_rx_checksum(struct iavf_vsi *vsi,
				    struct sk_buff *skb,
				    union iavf_rx_desc *rx_desc)
{
	struct iavf_rx_ptype_decoded decoded;
	u32 rx_error, rx_status;
	bool ipv4, ipv6;
	u8 ptype;
	u64 qword;

	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT;
	rx_error = (qword & IAVF_RXD_QW1_ERROR_MASK) >>
		   IAVF_RXD_QW1_ERROR_SHIFT;
	rx_status = (qword & IAVF_RXD_QW1_STATUS_MASK) >>
		    IAVF_RXD_QW1_STATUS_SHIFT;
	decoded = decode_rx_desc_ptype(ptype);

	skb->ip_summed = CHECKSUM_NONE;

	skb_checksum_none_assert(skb);

	/* Rx csum enabled and ip headers found? */
	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
		return;

	/* did the hardware decode the packet and checksum? */
	if (!(rx_status & BIT(IAVF_RX_DESC_STATUS_L3L4P_SHIFT)))
		return;

	/* both known and outer_ip must be set for the below code to work */
	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV6);

	if (ipv4 &&
	    (rx_error & (BIT(IAVF_RX_DESC_ERROR_IPE_SHIFT) |
			 BIT(IAVF_RX_DESC_ERROR_EIPE_SHIFT))))
		goto checksum_fail;

	/* likely incorrect csum if alternate IP extension headers found */
	if (ipv6 &&
	    rx_status & BIT(IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT))
		/* don't increment checksum err here, non-fatal err */
		return;

	/* there was some L4 error, count error and punt packet to the stack */
	if (rx_error & BIT(IAVF_RX_DESC_ERROR_L4E_SHIFT))
		goto checksum_fail;

	/* handle packets that were not able to be checksummed due
	 * to arrival speed, in this case the stack can compute
	 * the csum.
	 */
	if (rx_error & BIT(IAVF_RX_DESC_ERROR_PPRS_SHIFT))
		return;

	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case IAVF_RX_PTYPE_INNER_PROT_TCP:
	case IAVF_RX_PTYPE_INNER_PROT_UDP:
	case IAVF_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		fallthrough;
	default:
		break;
	}

	return;

checksum_fail:
	vsi->back->hw_csum_rx_error++;
}

/**
 * iavf_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 **/
static inline int iavf_ptype_to_htype(u8 ptype)
{
	struct iavf_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return PKT_HASH_TYPE_NONE;

	if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
	    decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4)
		return PKT_HASH_TYPE_L4;
	else if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
		 decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3)
		return PKT_HASH_TYPE_L3;
	else
		return PKT_HASH_TYPE_L2;
}

/**
 * iavf_rx_hash - set the hash value in the skb
 * @ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: skb currently being received and modified
 * @rx_ptype: Rx packet type
 **/
static inline void iavf_rx_hash(struct iavf_ring *ring,
				union iavf_rx_desc *rx_desc,
				struct sk_buff *skb,
				u8 rx_ptype)
{
	u32 hash;
	const __le64 rss_mask =
		cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH <<
			    IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT);

	/* skip the hash entirely if the RXHASH offload is disabled */
	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
		skb_set_hash(skb, hash, iavf_ptype_to_htype(rx_ptype));
	}
}

/**
 * iavf_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 * @rx_ptype: the packet type decoded by hardware
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
 **/
static inline
void iavf_process_skb_fields(struct iavf_ring *rx_ring,
			     union iavf_rx_desc *rx_desc, struct sk_buff *skb,
			     u8 rx_ptype)
{
	iavf_rx_hash(rx_ring, rx_desc, skb, rx_ptype);

	iavf_rx_checksum(rx_ring->vsi, skb, rx_desc);

	skb_record_rx_queue(skb, rx_ring->queue_index);

	/* modifies the skb - consumes the enet header */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

/**
 * iavf_cleanup_headers - Correct empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being fixed
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool iavf_cleanup_headers(struct iavf_ring *rx_ring, struct sk_buff *skb)
{
	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

/**
 * iavf_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
static void iavf_reuse_rx_page(struct iavf_ring *rx_ring,
			       struct iavf_rx_buffer *old_buff)
{
	struct iavf_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_bi[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	new_buff->dma = old_buff->dma;
	new_buff->page = old_buff->page;
	new_buff->page_offset = old_buff->page_offset;
	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}

/**
 * iavf_can_reuse_rx_page - Determine if this page can be reused by
 * the adapter for another receive
 *
 * @rx_buffer: buffer containing the page
 *
 * If page is reusable, rx_buffer->page_offset is adjusted to point to
 * an unused region in the page.
 *
 * For small pages, @truesize will be a constant value, half the size
 * of the memory at page. We'll attempt to alternate between high and
 * low halves of the page, with one half ready for use by the hardware
 * and the other half being consumed by the stack. We use the page
 * ref count to determine whether the stack has finished consuming the
 * portion of this page that was passed up with a previous packet. If
 * the page ref count is >1, we'll assume the "other" half page is
 * still busy, and this page cannot be reused.
 *
 * For larger pages, @truesize will be the actual space used by the
 * received packet (adjusted upward to an even multiple of the cache
 * line size). This will advance through the page by the amount
 * actually consumed by the received packets while there is still
 * space for a buffer. Each region of larger pages will be used at
 * most once, after which the page will not be reused.
 *
 * In either case, if the page is reusable its refcount is increased.
 **/
static bool iavf_can_reuse_rx_page(struct iavf_rx_buffer *rx_buffer)
{
	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
	struct page *page = rx_buffer->page;

	/* Is any reuse possible? */
	if (!dev_page_is_reusable(page))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((page_count(page) - pagecnt_bias) > 1))
		return false;
#else
#define IAVF_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IAVF_RXBUFFER_2048)
	if (rx_buffer->page_offset > IAVF_LAST_OFFSET)
		return false;
#endif

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(!pagecnt_bias)) {
		page_ref_add(page, USHRT_MAX);
		rx_buffer->pagecnt_bias = USHRT_MAX;
	}

	return true;
}

/**
 * iavf_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * It will just attach the page as a frag to the skb.
 *
 * The function will then update the page offset.
 **/
static void iavf_add_rx_frag(struct iavf_ring *rx_ring,
			     struct iavf_rx_buffer *rx_buffer,
			     struct sk_buff *skb,
			     unsigned int size)
{
#if (PAGE_SIZE < 8192)
	unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring));
#endif

	if (!size)
		return;

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);

	/* page is being used so we must update the page offset */
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif
}

/**
 * iavf_get_rx_buffer - Fetch Rx buffer and synchronize data for use
 * @rx_ring: rx descriptor ring to transact packets on
 * @size: size of buffer to add to skb
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 */
static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,
						 const unsigned int size)
{
	struct iavf_rx_buffer *rx_buffer;

	rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
	prefetchw(rx_buffer->page);
	if (!size)
		return rx_buffer;

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);

	/* We have pulled a buffer for use, so decrement pagecnt_bias */
	rx_buffer->pagecnt_bias--;

	return rx_buffer;
}

/**
 * iavf_construct_skb - Allocate skb and populate it
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: rx buffer to pull data from
 * @size: size of buffer to add to skb
 *
 * This function allocates an skb. It then populates it with the page
 * data from the current receive descriptor, taking care to set up the
 * skb correctly.
 */
static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
					  struct iavf_rx_buffer *rx_buffer,
					  unsigned int size)
{
	void *va;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(size);
#endif
	unsigned int headlen;
	struct sk_buff *skb;

	if (!rx_buffer)
		return NULL;
	/* prefetch first cache line of first page */
	va = page_address(rx_buffer->page) + rx_buffer->page_offset;
	net_prefetch(va);

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
			       IAVF_RX_HDR_SIZE,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > IAVF_RX_HDR_SIZE)
		headlen = eth_get_headlen(skb->dev, va, IAVF_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));

	/* update all of the pointers */
	size -= headlen;
	if (size) {
		skb_add_rx_frag(skb, 0, rx_buffer->page,
				rx_buffer->page_offset + headlen,
				size, truesize);

		/* buffer is used by skb, update page_offset */
#if (PAGE_SIZE < 8192)
		rx_buffer->page_offset ^= truesize;
#else
		rx_buffer->page_offset += truesize;
#endif
	} else {
		/* buffer is unused, reset bias back to rx_buffer */
		rx_buffer->pagecnt_bias++;
	}

	return skb;
}

/**
 * iavf_build_skb - Build skb around an existing buffer
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buffer: Rx buffer to pull data from
 * @size: size of buffer to add to skb
 *
 * This function builds an skb around an existing Rx buffer, taking care
 * to set up the skb correctly and avoid any memcpy overhead.
 */
static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
				      struct iavf_rx_buffer *rx_buffer,
				      unsigned int size)
{
	void *va;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(IAVF_SKB_PAD + size);
#endif
	struct sk_buff *skb;

	if (!rx_buffer)
		return NULL;
	/* prefetch first cache line of first page */
	va = page_address(rx_buffer->page) + rx_buffer->page_offset;
	net_prefetch(va);

	/* build an skb around the page buffer */
	skb = napi_build_skb(va - IAVF_SKB_PAD, truesize);
	if (unlikely(!skb))
		return NULL;

	/* update pointers within the skb to store the data */
	skb_reserve(skb, IAVF_SKB_PAD);
	__skb_put(skb, size);

	/* buffer is used by skb, update page_offset */
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif

	return skb;
}

/**
 * iavf_put_rx_buffer - Clean up used buffer and either recycle or free
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: rx buffer to pull data from
 *
 * This function will clean up the contents of the rx_buffer. It will
 * either recycle the buffer or unmap it and free the associated resources.
 */
static void iavf_put_rx_buffer(struct iavf_ring *rx_ring,
			       struct iavf_rx_buffer *rx_buffer)
{
	if (!rx_buffer)
		return;

	if (iavf_can_reuse_rx_page(rx_buffer)) {
		/* hand second half of page back to the ring */
		iavf_reuse_rx_page(rx_ring, rx_buffer);
		rx_ring->rx_stats.page_reuse_count++;
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
				     iavf_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE, IAVF_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buffer->page,
					rx_buffer->pagecnt_bias);
	}

	/* clear contents of buffer_info */
	rx_buffer->page = NULL;
}

/**
 * iavf_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean. If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
			    union iavf_rx_desc *rx_desc,
			    struct sk_buff *skb)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(IAVF_RX_DESC(rx_ring, ntc));

	/* if we are the last buffer then there is nothing else to do */
#define IAVF_RXD_EOF BIT(IAVF_RX_DESC_STATUS_EOF_SHIFT)
	if (likely(iavf_test_staterr(rx_desc, IAVF_RXD_EOF)))
		return false;

	rx_ring->rx_stats.non_eop_descs++;

	return true;
}

/**
 * iavf_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @rx_ring: rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing. The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 **/
static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	struct sk_buff *skb = rx_ring->skb;
	u16 cleaned_count = IAVF_DESC_UNUSED(rx_ring);
	bool failure = false;

	while (likely(total_rx_packets < (unsigned int)budget)) {
		struct iavf_rx_buffer *rx_buffer;
		union iavf_rx_desc *rx_desc;
		unsigned int size;
		u16 vlan_tag = 0;
		u8 rx_ptype;
		u64 qword;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IAVF_RX_BUFFER_WRITE) {
			failure = failure ||
				  iavf_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IAVF_RX_DESC(rx_ring, rx_ring->next_to_clean);

		/* status_error_len will always be zero for unused descriptors
		 * because it's cleared in cleanup, and overlaps with hdr_addr
		 * which is always zero because packet split isn't used, if the
		 * hardware wrote DD then the length will be non-zero
		 */
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */
		dma_rmb();
#define IAVF_RXD_DD BIT(IAVF_RX_DESC_STATUS_DD_SHIFT)
		if (!iavf_test_staterr(rx_desc, IAVF_RXD_DD))
			break;

		size = (qword & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
		       IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;

		iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb);
		rx_buffer = iavf_get_rx_buffer(rx_ring, size);

		/* retrieve a buffer from the ring */
		if (skb)
			iavf_add_rx_frag(rx_ring, rx_buffer, skb, size);
		else if (ring_uses_build_skb(rx_ring))
			skb = iavf_build_skb(rx_ring, rx_buffer, size);
		else
			skb = iavf_construct_skb(rx_ring, rx_buffer, size);

		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->rx_stats.alloc_buff_failed++;
			if (rx_buffer)
				rx_buffer->pagecnt_bias++;
			break;
		}

		iavf_put_rx_buffer(rx_ring, rx_buffer);
		cleaned_count++;

		if (iavf_is_non_eop(rx_ring, rx_desc, skb))
			continue;

		/* ERR_MASK will only have valid bits if EOP set, and
		 * what we are doing here is actually checking
		 * IAVF_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
		 * the error field
		 */
		if (unlikely(iavf_test_staterr(rx_desc, BIT(IAVF_RXD_QW1_ERROR_SHIFT)))) {
			dev_kfree_skb_any(skb);
			skb = NULL;
			continue;
		}

		if (iavf_cleanup_headers(rx_ring, skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
		rx_ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >>
			   IAVF_RXD_QW1_PTYPE_SHIFT;

		/* populate checksum, VLAN, and protocol */
		iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);

		if (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT) &&
		    rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1)
			vlan_tag = le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1);
		if (rx_desc->wb.qword2.ext_status &
		    cpu_to_le16(BIT(IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) &&
		    rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2)
			vlan_tag = le16_to_cpu(rx_desc->wb.qword2.l2tag2_2);

		iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
		iavf_receive_skb(rx_ring, skb, vlan_tag);
		skb = NULL;

		/* update budget accounting */
		total_rx_packets++;
	}

	rx_ring->skb = skb;

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	rx_ring->q_vector->rx.total_packets += total_rx_packets;
	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;

	/* guarantee a trip back through this routine if there was a failure */
	return failure ? budget : (int)total_rx_packets;
}

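/**
 * iavf_buildreg_itr - build a value for writing to the ITR control register
 * @type: ITR index to program (e.g. IAVF_RX_ITR, IAVF_TX_ITR, IAVF_ITR_NONE)
 * @itr: ITR value, in microseconds
 **/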
static inline u32 iavf_buildreg_itr(const int type, u16 itr)
{
	u32 val;

	/* We don't bother with setting the CLEARPBA bit as the data sheet
	 * points out doing so is "meaningless since it was already
	 * auto-cleared". The auto-clearing happens when the interrupt is
	 * asserted.
	 *
	 * Hardware errata 28 also indicates that writing to a
	 * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear
	 * an event in the PBA anyway so we need to rely on the automask
	 * to hold pending events for us until the interrupt is re-enabled.
	 *
	 * The itr value is reported in microseconds, and the register
	 * value is recorded in 2 microsecond units. For this reason we
	 * only need to shift by the interval shift - 1 instead of the
	 * full value.
	 */
	itr &= IAVF_ITR_MASK;

	val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
	      (type << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
	      (itr << (IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT - 1));

	return val;
}

/* a small macro to shorten up some long lines */
#define INTREG IAVF_VFINT_DYN_CTLN1

/* The act of updating the ITR will cause it to immediately trigger. In order
 * to prevent this from throwing off adaptive update statistics we defer the
 * update so that it can only happen so often. So after either Tx or Rx are
 * updated we make the adaptive scheme wait until either the ITR completely
 * expires via the next_update expiration or we have been through at least
 * 3 interrupts.
 */
#define ITR_COUNTDOWN_START 3

/**
 * iavf_update_enable_itr - Update itr and re-enable MSIX interrupt
 * @vsi: the VSI we care about
 * @q_vector: q_vector for which itr is being updated and interrupt enabled
 *
 **/
static inline void iavf_update_enable_itr(struct iavf_vsi *vsi,
					  struct iavf_q_vector *q_vector)
{
	struct iavf_hw *hw = &vsi->back->hw;
	u32 intval;

	/* These will do nothing if dynamic updates are not enabled */
	iavf_update_itr(q_vector, &q_vector->tx);
	iavf_update_itr(q_vector, &q_vector->rx);

	/* This block of logic allows us to get away with only updating
	 * one ITR value with each interrupt. The idea is to perform a
	 * pseudo-lazy update with the following criteria.
	 *
	 * 1. Rx is given higher priority than Tx if both are in same state
	 * 2. Reducing an ITR is given the highest priority
	 * 3. We then give priority to increasing ITR based on amount
	 */
	if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
		/* Rx ITR needs to be reduced, this is highest priority */
		intval = iavf_buildreg_itr(IAVF_RX_ITR,
					   q_vector->rx.target_itr);
		q_vector->rx.current_itr = q_vector->rx.target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
		   ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
		    (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
		/* Tx ITR needs to be reduced, this is second priority
		 * Tx ITR needs to be increased more than Rx, fourth priority
		 */
		intval = iavf_buildreg_itr(IAVF_TX_ITR,
					   q_vector->tx.target_itr);
		q_vector->tx.current_itr = q_vector->tx.target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
		/* Rx ITR needs to be increased, third priority */
		intval = iavf_buildreg_itr(IAVF_RX_ITR,
					   q_vector->rx.target_itr);
		q_vector->rx.current_itr = q_vector->rx.target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else {
		/* No ITR update, lowest priority */
		intval = iavf_buildreg_itr(IAVF_ITR_NONE, 0);
		if (q_vector->itr_countdown)
			q_vector->itr_countdown--;
	}

	if (!test_bit(__IAVF_VSI_DOWN, vsi->state))
		wr32(hw, INTREG(q_vector->reg_idx), intval);
}

1714 /**
1715 * iavf_napi_poll - NAPI polling Rx/Tx cleanup routine
1716 * @napi: napi struct with our devices info in it
1717 * @budget: amount of work driver is allowed to do this pass, in packets
1718 *
1719 * This function will clean all queues associated with a q_vector.
1720 *
1721 * Returns the amount of work done
1722 **/
1723 int iavf_napi_poll(struct napi_struct *napi, int budget)
1724 {
1725 struct iavf_q_vector *q_vector =
1726 container_of(napi, struct iavf_q_vector, napi);
1727 struct iavf_vsi *vsi = q_vector->vsi;
1728 struct iavf_ring *ring;
1729 bool clean_complete = true;
1730 bool arm_wb = false;
1731 int budget_per_ring;
1732 int work_done = 0;
1733
1734 if (test_bit(__IAVF_VSI_DOWN, vsi->state)) {
1735 napi_complete(napi);
1736 return 0;
1737 }
1738
1739 /* Since the actual Tx work is minimal, we can give the Tx a larger
1740 * budget and be more aggressive about cleaning up the Tx descriptors.
1741 */
1742 iavf_for_each_ring(ring, q_vector->tx) {
1743 if (!iavf_clean_tx_irq(vsi, ring, budget)) {
1744 clean_complete = false;
1745 continue;
1746 }
1747 arm_wb |= ring->arm_wb;
1748 ring->arm_wb = false;
1749 }
1750
1751 /* Handle case where we are called by netpoll with a budget of 0 */
1752 if (budget <= 0)
1753 goto tx_only;
1754
1755 /* We attempt to distribute budget to each Rx queue fairly, but don't
1756 * allow the budget to go below 1 because that would exit polling early.
1757 */
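/* e.g. the default NAPI budget of 64 split across 4 ring pairs gives each
 * Rx ring 16 packets; with more rings than budget, max() still hands every
 * ring a budget of at least 1.
 */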
1758 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
1759
1760 iavf_for_each_ring(ring, q_vector->rx) {
1761 int cleaned = iavf_clean_rx_irq(ring, budget_per_ring);
1762
1763 work_done += cleaned;
1764 /* if we clean as many as budgeted, we must not be done */
1765 if (cleaned >= budget_per_ring)
1766 clean_complete = false;
1767 }
1768
1769 /* If work not completed, return budget and polling will return */
1770 if (!clean_complete) {
1771 int cpu_id = smp_processor_id();
1772
1773 /* It is possible that the interrupt affinity has changed but,
1774 * if the cpu is pegged at 100%, polling will never exit while
1775 * traffic continues and the interrupt will be stuck on this
1776 * cpu. We check to make sure affinity is correct before we
1777 * continue to poll, otherwise we must stop polling so the
1778 * interrupt can move to the correct cpu.
1779 */
1780 if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
1781 /* Tell napi that we are done polling */
1782 napi_complete_done(napi, work_done);
1783
1784 /* Force an interrupt */
1785 iavf_force_wb(vsi, q_vector);
1786
1787 /* Return budget-1 so that polling stops */
1788 return budget - 1;
1789 }
1790 tx_only:
1791 if (arm_wb) {
1792 q_vector->tx.ring[0].tx_stats.tx_force_wb++;
1793 iavf_enable_wb_on_itr(vsi, q_vector);
1794 }
1795 return budget;
1796 }
1797
1798 if (q_vector->tx.ring[0].flags & IAVF_TXR_FLAGS_WB_ON_ITR)
1799 q_vector->arm_wb_state = false;
1800
1801 /* Exit the polling mode, but don't re-enable interrupts if stack might
1802 * poll us due to busy-polling
1803 */
1804 if (likely(napi_complete_done(napi, work_done)))
1805 iavf_update_enable_itr(vsi, q_vector);
1806
1807 return min_t(int, work_done, budget - 1);
1808 }
1809
1810 /**
1811 * iavf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
1812 * @skb: send buffer
1813 * @tx_ring: ring to send buffer on
1814 * @flags: the tx flags to be set
1815 *
1816 * Checks the skb and sets up the generic transmit flags related to VLAN
1817 * tagging for the HW, such as VLAN, DCB, etc. Nothing is returned; if the
1818 * requested tag location is unsupported, the flags are simply left unset.
1821 **/
1822 static void iavf_tx_prepare_vlan_flags(struct sk_buff *skb,
1823 struct iavf_ring *tx_ring, u32 *flags)
1824 {
1825 u32 tx_flags = 0;
1826
1827
1828 /* stack will only request hardware VLAN insertion offload for protocols
1829 * that the driver supports and has enabled
1830 */
1831 if (!skb_vlan_tag_present(skb))
1832 return;
1833
1834 tx_flags |= skb_vlan_tag_get(skb) << IAVF_TX_FLAGS_VLAN_SHIFT;
1835 if (tx_ring->flags & IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2) {
1836 tx_flags |= IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
1837 } else if (tx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
1838 tx_flags |= IAVF_TX_FLAGS_HW_VLAN;
1839 } else {
1840 dev_dbg(tx_ring->dev, "Unsupported Tx VLAN tag location requested\n");
1841 return;
1842 }
1843
1844 *flags = tx_flags;
1845 }
1846
1847 /**
1848 * iavf_tso - set up the tso context descriptor
1849 * @first: pointer to first Tx buffer for xmit
1850 * @hdr_len: ptr to the size of the packet header
1851 * @cd_type_cmd_tso_mss: Quad Word 1
1852 *
1853 * Returns 0 if no TSO can happen, 1 if TSO is set up, or a negative error code.
1854 **/
1855 static int iavf_tso(struct iavf_tx_buffer *first, u8 *hdr_len,
1856 u64 *cd_type_cmd_tso_mss)
1857 {
1858 struct sk_buff *skb = first->skb;
1859 u64 cd_cmd, cd_tso_len, cd_mss;
1860 union {
1861 struct iphdr *v4;
1862 struct ipv6hdr *v6;
1863 unsigned char *hdr;
1864 } ip;
1865 union {
1866 struct tcphdr *tcp;
1867 struct udphdr *udp;
1868 unsigned char *hdr;
1869 } l4;
1870 u32 paylen, l4_offset;
1871 u16 gso_segs, gso_size;
1872 int err;
1873
1874 if (skb->ip_summed != CHECKSUM_PARTIAL)
1875 return 0;
1876
1877 if (!skb_is_gso(skb))
1878 return 0;
1879
1880 err = skb_cow_head(skb, 0);
1881 if (err < 0)
1882 return err;
1883
1884 ip.hdr = skb_network_header(skb);
1885 l4.hdr = skb_transport_header(skb);
1886
1887 /* initialize outer IP header fields */
1888 if (ip.v4->version == 4) {
1889 ip.v4->tot_len = 0;
1890 ip.v4->check = 0;
1891 } else {
1892 ip.v6->payload_len = 0;
1893 }
1894
1895 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
1896 SKB_GSO_GRE_CSUM |
1897 SKB_GSO_IPXIP4 |
1898 SKB_GSO_IPXIP6 |
1899 SKB_GSO_UDP_TUNNEL |
1900 SKB_GSO_UDP_TUNNEL_CSUM)) {
1901 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
1902 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
1903 l4.udp->len = 0;
1904
1905 /* determine offset of outer transport header */
1906 l4_offset = l4.hdr - skb->data;
1907
1908 /* remove payload length from outer checksum */
1909 paylen = skb->len - l4_offset;
1910 csum_replace_by_diff(&l4.udp->check,
1911 (__force __wsum)htonl(paylen));
1912 }
1913
1914 /* reset pointers to inner headers */
1915 ip.hdr = skb_inner_network_header(skb);
1916 l4.hdr = skb_inner_transport_header(skb);
1917
1918 /* initialize inner IP header fields */
1919 if (ip.v4->version == 4) {
1920 ip.v4->tot_len = 0;
1921 ip.v4->check = 0;
1922 } else {
1923 ip.v6->payload_len = 0;
1924 }
1925 }
1926
1927 /* determine offset of inner transport header */
1928 l4_offset = l4.hdr - skb->data;
1929 /* remove payload length from inner checksum */
1930 paylen = skb->len - l4_offset;
1931
1932 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
1933 csum_replace_by_diff(&l4.udp->check,
1934 (__force __wsum)htonl(paylen));
1935 /* compute length of UDP segmentation header */
1936 *hdr_len = (u8)(sizeof(struct udphdr) + l4_offset);
1937 } else {
1938 csum_replace_by_diff(&l4.tcp->check,
1939 (__force __wsum)htonl(paylen));
1940 /* compute length of TCP segmentation header */
1941 *hdr_len = (u8)((l4.tcp->doff * 4) + l4_offset);
1942 }
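/* e.g. a no-options TCP header (doff = 5) behind 14 bytes of Ethernet
 * and a 20 byte IPv4 header gives *hdr_len = 20 + 34 = 54
 */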
1943
1944 /* pull values out of skb_shinfo */
1945 gso_size = skb_shinfo(skb)->gso_size;
1946 gso_segs = skb_shinfo(skb)->gso_segs;
1947
1948 /* update GSO size and bytecount with header size */
1949 first->gso_segs = gso_segs;
1950 first->bytecount += (first->gso_segs - 1) * *hdr_len;
1951
1952 /* find the field values */
1953 cd_cmd = IAVF_TX_CTX_DESC_TSO;
1954 cd_tso_len = skb->len - *hdr_len;
1955 cd_mss = gso_size;
1956 *cd_type_cmd_tso_mss |= (cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
1957 (cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
1958 (cd_mss << IAVF_TXD_CTX_QW1_MSS_SHIFT);
1959 return 1;
1960 }
1961
1962 /**
1963 * iavf_tx_enable_csum - Enable Tx checksum offloads
1964 * @skb: send buffer
1965 * @tx_flags: pointer to Tx flags currently set
1966 * @td_cmd: Tx descriptor command bits to set
1967 * @td_offset: Tx descriptor header offsets to set
1968 * @tx_ring: Tx descriptor ring
1969 * @cd_tunneling: ptr to context desc bits
1970 **/
1971 static int iavf_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
1972 u32 *td_cmd, u32 *td_offset,
1973 struct iavf_ring *tx_ring,
1974 u32 *cd_tunneling)
1975 {
1976 union {
1977 struct iphdr *v4;
1978 struct ipv6hdr *v6;
1979 unsigned char *hdr;
1980 } ip;
1981 union {
1982 struct tcphdr *tcp;
1983 struct udphdr *udp;
1984 unsigned char *hdr;
1985 } l4;
1986 unsigned char *exthdr;
1987 u32 offset, cmd = 0;
1988 __be16 frag_off;
1989 u8 l4_proto = 0;
1990
1991 if (skb->ip_summed != CHECKSUM_PARTIAL)
1992 return 0;
1993
1994 ip.hdr = skb_network_header(skb);
1995 l4.hdr = skb_transport_header(skb);
1996
1997 /* compute outer L2 header size */
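/* MACLEN is in 2-byte words: a plain 14-byte Ethernet header encodes as 7 */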
1998 offset = ((ip.hdr - skb->data) / 2) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
1999
2000 if (skb->encapsulation) {
2001 u32 tunnel = 0;
2002 /* define outer network header type */
2003 if (*tx_flags & IAVF_TX_FLAGS_IPV4) {
2004 tunnel |= (*tx_flags & IAVF_TX_FLAGS_TSO) ?
2005 IAVF_TX_CTX_EXT_IP_IPV4 :
2006 IAVF_TX_CTX_EXT_IP_IPV4_NO_CSUM;
2007
2008 l4_proto = ip.v4->protocol;
2009 } else if (*tx_flags & IAVF_TX_FLAGS_IPV6) {
2010 tunnel |= IAVF_TX_CTX_EXT_IP_IPV6;
2011
2012 exthdr = ip.hdr + sizeof(*ip.v6);
2013 l4_proto = ip.v6->nexthdr;
2014 if (l4.hdr != exthdr)
2015 ipv6_skip_exthdr(skb, exthdr - skb->data,
2016 &l4_proto, &frag_off);
2017 }
2018
2019 /* define outer transport */
2020 switch (l4_proto) {
2021 case IPPROTO_UDP:
2022 tunnel |= IAVF_TXD_CTX_UDP_TUNNELING;
2023 *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
2024 break;
2025 case IPPROTO_GRE:
2026 tunnel |= IAVF_TXD_CTX_GRE_TUNNELING;
2027 *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
2028 break;
2029 case IPPROTO_IPIP:
2030 case IPPROTO_IPV6:
2031 *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
2032 l4.hdr = skb_inner_network_header(skb);
2033 break;
2034 default:
2035 if (*tx_flags & IAVF_TX_FLAGS_TSO)
2036 return -1;
2037
2038 skb_checksum_help(skb);
2039 return 0;
2040 }
2041
2042 /* compute outer L3 header size */
2043 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
2044 IAVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
2045
2046 /* switch IP header pointer from outer to inner header */
2047 ip.hdr = skb_inner_network_header(skb);
2048
2049 /* compute tunnel header size */
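/* NATLEN is in 2-byte words and covers everything between the outer L4
 * header and the inner IP header (e.g. UDP + VXLAN + inner Ethernet)
 */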
2050 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
2051 IAVF_TXD_CTX_QW0_NATLEN_SHIFT;
2052
2053 /* indicate if we need to offload outer UDP header */
2054 if ((*tx_flags & IAVF_TX_FLAGS_TSO) &&
2055 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2056 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
2057 tunnel |= IAVF_TXD_CTX_QW0_L4T_CS_MASK;
2058
2059 /* record tunnel offload values */
2060 *cd_tunneling |= tunnel;
2061
2062 /* switch L4 header pointer from outer to inner */
2063 l4.hdr = skb_inner_transport_header(skb);
2064 l4_proto = 0;
2065
2066 /* reset type as we transition from outer to inner headers */
2067 *tx_flags &= ~(IAVF_TX_FLAGS_IPV4 | IAVF_TX_FLAGS_IPV6);
2068 if (ip.v4->version == 4)
2069 *tx_flags |= IAVF_TX_FLAGS_IPV4;
2070 if (ip.v6->version == 6)
2071 *tx_flags |= IAVF_TX_FLAGS_IPV6;
2072 }
2073
2074 /* Enable IP checksum offloads */
2075 if (*tx_flags & IAVF_TX_FLAGS_IPV4) {
2076 l4_proto = ip.v4->protocol;
2077 /* the stack computes the IP header checksum already, the only time
2078 * we need the hardware to recompute it is in the case of TSO.
2079 */
2080 cmd |= (*tx_flags & IAVF_TX_FLAGS_TSO) ?
2081 IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM :
2082 IAVF_TX_DESC_CMD_IIPT_IPV4;
2083 } else if (*tx_flags & IAVF_TX_FLAGS_IPV6) {
2084 cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
2085
2086 exthdr = ip.hdr + sizeof(*ip.v6);
2087 l4_proto = ip.v6->nexthdr;
2088 if (l4.hdr != exthdr)
2089 ipv6_skip_exthdr(skb, exthdr - skb->data,
2090 &l4_proto, &frag_off);
2091 }
2092
2093 /* compute inner L3 header size */
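/* IPLEN is in 4-byte words: a 20-byte option-less IPv4 header encodes as 5 */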
2094 offset |= ((l4.hdr - ip.hdr) / 4) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
2095
2096 /* Enable L4 checksum offloads */
2097 switch (l4_proto) {
2098 case IPPROTO_TCP:
2099 /* enable checksum offloads */
2100 cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
2101 offset |= l4.tcp->doff << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2102 break;
2103 case IPPROTO_SCTP:
2104 /* enable SCTP checksum offload */
2105 cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
2106 offset |= (sizeof(struct sctphdr) >> 2) <<
2107 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2108 break;
2109 case IPPROTO_UDP:
2110 /* enable UDP checksum offload */
2111 cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
2112 offset |= (sizeof(struct udphdr) >> 2) <<
2113 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2114 break;
2115 default:
2116 if (*tx_flags & IAVF_TX_FLAGS_TSO)
2117 return -1;
2118 skb_checksum_help(skb);
2119 return 0;
2120 }
2121
2122 *td_cmd |= cmd;
2123 *td_offset |= offset;
2124
2125 return 1;
2126 }
2127
2128 /**
2129 * iavf_create_tx_ctx - Build the Tx context descriptor
2130 * @tx_ring: ring to create the descriptor on
2131 * @cd_type_cmd_tso_mss: Quad Word 1
2132 * @cd_tunneling: Quad Word 0 - bits 0-31
2133 * @cd_l2tag2: Quad Word 0 - bits 32-63
2134 **/
2135 static void iavf_create_tx_ctx(struct iavf_ring *tx_ring,
2136 const u64 cd_type_cmd_tso_mss,
2137 const u32 cd_tunneling, const u32 cd_l2tag2)
2138 {
2139 struct iavf_tx_context_desc *context_desc;
2140 int i = tx_ring->next_to_use;
2141
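/* nothing to do if the descriptor would carry no TSO, tunneling, or
 * second VLAN tag data
 */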
2142 if ((cd_type_cmd_tso_mss == IAVF_TX_DESC_DTYPE_CONTEXT) &&
2143 !cd_tunneling && !cd_l2tag2)
2144 return;
2145
2146 /* grab the next descriptor */
2147 context_desc = IAVF_TX_CTXTDESC(tx_ring, i);
2148
2149 i++;
2150 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2151
2152 /* cpu_to_le32 and assign to struct fields */
2153 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
2154 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
2155 context_desc->rsvd = cpu_to_le16(0);
2156 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
2157 }
2158
2159 /**
2160 * __iavf_chk_linearize - Check if there are more than 8 buffers per packet
2161 * @skb: send buffer
2162 *
2163 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
2164 * and so we need to figure out the cases where we need to linearize the skb.
2165 *
2166 * For TSO we need to count the TSO header and segment payload separately.
2167 * As such we need to check cases where we have 7 fragments or more as we
2168 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
2169 * the segment payload in the first descriptor, and another 7 for the
2170 * fragments.
2171 **/
2172 bool __iavf_chk_linearize(struct sk_buff *skb)
2173 {
2174 const skb_frag_t *frag, *stale;
2175 int nr_frags, sum;
2176
2177 /* no need to check if number of frags is less than 7 */
2178 nr_frags = skb_shinfo(skb)->nr_frags;
2179 if (nr_frags < (IAVF_MAX_BUFFER_TXD - 1))
2180 return false;
2181
2182 /* We need to walk through the list and validate that each group
2183 * of 6 fragments totals at least gso_size.
2184 */
2185 nr_frags -= IAVF_MAX_BUFFER_TXD - 2;
2186 frag = &skb_shinfo(skb)->frags[0];
2187
2188 /* Initialize size to the negative value of gso_size minus 1. We
2189 * use this as the worst case scenario in which the frag ahead
2190 * of us only provides one byte which is why we are limited to 6
2191 * descriptors for a single transmit as the header and previous
2192 * fragment are already consuming 2 descriptors.
2193 */
2194 sum = 1 - skb_shinfo(skb)->gso_size;
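/* e.g. with gso_size 1448 the sum starts at -1447, so every window of
 * 6 consecutive fragments must supply at least 1447 bytes or the sum
 * below goes negative and we linearize
 */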
2195
2196 /* Add size of frags 0 through 4 to create our initial sum */
2197 sum += skb_frag_size(frag++);
2198 sum += skb_frag_size(frag++);
2199 sum += skb_frag_size(frag++);
2200 sum += skb_frag_size(frag++);
2201 sum += skb_frag_size(frag++);
2202
2203 /* Walk through fragments adding latest fragment, testing it, and
2204 * then removing stale fragments from the sum.
2205 */
2206 for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
2207 int stale_size = skb_frag_size(stale);
2208
2209 sum += skb_frag_size(frag++);
2210
2211 /* The stale fragment may present us with a smaller
2212 * descriptor than the actual fragment size. To account
2213 * for that we need to remove all the data on the front and
2214 * figure out what the remainder would be in the last
2215 * descriptor associated with the fragment.
2216 */
2217 if (stale_size > IAVF_MAX_DATA_PER_TXD) {
2218 int align_pad = -(skb_frag_off(stale)) &
2219 (IAVF_MAX_READ_REQ_SIZE - 1);
2220
2221 sum -= align_pad;
2222 stale_size -= align_pad;
2223
2224 do {
2225 sum -= IAVF_MAX_DATA_PER_TXD_ALIGNED;
2226 stale_size -= IAVF_MAX_DATA_PER_TXD_ALIGNED;
2227 } while (stale_size > IAVF_MAX_DATA_PER_TXD);
2228 }
2229
2230 /* if sum is negative we failed to make sufficient progress */
2231 if (sum < 0)
2232 return true;
2233
2234 if (!nr_frags--)
2235 break;
2236
2237 sum -= stale_size;
2238 }
2239
2240 return false;
2241 }
2242
2243 /**
2244 * __iavf_maybe_stop_tx - 2nd level check for tx stop conditions
2245 * @tx_ring: the ring to be checked
2246 * @size: the size buffer we want to assure is available
2247 *
2248 * Returns -EBUSY if a stop is needed, else 0
2249 **/
2250 int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)
2251 {
2252 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
2253 /* Memory barrier before checking head and tail */
2254 smp_mb();
2255
2256 /* Check again in a case another CPU has just made room available. */
2257 if (likely(IAVF_DESC_UNUSED(tx_ring) < size))
2258 return -EBUSY;
2259
2260 /* A reprieve! - use start_queue because it doesn't call schedule */
2261 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
2262 ++tx_ring->tx_stats.restart_queue;
2263 return 0;
2264 }
2265
2266 /**
2267 * iavf_tx_map - Build the Tx descriptor
2268 * @tx_ring: ring to send buffer on
2269 * @skb: send buffer
2270 * @first: first buffer info buffer to use
2271 * @tx_flags: collected send information
2272 * @hdr_len: size of the packet header
2273 * @td_cmd: the command field in the descriptor
2274 * @td_offset: offset for checksum or crc
2275 **/
2276 static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
2277 struct iavf_tx_buffer *first, u32 tx_flags,
2278 const u8 hdr_len, u32 td_cmd, u32 td_offset)
2279 {
2280 unsigned int data_len = skb->data_len;
2281 unsigned int size = skb_headlen(skb);
2282 skb_frag_t *frag;
2283 struct iavf_tx_buffer *tx_bi;
2284 struct iavf_tx_desc *tx_desc;
2285 u16 i = tx_ring->next_to_use;
2286 u32 td_tag = 0;
2287 dma_addr_t dma;
2288
2289 if (tx_flags & IAVF_TX_FLAGS_HW_VLAN) {
2290 td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
2291 td_tag = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >>
2292 IAVF_TX_FLAGS_VLAN_SHIFT;
2293 }
2294
2295 first->tx_flags = tx_flags;
2296
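/* map the linear (header) portion of the skb; each fragment is mapped
 * separately in the loop below
 */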
2297 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
2298
2299 tx_desc = IAVF_TX_DESC(tx_ring, i);
2300 tx_bi = first;
2301
2302 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
2303 unsigned int max_data = IAVF_MAX_DATA_PER_TXD_ALIGNED;
2304
2305 if (dma_mapping_error(tx_ring->dev, dma))
2306 goto dma_error;
2307
2308 /* record length, and DMA address */
2309 dma_unmap_len_set(tx_bi, len, size);
2310 dma_unmap_addr_set(tx_bi, dma, dma);
2311
2312 /* align size to end of page */
2313 max_data += -dma & (IAVF_MAX_READ_REQ_SIZE - 1);
2314 tx_desc->buffer_addr = cpu_to_le64(dma);
2315
2316 while (unlikely(size > IAVF_MAX_DATA_PER_TXD)) {
2317 tx_desc->cmd_type_offset_bsz =
2318 build_ctob(td_cmd, td_offset,
2319 max_data, td_tag);
2320
2321 tx_desc++;
2322 i++;
2323
2324 if (i == tx_ring->count) {
2325 tx_desc = IAVF_TX_DESC(tx_ring, 0);
2326 i = 0;
2327 }
2328
2329 dma += max_data;
2330 size -= max_data;
2331
2332 max_data = IAVF_MAX_DATA_PER_TXD_ALIGNED;
2333 tx_desc->buffer_addr = cpu_to_le64(dma);
2334 }
2335
2336 if (likely(!data_len))
2337 break;
2338
2339 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
2340 size, td_tag);
2341
2342 tx_desc++;
2343 i++;
2344
2345 if (i == tx_ring->count) {
2346 tx_desc = IAVF_TX_DESC(tx_ring, 0);
2347 i = 0;
2348 }
2349
2350 size = skb_frag_size(frag);
2351 data_len -= size;
2352
2353 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
2354 DMA_TO_DEVICE);
2355
2356 tx_bi = &tx_ring->tx_bi[i];
2357 }
2358
2359 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
2360
2361 i++;
2362 if (i == tx_ring->count)
2363 i = 0;
2364
2365 tx_ring->next_to_use = i;
2366
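/* stop the queue now if a further worst-case packet would not fit */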
2367 iavf_maybe_stop_tx(tx_ring, DESC_NEEDED);
2368
2369 /* write last descriptor with RS and EOP bits */
2370 td_cmd |= IAVF_TXD_CMD;
2371 tx_desc->cmd_type_offset_bsz =
2372 build_ctob(td_cmd, td_offset, size, td_tag);
2373
2374 skb_tx_timestamp(skb);
2375
2376 /* Force memory writes to complete before letting h/w know there
2377 * are new descriptors to fetch.
2378 *
2379 * We also use this memory barrier to make certain all of the
2380 * status bits have been updated before next_to_watch is written.
2381 */
2382 wmb();
2383
2384 /* set next_to_watch value indicating a packet is present */
2385 first->next_to_watch = tx_desc;
2386
2387 /* notify HW of packet */
2388 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
2389 writel(i, tx_ring->tail);
2391
2392 return;
2393
2394 dma_error:
2395 dev_info(tx_ring->dev, "TX DMA map failed\n");
2396
2397 /* clear dma mappings for failed tx_bi map */
2398 for (;;) {
2399 tx_bi = &tx_ring->tx_bi[i];
2400 iavf_unmap_and_free_tx_resource(tx_ring, tx_bi);
2401 if (tx_bi == first)
2402 break;
2403 if (i == 0)
2404 i = tx_ring->count;
2405 i--;
2406 }
2407
2408 tx_ring->next_to_use = i;
2409 }
2410
2411 /**
2412 * iavf_xmit_frame_ring - Sends buffer on Tx ring
2413 * @skb: send buffer
2414 * @tx_ring: ring to send buffer on
2415 *
2416 * Returns NETDEV_TX_OK if sent, else an error code
2417 **/
2418 static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb,
2419 struct iavf_ring *tx_ring)
2420 {
2421 u64 cd_type_cmd_tso_mss = IAVF_TX_DESC_DTYPE_CONTEXT;
2422 u32 cd_tunneling = 0, cd_l2tag2 = 0;
2423 struct iavf_tx_buffer *first;
2424 u32 td_offset = 0;
2425 u32 tx_flags = 0;
2426 __be16 protocol;
2427 u32 td_cmd = 0;
2428 u8 hdr_len = 0;
2429 int tso, count;
2430
2431 /* prefetch the data, we'll need it later */
2432 prefetch(skb->data);
2433
2434 iavf_trace(xmit_frame_ring, skb, tx_ring);
2435
2436 count = iavf_xmit_descriptor_count(skb);
2437 if (iavf_chk_linearize(skb, count)) {
2438 if (__skb_linearize(skb)) {
2439 dev_kfree_skb_any(skb);
2440 return NETDEV_TX_OK;
2441 }
2442 count = iavf_txd_use_count(skb->len);
2443 tx_ring->tx_stats.tx_linearize++;
2444 }
2445
2446 /* need: 1 descriptor per page * PAGE_SIZE/IAVF_MAX_DATA_PER_TXD,
2447 * + 1 desc for skb_head_len/IAVF_MAX_DATA_PER_TXD,
2448 * + 4 desc gap to avoid the cache line where head is,
2449 * + 1 desc for context descriptor,
2450 * otherwise try next time
2451 */
2452 if (iavf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
2453 tx_ring->tx_stats.tx_busy++;
2454 return NETDEV_TX_BUSY;
2455 }
2456
2457 /* record the location of the first descriptor for this packet */
2458 first = &tx_ring->tx_bi[tx_ring->next_to_use];
2459 first->skb = skb;
2460 first->bytecount = skb->len;
2461 first->gso_segs = 1;
2462
2463 /* prepare the xmit flags */
2464 iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags);
2465 if (tx_flags & IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN) {
2466 cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2 <<
2467 IAVF_TXD_CTX_QW1_CMD_SHIFT;
2468 cd_l2tag2 = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >>
2469 IAVF_TX_FLAGS_VLAN_SHIFT;
2470 }
2471
2472 /* obtain protocol of skb */
2473 protocol = vlan_get_protocol(skb);
2474
2475 /* setup IPv4/IPv6 offloads */
2476 if (protocol == htons(ETH_P_IP))
2477 tx_flags |= IAVF_TX_FLAGS_IPV4;
2478 else if (protocol == htons(ETH_P_IPV6))
2479 tx_flags |= IAVF_TX_FLAGS_IPV6;
2480
2481 tso = iavf_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
2482
2483 if (tso < 0)
2484 goto out_drop;
2485 else if (tso)
2486 tx_flags |= IAVF_TX_FLAGS_TSO;
2487
2488 /* Always offload the checksum, since it's in the data descriptor */
2489 tso = iavf_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
2490 tx_ring, &cd_tunneling);
2491 if (tso < 0)
2492 goto out_drop;
2493
2494 /* always enable CRC insertion offload */
2495 td_cmd |= IAVF_TX_DESC_CMD_ICRC;
2496
2497 iavf_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
2498 cd_tunneling, cd_l2tag2);
2499
2500 iavf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
2501 td_cmd, td_offset);
2502
2503 return NETDEV_TX_OK;
2504
2505 out_drop:
2506 iavf_trace(xmit_frame_ring_drop, first->skb, tx_ring);
2507 dev_kfree_skb_any(first->skb);
2508 first->skb = NULL;
2509 return NETDEV_TX_OK;
2510 }
2511
2512 /**
2513 * iavf_xmit_frame - Selects the correct VSI and Tx queue to send buffer
2514 * @skb: send buffer
2515 * @netdev: network interface device structure
2516 *
2517 * Returns NETDEV_TX_OK if sent, else an error code
2518 **/
2519 netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2520 {
2521 struct iavf_adapter *adapter = netdev_priv(netdev);
2522 struct iavf_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping];
2523
2524 /* hardware can't handle really short frames, hardware padding works
2525 * beyond this point
2526 */
2527 if (unlikely(skb->len < IAVF_MIN_TX_LEN)) {
2528 if (skb_pad(skb, IAVF_MIN_TX_LEN - skb->len))
2529 return NETDEV_TX_OK;
2530 skb->len = IAVF_MIN_TX_LEN;
2531 skb_set_tail_pointer(skb, IAVF_MIN_TX_LEN);
2532 }
2533
2534 return iavf_xmit_frame_ring(skb, tx_ring);
2535 }
2536