// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include <linux/module.h>
#include <linux/filter.h>
#include "efx_channels.h"
#include "efx.h"
#include "efx_common.h"
#include "tx_common.h"
#include "rx_common.h"
#include "nic.h"
#include "sriov.h"
#include "workarounds.h"

/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
unsigned int efx_siena_interrupt_mode = EFX_INT_MODE_MSIX;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each core.
 */
unsigned int efx_siena_rss_cpus;

static unsigned int irq_adapt_low_thresh = 8000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
		 "Threshold score for reducing IRQ moderation");

static unsigned int irq_adapt_high_thresh = 16000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
		 "Threshold score for increasing IRQ moderation");

static const struct efx_channel_type efx_default_channel_type;

/*************
 * INTERRUPTS
 *************/

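/* Count the CPUs available for RSS, counting each hyperthread sibling
 * group only once. If local_node is set, only CPUs attached to the same
 * node as the PCI device are considered. Falls back to a single channel
 * (returning 1) if the temporary cpumask cannot be allocated.
 */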
static unsigned int count_online_cores(struct efx_nic *efx, bool local_node)
{
	cpumask_var_t filter_mask;
	unsigned int count;
	int cpu;

	if (unlikely(!zalloc_cpumask_var(&filter_mask, GFP_KERNEL))) {
		netif_warn(efx, probe, efx->net_dev,
			   "RSS disabled due to allocation failure\n");
		return 1;
	}

	cpumask_copy(filter_mask, cpu_online_mask);
	if (local_node)
		cpumask_and(filter_mask, filter_mask,
			    cpumask_of_pcibus(efx->pci_dev->bus));

	count = 0;
	for_each_cpu(cpu, filter_mask) {
		++count;
		cpumask_andnot(filter_mask, filter_mask, topology_sibling_cpumask(cpu));
	}

	free_cpumask_var(filter_mask);

	return count;
}

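/* Work out how many RSS channels to request: the rss_cpus module parameter
 * if set, otherwise one per online core (preferring the local NUMA node),
 * capped at EFX_MAX_RX_QUEUES and, when SR-IOV VFs want RSS, at
 * efx_vf_size().
 */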
static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
{
	unsigned int count;

	if (efx_siena_rss_cpus) {
		count = efx_siena_rss_cpus;
	} else {
		count = count_online_cores(efx, true);

		/* If no online CPUs in local node, fall back to any online CPUs */
		if (count == 0)
			count = count_online_cores(efx, false);
	}

	if (count > EFX_MAX_RX_QUEUES) {
		netif_cond_dbg(efx, probe, efx->net_dev, !efx_siena_rss_cpus,
			       warn,
			       "Reducing number of rx queues from %u to %u.\n",
			       count, EFX_MAX_RX_QUEUES);
		count = EFX_MAX_RX_QUEUES;
	}

	/* If RSS is requested for the PF *and* VFs then we can't write RSS
	 * table entries that are inaccessible to VFs
	 */
#ifdef CONFIG_SFC_SIENA_SRIOV
	if (efx->type->sriov_wanted) {
		if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
		    count > efx_vf_size(efx)) {
			netif_warn(efx, probe, efx->net_dev,
				   "Reducing number of RSS channels from %u to %u for "
				   "VF support. Increase vf-msix-limit to use more "
				   "channels on the PF.\n",
				   count, efx_vf_size(efx));
			count = efx_vf_size(efx);
		}
	}
#endif

	return count;
}

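/* Work out the channel layout for MSI-X: how many event queues to ask for,
 * how the RX, TX and XDP TX channels are split between them, and which
 * xdp_txq_queues_mode to use when there are not enough vectors or VIs for
 * dedicated XDP queues. Returns the total channel count, or a negative
 * error code if the MSI-X vector count cannot be read.
 */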
static int efx_allocate_msix_channels(struct efx_nic *efx,
				      unsigned int max_channels,
				      unsigned int extra_channels,
				      unsigned int parallelism)
{
	unsigned int n_channels = parallelism;
	int vec_count;
	int tx_per_ev;
	int n_xdp_tx;
	int n_xdp_ev;

	if (efx_siena_separate_tx_channels)
		n_channels *= 2;
	n_channels += extra_channels;

	/* To allow XDP transmit to happen from arbitrary NAPI contexts
	 * we allocate a TX queue per CPU. We share event queues across
	 * multiple tx queues, assuming tx and ev queues are both
	 * maximum size.
	 */
	tx_per_ev = EFX_MAX_EVQ_SIZE / EFX_TXQ_MAX_ENT(efx);
	tx_per_ev = min(tx_per_ev, EFX_MAX_TXQ_PER_CHANNEL);
	n_xdp_tx = num_possible_cpus();
	n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, tx_per_ev);

	vec_count = pci_msix_vec_count(efx->pci_dev);
	if (vec_count < 0)
		return vec_count;

	max_channels = min_t(unsigned int, vec_count, max_channels);

	/* Check resources.
	 * We need a channel per event queue, plus a VI per tx queue.
	 * This may be more pessimistic than it needs to be.
	 */
	if (n_channels >= max_channels) {
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
		netif_warn(efx, drv, efx->net_dev,
			   "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
			   n_xdp_ev, n_channels, max_channels);
		netif_warn(efx, drv, efx->net_dev,
			   "XDP_TX and XDP_REDIRECT might decrease device's performance\n");
	} else if (n_channels + n_xdp_tx > efx->max_vis) {
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
		netif_warn(efx, drv, efx->net_dev,
			   "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n",
			   n_xdp_tx, n_channels, efx->max_vis);
		netif_warn(efx, drv, efx->net_dev,
			   "XDP_TX and XDP_REDIRECT might decrease device's performance\n");
	} else if (n_channels + n_xdp_ev > max_channels) {
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_SHARED;
		netif_warn(efx, drv, efx->net_dev,
			   "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
			   n_xdp_ev, n_channels, max_channels);

		n_xdp_ev = max_channels - n_channels;
		netif_warn(efx, drv, efx->net_dev,
			   "XDP_TX and XDP_REDIRECT will work with reduced performance (%d cpus/tx_queue)\n",
			   DIV_ROUND_UP(n_xdp_tx, tx_per_ev * n_xdp_ev));
	} else {
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_DEDICATED;
	}

	if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_BORROWED) {
		efx->n_xdp_channels = n_xdp_ev;
		efx->xdp_tx_per_channel = tx_per_ev;
		efx->xdp_tx_queue_count = n_xdp_tx;
		n_channels += n_xdp_ev;
		netif_dbg(efx, drv, efx->net_dev,
			  "Allocating %d TX and %d event queues for XDP\n",
			  n_xdp_ev * tx_per_ev, n_xdp_ev);
	} else {
		efx->n_xdp_channels = 0;
		efx->xdp_tx_per_channel = 0;
		efx->xdp_tx_queue_count = n_xdp_tx;
	}

	if (vec_count < n_channels) {
		netif_err(efx, drv, efx->net_dev,
			  "WARNING: Insufficient MSI-X vectors available (%d < %u).\n",
			  vec_count, n_channels);
		netif_err(efx, drv, efx->net_dev,
			  "WARNING: Performance may be reduced.\n");
		n_channels = vec_count;
	}

	n_channels = min(n_channels, max_channels);

	efx->n_channels = n_channels;

	/* Ignore XDP tx channels when creating rx channels. */
	n_channels -= efx->n_xdp_channels;

	if (efx_siena_separate_tx_channels) {
		efx->n_tx_channels =
			min(max(n_channels / 2, 1U),
			    efx->max_tx_channels);
		efx->tx_channel_offset =
			n_channels - efx->n_tx_channels;
		efx->n_rx_channels =
			max(n_channels -
			    efx->n_tx_channels, 1U);
	} else {
		efx->n_tx_channels = min(n_channels, efx->max_tx_channels);
		efx->tx_channel_offset = 0;
		efx->n_rx_channels = n_channels;
	}

	efx->n_rx_channels = min(efx->n_rx_channels, parallelism);
	efx->n_tx_channels = min(efx->n_tx_channels, parallelism);

	efx->xdp_channel_offset = n_channels;

	netif_dbg(efx, drv, efx->net_dev,
		  "Allocating %u RX channels\n",
		  efx->n_rx_channels);

	return efx->n_channels;
}

/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
int efx_siena_probe_interrupts(struct efx_nic *efx)
{
	unsigned int extra_channels = 0;
	unsigned int rss_spread;
	unsigned int i, j;
	int rc;

	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
		if (efx->extra_channel_type[i])
			++extra_channels;

	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		unsigned int parallelism = efx_wanted_parallelism(efx);
		struct msix_entry xentries[EFX_MAX_CHANNELS];
		unsigned int n_channels;

		rc = efx_allocate_msix_channels(efx, efx->max_channels,
						extra_channels, parallelism);
		if (rc >= 0) {
			n_channels = rc;
			for (i = 0; i < n_channels; i++)
				xentries[i].entry = i;
			rc = pci_enable_msix_range(efx->pci_dev, xentries, 1,
						   n_channels);
		}
		if (rc < 0) {
			/* Fall back to single channel MSI */
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI-X\n");
			if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI)
				efx->interrupt_mode = EFX_INT_MODE_MSI;
			else
				return rc;
		} else if (rc < n_channels) {
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Insufficient MSI-X vectors"
				  " available (%d < %u).\n", rc, n_channels);
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Performance may be reduced.\n");
			n_channels = rc;
		}

		if (rc > 0) {
			for (i = 0; i < efx->n_channels; i++)
				efx_get_channel(efx, i)->irq =
					xentries[i].vector;
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->n_channels = 1;
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->tx_channel_offset = 0;
		efx->n_xdp_channels = 0;
		efx->xdp_channel_offset = efx->n_channels;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
		} else {
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI\n");
			if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY)
				efx->interrupt_mode = EFX_INT_MODE_LEGACY;
			else
				return rc;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->n_channels = 1 + (efx_siena_separate_tx_channels ? 1 : 0);
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->tx_channel_offset = efx_siena_separate_tx_channels ? 1 : 0;
		efx->n_xdp_channels = 0;
		efx->xdp_channel_offset = efx->n_channels;
		efx->legacy_irq = efx->pci_dev->irq;
	}

	/* Assign extra channels if possible, before XDP channels */
	efx->n_extra_tx_channels = 0;
	j = efx->xdp_channel_offset;
	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
		if (!efx->extra_channel_type[i])
			continue;
		if (j <= efx->tx_channel_offset + efx->n_tx_channels) {
			efx->extra_channel_type[i]->handle_no_channel(efx);
		} else {
			--j;
			efx_get_channel(efx, j)->type =
				efx->extra_channel_type[i];
			if (efx_channel_has_tx_queues(efx_get_channel(efx, j)))
				efx->n_extra_tx_channels++;
		}
	}

	rss_spread = efx->n_rx_channels;
	/* RSS might be usable on VFs even if it is disabled on the PF */
#ifdef CONFIG_SFC_SIENA_SRIOV
	if (efx->type->sriov_wanted) {
		efx->rss_spread = ((rss_spread > 1 ||
				    !efx->type->sriov_wanted(efx)) ?
				   rss_spread : efx_vf_size(efx));
		return 0;
	}
#endif
	efx->rss_spread = rss_spread;

	return 0;
}

#if defined(CONFIG_SMP)
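/* Spread IRQ affinity hints over the online CPUs of the device's local
 * NUMA node (or all online CPUs if the local node has none), one channel
 * per CPU, wrapping around if there are more channels than CPUs.
 */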
void efx_siena_set_interrupt_affinity(struct efx_nic *efx)
{
	const struct cpumask *numa_mask = cpumask_of_pcibus(efx->pci_dev->bus);
	struct efx_channel *channel;
	unsigned int cpu;

	/* If no online CPUs in local node, fall back to any online CPU */
	if (cpumask_first_and(cpu_online_mask, numa_mask) >= nr_cpu_ids)
		numa_mask = cpu_online_mask;

	cpu = -1;
	efx_for_each_channel(channel, efx) {
		cpu = cpumask_next_and(cpu, cpu_online_mask, numa_mask);
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_first_and(cpu_online_mask, numa_mask);
		irq_set_affinity_hint(channel->irq, cpumask_of(cpu));
	}
}

void efx_siena_clear_interrupt_affinity(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		irq_set_affinity_hint(channel->irq, NULL);
}
#else
void
efx_siena_set_interrupt_affinity(struct efx_nic *efx __always_unused)
{
}

void
efx_siena_clear_interrupt_affinity(struct efx_nic *efx __always_unused)
{
}
#endif /* CONFIG_SMP */

void efx_siena_remove_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}

/***************
 * EVENT QUEUES
 ***************/

/* Create event queue
 * Event queue memory allocations are done only once. If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
static int efx_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned long entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "chan %d create event queue\n", channel->channel);

	/* Build an event queue with room for one event per tx and rx buffer,
	 * plus some extra for link state events and MCDI completions.
	 */
	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
	channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;

	return efx_nic_probe_eventq(channel);
}

/* Prepare channel's event queue */
static int efx_init_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	int rc;

	EFX_WARN_ON_PARANOID(channel->eventq_init);

	netif_dbg(efx, drv, efx->net_dev,
		  "chan %d init event queue\n", channel->channel);

	rc = efx_nic_init_eventq(channel);
	if (rc == 0) {
		efx->type->push_irq_moderation(channel);
		channel->eventq_read_ptr = 0;
		channel->eventq_init = true;
	}
	return rc;
}

/* Enable event queue processing and NAPI */
void efx_siena_start_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
		  "chan %d start event queue\n", channel->channel);

	/* Make sure the NAPI handler sees the enabled flag set */
	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);
	efx_nic_eventq_read_ack(channel);
}

/* Disable event queue processing and NAPI */
void efx_siena_stop_eventq(struct efx_channel *channel)
{
	if (!channel->enabled)
		return;

	napi_disable(&channel->napi_str);
	channel->enabled = false;
}

static void efx_fini_eventq(struct efx_channel *channel)
{
	if (!channel->eventq_init)
		return;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d fini event queue\n", channel->channel);

	efx_nic_fini_eventq(channel);
	channel->eventq_init = false;
}

static void efx_remove_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d remove event queue\n", channel->channel);

	efx_nic_remove_eventq(channel);
}

/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/

#ifdef CONFIG_RFS_ACCEL
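/* Periodic work item that expires old accelerated RFS filters. It runs at
 * most every 30 seconds and only does work once the accumulated quota
 * (proportional to the filter count and the time elapsed) reaches 20;
 * the NAPI poll handler can also kick it early via mod_delayed_work().
 */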
static void efx_filter_rfs_expire(struct work_struct *data)
{
	struct delayed_work *dwork = to_delayed_work(data);
	struct efx_channel *channel;
	unsigned int time, quota;

	channel = container_of(dwork, struct efx_channel, filter_work);
	time = jiffies - channel->rfs_last_expiry;
	quota = channel->rfs_filter_count * time / (30 * HZ);
	if (quota >= 20 && __efx_siena_filter_rfs_expire(channel,
					min(channel->rfs_filter_count, quota)))
		channel->rfs_last_expiry += time;
	/* Ensure we do more work eventually even if NAPI poll is not happening */
	schedule_delayed_work(dwork, 30 * HZ);
}
#endif

/* Allocate and initialise a channel structure. */
static struct efx_channel *efx_alloc_channel(struct efx_nic *efx, int i)
{
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	int j;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	channel->efx = efx;
	channel->channel = i;
	channel->type = &efx_default_channel_type;

	for (j = 0; j < EFX_MAX_TXQ_PER_CHANNEL; j++) {
		tx_queue = &channel->tx_queue[j];
		tx_queue->efx = efx;
		tx_queue->queue = -1;
		tx_queue->label = j;
		tx_queue->channel = channel;
	}

#ifdef CONFIG_RFS_ACCEL
	INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif

	rx_queue = &channel->rx_queue;
	rx_queue->efx = efx;
	timer_setup(&rx_queue->slow_fill, efx_siena_rx_slow_fill, 0);

	return channel;
}

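/* Allocate the full set of channel structures and MSI contexts at probe
 * time, and pick the initial interrupt mode; the number of channels
 * actually used is decided later by efx_siena_probe_interrupts().
 */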
int efx_siena_init_channels(struct efx_nic *efx)
{
	unsigned int i;

	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
		efx->channel[i] = efx_alloc_channel(efx, i);
		if (!efx->channel[i])
			return -ENOMEM;
		efx->msi_context[i].efx = efx;
		efx->msi_context[i].index = i;
	}

	/* Higher numbered interrupt modes are less capable! */
	efx->interrupt_mode = min(efx->type->min_interrupt_mode,
				  efx_siena_interrupt_mode);

	efx->max_channels = EFX_MAX_CHANNELS;
	efx->max_tx_channels = EFX_MAX_CHANNELS;

	return 0;
}

void efx_siena_fini_channels(struct efx_nic *efx)
{
	unsigned int i;

	for (i = 0; i < EFX_MAX_CHANNELS; i++)
		if (efx->channel[i]) {
			kfree(efx->channel[i]);
			efx->channel[i] = NULL;
		}
}

/* Allocate and initialise a channel structure, copying parameters
 * (but not resources) from an old channel structure.
 */
static
struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel)
{
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	int j;

	channel = kmalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	*channel = *old_channel;

	channel->napi_dev = NULL;
	INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
	channel->napi_str.napi_id = 0;
	channel->napi_str.state = 0;
	memset(&channel->eventq, 0, sizeof(channel->eventq));

	for (j = 0; j < EFX_MAX_TXQ_PER_CHANNEL; j++) {
		tx_queue = &channel->tx_queue[j];
		if (tx_queue->channel)
			tx_queue->channel = channel;
		tx_queue->buffer = NULL;
		tx_queue->cb_page = NULL;
		memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
	}

	rx_queue = &channel->rx_queue;
	rx_queue->buffer = NULL;
	memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
	timer_setup(&rx_queue->slow_fill, efx_siena_rx_slow_fill, 0);
#ifdef CONFIG_RFS_ACCEL
	INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif

	return channel;
}

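/* Allocate the event queue and the RX/TX queues belonging to one channel.
 * On failure, everything probed for this channel is torn down again via
 * efx_siena_remove_channel().
 */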
static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	netif_dbg(channel->efx, probe, channel->efx->net_dev,
		  "creating channel %d\n", channel->channel);

	rc = channel->type->pre_probe(channel);
	if (rc)
		goto fail;

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_siena_probe_tx_queue(tx_queue);
		if (rc)
			goto fail;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_siena_probe_rx_queue(rx_queue);
		if (rc)
			goto fail;
	}

	channel->rx_list = NULL;

	return 0;

fail:
	efx_siena_remove_channel(channel);
	return rc;
}

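/* Build the interrupt name for a channel: "<devname>-rx", "-tx" or "-xdp"
 * plus the index within that group, or just "<devname>-<n>" when RX and
 * TX share channels.
 */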
static void efx_get_channel_name(struct efx_channel *channel, char *buf,
				 size_t len)
{
	struct efx_nic *efx = channel->efx;
	const char *type;
	int number;

	number = channel->channel;

	if (number >= efx->xdp_channel_offset &&
	    !WARN_ON_ONCE(!efx->n_xdp_channels)) {
		type = "-xdp";
		number -= efx->xdp_channel_offset;
	} else if (efx->tx_channel_offset == 0) {
		type = "";
	} else if (number < efx->tx_channel_offset) {
		type = "-rx";
	} else {
		type = "-tx";
		number -= efx->tx_channel_offset;
	}
	snprintf(buf, len, "%s%s-%d", efx->name, type, number);
}

void efx_siena_set_channel_names(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		channel->type->get_name(channel,
					efx->msi_context[channel->channel].name,
					sizeof(efx->msi_context[0].name));
}

int efx_siena_probe_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Restart special buffer allocation */
	efx->next_buffer_table = 0;

	/* Probe channels in reverse, so that any 'extra' channels
	 * use the start of the buffer table. This allows the traffic
	 * channels to be resized without moving them or wasting the
	 * entries before them.
	 */
	efx_for_each_channel_rev(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to create channel %d\n",
				  channel->channel);
			goto fail;
		}
	}
	efx_siena_set_channel_names(efx);

	return 0;

fail:
	efx_siena_remove_channels(efx);
	return rc;
}

void efx_siena_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_siena_remove_rx_queue(rx_queue);
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_siena_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);
	channel->type->post_remove(channel);
}

void efx_siena_remove_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_siena_remove_channel(channel);

	kfree(efx->xdp_tx_queues);
}

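/* Record a TX queue in the XDP TX queue lookup table, rejecting indices
 * beyond the number of XDP TX queues we allocated for.
 */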
static int efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
				struct efx_tx_queue *tx_queue)
{
	if (xdp_queue_number >= efx->xdp_tx_queue_count)
		return -EINVAL;

	netif_dbg(efx, drv, efx->net_dev,
		  "Channel %u TXQ %u is XDP %u, HW %u\n",
		  tx_queue->channel->channel, tx_queue->label,
		  xdp_queue_number, tx_queue->queue);
	efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
	return 0;
}

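/* Assign hardware TX queue numbers to every TX channel and populate the
 * per-CPU XDP TX queue lookup table, either from dedicated XDP channels
 * or by sharing/borrowing the regular TX queues, depending on
 * xdp_txq_queues_mode.
 */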
static void efx_set_xdp_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	unsigned int next_queue = 0;
	int xdp_queue_number = 0;
	int rc;

	/* We need to mark which channels really have RX and TX
	 * queues, and adjust the TX queue numbers if we have separate
	 * RX-only and TX-only channels.
	 */
	efx_for_each_channel(channel, efx) {
		if (channel->channel < efx->tx_channel_offset)
			continue;

		if (efx_channel_is_xdp_tx(channel)) {
			efx_for_each_channel_tx_queue(tx_queue, channel) {
				tx_queue->queue = next_queue++;
				rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
							  tx_queue);
				if (rc == 0)
					xdp_queue_number++;
			}
		} else {
			efx_for_each_channel_tx_queue(tx_queue, channel) {
				tx_queue->queue = next_queue++;
				netif_dbg(efx, drv, efx->net_dev,
					  "Channel %u TXQ %u is HW %u\n",
					  channel->channel, tx_queue->label,
					  tx_queue->queue);
			}

			/* If XDP is borrowing queues from net stack, it must
			 * use the queue with no csum offload, which is the
			 * first one of the channel
			 * (note: tx_queue_by_type is not initialized yet)
			 */
			if (efx->xdp_txq_queues_mode ==
			    EFX_XDP_TX_QUEUES_BORROWED) {
				tx_queue = &channel->tx_queue[0];
				rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
							  tx_queue);
				if (rc == 0)
					xdp_queue_number++;
			}
		}
	}
	WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
		xdp_queue_number != efx->xdp_tx_queue_count);
	WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
		xdp_queue_number > efx->xdp_tx_queue_count);

	/* If we have more CPUs than assigned XDP TX queues, assign the
	 * existing queues to the remaining CPUs as well.
	 */
	next_queue = 0;
	while (xdp_queue_number < efx->xdp_tx_queue_count) {
		tx_queue = efx->xdp_tx_queues[next_queue++];
		rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
		if (rc == 0)
			xdp_queue_number++;
	}
}

static int efx_soft_enable_interrupts(struct efx_nic *efx);
static void efx_soft_disable_interrupts(struct efx_nic *efx);
static void efx_init_napi_channel(struct efx_channel *channel);
static void efx_fini_napi_channel(struct efx_channel *channel);

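/* Resize the RX and TX rings. Channels that provide a copy() method are
 * cloned with the new ring sizes and re-probed; the old structures are
 * freed afterwards, and on failure the original channels and ring sizes
 * are restored.
 */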
int efx_siena_realloc_channels(struct efx_nic *efx, u32 rxq_entries,
			       u32 txq_entries)
{
	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
	unsigned int i, next_buffer_table = 0;
	u32 old_rxq_entries, old_txq_entries;
	int rc, rc2;

	rc = efx_check_disabled(efx);
	if (rc)
		return rc;

	/* Not all channels should be reallocated. We must avoid
	 * reallocating their buffer table entries.
	 */
	efx_for_each_channel(channel, efx) {
		struct efx_rx_queue *rx_queue;
		struct efx_tx_queue *tx_queue;

		if (channel->type->copy)
			continue;
		next_buffer_table = max(next_buffer_table,
					channel->eventq.index +
					channel->eventq.entries);
		efx_for_each_channel_rx_queue(rx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						rx_queue->rxd.index +
						rx_queue->rxd.entries);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						tx_queue->txd.index +
						tx_queue->txd.entries);
	}

	efx_device_detach_sync(efx);
	efx_siena_stop_all(efx);
	efx_soft_disable_interrupts(efx);

	/* Clone channels (where possible) */
	memset(other_channel, 0, sizeof(other_channel));
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (channel->type->copy)
			channel = channel->type->copy(channel);
		if (!channel) {
			rc = -ENOMEM;
			goto out;
		}
		other_channel[i] = channel;
	}

	/* Swap entry counts and channel pointers */
	old_rxq_entries = efx->rxq_entries;
	old_txq_entries = efx->txq_entries;
	efx->rxq_entries = rxq_entries;
	efx->txq_entries = txq_entries;
	for (i = 0; i < efx->n_channels; i++)
		swap(efx->channel[i], other_channel[i]);

	/* Restart buffer table allocation */
	efx->next_buffer_table = next_buffer_table;

	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (!channel->type->copy)
			continue;
		rc = efx_probe_channel(channel);
		if (rc)
			goto rollback;
		efx_init_napi_channel(efx->channel[i]);
	}

	efx_set_xdp_channels(efx);
out:
	/* Destroy unused channel structures */
	for (i = 0; i < efx->n_channels; i++) {
		channel = other_channel[i];
		if (channel && channel->type->copy) {
			efx_fini_napi_channel(channel);
			efx_siena_remove_channel(channel);
			kfree(channel);
		}
	}

	rc2 = efx_soft_enable_interrupts(efx);
	if (rc2) {
		rc = rc ? rc : rc2;
		netif_err(efx, drv, efx->net_dev,
			  "unable to restart interrupts on channel reallocation\n");
		efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE);
	} else {
		efx_siena_start_all(efx);
		efx_device_attach_if_not_resetting(efx);
	}
	return rc;

rollback:
	/* Swap back */
	efx->rxq_entries = old_rxq_entries;
	efx->txq_entries = old_txq_entries;
	for (i = 0; i < efx->n_channels; i++)
		swap(efx->channel[i], other_channel[i]);
	goto out;
}

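/* Finalise the channel/queue mapping once interrupt probing is done:
 * allocate the XDP TX queue lookup table, record the RX core index for
 * each channel and tell the net core how many real RX/TX queues we have.
 */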
int efx_siena_set_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	if (efx->xdp_tx_queue_count) {
		EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);

		/* Allocate array for XDP TX queue lookup. */
		efx->xdp_tx_queues = kcalloc(efx->xdp_tx_queue_count,
					     sizeof(*efx->xdp_tx_queues),
					     GFP_KERNEL);
		if (!efx->xdp_tx_queues)
			return -ENOMEM;
	}

	efx_for_each_channel(channel, efx) {
		if (channel->channel < efx->n_rx_channels)
			channel->rx_queue.core_index = channel->channel;
		else
			channel->rx_queue.core_index = -1;
	}

	efx_set_xdp_channels(efx);

	rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
	if (rc)
		return rc;
	return netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
}

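/* Default want_txqs callback: a channel carries TX queues if it lies in
 * the [tx_channel_offset, tx_channel_offset + n_tx_channels) range.
 */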
static bool efx_default_channel_want_txqs(struct efx_channel *channel)
{
	return channel->channel - channel->efx->tx_channel_offset <
		channel->efx->n_tx_channels;
}

/*************
 * START/STOP
 *************/

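/* Mark IRQs as soft-enabled, (re)initialise the non-persistent event
 * queues and start NAPI on every channel, then switch MCDI back to event
 * completion mode. On failure, the event queues started so far are
 * stopped again.
 */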
static int efx_soft_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel, *end_channel;
	int rc;

	BUG_ON(efx->state == STATE_DISABLED);

	efx->irq_soft_enabled = true;
	smp_wmb();

	efx_for_each_channel(channel, efx) {
		if (!channel->type->keep_eventq) {
			rc = efx_init_eventq(channel);
			if (rc)
				goto fail;
		}
		efx_siena_start_eventq(channel);
	}

	efx_siena_mcdi_mode_event(efx);

	return 0;
fail:
	end_channel = channel;
	efx_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		efx_siena_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	return rc;
}

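/* Reverse of efx_soft_enable_interrupts(): put MCDI into polled mode,
 * clear the soft-enabled flag, wait for any running IRQ handlers, stop
 * NAPI, tear down the non-persistent event queues and finally flush the
 * asynchronous MCDI request queue.
 */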
static void efx_soft_disable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	if (efx->state == STATE_DISABLED)
		return;

	efx_siena_mcdi_mode_poll(efx);

	efx->irq_soft_enabled = false;
	smp_wmb();

	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);

	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);

		efx_siena_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	/* Flush the asynchronous MCDI request queue */
	efx_siena_mcdi_flush_async(efx);
}

int efx_siena_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel, *end_channel;
	int rc;

	/* TODO: Is this really a bug? */
	BUG_ON(efx->state == STATE_DISABLED);

	if (efx->eeh_disabled_legacy_irq) {
		enable_irq(efx->legacy_irq);
		efx->eeh_disabled_legacy_irq = false;
	}

	efx->type->irq_enable_master(efx);

	efx_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq) {
			rc = efx_init_eventq(channel);
			if (rc)
				goto fail;
		}
	}

	rc = efx_soft_enable_interrupts(efx);
	if (rc)
		goto fail;

	return 0;

fail:
	end_channel = channel;
	efx_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		if (channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);

	return rc;
}

void efx_siena_disable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_soft_disable_interrupts(efx);

	efx_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);
}

void efx_siena_start_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;

	efx_for_each_channel_rev(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_siena_init_tx_queue(tx_queue);
			atomic_inc(&efx->active_queues);
		}

		efx_for_each_channel_rx_queue(rx_queue, channel) {
			efx_siena_init_rx_queue(rx_queue);
			atomic_inc(&efx->active_queues);
			efx_siena_stop_eventq(channel);
			efx_siena_fast_push_rx_descriptors(rx_queue, false);
			efx_siena_start_eventq(channel);
		}

		WARN_ON(channel->rx_pkt_n_frags);
	}
}

void efx_siena_stop_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;
	int rc = 0;

	/* Stop RX refill */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel)
			rx_queue->refill_enabled = false;
	}

	efx_for_each_channel(channel, efx) {
		/* RX packet processing is pipelined, so wait for the
		 * NAPI handler to complete. At least event queue 0
		 * might be kept active by non-data events, so don't
		 * use napi_synchronize() but actually disable NAPI
		 * temporarily.
		 */
		if (efx_channel_has_rx_queue(channel)) {
			efx_siena_stop_eventq(channel);
			efx_siena_start_eventq(channel);
		}
	}

	if (efx->type->fini_dmaq)
		rc = efx->type->fini_dmaq(efx);

	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
	} else {
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully flushed all queues\n");
	}

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_siena_fini_rx_queue(rx_queue);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_siena_fini_tx_queue(tx_queue);
	}
}

/**************************************************************************
 *
 * NAPI interface
 *
 *************************************************************************/

/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel. The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int budget)
{
	struct efx_tx_queue *tx_queue;
	struct list_head rx_list;
	int spent;

	if (unlikely(!channel->enabled))
		return 0;

	/* Prepare the batch receive list */
	EFX_WARN_ON_PARANOID(channel->rx_list != NULL);
	INIT_LIST_HEAD(&rx_list);
	channel->rx_list = &rx_list;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		tx_queue->pkts_compl = 0;
		tx_queue->bytes_compl = 0;
	}

	spent = efx_nic_process_eventq(channel, budget);
	if (spent && efx_channel_has_rx_queue(channel)) {
		struct efx_rx_queue *rx_queue =
			efx_channel_get_rx_queue(channel);

		efx_rx_flush_packet(channel);
		efx_siena_fast_push_rx_descriptors(rx_queue, true);
	}

	/* Update BQL */
	efx_for_each_channel_tx_queue(tx_queue, channel) {
		if (tx_queue->bytes_compl) {
			netdev_tx_completed_queue(tx_queue->core_txq,
						  tx_queue->pkts_compl,
						  tx_queue->bytes_compl);
		}
	}

	/* Receive any packets we queued up */
	netif_receive_skb_list(channel->rx_list);
	channel->rx_list = NULL;

	return spent;
}

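/* Adaptive IRQ moderation: called from the NAPI poll handler every 1000
 * interrupts. Steps the channel's moderation period down or up depending
 * on whether the accumulated score is below irq_adapt_low_thresh or above
 * irq_adapt_high_thresh, bounded by the configured RX moderation, then
 * resets the counters.
 */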
static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel)
{
	int step = efx->irq_mod_step_us;

	if (channel->irq_mod_score < irq_adapt_low_thresh) {
		if (channel->irq_moderation_us > step) {
			channel->irq_moderation_us -= step;
			efx->type->push_irq_moderation(channel);
		}
	} else if (channel->irq_mod_score > irq_adapt_high_thresh) {
		if (channel->irq_moderation_us <
		    efx->irq_rx_moderation_us) {
			channel->irq_moderation_us += step;
			efx->type->push_irq_moderation(channel);
		}
	}

	channel->irq_count = 0;
	channel->irq_mod_score = 0;
}

/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	struct efx_nic *efx = channel->efx;
#ifdef CONFIG_RFS_ACCEL
	unsigned int time;
#endif
	int spent;

	netif_vdbg(efx, intr, efx->net_dev,
		   "channel %d NAPI poll executing on CPU %d\n",
		   channel->channel, raw_smp_processor_id());

	spent = efx_process_channel(channel, budget);

	xdp_do_flush_map();

	if (spent < budget) {
		if (efx_channel_has_rx_queue(channel) &&
		    efx->irq_rx_adaptive &&
		    unlikely(++channel->irq_count == 1000)) {
			efx_update_irq_mod(efx, channel);
		}

#ifdef CONFIG_RFS_ACCEL
		/* Perhaps expire some ARFS filters */
		time = jiffies - channel->rfs_last_expiry;
		/* Would our quota be >= 20? */
		if (channel->rfs_filter_count * time >= 600 * HZ)
			mod_delayed_work(system_wq, &channel->filter_work, 0);
#endif

		/* There is no race here; although napi_disable() will
		 * only wait for napi_complete(), this isn't a problem
		 * since efx_nic_eventq_read_ack() will have no effect if
		 * interrupts have already been disabled.
		 */
		if (napi_complete_done(napi, spent))
			efx_nic_eventq_read_ack(channel);
	}

	return spent;
}

static void efx_init_napi_channel(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	channel->napi_dev = efx->net_dev;
	netif_napi_add(channel->napi_dev, &channel->napi_str, efx_poll, 64);
}

void efx_siena_init_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_init_napi_channel(channel);
}

static void efx_fini_napi_channel(struct efx_channel *channel)
{
	if (channel->napi_dev)
		netif_napi_del(&channel->napi_str);

	channel->napi_dev = NULL;
}

void efx_siena_fini_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_fini_napi_channel(channel);
}

/***************
 * Housekeeping
 ***************/

static int efx_channel_dummy_op_int(struct efx_channel *channel)
{
	return 0;
}

void efx_siena_channel_dummy_op_void(struct efx_channel *channel)
{
}

static const struct efx_channel_type efx_default_channel_type = {
	.pre_probe		= efx_channel_dummy_op_int,
	.post_remove		= efx_siena_channel_dummy_op_void,
	.get_name		= efx_get_channel_name,
	.copy			= efx_copy_channel,
	.want_txqs		= efx_default_channel_want_txqs,
	.keep_eventq		= false,
	.want_pio		= true,
};