1 /*
2 * Linux driver for VMware's vmxnet3 ethernet NIC.
3 *
4 * Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; version 2 of the License and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more
14 * details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * The full GNU General Public License is included in this distribution in
21 * the file called "COPYING".
22 *
23 * Maintained by: pv-drivers@vmware.com
24 *
25 */
26
27 #include <linux/module.h>
28 #include <net/ip6_checksum.h>
29
30 #include "vmxnet3_int.h"
31
32 char vmxnet3_driver_name[] = "vmxnet3";
33 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
34
35 /*
36 * PCI Device ID Table
37 * Last entry must be all 0s
38 */
39 static const struct pci_device_id vmxnet3_pciid_table[] = {
40 {PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
41 {0}
42 };
43
44 MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
45
46 static int enable_mq = 1;
47
48 static void
49 vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, const u8 *mac);
50
51 /*
52 * Enable/Disable the given intr
53 */
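/* BAR0 exposes one interrupt mask register (IMR) per vector, spaced 8 bytes
 * apart: writing 0 unmasks the vector, writing 1 masks it.
 */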
54 static void
55 vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
56 {
57 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
58 }
59
60
61 static void
62 vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
63 {
64 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
65 }
66
67
68 /*
69 * Enable/Disable all intrs used by the device
70 */
71 static void
72 vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
73 {
74 int i;
75
76 for (i = 0; i < adapter->intr.num_intrs; i++)
77 vmxnet3_enable_intr(adapter, i);
78 if (!VMXNET3_VERSION_GE_6(adapter) ||
79 !adapter->queuesExtEnabled) {
80 adapter->shared->devRead.intrConf.intrCtrl &=
81 cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
82 } else {
83 adapter->shared->devReadExt.intrConfExt.intrCtrl &=
84 cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
85 }
86 }
87
88
89 static void
90 vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
91 {
92 int i;
93
94 if (!VMXNET3_VERSION_GE_6(adapter) ||
95 !adapter->queuesExtEnabled) {
96 adapter->shared->devRead.intrConf.intrCtrl |=
97 cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
98 } else {
99 adapter->shared->devReadExt.intrConfExt.intrCtrl |=
100 cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
101 }
102 for (i = 0; i < adapter->intr.num_intrs; i++)
103 vmxnet3_disable_intr(adapter, i);
104 }
105
106
107 static void
108 vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
109 {
110 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
111 }
112
113
114 static bool
115 vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
116 {
117 return tq->stopped;
118 }
119
120
121 static void
122 vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
123 {
124 tq->stopped = false;
125 netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
126 }
127
128
129 static void
130 vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
131 {
132 tq->stopped = false;
133 netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
134 }
135
136
137 static void
138 vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
139 {
140 tq->stopped = true;
141 tq->num_stop++;
142 netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
143 }
144
145 /* Check whether the given capability is supported by the UPT device, or
146 * whether UPT was even requested
147 */
148 bool
149 vmxnet3_check_ptcapability(u32 cap_supported, u32 cap)
150 {
151 if (cap_supported & (1UL << VMXNET3_DCR_ERROR) ||
152 cap_supported & (1UL << cap)) {
153 return true;
154 }
155
156 return false;
157 }
158
159
160 /*
161 * Check the link state. This may start or stop the tx queue.
162 */
163 static void
164 vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
165 {
166 u32 ret;
167 int i;
168 unsigned long flags;
169
170 spin_lock_irqsave(&adapter->cmd_lock, flags);
171 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
172 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
173 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
174
175 adapter->link_speed = ret >> 16;
176 if (ret & 1) { /* Link is up. */
177 netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
178 adapter->link_speed);
179 netif_carrier_on(adapter->netdev);
180
181 if (affectTxQueue) {
182 for (i = 0; i < adapter->num_tx_queues; i++)
183 vmxnet3_tq_start(&adapter->tx_queue[i],
184 adapter);
185 }
186 } else {
187 netdev_info(adapter->netdev, "NIC Link is Down\n");
188 netif_carrier_off(adapter->netdev);
189
190 if (affectTxQueue) {
191 for (i = 0; i < adapter->num_tx_queues; i++)
192 vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
193 }
194 }
195 }
196
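/* Handle events posted in the shared event-change register (ECR): link state
 * changes update the carrier and the tx queues, and tx/rx queue errors are
 * logged after a GET_QUEUE_STATUS command before the adapter's work item is
 * scheduled.
 */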
197 static void
198 vmxnet3_process_events(struct vmxnet3_adapter *adapter)
199 {
200 int i;
201 unsigned long flags;
202 u32 events = le32_to_cpu(adapter->shared->ecr);
203 if (!events)
204 return;
205
206 vmxnet3_ack_events(adapter, events);
207
208 /* Check if link state has changed */
209 if (events & VMXNET3_ECR_LINK)
210 vmxnet3_check_link(adapter, true);
211
212 /* Check if there is an error on xmit/recv queues */
213 if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
214 spin_lock_irqsave(&adapter->cmd_lock, flags);
215 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
216 VMXNET3_CMD_GET_QUEUE_STATUS);
217 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
218
219 for (i = 0; i < adapter->num_tx_queues; i++)
220 if (adapter->tqd_start[i].status.stopped)
221 dev_err(&adapter->netdev->dev,
222 "%s: tq[%d] error 0x%x\n",
223 adapter->netdev->name, i, le32_to_cpu(
224 adapter->tqd_start[i].status.error));
225 for (i = 0; i < adapter->num_rx_queues; i++)
226 if (adapter->rqd_start[i].status.stopped)
227 dev_err(&adapter->netdev->dev,
228 "%s: rq[%d] error 0x%x\n",
229 adapter->netdev->name, i,
230 adapter->rqd_start[i].status.error);
231
232 schedule_work(&adapter->work);
233 }
234 }
235
236 #ifdef __BIG_ENDIAN_BITFIELD
237 /*
238 * The device expects the bitfields in shared structures to be written in
239 * little-endian format. When the CPU is big endian, the following routines
240 * are used to correctly read from and write to the ABI.
241 * The general technique used here is: double-word bitfields are defined in
242 * the opposite order for big-endian architectures. Before the driver reads
243 * them, the complete double word is translated using le32_to_cpu. Similarly,
244 * after the driver writes into bitfields, cpu_to_le32 is used to translate
245 * the double words into the required format.
246 * To avoid touching bits in the shared structure more than once, temporary
247 * descriptors are used. These are passed as srcDesc to the following functions.
248 */
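/* For example, on a big-endian host VMXNET3_TXD_GET_GEN() below cannot read
 * the gen bit of a shared Tx descriptor directly; get_bitfield32() first
 * converts the containing double word with le32_to_cpu() and then extracts
 * the bit by mask and shift.
 */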
249 static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
250 struct Vmxnet3_RxDesc *dstDesc)
251 {
252 u32 *src = (u32 *)srcDesc + 2;
253 u32 *dst = (u32 *)dstDesc + 2;
254 dstDesc->addr = le64_to_cpu(srcDesc->addr);
255 *dst = le32_to_cpu(*src);
256 dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
257 }
258
259 static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
260 struct Vmxnet3_TxDesc *dstDesc)
261 {
262 int i;
263 u32 *src = (u32 *)(srcDesc + 1);
264 u32 *dst = (u32 *)(dstDesc + 1);
265
266 /* Working backwards so that the gen bit is set at the end. */
267 for (i = 2; i > 0; i--) {
268 src--;
269 dst--;
270 *dst = cpu_to_le32(*src);
271 }
272 }
273
274
275 static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
276 struct Vmxnet3_RxCompDesc *dstDesc)
277 {
278 int i = 0;
279 u32 *src = (u32 *)srcDesc;
280 u32 *dst = (u32 *)dstDesc;
281 for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
282 *dst = le32_to_cpu(*src);
283 src++;
284 dst++;
285 }
286 }
287
288
289 /* Used to read bitfield values from double words. */
290 static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
291 {
292 u32 temp = le32_to_cpu(*bitfield);
293 u32 mask = ((1 << size) - 1) << pos;
294 temp &= mask;
295 temp >>= pos;
296 return temp;
297 }
298
299
300
301 #endif /* __BIG_ENDIAN_BITFIELD */
302
303 #ifdef __BIG_ENDIAN_BITFIELD
304
305 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
306 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
307 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
308 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
309 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
310 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
311 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
312 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
313 VMXNET3_TCD_GEN_SIZE)
314 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
315 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
316 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
317 (dstrcd) = (tmp); \
318 vmxnet3_RxCompToCPU((rcd), (tmp)); \
319 } while (0)
320 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
321 (dstrxd) = (tmp); \
322 vmxnet3_RxDescToCPU((rxd), (tmp)); \
323 } while (0)
324
325 #else
326
327 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
328 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
329 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
330 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
331 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
332 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
333
334 #endif /* __BIG_ENDIAN_BITFIELD */
335
336
337 static void
338 vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
339 struct pci_dev *pdev)
340 {
341 if (tbi->map_type == VMXNET3_MAP_SINGLE)
342 dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len,
343 DMA_TO_DEVICE);
344 else if (tbi->map_type == VMXNET3_MAP_PAGE)
345 dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len,
346 DMA_TO_DEVICE);
347 else
348 BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);
349
350 tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
351 }
352
353
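/* Unmap and free all buffers of the packet whose EOP descriptor sits at
 * eop_idx, advancing tx_ring.next2comp past the packet. Returns the number of
 * tx ring entries reclaimed.
 */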
354 static int
355 vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
356 struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
357 {
358 struct sk_buff *skb;
359 int entries = 0;
360
361 /* no out of order completion */
362 BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
363 BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);
364
365 skb = tq->buf_info[eop_idx].skb;
366 BUG_ON(skb == NULL);
367 tq->buf_info[eop_idx].skb = NULL;
368
369 VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);
370
371 while (tq->tx_ring.next2comp != eop_idx) {
372 vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
373 pdev);
374
375 /* update next2comp w/o tx_lock. Since we are marking more,
376 * instead of less, tx ring entries avail, the worst case is
377 * that the tx routine incorrectly re-queues a pkt due to
378 * insufficient tx ring entries.
379 */
380 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
381 entries++;
382 }
383
384 dev_kfree_skb_any(skb);
385 return entries;
386 }
387
388
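/* Reclaim tx descriptors whose completion entries the device has written
 * (i.e. while the completion ring gen bit still matches), and wake a stopped
 * tx queue once enough ring entries are available again.
 */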
389 static int
390 vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
391 struct vmxnet3_adapter *adapter)
392 {
393 int completed = 0;
394 union Vmxnet3_GenericDesc *gdesc;
395
396 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
397 while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
398 /* Prevent any &gdesc->tcd field from being (speculatively)
399 * read before (&gdesc->tcd)->gen is read.
400 */
401 dma_rmb();
402
403 completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
404 &gdesc->tcd), tq, adapter->pdev,
405 adapter);
406
407 vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
408 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
409 }
410
411 if (completed) {
412 spin_lock(&tq->tx_lock);
413 if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
414 vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
415 VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
416 netif_carrier_ok(adapter->netdev))) {
417 vmxnet3_tq_wake(tq, adapter);
418 }
419 spin_unlock(&tq->tx_lock);
420 }
421 return completed;
422 }
423
424
425 static void
426 vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
427 struct vmxnet3_adapter *adapter)
428 {
429 int i;
430
431 while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
432 struct vmxnet3_tx_buf_info *tbi;
433
434 tbi = tq->buf_info + tq->tx_ring.next2comp;
435
436 vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
437 if (tbi->skb) {
438 dev_kfree_skb_any(tbi->skb);
439 tbi->skb = NULL;
440 }
441 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
442 }
443
444 /* sanity check, verify all buffers are indeed unmapped and freed */
445 for (i = 0; i < tq->tx_ring.size; i++) {
446 BUG_ON(tq->buf_info[i].skb != NULL ||
447 tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
448 }
449
450 tq->tx_ring.gen = VMXNET3_INIT_GEN;
451 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
452
453 tq->comp_ring.gen = VMXNET3_INIT_GEN;
454 tq->comp_ring.next2proc = 0;
455 }
456
457
458 static void
459 vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
460 struct vmxnet3_adapter *adapter)
461 {
462 if (tq->tx_ring.base) {
463 dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size *
464 sizeof(struct Vmxnet3_TxDesc),
465 tq->tx_ring.base, tq->tx_ring.basePA);
466 tq->tx_ring.base = NULL;
467 }
468 if (tq->data_ring.base) {
469 dma_free_coherent(&adapter->pdev->dev,
470 tq->data_ring.size * tq->txdata_desc_size,
471 tq->data_ring.base, tq->data_ring.basePA);
472 tq->data_ring.base = NULL;
473 }
474 if (tq->comp_ring.base) {
475 dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
476 sizeof(struct Vmxnet3_TxCompDesc),
477 tq->comp_ring.base, tq->comp_ring.basePA);
478 tq->comp_ring.base = NULL;
479 }
480 kfree(tq->buf_info);
481 tq->buf_info = NULL;
482 }
483
484
485 /* Destroy all tx queues */
486 void
487 vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
488 {
489 int i;
490
491 for (i = 0; i < adapter->num_tx_queues; i++)
492 vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
493 }
494
495
496 static void
497 vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
498 struct vmxnet3_adapter *adapter)
499 {
500 int i;
501
502 /* reset the tx ring contents to 0 and reset the tx ring states */
503 memset(tq->tx_ring.base, 0, tq->tx_ring.size *
504 sizeof(struct Vmxnet3_TxDesc));
505 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
506 tq->tx_ring.gen = VMXNET3_INIT_GEN;
507
508 memset(tq->data_ring.base, 0,
509 tq->data_ring.size * tq->txdata_desc_size);
510
511 /* reset the tx comp ring contents to 0 and reset comp ring states */
512 memset(tq->comp_ring.base, 0, tq->comp_ring.size *
513 sizeof(struct Vmxnet3_TxCompDesc));
514 tq->comp_ring.next2proc = 0;
515 tq->comp_ring.gen = VMXNET3_INIT_GEN;
516
517 /* reset the bookkeeping data */
518 memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
519 for (i = 0; i < tq->tx_ring.size; i++)
520 tq->buf_info[i].map_type = VMXNET3_MAP_NONE;
521
522 /* stats are not reset */
523 }
524
525
526 static int
527 vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
528 struct vmxnet3_adapter *adapter)
529 {
530 BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
531 tq->comp_ring.base || tq->buf_info);
532
533 tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
534 tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc),
535 &tq->tx_ring.basePA, GFP_KERNEL);
536 if (!tq->tx_ring.base) {
537 netdev_err(adapter->netdev, "failed to allocate tx ring\n");
538 goto err;
539 }
540
541 tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
542 tq->data_ring.size * tq->txdata_desc_size,
543 &tq->data_ring.basePA, GFP_KERNEL);
544 if (!tq->data_ring.base) {
545 netdev_err(adapter->netdev, "failed to allocate tx data ring\n");
546 goto err;
547 }
548
549 tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
550 tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
551 &tq->comp_ring.basePA, GFP_KERNEL);
552 if (!tq->comp_ring.base) {
553 netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
554 goto err;
555 }
556
557 tq->buf_info = kcalloc_node(tq->tx_ring.size, sizeof(tq->buf_info[0]),
558 GFP_KERNEL,
559 dev_to_node(&adapter->pdev->dev));
560 if (!tq->buf_info)
561 goto err;
562
563 return 0;
564
565 err:
566 vmxnet3_tq_destroy(tq, adapter);
567 return -ENOMEM;
568 }
569
570 static void
571 vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
572 {
573 int i;
574
575 for (i = 0; i < adapter->num_tx_queues; i++)
576 vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
577 }
578
579 /*
580 * starting from ring->next2fill, allocate rx buffers for the given ring
581 * of the rx queue and update the rx desc. stop after @num_to_alloc buffers
582 * are allocated or allocation fails
583 */
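/* Ring 0 interleaves VMXNET3_RX_BUF_SKB head buffers with VMXNET3_RX_BUF_PAGE
 * body buffers (every rx_buf_per_pkt-th slot is an skb, see vmxnet3_rq_init());
 * ring 1 holds page buffers only.
 */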
584
585 static int
586 vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
587 int num_to_alloc, struct vmxnet3_adapter *adapter)
588 {
589 int num_allocated = 0;
590 struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
591 struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
592 u32 val;
593
594 while (num_allocated <= num_to_alloc) {
595 struct vmxnet3_rx_buf_info *rbi;
596 union Vmxnet3_GenericDesc *gd;
597
598 rbi = rbi_base + ring->next2fill;
599 gd = ring->base + ring->next2fill;
600 rbi->comp_state = VMXNET3_RXD_COMP_PENDING;
601
602 if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
603 if (rbi->skb == NULL) {
604 rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev,
605 rbi->len,
606 GFP_KERNEL);
607 if (unlikely(rbi->skb == NULL)) {
608 rq->stats.rx_buf_alloc_failure++;
609 break;
610 }
611
612 rbi->dma_addr = dma_map_single(
613 &adapter->pdev->dev,
614 rbi->skb->data, rbi->len,
615 DMA_FROM_DEVICE);
616 if (dma_mapping_error(&adapter->pdev->dev,
617 rbi->dma_addr)) {
618 dev_kfree_skb_any(rbi->skb);
619 rbi->skb = NULL;
620 rq->stats.rx_buf_alloc_failure++;
621 break;
622 }
623 } else {
624 /* rx buffer skipped by the device */
625 }
626 val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
627 } else {
628 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
629 rbi->len != PAGE_SIZE);
630
631 if (rbi->page == NULL) {
632 rbi->page = alloc_page(GFP_ATOMIC);
633 if (unlikely(rbi->page == NULL)) {
634 rq->stats.rx_buf_alloc_failure++;
635 break;
636 }
637 rbi->dma_addr = dma_map_page(
638 &adapter->pdev->dev,
639 rbi->page, 0, PAGE_SIZE,
640 DMA_FROM_DEVICE);
641 if (dma_mapping_error(&adapter->pdev->dev,
642 rbi->dma_addr)) {
643 put_page(rbi->page);
644 rbi->page = NULL;
645 rq->stats.rx_buf_alloc_failure++;
646 break;
647 }
648 } else {
649 /* rx buffers skipped by the device */
650 }
651 val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
652 }
653
654 gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
655 gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
656 | val | rbi->len);
657
658 /* Fill the last buffer but don't mark it ready, or else the
659 * device will think that the queue is full */
660 if (num_allocated == num_to_alloc) {
661 rbi->comp_state = VMXNET3_RXD_COMP_DONE;
662 break;
663 }
664
665 gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
666 num_allocated++;
667 vmxnet3_cmd_ring_adv_next2fill(ring);
668 }
669
670 netdev_dbg(adapter->netdev,
671 "alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n",
672 num_allocated, ring->next2fill, ring->next2comp);
673
674 /* so that the device can distinguish a full ring and an empty ring */
675 BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);
676
677 return num_allocated;
678 }
679
680
681 static void
682 vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
683 struct vmxnet3_rx_buf_info *rbi)
684 {
685 skb_frag_t *frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
686
687 BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
688
689 __skb_frag_set_page(frag, rbi->page);
690 skb_frag_off_set(frag, 0);
691 skb_frag_size_set(frag, rcd->len);
692 skb->data_len += rcd->len;
693 skb->truesize += PAGE_SIZE;
694 skb_shinfo(skb)->nr_frags++;
695 }
696
697
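/* Set up the tx descriptors for a packet: the SOP descriptor points at the
 * headers already copied into the tx data ring (if any), followed by
 * descriptors for the remaining linear data and for each page fragment.
 * Returns 0 on success or -EFAULT if a DMA mapping fails.
 */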
698 static int
699 vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
700 struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
701 struct vmxnet3_adapter *adapter)
702 {
703 u32 dw2, len;
704 unsigned long buf_offset;
705 int i;
706 union Vmxnet3_GenericDesc *gdesc;
707 struct vmxnet3_tx_buf_info *tbi = NULL;
708
709 BUG_ON(ctx->copy_size > skb_headlen(skb));
710
711 /* use the previous gen bit for the SOP desc, so the device does not see the
 * packet until the gen bit is flipped after all its descriptors are written */
712 dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
713
714 ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
715 gdesc = ctx->sop_txd; /* both loops below can be skipped */
716
717 /* no need to map the buffer if headers are copied */
718 if (ctx->copy_size) {
719 ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
720 tq->tx_ring.next2fill *
721 tq->txdata_desc_size);
722 ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
723 ctx->sop_txd->dword[3] = 0;
724
725 tbi = tq->buf_info + tq->tx_ring.next2fill;
726 tbi->map_type = VMXNET3_MAP_NONE;
727
728 netdev_dbg(adapter->netdev,
729 "txd[%u]: 0x%Lx 0x%x 0x%x\n",
730 tq->tx_ring.next2fill,
731 le64_to_cpu(ctx->sop_txd->txd.addr),
732 ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
733 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
734
735 /* use the right gen for non-SOP desc */
736 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
737 }
738
739 /* linear part can use multiple tx desc if it's big */
740 len = skb_headlen(skb) - ctx->copy_size;
741 buf_offset = ctx->copy_size;
742 while (len) {
743 u32 buf_size;
744
745 if (len < VMXNET3_MAX_TX_BUF_SIZE) {
746 buf_size = len;
747 dw2 |= len;
748 } else {
749 buf_size = VMXNET3_MAX_TX_BUF_SIZE;
750 /* spec says that for TxDesc.len, 0 == 2^14 */
751 }
752
753 tbi = tq->buf_info + tq->tx_ring.next2fill;
754 tbi->map_type = VMXNET3_MAP_SINGLE;
755 tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
756 skb->data + buf_offset, buf_size,
757 DMA_TO_DEVICE);
758 if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
759 return -EFAULT;
760
761 tbi->len = buf_size;
762
763 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
764 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
765
766 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
767 gdesc->dword[2] = cpu_to_le32(dw2);
768 gdesc->dword[3] = 0;
769
770 netdev_dbg(adapter->netdev,
771 "txd[%u]: 0x%Lx 0x%x 0x%x\n",
772 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
773 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
774 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
775 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
776
777 len -= buf_size;
778 buf_offset += buf_size;
779 }
780
781 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
782 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
783 u32 buf_size;
784
785 buf_offset = 0;
786 len = skb_frag_size(frag);
787 while (len) {
788 tbi = tq->buf_info + tq->tx_ring.next2fill;
789 if (len < VMXNET3_MAX_TX_BUF_SIZE) {
790 buf_size = len;
791 dw2 |= len;
792 } else {
793 buf_size = VMXNET3_MAX_TX_BUF_SIZE;
794 /* spec says that for TxDesc.len, 0 == 2^14 */
795 }
796 tbi->map_type = VMXNET3_MAP_PAGE;
797 tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
798 buf_offset, buf_size,
799 DMA_TO_DEVICE);
800 if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
801 return -EFAULT;
802
803 tbi->len = buf_size;
804
805 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
806 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
807
808 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
809 gdesc->dword[2] = cpu_to_le32(dw2);
810 gdesc->dword[3] = 0;
811
812 netdev_dbg(adapter->netdev,
813 "txd[%u]: 0x%llx %u %u\n",
814 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
815 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
816 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
817 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
818
819 len -= buf_size;
820 buf_offset += buf_size;
821 }
822 }
823
824 ctx->eop_txd = gdesc;
825
826 /* set the last buf_info for the pkt */
827 tbi->skb = skb;
828 tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
829
830 return 0;
831 }
832
833
834 /* Init all tx queues */
835 static void
836 vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
837 {
838 int i;
839
840 for (i = 0; i < adapter->num_tx_queues; i++)
841 vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
842 }
843
844
845 /*
846 * parse relevant protocol headers:
847 * For a tso pkt, relevant headers are L2/3/4 including options
848 * For a pkt requesting csum offloading, they are L2/3 and may include L4
849 * if it's a TCP/UDP pkt
850 *
851 * Returns:
852 * -1: error happens during parsing
853 * 0: protocol headers parsed, but too big to be copied
854 * 1: protocol headers parsed and copied
855 *
856 * Other effects:
857 * 1. related *ctx fields are updated.
858 * 2. ctx->copy_size is # of bytes copied
859 * 3. the portion to be copied is guaranteed to be in the linear part
860 *
861 */
862 static int
863 vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
864 struct vmxnet3_tx_ctx *ctx,
865 struct vmxnet3_adapter *adapter)
866 {
867 u8 protocol = 0;
868
869 if (ctx->mss) { /* TSO */
870 if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
871 ctx->l4_offset = skb_inner_transport_offset(skb);
872 ctx->l4_hdr_size = inner_tcp_hdrlen(skb);
873 ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size;
874 } else {
875 ctx->l4_offset = skb_transport_offset(skb);
876 ctx->l4_hdr_size = tcp_hdrlen(skb);
877 ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size;
878 }
879 } else {
880 if (skb->ip_summed == CHECKSUM_PARTIAL) {
881 /* For encap packets, skb_checksum_start_offset refers
882 * to inner L4 offset. Thus, below works for encap as
883 * well as non-encap case
884 */
885 ctx->l4_offset = skb_checksum_start_offset(skb);
886
887 if (VMXNET3_VERSION_GE_4(adapter) &&
888 skb->encapsulation) {
889 struct iphdr *iph = inner_ip_hdr(skb);
890
891 if (iph->version == 4) {
892 protocol = iph->protocol;
893 } else {
894 const struct ipv6hdr *ipv6h;
895
896 ipv6h = inner_ipv6_hdr(skb);
897 protocol = ipv6h->nexthdr;
898 }
899 } else {
900 if (ctx->ipv4) {
901 const struct iphdr *iph = ip_hdr(skb);
902
903 protocol = iph->protocol;
904 } else if (ctx->ipv6) {
905 const struct ipv6hdr *ipv6h;
906
907 ipv6h = ipv6_hdr(skb);
908 protocol = ipv6h->nexthdr;
909 }
910 }
911
912 switch (protocol) {
913 case IPPROTO_TCP:
914 ctx->l4_hdr_size = skb->encapsulation ? inner_tcp_hdrlen(skb) :
915 tcp_hdrlen(skb);
916 break;
917 case IPPROTO_UDP:
918 ctx->l4_hdr_size = sizeof(struct udphdr);
919 break;
920 default:
921 ctx->l4_hdr_size = 0;
922 break;
923 }
924
925 ctx->copy_size = min(ctx->l4_offset +
926 ctx->l4_hdr_size, skb->len);
927 } else {
928 ctx->l4_offset = 0;
929 ctx->l4_hdr_size = 0;
930 /* copy as much as allowed */
931 ctx->copy_size = min_t(unsigned int,
932 tq->txdata_desc_size,
933 skb_headlen(skb));
934 }
935
936 if (skb->len <= VMXNET3_HDR_COPY_SIZE)
937 ctx->copy_size = skb->len;
938
939 /* make sure headers are accessible directly */
940 if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
941 goto err;
942 }
943
944 if (unlikely(ctx->copy_size > tq->txdata_desc_size)) {
945 tq->stats.oversized_hdr++;
946 ctx->copy_size = 0;
947 return 0;
948 }
949
950 return 1;
951 err:
952 return -1;
953 }
954
955 /*
956 * copy relevant protocol headers to the transmit ring:
957 * For a tso pkt, relevant headers are L2/3/4 including options
958 * For a pkt requesting csum offloading, they are L2/3 and may include L4
959 * if it's a TCP/UDP pkt
960 *
961 *
962 * Note that this requires that vmxnet3_parse_hdr be called first to set the
963 * appropriate bits in ctx
964 */
965 static void
966 vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
967 struct vmxnet3_tx_ctx *ctx,
968 struct vmxnet3_adapter *adapter)
969 {
970 struct Vmxnet3_TxDataDesc *tdd;
971
972 tdd = (struct Vmxnet3_TxDataDesc *)((u8 *)tq->data_ring.base +
973 tq->tx_ring.next2fill *
974 tq->txdata_desc_size);
975
976 memcpy(tdd->data, skb->data, ctx->copy_size);
977 netdev_dbg(adapter->netdev,
978 "copy %u bytes to dataRing[%u]\n",
979 ctx->copy_size, tq->tx_ring.next2fill);
980 }
981
982
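/* For TSO the IPv4 header checksum is cleared and the TCP checksum is seeded
 * with the pseudo-header sum (length omitted); the two helpers below do this
 * for encapsulated (inner) headers and for plain packets respectively.
 */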
983 static void
984 vmxnet3_prepare_inner_tso(struct sk_buff *skb,
985 struct vmxnet3_tx_ctx *ctx)
986 {
987 struct tcphdr *tcph = inner_tcp_hdr(skb);
988 struct iphdr *iph = inner_ip_hdr(skb);
989
990 if (iph->version == 4) {
991 iph->check = 0;
992 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
993 IPPROTO_TCP, 0);
994 } else {
995 struct ipv6hdr *iph = inner_ipv6_hdr(skb);
996
997 tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
998 IPPROTO_TCP, 0);
999 }
1000 }
1001
1002 static void
1003 vmxnet3_prepare_tso(struct sk_buff *skb,
1004 struct vmxnet3_tx_ctx *ctx)
1005 {
1006 struct tcphdr *tcph = tcp_hdr(skb);
1007
1008 if (ctx->ipv4) {
1009 struct iphdr *iph = ip_hdr(skb);
1010
1011 iph->check = 0;
1012 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
1013 IPPROTO_TCP, 0);
1014 } else if (ctx->ipv6) {
1015 tcp_v6_gso_csum_prep(skb);
1016 }
1017 }
1018
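/* Worst-case number of tx descriptors a packet may need: one per
 * VMXNET3_MAX_TX_BUF_SIZE chunk of the linear part and of every frag, plus
 * one extra to cover a SOP descriptor for headers copied to the data ring.
 */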
1019 static int txd_estimate(const struct sk_buff *skb)
1020 {
1021 int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
1022 int i;
1023
1024 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1025 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1026
1027 count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
1028 }
1029 return count;
1030 }
1031
1032 /*
1033 * Transmits a pkt through a given tq
1034 * Returns:
1035 * NETDEV_TX_OK: descriptors are setup successfully
1036 * NETDEV_TX_OK: error occurred, the pkt is dropped
1037 * NETDEV_TX_BUSY: tx ring is full, queue is stopped
1038 *
1039 * Side-effects:
1040 * 1. tx ring may be changed
1041 * 2. tq stats may be updated accordingly
1042 * 3. shared->txNumDeferred may be updated
1043 */
1044
1045 static int
1046 vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1047 struct vmxnet3_adapter *adapter, struct net_device *netdev)
1048 {
1049 int ret;
1050 u32 count;
1051 int num_pkts;
1052 int tx_num_deferred;
1053 unsigned long flags;
1054 struct vmxnet3_tx_ctx ctx;
1055 union Vmxnet3_GenericDesc *gdesc;
1056 #ifdef __BIG_ENDIAN_BITFIELD
1057 /* Use temporary descriptor to avoid touching bits multiple times */
1058 union Vmxnet3_GenericDesc tempTxDesc;
1059 #endif
1060
1061 count = txd_estimate(skb);
1062
1063 ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
1064 ctx.ipv6 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IPV6));
1065
1066 ctx.mss = skb_shinfo(skb)->gso_size;
1067 if (ctx.mss) {
1068 if (skb_header_cloned(skb)) {
1069 if (unlikely(pskb_expand_head(skb, 0, 0,
1070 GFP_ATOMIC) != 0)) {
1071 tq->stats.drop_tso++;
1072 goto drop_pkt;
1073 }
1074 tq->stats.copy_skb_header++;
1075 }
1076 if (unlikely(count > VMXNET3_MAX_TSO_TXD_PER_PKT)) {
1077 /* tso pkts must not use more than
1078 * VMXNET3_MAX_TSO_TXD_PER_PKT entries
1079 */
1080 if (skb_linearize(skb) != 0) {
1081 tq->stats.drop_too_many_frags++;
1082 goto drop_pkt;
1083 }
1084 tq->stats.linearized++;
1085
1086 /* recalculate the # of descriptors to use */
1087 count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
1088 if (unlikely(count > VMXNET3_MAX_TSO_TXD_PER_PKT)) {
1089 tq->stats.drop_too_many_frags++;
1090 goto drop_pkt;
1091 }
1092 }
1093 if (skb->encapsulation) {
1094 vmxnet3_prepare_inner_tso(skb, &ctx);
1095 } else {
1096 vmxnet3_prepare_tso(skb, &ctx);
1097 }
1098 } else {
1099 if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {
1100
1101 /* non-tso pkts must not use more than
1102 * VMXNET3_MAX_TXD_PER_PKT entries
1103 */
1104 if (skb_linearize(skb) != 0) {
1105 tq->stats.drop_too_many_frags++;
1106 goto drop_pkt;
1107 }
1108 tq->stats.linearized++;
1109
1110 /* recalculate the # of descriptors to use */
1111 count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
1112 }
1113 }
1114
1115 ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter);
1116 if (ret >= 0) {
1117 BUG_ON(ret <= 0 && ctx.copy_size != 0);
1118 /* hdrs parsed, check against other limits */
1119 if (ctx.mss) {
1120 if (unlikely(ctx.l4_offset + ctx.l4_hdr_size >
1121 VMXNET3_MAX_TX_BUF_SIZE)) {
1122 tq->stats.drop_oversized_hdr++;
1123 goto drop_pkt;
1124 }
1125 } else {
1126 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1127 if (unlikely(ctx.l4_offset +
1128 skb->csum_offset >
1129 VMXNET3_MAX_CSUM_OFFSET)) {
1130 tq->stats.drop_oversized_hdr++;
1131 goto drop_pkt;
1132 }
1133 }
1134 }
1135 } else {
1136 tq->stats.drop_hdr_inspect_err++;
1137 goto drop_pkt;
1138 }
1139
1140 spin_lock_irqsave(&tq->tx_lock, flags);
1141
1142 if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
1143 tq->stats.tx_ring_full++;
1144 netdev_dbg(adapter->netdev,
1145 "tx queue stopped on %s, next2comp %u"
1146 " next2fill %u\n", adapter->netdev->name,
1147 tq->tx_ring.next2comp, tq->tx_ring.next2fill);
1148
1149 vmxnet3_tq_stop(tq, adapter);
1150 spin_unlock_irqrestore(&tq->tx_lock, flags);
1151 return NETDEV_TX_BUSY;
1152 }
1153
1154
1155 vmxnet3_copy_hdr(skb, tq, &ctx, adapter);
1156
1157 /* fill tx descs related to addr & len */
1158 if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
1159 goto unlock_drop_pkt;
1160
1161 /* setup the EOP desc */
1162 ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
1163
1164 /* setup the SOP desc */
1165 #ifdef __BIG_ENDIAN_BITFIELD
1166 gdesc = &tempTxDesc;
1167 gdesc->dword[2] = ctx.sop_txd->dword[2];
1168 gdesc->dword[3] = ctx.sop_txd->dword[3];
1169 #else
1170 gdesc = ctx.sop_txd;
1171 #endif
1172 tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);
1173 if (ctx.mss) {
1174 if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
1175 gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
1176 if (VMXNET3_VERSION_GE_7(adapter)) {
1177 gdesc->txd.om = VMXNET3_OM_TSO;
1178 gdesc->txd.ext1 = 1;
1179 } else {
1180 gdesc->txd.om = VMXNET3_OM_ENCAP;
1181 }
1182 gdesc->txd.msscof = ctx.mss;
1183
1184 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
1185 gdesc->txd.oco = 1;
1186 } else {
1187 gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
1188 gdesc->txd.om = VMXNET3_OM_TSO;
1189 gdesc->txd.msscof = ctx.mss;
1190 }
1191 num_pkts = (skb->len - gdesc->txd.hlen + ctx.mss - 1) / ctx.mss;
1192 } else {
1193 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1194 if (VMXNET3_VERSION_GE_4(adapter) &&
1195 skb->encapsulation) {
1196 gdesc->txd.hlen = ctx.l4_offset +
1197 ctx.l4_hdr_size;
1198 if (VMXNET3_VERSION_GE_7(adapter)) {
1199 gdesc->txd.om = VMXNET3_OM_CSUM;
1200 gdesc->txd.msscof = ctx.l4_offset +
1201 skb->csum_offset;
1202 gdesc->txd.ext1 = 1;
1203 } else {
1204 gdesc->txd.om = VMXNET3_OM_ENCAP;
1205 gdesc->txd.msscof = 0; /* Reserved */
1206 }
1207 } else {
1208 gdesc->txd.hlen = ctx.l4_offset;
1209 gdesc->txd.om = VMXNET3_OM_CSUM;
1210 gdesc->txd.msscof = ctx.l4_offset +
1211 skb->csum_offset;
1212 }
1213 } else {
1214 gdesc->txd.om = 0;
1215 gdesc->txd.msscof = 0;
1216 }
1217 num_pkts = 1;
1218 }
1219 le32_add_cpu(&tq->shared->txNumDeferred, num_pkts);
1220 tx_num_deferred += num_pkts;
1221
1222 if (skb_vlan_tag_present(skb)) {
1223 gdesc->txd.ti = 1;
1224 gdesc->txd.tci = skb_vlan_tag_get(skb);
1225 }
1226
1227 /* Ensure that the write to (&gdesc->txd)->gen will be observed after
1228 * all other writes to &gdesc->txd.
1229 */
1230 dma_wmb();
1231
1232 /* finally flips the GEN bit of the SOP desc. */
1233 gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
1234 VMXNET3_TXD_GEN);
1235 #ifdef __BIG_ENDIAN_BITFIELD
1236 /* Finished updating in bitfields of Tx Desc, so write them in original
1237 * place.
1238 */
1239 vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
1240 (struct Vmxnet3_TxDesc *)ctx.sop_txd);
1241 gdesc = ctx.sop_txd;
1242 #endif
1243 netdev_dbg(adapter->netdev,
1244 "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
1245 (u32)(ctx.sop_txd -
1246 tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
1247 le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));
1248
1249 spin_unlock_irqrestore(&tq->tx_lock, flags);
1250
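/* Batch doorbells: the tx producer register is only written once
 * txNumDeferred has reached txThreshold.
 */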
1251 if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
1252 tq->shared->txNumDeferred = 0;
1253 VMXNET3_WRITE_BAR0_REG(adapter,
1254 adapter->tx_prod_offset + tq->qid * 8,
1255 tq->tx_ring.next2fill);
1256 }
1257
1258 return NETDEV_TX_OK;
1259
1260 unlock_drop_pkt:
1261 spin_unlock_irqrestore(&tq->tx_lock, flags);
1262 drop_pkt:
1263 tq->stats.drop_total++;
1264 dev_kfree_skb_any(skb);
1265 return NETDEV_TX_OK;
1266 }
1267
1268
1269 static netdev_tx_t
1270 vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1271 {
1272 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1273
1274 BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
1275 return vmxnet3_tq_xmit(skb,
1276 &adapter->tx_queue[skb->queue_mapping],
1277 adapter, netdev);
1278 }
1279
1280
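/* Translate the checksum bits of the rx completion descriptor into
 * skb->ip_summed: CHECKSUM_UNNECESSARY when the device validated the TCP/UDP
 * checksum (csum_level is raised for the inner packet of a tunnel),
 * CHECKSUM_PARTIAL when only a raw checksum value was supplied, and no
 * checksum information otherwise.
 */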
1281 static void
1282 vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
1283 struct sk_buff *skb,
1284 union Vmxnet3_GenericDesc *gdesc)
1285 {
1286 if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
1287 if (gdesc->rcd.v4 &&
1288 (le32_to_cpu(gdesc->dword[3]) &
1289 VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
1290 skb->ip_summed = CHECKSUM_UNNECESSARY;
1291 if ((le32_to_cpu(gdesc->dword[0]) &
1292 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))) {
1293 skb->csum_level = 1;
1294 }
1295 WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
1296 !(le32_to_cpu(gdesc->dword[0]) &
1297 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1298 WARN_ON_ONCE(gdesc->rcd.frg &&
1299 !(le32_to_cpu(gdesc->dword[0]) &
1300 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1301 } else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
1302 (1 << VMXNET3_RCD_TUC_SHIFT))) {
1303 skb->ip_summed = CHECKSUM_UNNECESSARY;
1304 if ((le32_to_cpu(gdesc->dword[0]) &
1305 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))) {
1306 skb->csum_level = 1;
1307 }
1308 WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
1309 !(le32_to_cpu(gdesc->dword[0]) &
1310 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1311 WARN_ON_ONCE(gdesc->rcd.frg &&
1312 !(le32_to_cpu(gdesc->dword[0]) &
1313 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1314 } else {
1315 if (gdesc->rcd.csum) {
1316 skb->csum = htons(gdesc->rcd.csum);
1317 skb->ip_summed = CHECKSUM_PARTIAL;
1318 } else {
1319 skb_checksum_none_assert(skb);
1320 }
1321 }
1322 } else {
1323 skb_checksum_none_assert(skb);
1324 }
1325 }
1326
1327
1328 static void
1329 vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
1330 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
1331 {
1332 rq->stats.drop_err++;
1333 if (!rcd->fcs)
1334 rq->stats.drop_fcs++;
1335
1336 rq->stats.drop_total++;
1337
1338 /*
1339 * We do not unmap and chain the rx buffer to the skb.
1340 * We basically pretend this buffer is not used and will be recycled
1341 * by vmxnet3_rq_alloc_rx_buf()
1342 */
1343
1344 /*
1345 * ctx->skb may be NULL if this is the first and the only one
1346 * desc for the pkt
1347 */
1348 if (ctx->skb)
1349 dev_kfree_skb_irq(ctx->skb);
1350
1351 ctx->skb = NULL;
1352 }
1353
1354
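/* Estimate the combined L2+L3+L4 header length of an aggregated (LRO) TCP
 * packet from its linear data, so that gso_size can be derived when the
 * device did not report it; returns 0 when the headers cannot be parsed.
 */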
1355 static u32
1356 vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
1357 union Vmxnet3_GenericDesc *gdesc)
1358 {
1359 u32 hlen, maplen;
1360 union {
1361 void *ptr;
1362 struct ethhdr *eth;
1363 struct vlan_ethhdr *veth;
1364 struct iphdr *ipv4;
1365 struct ipv6hdr *ipv6;
1366 struct tcphdr *tcp;
1367 } hdr;
1368 BUG_ON(gdesc->rcd.tcp == 0);
1369
1370 maplen = skb_headlen(skb);
1371 if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen))
1372 return 0;
1373
1374 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
1375 skb->protocol == cpu_to_be16(ETH_P_8021AD))
1376 hlen = sizeof(struct vlan_ethhdr);
1377 else
1378 hlen = sizeof(struct ethhdr);
1379
1380 hdr.eth = eth_hdr(skb);
1381 if (gdesc->rcd.v4) {
1382 BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP) &&
1383 hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IP));
1384 hdr.ptr += hlen;
1385 BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP);
1386 hlen = hdr.ipv4->ihl << 2;
1387 hdr.ptr += hdr.ipv4->ihl << 2;
1388 } else if (gdesc->rcd.v6) {
1389 BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6) &&
1390 hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IPV6));
1391 hdr.ptr += hlen;
1392 /* Use an estimated value, since we also need to handle
1393 * TSO case.
1394 */
1395 if (hdr.ipv6->nexthdr != IPPROTO_TCP)
1396 return sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
1397 hlen = sizeof(struct ipv6hdr);
1398 hdr.ptr += sizeof(struct ipv6hdr);
1399 } else {
1400 /* Non-IP pkt, don't estimate header length */
1401 return 0;
1402 }
1403
1404 if (hlen + sizeof(struct tcphdr) > maplen)
1405 return 0;
1406
1407 return (hlen + (hdr.tcp->doff << 2));
1408 }
1409
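/* Process up to @quota packets from the rx completion ring: SOP buffers are
 * either copied out of the rx data ring or replaced by a freshly allocated
 * skb, body buffers are appended as page frags, checksum/RSS/LRO metadata is
 * filled in, the skb is handed to the stack, and the rx rings are refilled.
 */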
1410 static int
1411 vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1412 struct vmxnet3_adapter *adapter, int quota)
1413 {
1414 u32 rxprod_reg[2] = {
1415 adapter->rx_prod_offset, adapter->rx_prod2_offset
1416 };
1417 u32 num_pkts = 0;
1418 bool skip_page_frags = false;
1419 bool encap_lro = false;
1420 struct Vmxnet3_RxCompDesc *rcd;
1421 struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
1422 u16 segCnt = 0, mss = 0;
1423 int comp_offset, fill_offset;
1424 #ifdef __BIG_ENDIAN_BITFIELD
1425 struct Vmxnet3_RxDesc rxCmdDesc;
1426 struct Vmxnet3_RxCompDesc rxComp;
1427 #endif
1428 vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
1429 &rxComp);
1430 while (rcd->gen == rq->comp_ring.gen) {
1431 struct vmxnet3_rx_buf_info *rbi;
1432 struct sk_buff *skb, *new_skb = NULL;
1433 struct page *new_page = NULL;
1434 dma_addr_t new_dma_addr;
1435 int num_to_alloc;
1436 struct Vmxnet3_RxDesc *rxd;
1437 u32 idx, ring_idx;
1438 struct vmxnet3_cmd_ring *ring = NULL;
1439 if (num_pkts >= quota) {
1440 /* we may stop even before we see the EOP desc of
1441 * the current pkt
1442 */
1443 break;
1444 }
1445
1446 /* Prevent any rcd field from being (speculatively) read before
1447 * rcd->gen is read.
1448 */
1449 dma_rmb();
1450
1451 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
1452 rcd->rqID != rq->dataRingQid);
1453 idx = rcd->rxdIdx;
1454 ring_idx = VMXNET3_GET_RING_IDX(adapter, rcd->rqID);
1455 ring = rq->rx_ring + ring_idx;
1456 vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
1457 &rxCmdDesc);
1458 rbi = rq->buf_info[ring_idx] + idx;
1459
1460 BUG_ON(rxd->addr != rbi->dma_addr ||
1461 rxd->len != rbi->len);
1462
1463 if (unlikely(rcd->eop && rcd->err)) {
1464 vmxnet3_rx_error(rq, rcd, ctx, adapter);
1465 goto rcd_done;
1466 }
1467
1468 if (rcd->sop) { /* first buf of the pkt */
1469 bool rxDataRingUsed;
1470 u16 len;
1471
1472 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
1473 (rcd->rqID != rq->qid &&
1474 rcd->rqID != rq->dataRingQid));
1475
1476 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
1477 BUG_ON(ctx->skb != NULL || rbi->skb == NULL);
1478
1479 if (unlikely(rcd->len == 0)) {
1480 /* Pretend the rx buffer is skipped. */
1481 BUG_ON(!(rcd->sop && rcd->eop));
1482 netdev_dbg(adapter->netdev,
1483 "rxRing[%u][%u] 0 length\n",
1484 ring_idx, idx);
1485 goto rcd_done;
1486 }
1487
1488 skip_page_frags = false;
1489 ctx->skb = rbi->skb;
1490
1491 rxDataRingUsed =
1492 VMXNET3_RX_DATA_RING(adapter, rcd->rqID);
1493 len = rxDataRingUsed ? rcd->len : rbi->len;
1494 new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
1495 len);
1496 if (new_skb == NULL) {
1497 /* Skb allocation failed, do not hand this
1498 * skb over to the stack. Reuse it. Drop the existing pkt
1499 */
1500 rq->stats.rx_buf_alloc_failure++;
1501 ctx->skb = NULL;
1502 rq->stats.drop_total++;
1503 skip_page_frags = true;
1504 goto rcd_done;
1505 }
1506
1507 if (rxDataRingUsed) {
1508 size_t sz;
1509
1510 BUG_ON(rcd->len > rq->data_ring.desc_size);
1511
1512 ctx->skb = new_skb;
1513 sz = rcd->rxdIdx * rq->data_ring.desc_size;
1514 memcpy(new_skb->data,
1515 &rq->data_ring.base[sz], rcd->len);
1516 } else {
1517 ctx->skb = rbi->skb;
1518
1519 new_dma_addr =
1520 dma_map_single(&adapter->pdev->dev,
1521 new_skb->data, rbi->len,
1522 DMA_FROM_DEVICE);
1523 if (dma_mapping_error(&adapter->pdev->dev,
1524 new_dma_addr)) {
1525 dev_kfree_skb(new_skb);
1526 /* Skb allocation failed, do not
1527 * hand this skb over to the stack. Reuse
1528 * it. Drop the existing pkt.
1529 */
1530 rq->stats.rx_buf_alloc_failure++;
1531 ctx->skb = NULL;
1532 rq->stats.drop_total++;
1533 skip_page_frags = true;
1534 goto rcd_done;
1535 }
1536
1537 dma_unmap_single(&adapter->pdev->dev,
1538 rbi->dma_addr,
1539 rbi->len,
1540 DMA_FROM_DEVICE);
1541
1542 /* Immediate refill */
1543 rbi->skb = new_skb;
1544 rbi->dma_addr = new_dma_addr;
1545 rxd->addr = cpu_to_le64(rbi->dma_addr);
1546 rxd->len = rbi->len;
1547 }
1548
1549 #ifdef VMXNET3_RSS
1550 if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
1551 (adapter->netdev->features & NETIF_F_RXHASH)) {
1552 enum pkt_hash_types hash_type;
1553
1554 switch (rcd->rssType) {
1555 case VMXNET3_RCD_RSS_TYPE_IPV4:
1556 case VMXNET3_RCD_RSS_TYPE_IPV6:
1557 hash_type = PKT_HASH_TYPE_L3;
1558 break;
1559 case VMXNET3_RCD_RSS_TYPE_TCPIPV4:
1560 case VMXNET3_RCD_RSS_TYPE_TCPIPV6:
1561 case VMXNET3_RCD_RSS_TYPE_UDPIPV4:
1562 case VMXNET3_RCD_RSS_TYPE_UDPIPV6:
1563 hash_type = PKT_HASH_TYPE_L4;
1564 break;
1565 default:
1566 hash_type = PKT_HASH_TYPE_L3;
1567 break;
1568 }
1569 skb_set_hash(ctx->skb,
1570 le32_to_cpu(rcd->rssHash),
1571 hash_type);
1572 }
1573 #endif
1574 skb_record_rx_queue(ctx->skb, rq->qid);
1575 skb_put(ctx->skb, rcd->len);
1576
1577 if (VMXNET3_VERSION_GE_2(adapter) &&
1578 rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
1579 struct Vmxnet3_RxCompDescExt *rcdlro;
1580 union Vmxnet3_GenericDesc *gdesc;
1581
1582 rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
1583 gdesc = (union Vmxnet3_GenericDesc *)rcd;
1584
1585 segCnt = rcdlro->segCnt;
1586 WARN_ON_ONCE(segCnt == 0);
1587 mss = rcdlro->mss;
1588 if (unlikely(segCnt <= 1))
1589 segCnt = 0;
1590 encap_lro = (le32_to_cpu(gdesc->dword[0]) &
1591 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT));
1592 } else {
1593 segCnt = 0;
1594 }
1595 } else {
1596 BUG_ON(ctx->skb == NULL && !skip_page_frags);
1597
1598 /* non SOP buffer must be type 1 in most cases */
1599 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
1600 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
1601
1602 /* If an sop buffer was dropped, skip all
1603 * following non-sop fragments. They will be reused.
1604 */
1605 if (skip_page_frags)
1606 goto rcd_done;
1607
1608 if (rcd->len) {
1609 new_page = alloc_page(GFP_ATOMIC);
1610 /* Replacement page frag could not be allocated.
1611 * Reuse this page. Drop the pkt and free the
1612 * skb which contained this page as a frag. Skip
1613 * processing all the following non-sop frags.
1614 */
1615 if (unlikely(!new_page)) {
1616 rq->stats.rx_buf_alloc_failure++;
1617 dev_kfree_skb(ctx->skb);
1618 ctx->skb = NULL;
1619 skip_page_frags = true;
1620 goto rcd_done;
1621 }
1622 new_dma_addr = dma_map_page(&adapter->pdev->dev,
1623 new_page,
1624 0, PAGE_SIZE,
1625 DMA_FROM_DEVICE);
1626 if (dma_mapping_error(&adapter->pdev->dev,
1627 new_dma_addr)) {
1628 put_page(new_page);
1629 rq->stats.rx_buf_alloc_failure++;
1630 dev_kfree_skb(ctx->skb);
1631 ctx->skb = NULL;
1632 skip_page_frags = true;
1633 goto rcd_done;
1634 }
1635
1636 dma_unmap_page(&adapter->pdev->dev,
1637 rbi->dma_addr, rbi->len,
1638 DMA_FROM_DEVICE);
1639
1640 vmxnet3_append_frag(ctx->skb, rcd, rbi);
1641
1642 /* Immediate refill */
1643 rbi->page = new_page;
1644 rbi->dma_addr = new_dma_addr;
1645 rxd->addr = cpu_to_le64(rbi->dma_addr);
1646 rxd->len = rbi->len;
1647 }
1648 }
1649
1650
1651 skb = ctx->skb;
1652 if (rcd->eop) {
1653 u32 mtu = adapter->netdev->mtu;
1654 skb->len += skb->data_len;
1655
1656 vmxnet3_rx_csum(adapter, skb,
1657 (union Vmxnet3_GenericDesc *)rcd);
1658 skb->protocol = eth_type_trans(skb, adapter->netdev);
1659 if ((!rcd->tcp && !encap_lro) ||
1660 !(adapter->netdev->features & NETIF_F_LRO))
1661 goto not_lro;
1662
1663 if (segCnt != 0 && mss != 0) {
1664 skb_shinfo(skb)->gso_type = rcd->v4 ?
1665 SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
1666 skb_shinfo(skb)->gso_size = mss;
1667 skb_shinfo(skb)->gso_segs = segCnt;
1668 } else if ((segCnt != 0 || skb->len > mtu) && !encap_lro) {
1669 u32 hlen;
1670
1671 hlen = vmxnet3_get_hdr_len(adapter, skb,
1672 (union Vmxnet3_GenericDesc *)rcd);
1673 if (hlen == 0)
1674 goto not_lro;
1675
1676 skb_shinfo(skb)->gso_type =
1677 rcd->v4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
1678 if (segCnt != 0) {
1679 skb_shinfo(skb)->gso_segs = segCnt;
1680 skb_shinfo(skb)->gso_size =
1681 DIV_ROUND_UP(skb->len -
1682 hlen, segCnt);
1683 } else {
1684 skb_shinfo(skb)->gso_size = mtu - hlen;
1685 }
1686 }
1687 not_lro:
1688 if (unlikely(rcd->ts))
1689 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci);
1690
1691 if (adapter->netdev->features & NETIF_F_LRO)
1692 netif_receive_skb(skb);
1693 else
1694 napi_gro_receive(&rq->napi, skb);
1695
1696 ctx->skb = NULL;
1697 encap_lro = false;
1698 num_pkts++;
1699 }
1700
1701 rcd_done:
1702 /* device may have skipped some rx descs */
1703 ring = rq->rx_ring + ring_idx;
1704 rbi->comp_state = VMXNET3_RXD_COMP_DONE;
1705
1706 comp_offset = vmxnet3_cmd_ring_desc_avail(ring);
1707 fill_offset = (idx > ring->next2fill ? 0 : ring->size) +
1708 idx - ring->next2fill - 1;
1709 if (!ring->isOutOfOrder || fill_offset >= comp_offset)
1710 ring->next2comp = idx;
1711 num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
1712
1713 /* Ensure that the writes to rxd->gen bits will be observed
1714 * after all other writes to rxd objects.
1715 */
1716 dma_wmb();
1717
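/* When the device may complete rx descriptors out of order
 * (VMXNET3_CAP_OOORX_COMP), hand back only buffers whose completion has been
 * seen; otherwise refill every descriptor unconditionally.
 */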
1718 while (num_to_alloc) {
1719 rbi = rq->buf_info[ring_idx] + ring->next2fill;
1720 if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_OOORX_COMP)))
1721 goto refill_buf;
1722 if (ring_idx == 0) {
1723 /* ring0 Type1 buffers can get skipped; re-fill them */
1724 if (rbi->buf_type != VMXNET3_RX_BUF_SKB)
1725 goto refill_buf;
1726 }
1727 if (rbi->comp_state == VMXNET3_RXD_COMP_DONE) {
1728 refill_buf:
1729 vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
1730 &rxCmdDesc);
1731 WARN_ON(!rxd->addr);
1732
1733 /* Recv desc is ready to be used by the device */
1734 rxd->gen = ring->gen;
1735 vmxnet3_cmd_ring_adv_next2fill(ring);
1736 rbi->comp_state = VMXNET3_RXD_COMP_PENDING;
1737 num_to_alloc--;
1738 } else {
1739 /* rx completion hasn't occurred */
1740 ring->isOutOfOrder = 1;
1741 break;
1742 }
1743 }
1744
1745 if (num_to_alloc == 0) {
1746 ring->isOutOfOrder = 0;
1747 }
1748
1749 /* if needed, update the register */
1750 if (unlikely(rq->shared->updateRxProd) && (ring->next2fill & 0xf) == 0) {
1751 VMXNET3_WRITE_BAR0_REG(adapter,
1752 rxprod_reg[ring_idx] + rq->qid * 8,
1753 ring->next2fill);
1754 }
1755
1756 vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
1757 vmxnet3_getRxComp(rcd,
1758 &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
1759 }
1760
1761 return num_pkts;
1762 }
1763
1764
1765 static void
1766 vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
1767 struct vmxnet3_adapter *adapter)
1768 {
1769 u32 i, ring_idx;
1770 struct Vmxnet3_RxDesc *rxd;
1771
1772 /* ring has already been cleaned up */
1773 if (!rq->rx_ring[0].base)
1774 return;
1775
1776 for (ring_idx = 0; ring_idx < 2; ring_idx++) {
1777 for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
1778 #ifdef __BIG_ENDIAN_BITFIELD
1779 struct Vmxnet3_RxDesc rxDesc;
1780 #endif
1781 vmxnet3_getRxDesc(rxd,
1782 &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
1783
1784 if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
1785 rq->buf_info[ring_idx][i].skb) {
1786 dma_unmap_single(&adapter->pdev->dev, rxd->addr,
1787 rxd->len, DMA_FROM_DEVICE);
1788 dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
1789 rq->buf_info[ring_idx][i].skb = NULL;
1790 } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
1791 rq->buf_info[ring_idx][i].page) {
1792 dma_unmap_page(&adapter->pdev->dev, rxd->addr,
1793 rxd->len, DMA_FROM_DEVICE);
1794 put_page(rq->buf_info[ring_idx][i].page);
1795 rq->buf_info[ring_idx][i].page = NULL;
1796 }
1797 }
1798
1799 rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
1800 rq->rx_ring[ring_idx].next2fill =
1801 rq->rx_ring[ring_idx].next2comp = 0;
1802 }
1803
1804 rq->comp_ring.gen = VMXNET3_INIT_GEN;
1805 rq->comp_ring.next2proc = 0;
1806 }
1807
1808
1809 static void
1810 vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
1811 {
1812 int i;
1813
1814 for (i = 0; i < adapter->num_rx_queues; i++)
1815 vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
1816 }
1817
1818
1819 static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
1820 struct vmxnet3_adapter *adapter)
1821 {
1822 int i;
1823 int j;
1824
1825 /* all rx buffers must have already been freed */
1826 for (i = 0; i < 2; i++) {
1827 if (rq->buf_info[i]) {
1828 for (j = 0; j < rq->rx_ring[i].size; j++)
1829 BUG_ON(rq->buf_info[i][j].page != NULL);
1830 }
1831 }
1832
1833
1834 for (i = 0; i < 2; i++) {
1835 if (rq->rx_ring[i].base) {
1836 dma_free_coherent(&adapter->pdev->dev,
1837 rq->rx_ring[i].size
1838 * sizeof(struct Vmxnet3_RxDesc),
1839 rq->rx_ring[i].base,
1840 rq->rx_ring[i].basePA);
1841 rq->rx_ring[i].base = NULL;
1842 }
1843 }
1844
1845 if (rq->data_ring.base) {
1846 dma_free_coherent(&adapter->pdev->dev,
1847 rq->rx_ring[0].size * rq->data_ring.desc_size,
1848 rq->data_ring.base, rq->data_ring.basePA);
1849 rq->data_ring.base = NULL;
1850 }
1851
1852 if (rq->comp_ring.base) {
1853 dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
1854 * sizeof(struct Vmxnet3_RxCompDesc),
1855 rq->comp_ring.base, rq->comp_ring.basePA);
1856 rq->comp_ring.base = NULL;
1857 }
1858
1859 kfree(rq->buf_info[0]);
1860 rq->buf_info[0] = NULL;
1861 rq->buf_info[1] = NULL;
1862 }
1863
1864 static void
1865 vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter)
1866 {
1867 int i;
1868
1869 for (i = 0; i < adapter->num_rx_queues; i++) {
1870 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
1871
1872 if (rq->data_ring.base) {
1873 dma_free_coherent(&adapter->pdev->dev,
1874 (rq->rx_ring[0].size *
1875 rq->data_ring.desc_size),
1876 rq->data_ring.base,
1877 rq->data_ring.basePA);
1878 rq->data_ring.base = NULL;
1879 rq->data_ring.desc_size = 0;
1880 }
1881 }
1882 }
1883
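/*
 * Set up buf_info (skb buffers for ring0 packet heads, page buffers for
 * the rest), zero both rx rings and the completion ring, and pre-allocate
 * rx buffers. Fails only if not even one buffer could be allocated for
 * the first ring.
 */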
1884 static int
1885 vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
1886 struct vmxnet3_adapter *adapter)
1887 {
1888 int i;
1889
1890 /* initialize buf_info */
1891 for (i = 0; i < rq->rx_ring[0].size; i++) {
1892
1893 /* 1st buf for a pkt is skbuff */
1894 if (i % adapter->rx_buf_per_pkt == 0) {
1895 rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
1896 rq->buf_info[0][i].len = adapter->skb_buf_size;
1897 } else { /* subsequent bufs for a pkt are frags */
1898 rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
1899 rq->buf_info[0][i].len = PAGE_SIZE;
1900 }
1901 }
1902 for (i = 0; i < rq->rx_ring[1].size; i++) {
1903 rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
1904 rq->buf_info[1][i].len = PAGE_SIZE;
1905 }
1906
1907 /* reset internal state and allocate buffers for both rings */
1908 for (i = 0; i < 2; i++) {
1909 rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
1910
1911 memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
1912 sizeof(struct Vmxnet3_RxDesc));
1913 rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
1914 rq->rx_ring[i].isOutOfOrder = 0;
1915 }
1916 if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
1917 adapter) == 0) {
1918 /* the 1st ring must have at least 1 rx buffer */
1919 return -ENOMEM;
1920 }
1921 vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
1922
1923 /* reset the comp ring */
1924 rq->comp_ring.next2proc = 0;
1925 memset(rq->comp_ring.base, 0, rq->comp_ring.size *
1926 sizeof(struct Vmxnet3_RxCompDesc));
1927 rq->comp_ring.gen = VMXNET3_INIT_GEN;
1928
1929 /* reset rxctx */
1930 rq->rx_ctx.skb = NULL;
1931
1932 /* stats are not reset */
1933 return 0;
1934 }
1935
1936
1937 static int
1938 vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
1939 {
1940 int i, err = 0;
1941
1942 for (i = 0; i < adapter->num_rx_queues; i++) {
1943 err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
1944 if (unlikely(err)) {
1945 dev_err(&adapter->netdev->dev, "%s: failed to "
1946 "initialize rx queue%i\n",
1947 adapter->netdev->name, i);
1948 break;
1949 }
1950 }
1951 return err;
1952
1953 }
1954
1955
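/*
 * Allocate the DMA-coherent rx descriptor rings, the optional rx data
 * ring (vmxnet3 v3+), the completion ring and one buf_info array shared
 * by both rx rings. On failure everything allocated so far is released
 * via vmxnet3_rq_destroy().
 */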
1956 static int
1957 vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
1958 {
1959 int i;
1960 size_t sz;
1961 struct vmxnet3_rx_buf_info *bi;
1962
1963 for (i = 0; i < 2; i++) {
1964
1965 sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
1966 rq->rx_ring[i].base = dma_alloc_coherent(
1967 &adapter->pdev->dev, sz,
1968 &rq->rx_ring[i].basePA,
1969 GFP_KERNEL);
1970 if (!rq->rx_ring[i].base) {
1971 netdev_err(adapter->netdev,
1972 "failed to allocate rx ring %d\n", i);
1973 goto err;
1974 }
1975 }
1976
1977 if ((adapter->rxdataring_enabled) && (rq->data_ring.desc_size != 0)) {
1978 sz = rq->rx_ring[0].size * rq->data_ring.desc_size;
1979 rq->data_ring.base =
1980 dma_alloc_coherent(&adapter->pdev->dev, sz,
1981 &rq->data_ring.basePA,
1982 GFP_KERNEL);
1983 if (!rq->data_ring.base) {
1984 netdev_err(adapter->netdev,
1985 "rx data ring will be disabled\n");
1986 adapter->rxdataring_enabled = false;
1987 }
1988 } else {
1989 rq->data_ring.base = NULL;
1990 rq->data_ring.desc_size = 0;
1991 }
1992
1993 sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
1994 rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
1995 &rq->comp_ring.basePA,
1996 GFP_KERNEL);
1997 if (!rq->comp_ring.base) {
1998 netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
1999 goto err;
2000 }
2001
2002 bi = kcalloc_node(rq->rx_ring[0].size + rq->rx_ring[1].size,
2003 sizeof(rq->buf_info[0][0]), GFP_KERNEL,
2004 dev_to_node(&adapter->pdev->dev));
2005 if (!bi)
2006 goto err;
2007
2008 rq->buf_info[0] = bi;
2009 rq->buf_info[1] = bi + rq->rx_ring[0].size;
2010
2011 return 0;
2012
2013 err:
2014 vmxnet3_rq_destroy(rq, adapter);
2015 return -ENOMEM;
2016 }
2017
2018
2019 static int
2020 vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
2021 {
2022 int i, err = 0;
2023
2024 adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
2025
2026 for (i = 0; i < adapter->num_rx_queues; i++) {
2027 err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
2028 if (unlikely(err)) {
2029 dev_err(&adapter->netdev->dev,
2030 "%s: failed to create rx queue%i\n",
2031 adapter->netdev->name, i);
2032 goto err_out;
2033 }
2034 }
2035
2036 if (!adapter->rxdataring_enabled)
2037 vmxnet3_rq_destroy_all_rxdataring(adapter);
2038
2039 return err;
2040 err_out:
2041 vmxnet3_rq_destroy_all(adapter);
2042 return err;
2043
2044 }
2045
2046 /* Multiple queue aware polling function for tx and rx */
2047
2048 static int
2049 vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
2050 {
2051 int rcd_done = 0, i;
2052 if (unlikely(adapter->shared->ecr))
2053 vmxnet3_process_events(adapter);
2054 for (i = 0; i < adapter->num_tx_queues; i++)
2055 vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);
2056
2057 for (i = 0; i < adapter->num_rx_queues; i++)
2058 rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
2059 adapter, budget);
2060 return rcd_done;
2061 }
2062
2063
2064 static int
2065 vmxnet3_poll(struct napi_struct *napi, int budget)
2066 {
2067 struct vmxnet3_rx_queue *rx_queue = container_of(napi,
2068 struct vmxnet3_rx_queue, napi);
2069 int rxd_done;
2070
2071 rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
2072
2073 if (rxd_done < budget) {
2074 napi_complete_done(napi, rxd_done);
2075 vmxnet3_enable_all_intrs(rx_queue->adapter);
2076 }
2077 return rxd_done;
2078 }
2079
2080 /*
2081 * NAPI polling function for MSI-X mode with multiple Rx queues
2082 * Returns the # of NAPI credits consumed (# of rx descriptors processed)
2083 */
2084
2085 static int
2086 vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
2087 {
2088 struct vmxnet3_rx_queue *rq = container_of(napi,
2089 struct vmxnet3_rx_queue, napi);
2090 struct vmxnet3_adapter *adapter = rq->adapter;
2091 int rxd_done;
2092
2093 /* When sharing interrupt with corresponding tx queue, process
2094 * tx completions in that queue as well
2095 */
2096 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
2097 struct vmxnet3_tx_queue *tq =
2098 &adapter->tx_queue[rq - adapter->rx_queue];
2099 vmxnet3_tq_tx_complete(tq, adapter);
2100 }
2101
2102 rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
2103
2104 if (rxd_done < budget) {
2105 napi_complete_done(napi, rxd_done);
2106 vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
2107 }
2108 return rxd_done;
2109 }
2110
2111
2112 #ifdef CONFIG_PCI_MSI
2113
2114 /*
2115 * Handle completion interrupts on tx queues
2116 * Returns whether or not the intr is handled
2117 */
2118
2119 static irqreturn_t
2120 vmxnet3_msix_tx(int irq, void *data)
2121 {
2122 struct vmxnet3_tx_queue *tq = data;
2123 struct vmxnet3_adapter *adapter = tq->adapter;
2124
2125 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2126 vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
2127
2128 /* Handle the case where only one irq is allocated for all tx queues */
2129 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
2130 int i;
2131 for (i = 0; i < adapter->num_tx_queues; i++) {
2132 struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
2133 vmxnet3_tq_tx_complete(txq, adapter);
2134 }
2135 } else {
2136 vmxnet3_tq_tx_complete(tq, adapter);
2137 }
2138 vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
2139
2140 return IRQ_HANDLED;
2141 }
2142
2143
2144 /*
2145 * Handle completion interrupts on rx queues. Returns whether or not the
2146 * intr is handled
2147 */
2148
2149 static irqreturn_t
2150 vmxnet3_msix_rx(int irq, void *data)
2151 {
2152 struct vmxnet3_rx_queue *rq = data;
2153 struct vmxnet3_adapter *adapter = rq->adapter;
2154
2155 /* disable intr if needed */
2156 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2157 vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
2158 napi_schedule(&rq->napi);
2159
2160 return IRQ_HANDLED;
2161 }
2162
2163 /*
2164 *----------------------------------------------------------------------------
2165 *
2166 * vmxnet3_msix_event --
2167 *
2168 * vmxnet3 msix event intr handler
2169 *
2170 * Result:
2171 * whether or not the intr is handled
2172 *
2173 *----------------------------------------------------------------------------
2174 */
2175
2176 static irqreturn_t
2177 vmxnet3_msix_event(int irq, void *data)
2178 {
2179 struct net_device *dev = data;
2180 struct vmxnet3_adapter *adapter = netdev_priv(dev);
2181
2182 /* disable intr if needed */
2183 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2184 vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);
2185
2186 if (adapter->shared->ecr)
2187 vmxnet3_process_events(adapter);
2188
2189 vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
2190
2191 return IRQ_HANDLED;
2192 }
2193
2194 #endif /* CONFIG_PCI_MSI */
2195
2196
2197 /* Interrupt handler for vmxnet3 */
2198 static irqreturn_t
2199 vmxnet3_intr(int irq, void *dev_id)
2200 {
2201 struct net_device *dev = dev_id;
2202 struct vmxnet3_adapter *adapter = netdev_priv(dev);
2203
2204 if (adapter->intr.type == VMXNET3_IT_INTX) {
2205 u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
2206 if (unlikely(icr == 0))
2207 /* not ours */
2208 return IRQ_NONE;
2209 }
2210
2211
2212 /* disable intr if needed */
2213 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2214 vmxnet3_disable_all_intrs(adapter);
2215
2216 napi_schedule(&adapter->rx_queue[0].napi);
2217
2218 return IRQ_HANDLED;
2219 }
2220
2221 #ifdef CONFIG_NET_POLL_CONTROLLER
2222
2223 /* netpoll callback. */
2224 static void
2225 vmxnet3_netpoll(struct net_device *netdev)
2226 {
2227 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2228
2229 switch (adapter->intr.type) {
2230 #ifdef CONFIG_PCI_MSI
2231 case VMXNET3_IT_MSIX: {
2232 int i;
2233 for (i = 0; i < adapter->num_rx_queues; i++)
2234 vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
2235 break;
2236 }
2237 #endif
2238 case VMXNET3_IT_MSI:
2239 default:
2240 vmxnet3_intr(0, adapter->netdev);
2241 break;
2242 }
2243
2244 }
2245 #endif /* CONFIG_NET_POLL_CONTROLLER */
2246
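/*
 * Request one irq per tx/rx completion ring (or shared vectors, depending
 * on adapter->share_intr) plus one for events in MSI-X mode, or a single
 * irq in MSI/INTx mode, and record the vector index used by each
 * completion ring.
 */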
2247 static int
2248 vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
2249 {
2250 struct vmxnet3_intr *intr = &adapter->intr;
2251 int err = 0, i;
2252 int vector = 0;
2253
2254 #ifdef CONFIG_PCI_MSI
2255 if (adapter->intr.type == VMXNET3_IT_MSIX) {
2256 for (i = 0; i < adapter->num_tx_queues; i++) {
2257 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
2258 sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
2259 adapter->netdev->name, vector);
2260 err = request_irq(
2261 intr->msix_entries[vector].vector,
2262 vmxnet3_msix_tx, 0,
2263 adapter->tx_queue[i].name,
2264 &adapter->tx_queue[i]);
2265 } else {
2266 sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
2267 adapter->netdev->name, vector);
2268 }
2269 if (err) {
2270 dev_err(&adapter->netdev->dev,
2271 "Failed to request irq for MSIX, %s, "
2272 "error %d\n",
2273 adapter->tx_queue[i].name, err);
2274 return err;
2275 }
2276
2277 /* Handle the case where only 1 MSIx was allocated for
2278 * all tx queues */
2279 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
2280 for (; i < adapter->num_tx_queues; i++)
2281 adapter->tx_queue[i].comp_ring.intr_idx
2282 = vector;
2283 vector++;
2284 break;
2285 } else {
2286 adapter->tx_queue[i].comp_ring.intr_idx
2287 = vector++;
2288 }
2289 }
2290 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
2291 vector = 0;
2292
2293 for (i = 0; i < adapter->num_rx_queues; i++) {
2294 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
2295 sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
2296 adapter->netdev->name, vector);
2297 else
2298 sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
2299 adapter->netdev->name, vector);
2300 err = request_irq(intr->msix_entries[vector].vector,
2301 vmxnet3_msix_rx, 0,
2302 adapter->rx_queue[i].name,
2303 &(adapter->rx_queue[i]));
2304 if (err) {
2305 netdev_err(adapter->netdev,
2306 "Failed to request irq for MSIX, "
2307 "%s, error %d\n",
2308 adapter->rx_queue[i].name, err);
2309 return err;
2310 }
2311
2312 adapter->rx_queue[i].comp_ring.intr_idx = vector++;
2313 }
2314
2315 sprintf(intr->event_msi_vector_name, "%s-event-%d",
2316 adapter->netdev->name, vector);
2317 err = request_irq(intr->msix_entries[vector].vector,
2318 vmxnet3_msix_event, 0,
2319 intr->event_msi_vector_name, adapter->netdev);
2320 intr->event_intr_idx = vector;
2321
2322 } else if (intr->type == VMXNET3_IT_MSI) {
2323 adapter->num_rx_queues = 1;
2324 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
2325 adapter->netdev->name, adapter->netdev);
2326 } else {
2327 #endif
2328 adapter->num_rx_queues = 1;
2329 err = request_irq(adapter->pdev->irq, vmxnet3_intr,
2330 IRQF_SHARED, adapter->netdev->name,
2331 adapter->netdev);
2332 #ifdef CONFIG_PCI_MSI
2333 }
2334 #endif
2335 intr->num_intrs = vector + 1;
2336 if (err) {
2337 netdev_err(adapter->netdev,
2338 "Failed to request irq (intr type:%d), error %d\n",
2339 intr->type, err);
2340 } else {
2341 /* Number of rx queues will not change after this */
2342 for (i = 0; i < adapter->num_rx_queues; i++) {
2343 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2344 rq->qid = i;
2345 rq->qid2 = i + adapter->num_rx_queues;
2346 rq->dataRingQid = i + 2 * adapter->num_rx_queues;
2347 }
2348
2349 /* init our intr settings */
2350 for (i = 0; i < intr->num_intrs; i++)
2351 intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
2352 if (adapter->intr.type != VMXNET3_IT_MSIX) {
2353 adapter->intr.event_intr_idx = 0;
2354 for (i = 0; i < adapter->num_tx_queues; i++)
2355 adapter->tx_queue[i].comp_ring.intr_idx = 0;
2356 adapter->rx_queue[0].comp_ring.intr_idx = 0;
2357 }
2358
2359 netdev_info(adapter->netdev,
2360 "intr type %u, mode %u, %u vectors allocated\n",
2361 intr->type, intr->mask_mode, intr->num_intrs);
2362 }
2363
2364 return err;
2365 }
2366
2367
2368 static void
2369 vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
2370 {
2371 struct vmxnet3_intr *intr = &adapter->intr;
2372 BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);
2373
2374 switch (intr->type) {
2375 #ifdef CONFIG_PCI_MSI
2376 case VMXNET3_IT_MSIX:
2377 {
2378 int i, vector = 0;
2379
2380 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
2381 for (i = 0; i < adapter->num_tx_queues; i++) {
2382 free_irq(intr->msix_entries[vector++].vector,
2383 &(adapter->tx_queue[i]));
2384 if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
2385 break;
2386 }
2387 }
2388
2389 for (i = 0; i < adapter->num_rx_queues; i++) {
2390 free_irq(intr->msix_entries[vector++].vector,
2391 &(adapter->rx_queue[i]));
2392 }
2393
2394 free_irq(intr->msix_entries[vector].vector,
2395 adapter->netdev);
2396 BUG_ON(vector >= intr->num_intrs);
2397 break;
2398 }
2399 #endif
2400 case VMXNET3_IT_MSI:
2401 free_irq(adapter->pdev->irq, adapter->netdev);
2402 break;
2403 case VMXNET3_IT_INTX:
2404 free_irq(adapter->pdev->irq, adapter->netdev);
2405 break;
2406 default:
2407 BUG();
2408 }
2409 }
2410
2411
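/*
 * Rewrite the device VLAN filter table from the driver's active_vlans
 * bitmap, always permitting untagged frames (vid 0).
 */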
2412 static void
2413 vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
2414 {
2415 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2416 u16 vid;
2417
2418 /* allow untagged pkts */
2419 VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
2420
2421 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2422 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
2423 }
2424
2425
2426 static int
2427 vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
2428 {
2429 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2430
2431 if (!(netdev->flags & IFF_PROMISC)) {
2432 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2433 unsigned long flags;
2434
2435 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
2436 spin_lock_irqsave(&adapter->cmd_lock, flags);
2437 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2438 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2439 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2440 }
2441
2442 set_bit(vid, adapter->active_vlans);
2443
2444 return 0;
2445 }
2446
2447
2448 static int
2449 vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
2450 {
2451 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2452
2453 if (!(netdev->flags & IFF_PROMISC)) {
2454 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2455 unsigned long flags;
2456
2457 VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
2458 spin_lock_irqsave(&adapter->cmd_lock, flags);
2459 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2460 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2461 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2462 }
2463
2464 clear_bit(vid, adapter->active_vlans);
2465
2466 return 0;
2467 }
2468
2469
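/*
 * Copy the netdev multicast list into a flat, kmalloc'ed array of MAC
 * addresses that can be DMA-mapped and handed to the device. Returns
 * NULL if the list is too large or the allocation fails.
 */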
2470 static u8 *
2471 vmxnet3_copy_mc(struct net_device *netdev)
2472 {
2473 u8 *buf = NULL;
2474 u32 sz = netdev_mc_count(netdev) * ETH_ALEN;
2475
2476 /* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
2477 if (sz <= 0xffff) {
2478 /* We may be called with BH disabled */
2479 buf = kmalloc(sz, GFP_ATOMIC);
2480 if (buf) {
2481 struct netdev_hw_addr *ha;
2482 int i = 0;
2483
2484 netdev_for_each_mc_addr(ha, netdev)
2485 memcpy(buf + i++ * ETH_ALEN, ha->addr,
2486 ETH_ALEN);
2487 }
2488 }
2489 return buf;
2490 }
2491
2492
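/*
 * Program the device rx filter: rx mode (unicast/broadcast/multicast/
 * promiscuous), the multicast MAC table and the VLAN filter table,
 * based on the current netdev flags and address lists.
 */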
2493 static void
2494 vmxnet3_set_mc(struct net_device *netdev)
2495 {
2496 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2497 unsigned long flags;
2498 struct Vmxnet3_RxFilterConf *rxConf =
2499 &adapter->shared->devRead.rxFilterConf;
2500 u8 *new_table = NULL;
2501 dma_addr_t new_table_pa = 0;
2502 bool new_table_pa_valid = false;
2503 u32 new_mode = VMXNET3_RXM_UCAST;
2504
2505 if (netdev->flags & IFF_PROMISC) {
2506 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2507 memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));
2508
2509 new_mode |= VMXNET3_RXM_PROMISC;
2510 } else {
2511 vmxnet3_restore_vlan(adapter);
2512 }
2513
2514 if (netdev->flags & IFF_BROADCAST)
2515 new_mode |= VMXNET3_RXM_BCAST;
2516
2517 if (netdev->flags & IFF_ALLMULTI)
2518 new_mode |= VMXNET3_RXM_ALL_MULTI;
2519 else
2520 if (!netdev_mc_empty(netdev)) {
2521 new_table = vmxnet3_copy_mc(netdev);
2522 if (new_table) {
2523 size_t sz = netdev_mc_count(netdev) * ETH_ALEN;
2524
2525 rxConf->mfTableLen = cpu_to_le16(sz);
2526 new_table_pa = dma_map_single(
2527 &adapter->pdev->dev,
2528 new_table,
2529 sz,
2530 DMA_TO_DEVICE);
2531 if (!dma_mapping_error(&adapter->pdev->dev,
2532 new_table_pa)) {
2533 new_mode |= VMXNET3_RXM_MCAST;
2534 new_table_pa_valid = true;
2535 rxConf->mfTablePA = cpu_to_le64(
2536 new_table_pa);
2537 }
2538 }
2539 if (!new_table_pa_valid) {
2540 netdev_info(netdev,
2541 "failed to copy mcast list, setting ALL_MULTI\n");
2542 new_mode |= VMXNET3_RXM_ALL_MULTI;
2543 }
2544 }
2545
2546 if (!(new_mode & VMXNET3_RXM_MCAST)) {
2547 rxConf->mfTableLen = 0;
2548 rxConf->mfTablePA = 0;
2549 }
2550
2551 spin_lock_irqsave(&adapter->cmd_lock, flags);
2552 if (new_mode != rxConf->rxMode) {
2553 rxConf->rxMode = cpu_to_le32(new_mode);
2554 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2555 VMXNET3_CMD_UPDATE_RX_MODE);
2556 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2557 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2558 }
2559
2560 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2561 VMXNET3_CMD_UPDATE_MAC_FILTERS);
2562 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2563
2564 if (new_table_pa_valid)
2565 dma_unmap_single(&adapter->pdev->dev, new_table_pa,
2566 rxConf->mfTableLen, DMA_TO_DEVICE);
2567 kfree(new_table);
2568 }
2569
2570 void
2571 vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
2572 {
2573 int i;
2574
2575 for (i = 0; i < adapter->num_rx_queues; i++)
2576 vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
2577 }
2578
2579
2580 /*
2581 * Set up driver_shared based on settings in adapter.
2582 */
2583
2584 static void
2585 vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2586 {
2587 struct Vmxnet3_DriverShared *shared = adapter->shared;
2588 struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
2589 struct Vmxnet3_DSDevReadExt *devReadExt = &shared->devReadExt;
2590 struct Vmxnet3_TxQueueConf *tqc;
2591 struct Vmxnet3_RxQueueConf *rqc;
2592 int i;
2593
2594 memset(shared, 0, sizeof(*shared));
2595
2596 /* driver settings */
2597 shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
2598 devRead->misc.driverInfo.version = cpu_to_le32(
2599 VMXNET3_DRIVER_VERSION_NUM);
2600 devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
2601 VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
2602 devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
2603 *((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
2604 *((u32 *)&devRead->misc.driverInfo.gos));
2605 devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
2606 devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
2607
2608 devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa);
2609 devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
2610
2611 /* set up feature flags */
2612 if (adapter->netdev->features & NETIF_F_RXCSUM)
2613 devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
2614
2615 if (adapter->netdev->features & NETIF_F_LRO) {
2616 devRead->misc.uptFeatures |= UPT1_F_LRO;
2617 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
2618 }
2619 if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
2620 devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
2621
2622 if (adapter->netdev->features & (NETIF_F_GSO_UDP_TUNNEL |
2623 NETIF_F_GSO_UDP_TUNNEL_CSUM))
2624 devRead->misc.uptFeatures |= UPT1_F_RXINNEROFLD;
2625
2626 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
2627 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
2628 devRead->misc.queueDescLen = cpu_to_le32(
2629 adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
2630 adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));
2631
2632 /* tx queue settings */
2633 devRead->misc.numTxQueues = adapter->num_tx_queues;
2634 for (i = 0; i < adapter->num_tx_queues; i++) {
2635 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
2636 BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
2637 tqc = &adapter->tqd_start[i].conf;
2638 tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA);
2639 tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
2640 tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
2641 tqc->ddPA = cpu_to_le64(~0ULL);
2642 tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
2643 tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
2644 tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size);
2645 tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
2646 tqc->ddLen = cpu_to_le32(0);
2647 tqc->intrIdx = tq->comp_ring.intr_idx;
2648 }
2649
2650 /* rx queue settings */
2651 devRead->misc.numRxQueues = adapter->num_rx_queues;
2652 for (i = 0; i < adapter->num_rx_queues; i++) {
2653 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2654 rqc = &adapter->rqd_start[i].conf;
2655 rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
2656 rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
2657 rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA);
2658 rqc->ddPA = cpu_to_le64(~0ULL);
2659 rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
2660 rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
2661 rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
2662 rqc->ddLen = cpu_to_le32(0);
2663 rqc->intrIdx = rq->comp_ring.intr_idx;
2664 if (VMXNET3_VERSION_GE_3(adapter)) {
2665 rqc->rxDataRingBasePA =
2666 cpu_to_le64(rq->data_ring.basePA);
2667 rqc->rxDataRingDescSize =
2668 cpu_to_le16(rq->data_ring.desc_size);
2669 }
2670 }
2671
2672 #ifdef VMXNET3_RSS
2673 memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));
2674
2675 if (adapter->rss) {
2676 struct UPT1_RSSConf *rssConf = adapter->rss_conf;
2677
2678 devRead->misc.uptFeatures |= UPT1_F_RSS;
2679 devRead->misc.numRxQueues = adapter->num_rx_queues;
2680 rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
2681 UPT1_RSS_HASH_TYPE_IPV4 |
2682 UPT1_RSS_HASH_TYPE_TCP_IPV6 |
2683 UPT1_RSS_HASH_TYPE_IPV6;
2684 rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
2685 rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
2686 rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
2687 netdev_rss_key_fill(rssConf->hashKey, sizeof(rssConf->hashKey));
2688
2689 for (i = 0; i < rssConf->indTableSize; i++)
2690 rssConf->indTable[i] = ethtool_rxfh_indir_default(
2691 i, adapter->num_rx_queues);
2692
2693 devRead->rssConfDesc.confVer = 1;
2694 devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf));
2695 devRead->rssConfDesc.confPA =
2696 cpu_to_le64(adapter->rss_conf_pa);
2697 }
2698
2699 #endif /* VMXNET3_RSS */
2700
2701 /* intr settings */
2702 if (!VMXNET3_VERSION_GE_6(adapter) ||
2703 !adapter->queuesExtEnabled) {
2704 devRead->intrConf.autoMask = adapter->intr.mask_mode ==
2705 VMXNET3_IMM_AUTO;
2706 devRead->intrConf.numIntrs = adapter->intr.num_intrs;
2707 for (i = 0; i < adapter->intr.num_intrs; i++)
2708 devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
2709
2710 devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
2711 devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
2712 } else {
2713 devReadExt->intrConfExt.autoMask = adapter->intr.mask_mode ==
2714 VMXNET3_IMM_AUTO;
2715 devReadExt->intrConfExt.numIntrs = adapter->intr.num_intrs;
2716 for (i = 0; i < adapter->intr.num_intrs; i++)
2717 devReadExt->intrConfExt.modLevels[i] = adapter->intr.mod_levels[i];
2718
2719 devReadExt->intrConfExt.eventIntrIdx = adapter->intr.event_intr_idx;
2720 devReadExt->intrConfExt.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
2721 }
2722
2723 /* rx filter settings */
2724 devRead->rxFilterConf.rxMode = 0;
2725 vmxnet3_restore_vlan(adapter);
2726 vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
2727
2728 /* the rest are already zeroed */
2729 }
2730
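/*
 * For vmxnet3 v7+ devices, tell the device the per-ring rx buffer sizes
 * computed in vmxnet3_adjust_rx_ring_size().
 */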
2731 static void
2732 vmxnet3_init_bufsize(struct vmxnet3_adapter *adapter)
2733 {
2734 struct Vmxnet3_DriverShared *shared = adapter->shared;
2735 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
2736 unsigned long flags;
2737
2738 if (!VMXNET3_VERSION_GE_7(adapter))
2739 return;
2740
2741 cmdInfo->ringBufSize = adapter->ringBufSize;
2742 spin_lock_irqsave(&adapter->cmd_lock, flags);
2743 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2744 VMXNET3_CMD_SET_RING_BUFFER_SIZE);
2745 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2746 }
2747
2748 static void
2749 vmxnet3_init_coalesce(struct vmxnet3_adapter *adapter)
2750 {
2751 struct Vmxnet3_DriverShared *shared = adapter->shared;
2752 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
2753 unsigned long flags;
2754
2755 if (!VMXNET3_VERSION_GE_3(adapter))
2756 return;
2757
2758 spin_lock_irqsave(&adapter->cmd_lock, flags);
2759 cmdInfo->varConf.confVer = 1;
2760 cmdInfo->varConf.confLen =
2761 cpu_to_le32(sizeof(*adapter->coal_conf));
2762 cmdInfo->varConf.confPA = cpu_to_le64(adapter->coal_conf_pa);
2763
2764 if (adapter->default_coal_mode) {
2765 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2766 VMXNET3_CMD_GET_COALESCE);
2767 } else {
2768 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2769 VMXNET3_CMD_SET_COALESCE);
2770 }
2771
2772 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2773 }
2774
2775 static void
2776 vmxnet3_init_rssfields(struct vmxnet3_adapter *adapter)
2777 {
2778 struct Vmxnet3_DriverShared *shared = adapter->shared;
2779 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
2780 unsigned long flags;
2781
2782 if (!VMXNET3_VERSION_GE_4(adapter))
2783 return;
2784
2785 spin_lock_irqsave(&adapter->cmd_lock, flags);
2786
2787 if (adapter->default_rss_fields) {
2788 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2789 VMXNET3_CMD_GET_RSS_FIELDS);
2790 adapter->rss_fields =
2791 VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2792 } else {
2793 if (VMXNET3_VERSION_GE_7(adapter)) {
2794 if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_UDPIP4 ||
2795 adapter->rss_fields & VMXNET3_RSS_FIELDS_UDPIP6) &&
2796 vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
2797 VMXNET3_CAP_UDP_RSS)) {
2798 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_UDP_RSS;
2799 } else {
2800 adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_UDP_RSS);
2801 }
2802
2803 if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_ESPIP4) &&
2804 vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
2805 VMXNET3_CAP_ESP_RSS_IPV4)) {
2806 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV4;
2807 } else {
2808 adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV4);
2809 }
2810
2811 if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_ESPIP6) &&
2812 vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
2813 VMXNET3_CAP_ESP_RSS_IPV6)) {
2814 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV6;
2815 } else {
2816 adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV6);
2817 }
2818
2819 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
2820 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
2821 adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2822 }
2823 cmdInfo->setRssFields = adapter->rss_fields;
2824 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2825 VMXNET3_CMD_SET_RSS_FIELDS);
2826 /* Not all requested RSS may get applied, so get and
2827 * cache what was actually applied.
2828 */
2829 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2830 VMXNET3_CMD_GET_RSS_FIELDS);
2831 adapter->rss_fields =
2832 VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2833 }
2834
2835 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2836 }
2837
2838 int
2839 vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
2840 {
2841 int err, i;
2842 u32 ret;
2843 unsigned long flags;
2844
2845 netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
2846 " ring sizes %u %u %u\n", adapter->netdev->name,
2847 adapter->skb_buf_size, adapter->rx_buf_per_pkt,
2848 adapter->tx_queue[0].tx_ring.size,
2849 adapter->rx_queue[0].rx_ring[0].size,
2850 adapter->rx_queue[0].rx_ring[1].size);
2851
2852 vmxnet3_tq_init_all(adapter);
2853 err = vmxnet3_rq_init_all(adapter);
2854 if (err) {
2855 netdev_err(adapter->netdev,
2856 "Failed to init rx queue error %d\n", err);
2857 goto rq_err;
2858 }
2859
2860 err = vmxnet3_request_irqs(adapter);
2861 if (err) {
2862 netdev_err(adapter->netdev,
2863 "Failed to setup irq for error %d\n", err);
2864 goto irq_err;
2865 }
2866
2867 vmxnet3_setup_driver_shared(adapter);
2868
2869 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
2870 adapter->shared_pa));
2871 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
2872 adapter->shared_pa));
2873 spin_lock_irqsave(&adapter->cmd_lock, flags);
2874 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2875 VMXNET3_CMD_ACTIVATE_DEV);
2876 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2877 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2878
2879 if (ret != 0) {
2880 netdev_err(adapter->netdev,
2881 "Failed to activate dev: error %u\n", ret);
2882 err = -EINVAL;
2883 goto activate_err;
2884 }
2885
2886 vmxnet3_init_bufsize(adapter);
2887 vmxnet3_init_coalesce(adapter);
2888 vmxnet3_init_rssfields(adapter);
2889
2890 for (i = 0; i < adapter->num_rx_queues; i++) {
2891 VMXNET3_WRITE_BAR0_REG(adapter,
2892 adapter->rx_prod_offset + i * VMXNET3_REG_ALIGN,
2893 adapter->rx_queue[i].rx_ring[0].next2fill);
2894 VMXNET3_WRITE_BAR0_REG(adapter, (adapter->rx_prod2_offset +
2895 (i * VMXNET3_REG_ALIGN)),
2896 adapter->rx_queue[i].rx_ring[1].next2fill);
2897 }
2898
2899 /* Apply the rx filter settings last. */
2900 vmxnet3_set_mc(adapter->netdev);
2901
2902 /*
2903 * Check link state when first activating device. It will start the
2904 * tx queue if the link is up.
2905 */
2906 vmxnet3_check_link(adapter, true);
2907 netif_tx_wake_all_queues(adapter->netdev);
2908 for (i = 0; i < adapter->num_rx_queues; i++)
2909 napi_enable(&adapter->rx_queue[i].napi);
2910 vmxnet3_enable_all_intrs(adapter);
2911 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
2912 return 0;
2913
2914 activate_err:
2915 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
2916 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
2917 vmxnet3_free_irqs(adapter);
2918 irq_err:
2919 rq_err:
2920 /* free up buffers we allocated */
2921 vmxnet3_rq_cleanup_all(adapter);
2922 return err;
2923 }
2924
2925
2926 void
2927 vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
2928 {
2929 unsigned long flags;
2930 spin_lock_irqsave(&adapter->cmd_lock, flags);
2931 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
2932 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2933 }
2934
2935
2936 int
2937 vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
2938 {
2939 int i;
2940 unsigned long flags;
2941 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
2942 return 0;
2943
2944
2945 spin_lock_irqsave(&adapter->cmd_lock, flags);
2946 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2947 VMXNET3_CMD_QUIESCE_DEV);
2948 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2949 vmxnet3_disable_all_intrs(adapter);
2950
2951 for (i = 0; i < adapter->num_rx_queues; i++)
2952 napi_disable(&adapter->rx_queue[i].napi);
2953 netif_tx_disable(adapter->netdev);
2954 adapter->link_speed = 0;
2955 netif_carrier_off(adapter->netdev);
2956
2957 vmxnet3_tq_cleanup_all(adapter);
2958 vmxnet3_rq_cleanup_all(adapter);
2959 vmxnet3_free_irqs(adapter);
2960 return 0;
2961 }
2962
2963
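/*
 * Write the MAC address to the device: the low 4 bytes go to MACL and
 * the high 2 bytes to MACH.
 */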
2964 static void
2965 vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, const u8 *mac)
2966 {
2967 u32 tmp;
2968
2969 tmp = *(u32 *)mac;
2970 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);
2971
2972 tmp = (mac[5] << 8) | mac[4];
2973 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
2974 }
2975
2976
2977 static int
2978 vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
2979 {
2980 struct sockaddr *addr = p;
2981 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2982
2983 dev_addr_set(netdev, addr->sa_data);
2984 vmxnet3_write_mac_addr(adapter, addr->sa_data);
2985
2986 return 0;
2987 }
2988
2989
2990 /* ==================== initialization and cleanup routines ============ */
2991
2992 static int
2993 vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter)
2994 {
2995 int err;
2996 unsigned long mmio_start, mmio_len;
2997 struct pci_dev *pdev = adapter->pdev;
2998
2999 err = pci_enable_device(pdev);
3000 if (err) {
3001 dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err);
3002 return err;
3003 }
3004
3005 err = pci_request_selected_regions(pdev, (1 << 2) - 1,
3006 vmxnet3_driver_name);
3007 if (err) {
3008 dev_err(&pdev->dev,
3009 "Failed to request region for adapter: error %d\n", err);
3010 goto err_enable_device;
3011 }
3012
3013 pci_set_master(pdev);
3014
3015 mmio_start = pci_resource_start(pdev, 0);
3016 mmio_len = pci_resource_len(pdev, 0);
3017 adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
3018 if (!adapter->hw_addr0) {
3019 dev_err(&pdev->dev, "Failed to map bar0\n");
3020 err = -EIO;
3021 goto err_ioremap;
3022 }
3023
3024 mmio_start = pci_resource_start(pdev, 1);
3025 mmio_len = pci_resource_len(pdev, 1);
3026 adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
3027 if (!adapter->hw_addr1) {
3028 dev_err(&pdev->dev, "Failed to map bar1\n");
3029 err = -EIO;
3030 goto err_bar1;
3031 }
3032 return 0;
3033
3034 err_bar1:
3035 iounmap(adapter->hw_addr0);
3036 err_ioremap:
3037 pci_release_selected_regions(pdev, (1 << 2) - 1);
3038 err_enable_device:
3039 pci_disable_device(pdev);
3040 return err;
3041 }
3042
3043
3044 static void
3045 vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
3046 {
3047 BUG_ON(!adapter->pdev);
3048
3049 iounmap(adapter->hw_addr0);
3050 iounmap(adapter->hw_addr1);
3051 pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
3052 pci_disable_device(adapter->pdev);
3053 }
3054
3055
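/*
 * Derive skb_buf_size and rx_buf_per_pkt from the MTU, then round the
 * rx ring sizes so ring0 holds a whole number of packets and (for v7+)
 * stays a power of two; the completion ring must cover both rx rings.
 */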
3056 static void
3057 vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
3058 {
3059 size_t sz, i, ring0_size, ring1_size, comp_size;
3060 /* With version7 ring1 will have only T0 buffers */
3061 if (!VMXNET3_VERSION_GE_7(adapter)) {
3062 if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
3063 VMXNET3_MAX_ETH_HDR_SIZE) {
3064 adapter->skb_buf_size = adapter->netdev->mtu +
3065 VMXNET3_MAX_ETH_HDR_SIZE;
3066 if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
3067 adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
3068
3069 adapter->rx_buf_per_pkt = 1;
3070 } else {
3071 adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
3072 sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
3073 VMXNET3_MAX_ETH_HDR_SIZE;
3074 adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
3075 }
3076 } else {
3077 adapter->skb_buf_size = min((int)adapter->netdev->mtu + VMXNET3_MAX_ETH_HDR_SIZE,
3078 VMXNET3_MAX_SKB_BUF_SIZE);
3079 adapter->rx_buf_per_pkt = 1;
3080 adapter->ringBufSize.ring1BufSizeType0 = cpu_to_le16(adapter->skb_buf_size);
3081 adapter->ringBufSize.ring1BufSizeType1 = 0;
3082 adapter->ringBufSize.ring2BufSizeType1 = cpu_to_le16(PAGE_SIZE);
3083 }
3084
3085 /*
3086 * for simplicity, force the ring0 size to be a multiple of
3087 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
3088 */
3089 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
3090 ring0_size = adapter->rx_queue[0].rx_ring[0].size;
3091 ring0_size = (ring0_size + sz - 1) / sz * sz;
3092 ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
3093 sz * sz);
3094 ring1_size = adapter->rx_queue[0].rx_ring[1].size;
3095 ring1_size = (ring1_size + sz - 1) / sz * sz;
3096 ring1_size = min_t(u32, ring1_size, VMXNET3_RX_RING2_MAX_SIZE /
3097 sz * sz);
3098 /* For v7 and later, keep ring size power of 2 for UPT */
3099 if (VMXNET3_VERSION_GE_7(adapter)) {
3100 ring0_size = rounddown_pow_of_two(ring0_size);
3101 ring1_size = rounddown_pow_of_two(ring1_size);
3102 }
3103 comp_size = ring0_size + ring1_size;
3104
3105 for (i = 0; i < adapter->num_rx_queues; i++) {
3106 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
3107
3108 rq->rx_ring[0].size = ring0_size;
3109 rq->rx_ring[1].size = ring1_size;
3110 rq->comp_ring.size = comp_size;
3111 }
3112 }
3113
3114
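/*
 * Allocate all tx and rx queue resources with the requested ring sizes.
 * A tx queue allocation failure is fatal; if some (but not the first)
 * rx queue cannot be allocated, the driver continues with fewer rx queues.
 */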
3115 int
3116 vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
3117 u32 rx_ring_size, u32 rx_ring2_size,
3118 u16 txdata_desc_size, u16 rxdata_desc_size)
3119 {
3120 int err = 0, i;
3121
3122 for (i = 0; i < adapter->num_tx_queues; i++) {
3123 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
3124 tq->tx_ring.size = tx_ring_size;
3125 tq->data_ring.size = tx_ring_size;
3126 tq->comp_ring.size = tx_ring_size;
3127 tq->txdata_desc_size = txdata_desc_size;
3128 tq->shared = &adapter->tqd_start[i].ctrl;
3129 tq->stopped = true;
3130 tq->adapter = adapter;
3131 tq->qid = i;
3132 err = vmxnet3_tq_create(tq, adapter);
3133 /*
3134 * Too late to change num_tx_queues. We cannot make do with
3135 * fewer queues than we asked for
3136 */
3137 if (err)
3138 goto queue_err;
3139 }
3140
3141 adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
3142 adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
3143 vmxnet3_adjust_rx_ring_size(adapter);
3144
3145 adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
3146 for (i = 0; i < adapter->num_rx_queues; i++) {
3147 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
3148 /* qid and qid2 for rx queues will be assigned later when num
3149 * of rx queues is finalized after allocating intrs */
3150 rq->shared = &adapter->rqd_start[i].ctrl;
3151 rq->adapter = adapter;
3152 rq->data_ring.desc_size = rxdata_desc_size;
3153 err = vmxnet3_rq_create(rq, adapter);
3154 if (err) {
3155 if (i == 0) {
3156 netdev_err(adapter->netdev,
3157 "Could not allocate any rx queues. "
3158 "Aborting.\n");
3159 goto queue_err;
3160 } else {
3161 netdev_info(adapter->netdev,
3162 "Number of rx queues changed "
3163 "to : %d.\n", i);
3164 adapter->num_rx_queues = i;
3165 err = 0;
3166 break;
3167 }
3168 }
3169 }
3170
3171 if (!adapter->rxdataring_enabled)
3172 vmxnet3_rq_destroy_all_rxdataring(adapter);
3173
3174 return err;
3175 queue_err:
3176 vmxnet3_tq_destroy_all(adapter);
3177 return err;
3178 }
3179
3180 static int
3181 vmxnet3_open(struct net_device *netdev)
3182 {
3183 struct vmxnet3_adapter *adapter;
3184 int err, i;
3185
3186 adapter = netdev_priv(netdev);
3187
3188 for (i = 0; i < adapter->num_tx_queues; i++)
3189 spin_lock_init(&adapter->tx_queue[i].tx_lock);
3190
3191 if (VMXNET3_VERSION_GE_3(adapter)) {
3192 unsigned long flags;
3193 u16 txdata_desc_size;
3194
3195 spin_lock_irqsave(&adapter->cmd_lock, flags);
3196 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3197 VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
3198 txdata_desc_size = VMXNET3_READ_BAR1_REG(adapter,
3199 VMXNET3_REG_CMD);
3200 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3201
3202 if ((txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE) ||
3203 (txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE) ||
3204 (txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK)) {
3205 adapter->txdata_desc_size =
3206 sizeof(struct Vmxnet3_TxDataDesc);
3207 } else {
3208 adapter->txdata_desc_size = txdata_desc_size;
3209 }
3210 } else {
3211 adapter->txdata_desc_size = sizeof(struct Vmxnet3_TxDataDesc);
3212 }
3213
3214 err = vmxnet3_create_queues(adapter,
3215 adapter->tx_ring_size,
3216 adapter->rx_ring_size,
3217 adapter->rx_ring2_size,
3218 adapter->txdata_desc_size,
3219 adapter->rxdata_desc_size);
3220 if (err)
3221 goto queue_err;
3222
3223 err = vmxnet3_activate_dev(adapter);
3224 if (err)
3225 goto activate_err;
3226
3227 return 0;
3228
3229 activate_err:
3230 vmxnet3_rq_destroy_all(adapter);
3231 vmxnet3_tq_destroy_all(adapter);
3232 queue_err:
3233 return err;
3234 }
3235
3236
3237 static int
3238 vmxnet3_close(struct net_device *netdev)
3239 {
3240 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3241
3242 /*
3243 * Reset_work may be in the middle of resetting the device, wait for its
3244 * completion.
3245 */
3246 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3247 usleep_range(1000, 2000);
3248
3249 vmxnet3_quiesce_dev(adapter);
3250
3251 vmxnet3_rq_destroy_all(adapter);
3252 vmxnet3_tq_destroy_all(adapter);
3253
3254 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3255
3256
3257 return 0;
3258 }
3259
3260
3261 void
3262 vmxnet3_force_close(struct vmxnet3_adapter *adapter)
3263 {
3264 int i;
3265
3266 /*
3267 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
3268 * vmxnet3_close() will deadlock.
3269 */
3270 BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
3271
3272 /* we need to enable NAPI, otherwise dev_close will deadlock */
3273 for (i = 0; i < adapter->num_rx_queues; i++)
3274 napi_enable(&adapter->rx_queue[i].napi);
3275 /*
3276 * Need to clear the quiesce bit to ensure that vmxnet3_close
3277 * can quiesce the device properly
3278 */
3279 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
3280 dev_close(adapter->netdev);
3281 }
3282
3283
3284 static int
3285 vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
3286 {
3287 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3288 int err = 0;
3289
3290 netdev->mtu = new_mtu;
3291
3292 /*
3293 * Reset_work may be in the middle of resetting the device, wait for its
3294 * completion.
3295 */
3296 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3297 usleep_range(1000, 2000);
3298
3299 if (netif_running(netdev)) {
3300 vmxnet3_quiesce_dev(adapter);
3301 vmxnet3_reset_dev(adapter);
3302
3303 /* we need to re-create the rx queue based on the new mtu */
3304 vmxnet3_rq_destroy_all(adapter);
3305 vmxnet3_adjust_rx_ring_size(adapter);
3306 err = vmxnet3_rq_create_all(adapter);
3307 if (err) {
3308 netdev_err(netdev,
3309 "failed to re-create rx queues, "
3310 " error %d. Closing it.\n", err);
3311 goto out;
3312 }
3313
3314 err = vmxnet3_activate_dev(adapter);
3315 if (err) {
3316 netdev_err(netdev,
3317 "failed to re-activate, error %d. "
3318 "Closing it\n", err);
3319 goto out;
3320 }
3321 }
3322
3323 out:
3324 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3325 if (err)
3326 vmxnet3_force_close(adapter);
3327
3328 return err;
3329 }
3330
3331
3332 static void
3333 vmxnet3_declare_features(struct vmxnet3_adapter *adapter)
3334 {
3335 struct net_device *netdev = adapter->netdev;
3336
3337 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3338 NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
3339 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
3340 NETIF_F_LRO | NETIF_F_HIGHDMA;
3341
3342 if (VMXNET3_VERSION_GE_4(adapter)) {
3343 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
3344 NETIF_F_GSO_UDP_TUNNEL_CSUM;
3345
3346 netdev->hw_enc_features = NETIF_F_SG | NETIF_F_RXCSUM |
3347 NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
3348 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
3349 NETIF_F_LRO | NETIF_F_GSO_UDP_TUNNEL |
3350 NETIF_F_GSO_UDP_TUNNEL_CSUM;
3351 }
3352
3353 if (VMXNET3_VERSION_GE_7(adapter)) {
3354 unsigned long flags;
3355
3356 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3357 VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) {
3358 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD;
3359 }
3360 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3361 VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) {
3362 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD;
3363 }
3364 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3365 VMXNET3_CAP_GENEVE_TSO)) {
3366 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_TSO;
3367 }
3368 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3369 VMXNET3_CAP_VXLAN_TSO)) {
3370 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_TSO;
3371 }
3372 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3373 VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) {
3374 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD;
3375 }
3376 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3377 VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD)) {
3378 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD;
3379 }
3380
3381 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
3382 spin_lock_irqsave(&adapter->cmd_lock, flags);
3383 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
3384 adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3385 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3386
3387 if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) &&
3388 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) &&
3389 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_TSO)) &&
3390 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_TSO))) {
3391 netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL;
3392 netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
3393 }
3394 if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) &&
3395 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD))) {
3396 netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
3397 netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
3398 }
3399 }
3400
3401 netdev->vlan_features = netdev->hw_features &
3402 ~(NETIF_F_HW_VLAN_CTAG_TX |
3403 NETIF_F_HW_VLAN_CTAG_RX);
3404 netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
3405 }
3406
3407
3408 static void
3409 vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
3410 {
3411 u32 tmp;
3412
3413 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
3414 *(u32 *)mac = tmp;
3415
3416 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
3417 mac[4] = tmp & 0xff;
3418 mac[5] = (tmp >> 8) & 0xff;
3419 }
3420
3421 #ifdef CONFIG_PCI_MSI
3422
3423 /*
3424 * Enable MSIx vectors.
3425 * Returns:
3426 * VMXNET3_LINUX_MIN_MSIX_VECT when only the minimum number of required
3427 * vectors could be enabled;
3428 * otherwise, the number of vectors that were enabled (greater than
3429 * VMXNET3_LINUX_MIN_MSIX_VECT).
3430 */
3431
3432 static int
3433 vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec)
3434 {
3435 int ret = pci_enable_msix_range(adapter->pdev,
3436 adapter->intr.msix_entries, nvec, nvec);
3437
3438 if (ret == -ENOSPC && nvec > VMXNET3_LINUX_MIN_MSIX_VECT) {
3439 dev_err(&adapter->netdev->dev,
3440 "Failed to enable %d MSI-X, trying %d\n",
3441 nvec, VMXNET3_LINUX_MIN_MSIX_VECT);
3442
3443 ret = pci_enable_msix_range(adapter->pdev,
3444 adapter->intr.msix_entries,
3445 VMXNET3_LINUX_MIN_MSIX_VECT,
3446 VMXNET3_LINUX_MIN_MSIX_VECT);
3447 }
3448
3449 if (ret < 0) {
3450 dev_err(&adapter->netdev->dev,
3451 "Failed to enable MSI-X, error: %d\n", ret);
3452 }
3453
3454 return ret;
3455 }
3456
3457
3458 #endif /* CONFIG_PCI_MSI */
3459
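/*
 * Query the device for the preferred interrupt type and mask mode, then
 * try to enable MSI-X (one vector per queue plus one for events),
 * falling back to MSI and finally INTx with a single rx queue.
 */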
3460 static void
3461 vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
3462 {
3463 u32 cfg;
3464 unsigned long flags;
3465
3466 /* intr settings */
3467 spin_lock_irqsave(&adapter->cmd_lock, flags);
3468 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3469 VMXNET3_CMD_GET_CONF_INTR);
3470 cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3471 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3472 adapter->intr.type = cfg & 0x3;
3473 adapter->intr.mask_mode = (cfg >> 2) & 0x3;
3474
3475 if (adapter->intr.type == VMXNET3_IT_AUTO) {
3476 adapter->intr.type = VMXNET3_IT_MSIX;
3477 }
3478
3479 #ifdef CONFIG_PCI_MSI
3480 if (adapter->intr.type == VMXNET3_IT_MSIX) {
3481 int i, nvec, nvec_allocated;
3482
3483 nvec = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
3484 1 : adapter->num_tx_queues;
3485 nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ?
3486 0 : adapter->num_rx_queues;
3487 nvec += 1; /* for link event */
3488 nvec = nvec > VMXNET3_LINUX_MIN_MSIX_VECT ?
3489 nvec : VMXNET3_LINUX_MIN_MSIX_VECT;
3490
3491 for (i = 0; i < nvec; i++)
3492 adapter->intr.msix_entries[i].entry = i;
3493
3494 nvec_allocated = vmxnet3_acquire_msix_vectors(adapter, nvec);
3495 if (nvec_allocated < 0)
3496 goto msix_err;
3497
3498 /* If we cannot allocate one MSIx vector per queue
3499 * then limit the number of rx queues to 1
3500 */
3501 if (nvec_allocated == VMXNET3_LINUX_MIN_MSIX_VECT &&
3502 nvec != VMXNET3_LINUX_MIN_MSIX_VECT) {
3503 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
3504 || adapter->num_rx_queues != 1) {
3505 adapter->share_intr = VMXNET3_INTR_TXSHARE;
3506 netdev_err(adapter->netdev,
3507 "Number of rx queues : 1\n");
3508 adapter->num_rx_queues = 1;
3509 }
3510 }
3511
3512 adapter->intr.num_intrs = nvec_allocated;
3513 return;
3514
3515 msix_err:
3516 /* If we cannot allocate MSIx vectors use only one rx queue */
3517 dev_info(&adapter->pdev->dev,
3518 "Failed to enable MSI-X, error %d. "
3519 "Limiting #rx queues to 1, try MSI.\n", nvec_allocated);
3520
3521 adapter->intr.type = VMXNET3_IT_MSI;
3522 }
3523
3524 if (adapter->intr.type == VMXNET3_IT_MSI) {
3525 if (!pci_enable_msi(adapter->pdev)) {
3526 adapter->num_rx_queues = 1;
3527 adapter->intr.num_intrs = 1;
3528 return;
3529 }
3530 }
3531 #endif /* CONFIG_PCI_MSI */
3532
3533 adapter->num_rx_queues = 1;
3534 dev_info(&adapter->netdev->dev,
3535 "Using INTx interrupt, #Rx queues: 1.\n");
3536 adapter->intr.type = VMXNET3_IT_INTX;
3537
3538 /* INT-X related setting */
3539 adapter->intr.num_intrs = 1;
3540 }
3541
3542
3543 static void
3544 vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
3545 {
3546 if (adapter->intr.type == VMXNET3_IT_MSIX)
3547 pci_disable_msix(adapter->pdev);
3548 else if (adapter->intr.type == VMXNET3_IT_MSI)
3549 pci_disable_msi(adapter->pdev);
3550 else
3551 BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
3552 }
3553
3554
3555 static void
3556 vmxnet3_tx_timeout(struct net_device *netdev, unsigned int txqueue)
3557 {
3558 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3559 adapter->tx_timeout_count++;
3560
3561 netdev_err(adapter->netdev, "tx hang\n");
3562 schedule_work(&adapter->work);
3563 }
3564
3565
3566 static void
3567 vmxnet3_reset_work(struct work_struct *data)
3568 {
3569 struct vmxnet3_adapter *adapter;
3570
3571 adapter = container_of(data, struct vmxnet3_adapter, work);
3572
3573 /* if another thread is resetting the device, no need to proceed */
3574 if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3575 return;
3576
3577 /* if the device is closed, we must leave it alone */
3578 rtnl_lock();
3579 if (netif_running(adapter->netdev)) {
3580 netdev_notice(adapter->netdev, "resetting\n");
3581 vmxnet3_quiesce_dev(adapter);
3582 vmxnet3_reset_dev(adapter);
3583 vmxnet3_activate_dev(adapter);
3584 } else {
3585 netdev_info(adapter->netdev, "already closed\n");
3586 }
3587 rtnl_unlock();
3588
3589 netif_wake_queue(adapter->netdev);
3590 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3591 }
3592
3593
3594 static int
3595 vmxnet3_probe_device(struct pci_dev *pdev,
3596 const struct pci_device_id *id)
3597 {
3598 static const struct net_device_ops vmxnet3_netdev_ops = {
3599 .ndo_open = vmxnet3_open,
3600 .ndo_stop = vmxnet3_close,
3601 .ndo_start_xmit = vmxnet3_xmit_frame,
3602 .ndo_set_mac_address = vmxnet3_set_mac_addr,
3603 .ndo_change_mtu = vmxnet3_change_mtu,
3604 .ndo_fix_features = vmxnet3_fix_features,
3605 .ndo_set_features = vmxnet3_set_features,
3606 .ndo_features_check = vmxnet3_features_check,
3607 .ndo_get_stats64 = vmxnet3_get_stats64,
3608 .ndo_tx_timeout = vmxnet3_tx_timeout,
3609 .ndo_set_rx_mode = vmxnet3_set_mc,
3610 .ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
3611 .ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
3612 #ifdef CONFIG_NET_POLL_CONTROLLER
3613 .ndo_poll_controller = vmxnet3_netpoll,
3614 #endif
3615 };
3616 int err;
3617 u32 ver;
3618 struct net_device *netdev;
3619 struct vmxnet3_adapter *adapter;
3620 u8 mac[ETH_ALEN];
3621 int size;
3622 int num_tx_queues;
3623 int num_rx_queues;
3624 int queues;
3625 unsigned long flags;
3626
3627 if (!pci_msi_enabled())
3628 enable_mq = 0;
3629
3630 #ifdef VMXNET3_RSS
3631 if (enable_mq)
3632 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
3633 (int)num_online_cpus());
3634 else
3635 #endif
3636 num_rx_queues = 1;
3637
3638 if (enable_mq)
3639 num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
3640 (int)num_online_cpus());
3641 else
3642 num_tx_queues = 1;
3643
3644 netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
3645 max(num_tx_queues, num_rx_queues));
3646 if (!netdev)
3647 return -ENOMEM;
3648
3649 pci_set_drvdata(pdev, netdev);
3650 adapter = netdev_priv(netdev);
3651 adapter->netdev = netdev;
3652 adapter->pdev = pdev;
3653
3654 adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
3655 adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
3656 adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
3657
3658 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3659 if (err) {
3660 dev_err(&pdev->dev, "dma_set_mask failed\n");
3661 goto err_set_mask;
3662 }
3663
3664 spin_lock_init(&adapter->cmd_lock);
3665 adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
3666 sizeof(struct vmxnet3_adapter),
3667 DMA_TO_DEVICE);
3668 if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
3669 dev_err(&pdev->dev, "Failed to map dma\n");
3670 err = -EFAULT;
3671 goto err_set_mask;
3672 }
3673 adapter->shared = dma_alloc_coherent(
3674 &adapter->pdev->dev,
3675 sizeof(struct Vmxnet3_DriverShared),
3676 &adapter->shared_pa, GFP_KERNEL);
3677 if (!adapter->shared) {
3678 dev_err(&pdev->dev, "Failed to allocate memory\n");
3679 err = -ENOMEM;
3680 goto err_alloc_shared;
3681 }
3682
3683 err = vmxnet3_alloc_pci_resources(adapter);
3684 if (err < 0)
3685 goto err_alloc_pci;
3686
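/* Negotiate the device revision: VRRS reports the revisions the
 * device supports as a bitmap, and the driver acks the highest one
 * it understands by writing that bit back.
 */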
3687 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
3688 if (ver & (1 << VMXNET3_REV_7)) {
3689 VMXNET3_WRITE_BAR1_REG(adapter,
3690 VMXNET3_REG_VRRS,
3691 1 << VMXNET3_REV_7);
3692 adapter->version = VMXNET3_REV_7 + 1;
3693 } else if (ver & (1 << VMXNET3_REV_6)) {
3694 VMXNET3_WRITE_BAR1_REG(adapter,
3695 VMXNET3_REG_VRRS,
3696 1 << VMXNET3_REV_6);
3697 adapter->version = VMXNET3_REV_6 + 1;
3698 } else if (ver & (1 << VMXNET3_REV_5)) {
3699 VMXNET3_WRITE_BAR1_REG(adapter,
3700 VMXNET3_REG_VRRS,
3701 1 << VMXNET3_REV_5);
3702 adapter->version = VMXNET3_REV_5 + 1;
3703 } else if (ver & (1 << VMXNET3_REV_4)) {
3704 VMXNET3_WRITE_BAR1_REG(adapter,
3705 VMXNET3_REG_VRRS,
3706 1 << VMXNET3_REV_4);
3707 adapter->version = VMXNET3_REV_4 + 1;
3708 } else if (ver & (1 << VMXNET3_REV_3)) {
3709 VMXNET3_WRITE_BAR1_REG(adapter,
3710 VMXNET3_REG_VRRS,
3711 1 << VMXNET3_REV_3);
3712 adapter->version = VMXNET3_REV_3 + 1;
3713 } else if (ver & (1 << VMXNET3_REV_2)) {
3714 VMXNET3_WRITE_BAR1_REG(adapter,
3715 VMXNET3_REG_VRRS,
3716 1 << VMXNET3_REV_2);
3717 adapter->version = VMXNET3_REV_2 + 1;
3718 } else if (ver & (1 << VMXNET3_REV_1)) {
3719 VMXNET3_WRITE_BAR1_REG(adapter,
3720 VMXNET3_REG_VRRS,
3721 1 << VMXNET3_REV_1);
3722 adapter->version = VMXNET3_REV_1 + 1;
3723 } else {
3724 dev_err(&pdev->dev,
3725 "Incompatible h/w version (0x%x) for adapter\n", ver);
3726 err = -EBUSY;
3727 goto err_ver;
3728 }
3729 dev_dbg(&pdev->dev, "Using device version %d\n", adapter->version);
3730
3731 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
3732 if (ver & 1) {
3733 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
3734 } else {
3735 dev_err(&pdev->dev,
3736 "Incompatible upt version (0x%x) for adapter\n", ver);
3737 err = -EBUSY;
3738 goto err_ver;
3739 }
3740
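/* On rev 7+ devices, negotiate optional capabilities (large BAR,
 * out-of-order rx completions) through the DCR/PTCR registers, then
 * read back which capabilities the device actually granted.
 */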
3741 if (VMXNET3_VERSION_GE_7(adapter)) {
3742 adapter->devcap_supported[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_DCR);
3743 adapter->ptcap_supported[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_PTCR);
3744 if (adapter->devcap_supported[0] & (1UL << VMXNET3_CAP_LARGE_BAR)) {
3745 adapter->dev_caps[0] = adapter->devcap_supported[0] &
3746 (1UL << VMXNET3_CAP_LARGE_BAR);
3747 }
3748 if (!(adapter->ptcap_supported[0] & (1UL << VMXNET3_DCR_ERROR)) &&
3749 adapter->ptcap_supported[0] & (1UL << VMXNET3_CAP_OOORX_COMP) &&
3750 adapter->devcap_supported[0] & (1UL << VMXNET3_CAP_OOORX_COMP)) {
3751 adapter->dev_caps[0] |= adapter->devcap_supported[0] &
3752 (1UL << VMXNET3_CAP_OOORX_COMP);
3753 }
3754 if (adapter->dev_caps[0])
3755 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
3756
3757 spin_lock_irqsave(&adapter->cmd_lock, flags);
3758 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
3759 adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3760 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3761 }
3762
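/* If the large-BAR capability was granted, the tx/rx producer
 * registers are accessed at the LB_* offsets instead.
 */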
3763 if (VMXNET3_VERSION_GE_7(adapter) &&
3764 adapter->dev_caps[0] & (1UL << VMXNET3_CAP_LARGE_BAR)) {
3765 adapter->tx_prod_offset = VMXNET3_REG_LB_TXPROD;
3766 adapter->rx_prod_offset = VMXNET3_REG_LB_RXPROD;
3767 adapter->rx_prod2_offset = VMXNET3_REG_LB_RXPROD2;
3768 } else {
3769 adapter->tx_prod_offset = VMXNET3_REG_TXPROD;
3770 adapter->rx_prod_offset = VMXNET3_REG_RXPROD;
3771 adapter->rx_prod2_offset = VMXNET3_REG_RXPROD2;
3772 }
3773
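/* Rev 6+ devices report their maximum queue configuration on demand
 * (rx count in bits 15:8, tx count in bits 7:0); older devices are
 * limited to power-of-two counts within the defaults.
 */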
3774 if (VMXNET3_VERSION_GE_6(adapter)) {
3775 spin_lock_irqsave(&adapter->cmd_lock, flags);
3776 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3777 VMXNET3_CMD_GET_MAX_QUEUES_CONF);
3778 queues = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3779 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3780 if (queues > 0) {
3781 adapter->num_rx_queues = min(num_rx_queues, ((queues >> 8) & 0xff));
3782 adapter->num_tx_queues = min(num_tx_queues, (queues & 0xff));
3783 } else {
3784 adapter->num_rx_queues = min(num_rx_queues,
3785 VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
3786 adapter->num_tx_queues = min(num_tx_queues,
3787 VMXNET3_DEVICE_DEFAULT_TX_QUEUES);
3788 }
3789 if (adapter->num_rx_queues > VMXNET3_MAX_RX_QUEUES ||
3790 adapter->num_tx_queues > VMXNET3_MAX_TX_QUEUES) {
3791 adapter->queuesExtEnabled = true;
3792 } else {
3793 adapter->queuesExtEnabled = false;
3794 }
3795 } else {
3796 adapter->queuesExtEnabled = false;
3797 num_rx_queues = rounddown_pow_of_two(num_rx_queues);
3798 num_tx_queues = rounddown_pow_of_two(num_tx_queues);
3799 adapter->num_rx_queues = min(num_rx_queues,
3800 VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
3801 adapter->num_tx_queues = min(num_tx_queues,
3802 VMXNET3_DEVICE_DEFAULT_TX_QUEUES);
3803 }
3804 dev_info(&pdev->dev,
3805 "# of Tx queues : %d, # of Rx queues : %d\n",
3806 adapter->num_tx_queues, adapter->num_rx_queues);
3807
3808 adapter->rx_buf_per_pkt = 1;
3809
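/* Tx and rx queue descriptors share one contiguous DMA allocation;
 * the rx descriptors start right after the tx descriptors.
 */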
3810 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
3811 size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
3812 adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size,
3813 &adapter->queue_desc_pa,
3814 GFP_KERNEL);
3815
3816 if (!adapter->tqd_start) {
3817 dev_err(&pdev->dev, "Failed to allocate memory\n");
3818 err = -ENOMEM;
3819 goto err_ver;
3820 }
3821 adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
3822 adapter->num_tx_queues);
3823
3824 adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
3825 sizeof(struct Vmxnet3_PMConf),
3826 &adapter->pm_conf_pa,
3827 GFP_KERNEL);
3828 if (adapter->pm_conf == NULL) {
3829 err = -ENOMEM;
3830 goto err_alloc_pm;
3831 }
3832
3833 #ifdef VMXNET3_RSS
3834
3835 adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev,
3836 sizeof(struct UPT1_RSSConf),
3837 &adapter->rss_conf_pa,
3838 GFP_KERNEL);
3839 if (adapter->rss_conf == NULL) {
3840 err = -ENOMEM;
3841 goto err_alloc_rss;
3842 }
3843 #endif /* VMXNET3_RSS */
3844
3845 if (VMXNET3_VERSION_GE_3(adapter)) {
3846 adapter->coal_conf =
3847 dma_alloc_coherent(&adapter->pdev->dev,
3848 sizeof(struct Vmxnet3_CoalesceScheme),
3850 &adapter->coal_conf_pa,
3851 GFP_KERNEL);
3852 if (!adapter->coal_conf) {
3853 err = -ENOMEM;
3854 goto err_coal_conf;
3855 }
3856 adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
3857 adapter->default_coal_mode = true;
3858 }
3859
3860 if (VMXNET3_VERSION_GE_4(adapter)) {
3861 adapter->default_rss_fields = true;
3862 adapter->rss_fields = VMXNET3_RSS_FIELDS_DEFAULT;
3863 }
3864
3865 SET_NETDEV_DEV(netdev, &pdev->dev);
3866 vmxnet3_declare_features(adapter);
3867
3868 adapter->rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
3869 VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
3870
3871 if (adapter->num_tx_queues == adapter->num_rx_queues)
3872 adapter->share_intr = VMXNET3_INTR_BUDDYSHARE;
3873 else
3874 adapter->share_intr = VMXNET3_INTR_DONTSHARE;
3875
3876 vmxnet3_alloc_intr_resources(adapter);
3877
3878 #ifdef VMXNET3_RSS
3879 if (adapter->num_rx_queues > 1 &&
3880 adapter->intr.type == VMXNET3_IT_MSIX) {
3881 adapter->rss = true;
3882 netdev->hw_features |= NETIF_F_RXHASH;
3883 netdev->features |= NETIF_F_RXHASH;
3884 dev_dbg(&pdev->dev, "RSS is enabled.\n");
3885 } else {
3886 adapter->rss = false;
3887 }
3888 #endif
3889
3890 vmxnet3_read_mac_addr(adapter, mac);
3891 dev_addr_set(netdev, mac);
3892
3893 netdev->netdev_ops = &vmxnet3_netdev_ops;
3894 vmxnet3_set_ethtool_ops(netdev);
3895 netdev->watchdog_timeo = 5 * HZ;
3896
3897 /* MTU range: 60 - 9190 */
3898 netdev->min_mtu = VMXNET3_MIN_MTU;
3899 if (VMXNET3_VERSION_GE_6(adapter))
3900 netdev->max_mtu = VMXNET3_V6_MAX_MTU;
3901 else
3902 netdev->max_mtu = VMXNET3_MAX_MTU;
3903
3904 INIT_WORK(&adapter->work, vmxnet3_reset_work);
3905 set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
3906
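/* With MSI-X every rx queue gets its own NAPI context; otherwise a
 * single NAPI instance polls rx and tx together.
 */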
3907 if (adapter->intr.type == VMXNET3_IT_MSIX) {
3908 int i;
3909 for (i = 0; i < adapter->num_rx_queues; i++) {
3910 netif_napi_add(adapter->netdev,
3911 &adapter->rx_queue[i].napi,
3912 vmxnet3_poll_rx_only);
3913 }
3914 } else {
3915 netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
3916 vmxnet3_poll);
3917 }
3918
3919 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
3920 netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
3921
3922 netif_carrier_off(netdev);
3923 err = register_netdev(netdev);
3924
3925 if (err) {
3926 dev_err(&pdev->dev, "Failed to register adapter\n");
3927 goto err_register;
3928 }
3929
3930 vmxnet3_check_link(adapter, false);
3931 return 0;
3932
3933 err_register:
3934 if (VMXNET3_VERSION_GE_3(adapter)) {
3935 dma_free_coherent(&adapter->pdev->dev,
3936 sizeof(struct Vmxnet3_CoalesceScheme),
3937 adapter->coal_conf, adapter->coal_conf_pa);
3938 }
3939 vmxnet3_free_intr_resources(adapter);
3940 err_coal_conf:
3941 #ifdef VMXNET3_RSS
3942 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
3943 adapter->rss_conf, adapter->rss_conf_pa);
3944 err_alloc_rss:
3945 #endif
3946 dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
3947 adapter->pm_conf, adapter->pm_conf_pa);
3948 err_alloc_pm:
3949 dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
3950 adapter->queue_desc_pa);
3951 err_ver:
3952 vmxnet3_free_pci_resources(adapter);
3953 err_alloc_pci:
3954 dma_free_coherent(&adapter->pdev->dev,
3955 sizeof(struct Vmxnet3_DriverShared),
3956 adapter->shared, adapter->shared_pa);
3957 err_alloc_shared:
3958 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
3959 sizeof(struct vmxnet3_adapter), DMA_TO_DEVICE);
3960 err_set_mask:
3961 free_netdev(netdev);
3962 return err;
3963 }
3964
3965
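/*
 * Undo everything the probe routine set up. The rx queue count is
 * recomputed the same way probe computed it so that the queue
 * descriptor area is freed with the size it was allocated with.
 */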
3966 static void
3967 vmxnet3_remove_device(struct pci_dev *pdev)
3968 {
3969 struct net_device *netdev = pci_get_drvdata(pdev);
3970 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3971 int size = 0;
3972 int num_rx_queues, rx_queues;
3973 unsigned long flags;
3974
3975 #ifdef VMXNET3_RSS
3976 if (enable_mq)
3977 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
3978 (int)num_online_cpus());
3979 else
3980 #endif
3981 num_rx_queues = 1;
3982 if (!VMXNET3_VERSION_GE_6(adapter)) {
3983 num_rx_queues = rounddown_pow_of_two(num_rx_queues);
3984 }
3985 if (VMXNET3_VERSION_GE_6(adapter)) {
3986 spin_lock_irqsave(&adapter->cmd_lock, flags);
3987 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3988 VMXNET3_CMD_GET_MAX_QUEUES_CONF);
3989 rx_queues = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3990 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3991 if (rx_queues > 0)
3992 rx_queues = (rx_queues >> 8) & 0xff;
3993 else
3994 rx_queues = min(num_rx_queues, VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
3995 num_rx_queues = min(num_rx_queues, rx_queues);
3996 } else {
3997 num_rx_queues = min(num_rx_queues,
3998 VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
3999 }
4000
4001 cancel_work_sync(&adapter->work);
4002
4003 unregister_netdev(netdev);
4004
4005 vmxnet3_free_intr_resources(adapter);
4006 vmxnet3_free_pci_resources(adapter);
4007 if (VMXNET3_VERSION_GE_3(adapter)) {
4008 dma_free_coherent(&adapter->pdev->dev,
4009 sizeof(struct Vmxnet3_CoalesceScheme),
4010 adapter->coal_conf, adapter->coal_conf_pa);
4011 }
4012 #ifdef VMXNET3_RSS
4013 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
4014 adapter->rss_conf, adapter->rss_conf_pa);
4015 #endif
4016 dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
4017 adapter->pm_conf, adapter->pm_conf_pa);
4018
4019 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
4020 size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
4021 dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
4022 adapter->queue_desc_pa);
4023 dma_free_coherent(&adapter->pdev->dev,
4024 sizeof(struct Vmxnet3_DriverShared),
4025 adapter->shared, adapter->shared_pa);
4026 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
4027 sizeof(struct vmxnet3_adapter), DMA_TO_DEVICE);
4028 free_netdev(netdev);
4029 }
4030
4031 static void vmxnet3_shutdown_device(struct pci_dev *pdev)
4032 {
4033 struct net_device *netdev = pci_get_drvdata(pdev);
4034 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
4035 unsigned long flags;
4036
4037 /* Reset_work may be in the middle of resetting the device, wait for its
4038 * completion.
4039 */
4040 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
4041 usleep_range(1000, 2000);
4042
4043 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED,
4044 &adapter->state)) {
4045 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
4046 return;
4047 }
4048 spin_lock_irqsave(&adapter->cmd_lock, flags);
4049 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
4050 VMXNET3_CMD_QUIESCE_DEV);
4051 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
4052 vmxnet3_disable_all_intrs(adapter);
4053
4054 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
4055 }
4056
4057
4058 #ifdef CONFIG_PM
4059
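/*
 * Program wake-up filters (unicast, ARP request for our address,
 * magic packet) according to adapter->wol, hand them to the device
 * and drop into a low-power state.
 */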
4060 static int
4061 vmxnet3_suspend(struct device *device)
4062 {
4063 struct pci_dev *pdev = to_pci_dev(device);
4064 struct net_device *netdev = pci_get_drvdata(pdev);
4065 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
4066 struct Vmxnet3_PMConf *pmConf;
4067 struct ethhdr *ehdr;
4068 struct arphdr *ahdr;
4069 u8 *arpreq;
4070 struct in_device *in_dev;
4071 struct in_ifaddr *ifa;
4072 unsigned long flags;
4073 int i = 0;
4074
4075 if (!netif_running(netdev))
4076 return 0;
4077
4078 for (i = 0; i < adapter->num_rx_queues; i++)
4079 napi_disable(&adapter->rx_queue[i].napi);
4080
4081 vmxnet3_disable_all_intrs(adapter);
4082 vmxnet3_free_irqs(adapter);
4083 vmxnet3_free_intr_resources(adapter);
4084
4085 netif_device_detach(netdev);
4086
4087 /* Create wake-up filters. */
4088 pmConf = adapter->pm_conf;
4089 memset(pmConf, 0, sizeof(*pmConf));
4090
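/* Each wake-up filter is a byte pattern plus a bitmask in which
 * bit N of the mask selects byte N of the pattern for comparison.
 */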
4091 if (adapter->wol & WAKE_UCAST) {
4092 pmConf->filters[i].patternSize = ETH_ALEN;
4093 pmConf->filters[i].maskSize = 1;
4094 memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
4095 pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
4096
4097 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
4098 i++;
4099 }
4100
4101 if (adapter->wol & WAKE_ARP) {
4102 rcu_read_lock();
4103
4104 in_dev = __in_dev_get_rcu(netdev);
4105 if (!in_dev) {
4106 rcu_read_unlock();
4107 goto skip_arp;
4108 }
4109
4110 ifa = rcu_dereference(in_dev->ifa_list);
4111 if (!ifa) {
4112 rcu_read_unlock();
4113 goto skip_arp;
4114 }
4115
4116 pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
4117 sizeof(struct arphdr) + /* ARP header */
4118 2 * ETH_ALEN + /* 2 Ethernet addresses*/
4119 2 * sizeof(u32); /*2 IPv4 addresses */
4120 pmConf->filters[i].maskSize =
4121 (pmConf->filters[i].patternSize - 1) / 8 + 1;
4122
4123 /* ETH_P_ARP in Ethernet header. */
4124 ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
4125 ehdr->h_proto = htons(ETH_P_ARP);
4126
4127 /* ARPOP_REQUEST in ARP header. */
4128 ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
4129 ahdr->ar_op = htons(ARPOP_REQUEST);
4130 arpreq = (u8 *)(ahdr + 1);
4131
4132 /* The Unicast IPv4 address in 'tip' field. */
4133 arpreq += 2 * ETH_ALEN + sizeof(u32);
4134 *(__be32 *)arpreq = ifa->ifa_address;
4135
4136 rcu_read_unlock();
4137
4138 /* The mask for the relevant bits. */
4139 pmConf->filters[i].mask[0] = 0x00;
4140 pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
4141 pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
4142 pmConf->filters[i].mask[3] = 0x00;
4143 pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
4144 pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
4145
4146 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
4147 i++;
4148 }
4149
4150 skip_arp:
4151 if (adapter->wol & WAKE_MAGIC)
4152 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
4153
4154 pmConf->numFilters = i;
4155
4156 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
4157 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
4158 *pmConf));
4159 adapter->shared->devRead.pmConfDesc.confPA =
4160 cpu_to_le64(adapter->pm_conf_pa);
4161
4162 spin_lock_irqsave(&adapter->cmd_lock, flags);
4163 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
4164 VMXNET3_CMD_UPDATE_PMCFG);
4165 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
4166
4167 pci_save_state(pdev);
4168 pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
4169 adapter->wol);
4170 pci_disable_device(pdev);
4171 pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));
4172
4173 return 0;
4174 }
4175
4176
4177 static int
4178 vmxnet3_resume(struct device *device)
4179 {
4180 int err;
4181 unsigned long flags;
4182 struct pci_dev *pdev = to_pci_dev(device);
4183 struct net_device *netdev = pci_get_drvdata(pdev);
4184 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
4185
4186 if (!netif_running(netdev))
4187 return 0;
4188
4189 pci_set_power_state(pdev, PCI_D0);
4190 pci_restore_state(pdev);
4191 err = pci_enable_device_mem(pdev);
4192 if (err != 0)
4193 return err;
4194
4195 pci_enable_wake(pdev, PCI_D0, 0);
4196
4197 vmxnet3_alloc_intr_resources(adapter);
4198
4199 /* Across hibernate and suspend the device state is not guaranteed to
4200 * be preserved, so the device has to be reinitialized.
4201 */
4202
4203 /* Need not check adapter state as other reset tasks cannot run during
4204 * device resume.
4205 */
4206 spin_lock_irqsave(&adapter->cmd_lock, flags);
4207 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
4208 VMXNET3_CMD_QUIESCE_DEV);
4209 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
4210 vmxnet3_tq_cleanup_all(adapter);
4211 vmxnet3_rq_cleanup_all(adapter);
4212
4213 vmxnet3_reset_dev(adapter);
4214 err = vmxnet3_activate_dev(adapter);
4215 if (err != 0) {
4216 netdev_err(netdev,
4217 "failed to re-activate on resume, error: %d", err);
4218 vmxnet3_force_close(adapter);
4219 return err;
4220 }
4221 netif_device_attach(netdev);
4222
4223 return 0;
4224 }
4225
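/* Hibernation (freeze/restore) reuses the suspend/resume paths. */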
4226 static const struct dev_pm_ops vmxnet3_pm_ops = {
4227 .suspend = vmxnet3_suspend,
4228 .resume = vmxnet3_resume,
4229 .freeze = vmxnet3_suspend,
4230 .restore = vmxnet3_resume,
4231 };
4232 #endif
4233
4234 static struct pci_driver vmxnet3_driver = {
4235 .name = vmxnet3_driver_name,
4236 .id_table = vmxnet3_pciid_table,
4237 .probe = vmxnet3_probe_device,
4238 .remove = vmxnet3_remove_device,
4239 .shutdown = vmxnet3_shutdown_device,
4240 #ifdef CONFIG_PM
4241 .driver.pm = &vmxnet3_pm_ops,
4242 #endif
4243 };
4244
4245
4246 static int __init
4247 vmxnet3_init_module(void)
4248 {
4249 pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC,
4250 VMXNET3_DRIVER_VERSION_REPORT);
4251 return pci_register_driver(&vmxnet3_driver);
4252 }
4253
4254 module_init(vmxnet3_init_module);
4255
4256
4257 static void
4258 vmxnet3_exit_module(void)
4259 {
4260 pci_unregister_driver(&vmxnet3_driver);
4261 }
4262
4263 module_exit(vmxnet3_exit_module);
4264
4265 MODULE_AUTHOR("VMware, Inc.");
4266 MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
4267 MODULE_LICENSE("GPL v2");
4268 MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);
4269