1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /**************************************************************************/
3 /* */
4 /* IBM System i and System p Virtual NIC Device Driver */
5 /* Copyright (C) 2014 IBM Corp. */
6 /* Santiago Leon (santi_leon@yahoo.com) */
7 /* Thomas Falcon (tlfalcon@linux.vnet.ibm.com) */
8 /* John Allen (jallen@linux.vnet.ibm.com) */
9 /* */
10 /* */
11 /* This module contains the implementation of a virtual ethernet device */
12 /* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN */
13 /* option of the RS/6000 Platform Architecture to interface with virtual */
14 /* ethernet NICs that are presented to the partition by the hypervisor. */
15 /* */
16 /* Messages are passed between the VNIC driver and the VNIC server using */
17 /* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
18 /* issue and receive commands that initiate communication with the server */
19 /* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but */
20 /* are used by the driver to notify the server that a packet is */
21 /* ready for transmission or that a buffer has been added to receive a */
22 /* packet. Subsequently, sCRQs are used by the server to notify the */
23 /* driver that a packet transmission has been completed or that a packet */
24 /* has been received and placed in a waiting buffer. */
25 /* */
26 /* In lieu of a more conventional "on-the-fly" DMA mapping strategy in */
27 /* which skbs are DMA mapped and immediately unmapped when the transmit */
28 /* or receive has been completed, the VNIC driver is required to use */
29 /* "long term mapping". This entails that large, continuous DMA mapped */
30 /* buffers are allocated on driver initialization and these buffers are */
31 /* then continuously reused to pass skbs to and from the VNIC server. */
32 /* */
33 /**************************************************************************/
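/* A rough sketch of the long term mapping flow described above (a
 * simplification using this driver's own helpers; error paths omitted):
 *
 *   alloc_ltb_set() -> alloc_long_term_buff() -> dma_alloc_coherent()
 *                                             -> send_request_map()  (notify VIOS)
 *   ... skbs are copied into/out of the mapped LTBs at xmit/poll time ...
 *   free_ltb_set()  -> free_long_term_buff()  -> send_request_unmap()
 *                                             -> dma_free_coherent()
 */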
34
35 #include <linux/module.h>
36 #include <linux/moduleparam.h>
37 #include <linux/types.h>
38 #include <linux/errno.h>
39 #include <linux/completion.h>
40 #include <linux/ioport.h>
41 #include <linux/dma-mapping.h>
42 #include <linux/kernel.h>
43 #include <linux/netdevice.h>
44 #include <linux/etherdevice.h>
45 #include <linux/skbuff.h>
46 #include <linux/init.h>
47 #include <linux/delay.h>
48 #include <linux/mm.h>
49 #include <linux/ethtool.h>
50 #include <linux/proc_fs.h>
51 #include <linux/if_arp.h>
52 #include <linux/in.h>
53 #include <linux/ip.h>
54 #include <linux/ipv6.h>
55 #include <linux/irq.h>
56 #include <linux/irqdomain.h>
57 #include <linux/kthread.h>
58 #include <linux/seq_file.h>
59 #include <linux/interrupt.h>
60 #include <net/net_namespace.h>
61 #include <asm/hvcall.h>
62 #include <linux/atomic.h>
63 #include <asm/vio.h>
64 #include <asm/xive.h>
65 #include <asm/iommu.h>
66 #include <linux/uaccess.h>
67 #include <asm/firmware.h>
68 #include <linux/workqueue.h>
69 #include <linux/if_vlan.h>
70 #include <linux/utsname.h>
71
72 #include "ibmvnic.h"
73
74 static const char ibmvnic_driver_name[] = "ibmvnic";
75 static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
76
77 MODULE_AUTHOR("Santiago Leon");
78 MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
79 MODULE_LICENSE("GPL");
80 MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
81
82 static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
83 static void release_sub_crqs(struct ibmvnic_adapter *, bool);
84 static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
85 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
86 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
87 static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
88 static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
89 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
90 static int enable_scrq_irq(struct ibmvnic_adapter *,
91 struct ibmvnic_sub_crq_queue *);
92 static int disable_scrq_irq(struct ibmvnic_adapter *,
93 struct ibmvnic_sub_crq_queue *);
94 static int pending_scrq(struct ibmvnic_adapter *,
95 struct ibmvnic_sub_crq_queue *);
96 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
97 struct ibmvnic_sub_crq_queue *);
98 static int ibmvnic_poll(struct napi_struct *napi, int data);
99 static void send_query_map(struct ibmvnic_adapter *adapter);
100 static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, u32, u8);
101 static int send_request_unmap(struct ibmvnic_adapter *, u8);
102 static int send_login(struct ibmvnic_adapter *adapter);
103 static void send_query_cap(struct ibmvnic_adapter *adapter);
104 static int init_sub_crqs(struct ibmvnic_adapter *);
105 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
106 static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
107 static void release_crq_queue(struct ibmvnic_adapter *);
108 static int __ibmvnic_set_mac(struct net_device *, u8 *);
109 static int init_crq_queue(struct ibmvnic_adapter *adapter);
110 static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
111 static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
112 struct ibmvnic_sub_crq_queue *tx_scrq);
113 static void free_long_term_buff(struct ibmvnic_adapter *adapter,
114 struct ibmvnic_long_term_buff *ltb);
115 static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter);
116
117 struct ibmvnic_stat {
118 char name[ETH_GSTRING_LEN];
119 int offset;
120 };
121
122 #define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
123 offsetof(struct ibmvnic_statistics, stat))
124 #define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + (off))))
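/* Example: IBMVNIC_GET_STAT(adapter, IBMVNIC_STAT_OFF(rx_packets)) reads
 * adapter->stats.rx_packets as a u64: IBMVNIC_STAT_OFF() folds the offset
 * of 'stats' within struct ibmvnic_adapter into the offset of the field
 * within struct ibmvnic_statistics, and IBMVNIC_GET_STAT() dereferences
 * that combined offset from the adapter pointer.
 */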
125
126 static const struct ibmvnic_stat ibmvnic_stats[] = {
127 {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
128 {"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
129 {"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
130 {"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
131 {"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
132 {"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
133 {"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
134 {"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
135 {"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
136 {"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
137 {"align_errors", IBMVNIC_STAT_OFF(align_errors)},
138 {"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
139 {"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
140 {"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
141 {"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
142 {"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
143 {"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
144 {"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
145 {"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
146 {"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
147 {"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
148 {"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
149 };
150
151 static int send_crq_init_complete(struct ibmvnic_adapter *adapter)
152 {
153 union ibmvnic_crq crq;
154
155 memset(&crq, 0, sizeof(crq));
156 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
157 crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
158
159 return ibmvnic_send_crq(adapter, &crq);
160 }
161
162 static int send_version_xchg(struct ibmvnic_adapter *adapter)
163 {
164 union ibmvnic_crq crq;
165
166 memset(&crq, 0, sizeof(crq));
167 crq.version_exchange.first = IBMVNIC_CRQ_CMD;
168 crq.version_exchange.cmd = VERSION_EXCHANGE;
169 crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
170
171 return ibmvnic_send_crq(adapter, &crq);
172 }
173
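/* Thin wrapper around the H_REG_SUB_CRQ hcall: on success the hypervisor
 * returns the new sub-CRQ's number in retbuf[0] and its interrupt source
 * in retbuf[1], which are handed back through @number and @irq.
 */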
174 static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
175 unsigned long length, unsigned long *number,
176 unsigned long *irq)
177 {
178 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
179 long rc;
180
181 rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
182 *number = retbuf[0];
183 *irq = retbuf[1];
184
185 return rc;
186 }
187
188 /**
189 * ibmvnic_wait_for_completion - Check device state and wait for completion
190 * @adapter: private device data
191 * @comp_done: completion structure to wait for
192 * @timeout: time to wait in milliseconds
193 *
194 * Wait for a completion signal or until the timeout limit is reached
195 * while checking that the device is still active.
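 *
 * The wait is split into (up to) five equal slices so that a dead CRQ is
 * noticed between slices rather than blocking for the full @timeout.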
196 */
197 static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
198 struct completion *comp_done,
199 unsigned long timeout)
200 {
201 struct net_device *netdev;
202 unsigned long div_timeout;
203 u8 retry;
204
205 netdev = adapter->netdev;
206 retry = 5;
207 div_timeout = msecs_to_jiffies(timeout / retry);
208 while (true) {
209 if (!adapter->crq.active) {
210 netdev_err(netdev, "Device down!\n");
211 return -ENODEV;
212 }
213 if (!retry--)
214 break;
215 if (wait_for_completion_timeout(comp_done, div_timeout))
216 return 0;
217 }
218 netdev_err(netdev, "Operation timed out.\n");
219 return -ETIMEDOUT;
220 }
221
222 /**
223 * reuse_ltb() - Check if a long term buffer can be reused
224 * @ltb: The long term buffer to be checked
225 * @size: The size of the long term buffer.
226 *
227 * An LTB can be reused unless its size has changed.
228 *
229 * Return: true if the LTB can be reused, false otherwise.
230 */
231 static bool reuse_ltb(struct ibmvnic_long_term_buff *ltb, int size)
232 {
233 return (ltb->buff && ltb->size == size);
234 }
235
236 /**
237 * alloc_long_term_buff() - Allocate a long term buffer (LTB)
238 *
239 * @adapter: ibmvnic adapter associated to the LTB
240 * @ltb: container object for the LTB
241 * @size: size of the LTB
242 *
243 * Allocate an LTB of the specified size and notify VIOS.
244 *
245 * If the given @ltb already has the correct size, reuse it. Otherwise, if
246 * it's non-NULL, free it. Then allocate a new one of the correct size.
247 * Notify the VIOS either way since we may now be working with a new VIOS.
248 *
249 * Allocating larger chunks of memory during resets, especially during
250 * LPM or in low memory situations, can cause resets to fail/timeout and
251 * the LPAR to lose connectivity. So hold onto the LTB even if we fail to
252 * communicate with the VIOS and reuse it on next open. Free it on close.
253 *
254 * Return: 0 if we were able to allocate the LTB and notify the VIOS, and
255 * a negative value otherwise.
256 */
257 static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
258 struct ibmvnic_long_term_buff *ltb, int size)
259 {
260 struct device *dev = &adapter->vdev->dev;
261 u64 prev = 0;
262 int rc;
263
264 if (!reuse_ltb(ltb, size)) {
265 dev_dbg(dev,
266 "LTB size changed from 0x%llx to 0x%x, reallocating\n",
267 ltb->size, size);
268 prev = ltb->size;
269 free_long_term_buff(adapter, ltb);
270 }
271
272 if (ltb->buff) {
273 dev_dbg(dev, "Reusing LTB [map %d, size 0x%llx]\n",
274 ltb->map_id, ltb->size);
275 } else {
276 ltb->buff = dma_alloc_coherent(dev, size, <b->addr,
277 GFP_KERNEL);
278 if (!ltb->buff) {
279 dev_err(dev, "Couldn't alloc long term buffer\n");
280 return -ENOMEM;
281 }
282 ltb->size = size;
283
284 ltb->map_id = find_first_zero_bit(adapter->map_ids,
285 MAX_MAP_ID);
286 bitmap_set(adapter->map_ids, ltb->map_id, 1);
287
288 dev_dbg(dev,
289 "Allocated new LTB [map %d, size 0x%llx was 0x%llx]\n",
290 ltb->map_id, ltb->size, prev);
291 }
292
293 /* Ensure ltb is zeroed - especially when reusing it. */
294 memset(ltb->buff, 0, ltb->size);
295
296 mutex_lock(&adapter->fw_lock);
297 adapter->fw_done_rc = 0;
298 reinit_completion(&adapter->fw_done);
299
300 rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
301 if (rc) {
302 dev_err(dev, "send_request_map failed, rc = %d\n", rc);
303 goto out;
304 }
305
306 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
307 if (rc) {
308 dev_err(dev, "LTB map request aborted or timed out, rc = %d\n",
309 rc);
310 goto out;
311 }
312
313 if (adapter->fw_done_rc) {
314 dev_err(dev, "Couldn't map LTB, rc = %d\n",
315 adapter->fw_done_rc);
316 rc = -EIO;
317 goto out;
318 }
319 rc = 0;
320 out:
321 /* don't free LTB on communication error - see function header */
322 mutex_unlock(&adapter->fw_lock);
323 return rc;
324 }
325
326 static void free_long_term_buff(struct ibmvnic_adapter *adapter,
327 struct ibmvnic_long_term_buff *ltb)
328 {
329 struct device *dev = &adapter->vdev->dev;
330
331 if (!ltb->buff)
332 return;
333
334 /* VIOS automatically unmaps the long term buffer at remote
335 * end for the following resets:
336 * FAILOVER, MOBILITY, TIMEOUT.
337 */
338 if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
339 adapter->reset_reason != VNIC_RESET_MOBILITY &&
340 adapter->reset_reason != VNIC_RESET_TIMEOUT)
341 send_request_unmap(adapter, ltb->map_id);
342
343 dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
344
345 ltb->buff = NULL;
346 /* mark this map_id free */
347 bitmap_clear(adapter->map_ids, ltb->map_id, 1);
348 ltb->map_id = 0;
349 }
350
351 /**
352 * free_ltb_set() - free the given set of long term buffers (LTBs)
353 * @adapter: The ibmvnic adapter containing this ltb set
354 * @ltb_set: The ltb_set to be freed
355 *
356 * Free each LTB in the given set and release the set's container.
357 */
359 static void free_ltb_set(struct ibmvnic_adapter *adapter,
360 struct ibmvnic_ltb_set *ltb_set)
361 {
362 int i;
363
364 for (i = 0; i < ltb_set->num_ltbs; i++)
365 free_long_term_buff(adapter, <b_set->ltbs[i]);
366
367 kfree(ltb_set->ltbs);
368 ltb_set->ltbs = NULL;
369 ltb_set->num_ltbs = 0;
370 }
371
372 /**
373 * alloc_ltb_set() - Allocate a set of long term buffers (LTBs)
374 *
375 * @adapter: ibmvnic adapter associated to the LTB
376 * @ltb_set: container object for the set of LTBs
377 * @num_buffs: Number of buffers in the LTB
378 * @buff_size: Size of each buffer in the LTB
379 *
380 * Allocate a set of LTBs to accommodate @num_buffs buffers of @buff_size
381 * each. We currently cap the size of each LTB at IBMVNIC_ONE_LTB_SIZE. If
382 * the new set of LTBs has fewer LTBs than the old set, free the excess LTBs.
383 * If the new set needs more than the old set had, allocate the remaining
384 * ones. Try to reuse as many LTBs as possible and avoid reallocation.
385 *
386 * Any changes to this allocation strategy must be reflected in
387 * map_rxpool_buff_to_ltb() and map_txpool_buff_to_ltb().
388 */
389 static int alloc_ltb_set(struct ibmvnic_adapter *adapter,
390 struct ibmvnic_ltb_set *ltb_set, int num_buffs,
391 int buff_size)
392 {
393 struct device *dev = &adapter->vdev->dev;
394 struct ibmvnic_ltb_set old_set;
395 struct ibmvnic_ltb_set new_set;
396 int rem_size;
397 int tot_size; /* size of all ltbs */
398 int ltb_size; /* size of one ltb */
399 int nltbs;
400 int rc;
401 int n;
402 int i;
403
404 dev_dbg(dev, "%s() num_buffs %d, buff_size %d\n", __func__, num_buffs,
405 buff_size);
406
407 ltb_size = rounddown(IBMVNIC_ONE_LTB_SIZE, buff_size);
408 tot_size = num_buffs * buff_size;
409
410 if (ltb_size > tot_size)
411 ltb_size = tot_size;
412
413 nltbs = tot_size / ltb_size;
414 if (tot_size % ltb_size)
415 nltbs++;
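/* Worked example with assumed numbers: for 1000 buffers of 16KB,
 * tot_size = 16000KB; if IBMVNIC_ONE_LTB_SIZE is 8MB (8192KB), then
 * ltb_size = rounddown(8192KB, 16KB) = 8192KB and nltbs = 2 (one
 * 8192KB LTB plus a final 7808KB LTB from the loop below).
 */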
416
417 old_set = *ltb_set;
418
419 if (old_set.num_ltbs == nltbs) {
420 new_set = old_set;
421 } else {
422 int tmp = nltbs * sizeof(struct ibmvnic_long_term_buff);
423
424 new_set.ltbs = kzalloc(tmp, GFP_KERNEL);
425 if (!new_set.ltbs)
426 return -ENOMEM;
427
428 new_set.num_ltbs = nltbs;
429
430 /* Free any excess ltbs in old set */
431 for (i = new_set.num_ltbs; i < old_set.num_ltbs; i++)
432 free_long_term_buff(adapter, &old_set.ltbs[i]);
433
434 /* Copy remaining ltbs to new set. All LTBs except the
435 * last one are of the same size. alloc_long_term_buff()
436 * will realloc if the size changes.
437 */
438 n = min(old_set.num_ltbs, new_set.num_ltbs);
439 for (i = 0; i < n; i++)
440 new_set.ltbs[i] = old_set.ltbs[i];
441
442 /* Any additional ltbs in new set will have NULL ltbs for
443 * now and will be allocated in alloc_long_term_buff().
444 */
445
446 /* We no longer need the old_set so free it. Note that we
447 * may have reused some ltbs from the old set and freed excess
448 * ltbs above. So we only need to free the container now,
449 * not the LTBs themselves. (i.e. don't free_ltb_set()!)
450 */
451 kfree(old_set.ltbs);
452 old_set.ltbs = NULL;
453 old_set.num_ltbs = 0;
454
455 /* Install the new set. If allocations fail below, we will
456 * retry later and know what size LTBs we need.
457 */
458 *ltb_set = new_set;
459 }
460
461 i = 0;
462 rem_size = tot_size;
463 while (rem_size) {
464 if (ltb_size > rem_size)
465 ltb_size = rem_size;
466
467 rem_size -= ltb_size;
468
469 rc = alloc_long_term_buff(adapter, &new_set.ltbs[i], ltb_size);
470 if (rc)
471 goto out;
472 i++;
473 }
474
475 WARN_ON(i != new_set.num_ltbs);
476
477 return 0;
478 out:
479 /* We may have allocated one/more LTBs before failing and we
480 * want to try to reuse them on the next reset. So don't free the ltb set.
481 */
482 return rc;
483 }
484
485 /**
486 * map_rxpool_buf_to_ltb - Map given rxpool buffer to offset in an LTB.
487 * @rxpool: The receive buffer pool containing buffer
488 * @bufidx: Index of buffer in rxpool
489 * @ltbp: (Output) pointer to the long term buffer containing the buffer
490 * @offset: (Output) offset of buffer in the LTB from @ltbp
491 *
492 * Map the given buffer identified by [rxpool, bufidx] to an LTB in the
493 * pool and its corresponding offset. Assume for now that each LTB may be
494 * of a different size; this could possibly be optimized based on the
495 * allocation strategy in alloc_ltb_set().
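 *
 * Example (assumed sizes): with two LTBs holding 340 buffers each, a
 * bufidx of 400 walks past LTB 0 (400 - 340 = 60) and resolves to LTB 1
 * at offset 60 * buff_size.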
496 */
497 static void map_rxpool_buf_to_ltb(struct ibmvnic_rx_pool *rxpool,
498 unsigned int bufidx,
499 struct ibmvnic_long_term_buff **ltbp,
500 unsigned int *offset)
501 {
502 struct ibmvnic_long_term_buff *ltb;
503 int nbufs; /* # of buffers in one ltb */
504 int i;
505
506 WARN_ON(bufidx >= rxpool->size);
507
508 for (i = 0; i < rxpool->ltb_set.num_ltbs; i++) {
509 ltb = &rxpool->ltb_set.ltbs[i];
510 nbufs = ltb->size / rxpool->buff_size;
511 if (bufidx < nbufs)
512 break;
513 bufidx -= nbufs;
514 }
515
516 *ltbp = ltb;
517 *offset = bufidx * rxpool->buff_size;
518 }
519
520 /**
521 * map_txpool_buf_to_ltb - Map given txpool buffer to offset in an LTB.
522 * @txpool: The transmit buffer pool containing buffer
523 * @bufidx: Index of buffer in txpool
524 * @ltbp: (Output) pointer to the long term buffer (LTB) containing the buffer
525 * @offset: (Output) offset of buffer in the LTB from @ltbp
526 *
527 * Map the given buffer identified by [txpool, bufidx] to an LTB in the
528 * pool and its corresponding offset.
529 */
530 static void map_txpool_buf_to_ltb(struct ibmvnic_tx_pool *txpool,
531 unsigned int bufidx,
532 struct ibmvnic_long_term_buff **ltbp,
533 unsigned int *offset)
534 {
535 struct ibmvnic_long_term_buff *ltb;
536 int nbufs; /* # of buffers in one ltb */
537 int i;
538
539 WARN_ON_ONCE(bufidx >= txpool->num_buffers);
540
541 for (i = 0; i < txpool->ltb_set.num_ltbs; i++) {
542 ltb = &txpool->ltb_set.ltbs[i];
543 nbufs = ltb->size / txpool->buf_size;
544 if (bufidx < nbufs)
545 break;
546 bufidx -= nbufs;
547 }
548
549 *ltbp = ltb;
550 *offset = bufidx * txpool->buf_size;
551 }
552
553 static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
554 {
555 int i;
556
557 for (i = 0; i < adapter->num_active_rx_pools; i++)
558 adapter->rx_pool[i].active = 0;
559 }
560
561 static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
562 struct ibmvnic_rx_pool *pool)
563 {
564 int count = pool->size - atomic_read(&pool->available);
565 u64 handle = adapter->rx_scrq[pool->index]->handle;
566 struct device *dev = &adapter->vdev->dev;
567 struct ibmvnic_ind_xmit_queue *ind_bufp;
568 struct ibmvnic_sub_crq_queue *rx_scrq;
569 struct ibmvnic_long_term_buff *ltb;
570 union sub_crq *sub_crq;
571 int buffers_added = 0;
572 unsigned long lpar_rc;
573 struct sk_buff *skb;
574 unsigned int offset;
575 dma_addr_t dma_addr;
576 unsigned char *dst;
577 int shift = 0;
578 int bufidx;
579 int i;
580
581 if (!pool->active)
582 return;
583
584 rx_scrq = adapter->rx_scrq[pool->index];
585 ind_bufp = &rx_scrq->ind_buf;
586
587 /* netdev_alloc_skb() could have failed after we saved a few skbs
588 * in the ind_buf and we would not have sent them to VIOS yet.
589 * To account for them, start the loop at ind_bufp->index rather
590 * than 0. If we pushed all the skbs to VIOS, ind_bufp->index will
591 * be 0.
592 */
593 for (i = ind_bufp->index; i < count; ++i) {
594 bufidx = pool->free_map[pool->next_free];
595
596 /* We may be reusing the skb from earlier resets. Allocate
597 * only if necessary. But since the LTB may have changed
598 * during reset (see init_rx_pools()), update LTB below
599 * even if reusing skb.
600 */
601 skb = pool->rx_buff[bufidx].skb;
602 if (!skb) {
603 skb = netdev_alloc_skb(adapter->netdev,
604 pool->buff_size);
605 if (!skb) {
606 dev_err(dev, "Couldn't replenish rx buff\n");
607 adapter->replenish_no_mem++;
608 break;
609 }
610 }
611
612 pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
613 pool->next_free = (pool->next_free + 1) % pool->size;
614
615 /* Copy the skb to the long term mapped DMA buffer */
616 map_rxpool_buf_to_ltb(pool, bufidx, <b, &offset);
617 dst = ltb->buff + offset;
618 memset(dst, 0, pool->buff_size);
619 dma_addr = ltb->addr + offset;
620
621 /* add the skb to an rx_buff in the pool */
622 pool->rx_buff[bufidx].data = dst;
623 pool->rx_buff[bufidx].dma = dma_addr;
624 pool->rx_buff[bufidx].skb = skb;
625 pool->rx_buff[bufidx].pool_index = pool->index;
626 pool->rx_buff[bufidx].size = pool->buff_size;
627
628 /* queue the rx_buff for the next send_subcrq_indirect */
629 sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
630 memset(sub_crq, 0, sizeof(*sub_crq));
631 sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
632 sub_crq->rx_add.correlator =
633 cpu_to_be64((u64)&pool->rx_buff[bufidx]);
634 sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
635 sub_crq->rx_add.map_id = ltb->map_id;
636
637 /* The length field of the sCRQ is defined to be 24 bits so the
638 * buffer size needs to be left shifted by a byte before it is
639 * converted to big endian to prevent the last byte from being
640 * truncated.
641 */
642 #ifdef __LITTLE_ENDIAN__
643 shift = 8;
644 #endif
645 sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);
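/* e.g. on LE, a 9216-byte (0x2400) buffer is stored as be32 0x00240000,
 * so the first three bytes read back as the 24-bit length 0x002400.
 */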
646
647 /* if send_subcrq_indirect queue is full, flush to VIOS */
648 if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
649 i == count - 1) {
650 lpar_rc =
651 send_subcrq_indirect(adapter, handle,
652 (u64)ind_bufp->indir_dma,
653 (u64)ind_bufp->index);
654 if (lpar_rc != H_SUCCESS)
655 goto failure;
656 buffers_added += ind_bufp->index;
657 adapter->replenish_add_buff_success += ind_bufp->index;
658 ind_bufp->index = 0;
659 }
660 }
661 atomic_add(buffers_added, &pool->available);
662 return;
663
664 failure:
665 if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
666 dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
667 for (i = ind_bufp->index - 1; i >= 0; --i) {
668 struct ibmvnic_rx_buff *rx_buff;
669
670 pool->next_free = pool->next_free == 0 ?
671 pool->size - 1 : pool->next_free - 1;
672 sub_crq = &ind_bufp->indir_arr[i];
673 rx_buff = (struct ibmvnic_rx_buff *)
674 be64_to_cpu(sub_crq->rx_add.correlator);
675 bufidx = (int)(rx_buff - pool->rx_buff);
676 pool->free_map[pool->next_free] = bufidx;
677 dev_kfree_skb_any(pool->rx_buff[bufidx].skb);
678 pool->rx_buff[bufidx].skb = NULL;
679 }
680 adapter->replenish_add_buff_failure += ind_bufp->index;
681 atomic_add(buffers_added, &pool->available);
682 ind_bufp->index = 0;
683 if (lpar_rc == H_CLOSED || adapter->failover_pending) {
684 /* Disable buffer pool replenishment and report carrier off if
685 * queue is closed or pending failover.
686 * Firmware guarantees that a signal will be sent to the
687 * driver, triggering a reset.
688 */
689 deactivate_rx_pools(adapter);
690 netif_carrier_off(adapter->netdev);
691 }
692 }
693
694 static void replenish_pools(struct ibmvnic_adapter *adapter)
695 {
696 int i;
697
698 adapter->replenish_task_cycles++;
699 for (i = 0; i < adapter->num_active_rx_pools; i++) {
700 if (adapter->rx_pool[i].active)
701 replenish_rx_pool(adapter, &adapter->rx_pool[i]);
702 }
703
704 netdev_dbg(adapter->netdev, "Replenished %d pools\n", i);
705 }
706
707 static void release_stats_buffers(struct ibmvnic_adapter *adapter)
708 {
709 kfree(adapter->tx_stats_buffers);
710 kfree(adapter->rx_stats_buffers);
711 adapter->tx_stats_buffers = NULL;
712 adapter->rx_stats_buffers = NULL;
713 }
714
715 static int init_stats_buffers(struct ibmvnic_adapter *adapter)
716 {
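/* Both arrays are sized for IBMVNIC_MAX_QUEUES rather than the currently
 * requested queue count, presumably so that queue-count changes across
 * resets never require reallocating them.
 */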
717 adapter->tx_stats_buffers =
718 kcalloc(IBMVNIC_MAX_QUEUES,
719 sizeof(struct ibmvnic_tx_queue_stats),
720 GFP_KERNEL);
721 if (!adapter->tx_stats_buffers)
722 return -ENOMEM;
723
724 adapter->rx_stats_buffers =
725 kcalloc(IBMVNIC_MAX_QUEUES,
726 sizeof(struct ibmvnic_rx_queue_stats),
727 GFP_KERNEL);
728 if (!adapter->rx_stats_buffers)
729 return -ENOMEM;
730
731 return 0;
732 }
733
734 static void release_stats_token(struct ibmvnic_adapter *adapter)
735 {
736 struct device *dev = &adapter->vdev->dev;
737
738 if (!adapter->stats_token)
739 return;
740
741 dma_unmap_single(dev, adapter->stats_token,
742 sizeof(struct ibmvnic_statistics),
743 DMA_FROM_DEVICE);
744 adapter->stats_token = 0;
745 }
746
747 static int init_stats_token(struct ibmvnic_adapter *adapter)
748 {
749 struct device *dev = &adapter->vdev->dev;
750 dma_addr_t stok;
751 int rc;
752
753 stok = dma_map_single(dev, &adapter->stats,
754 sizeof(struct ibmvnic_statistics),
755 DMA_FROM_DEVICE);
756 rc = dma_mapping_error(dev, stok);
757 if (rc) {
758 dev_err(dev, "Couldn't map stats buffer, rc = %d\n", rc);
759 return rc;
760 }
761
762 adapter->stats_token = stok;
763 netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
764 return 0;
765 }
766
767 /**
768 * release_rx_pools() - Release any rx pools attached to @adapter.
769 * @adapter: ibmvnic adapter
770 *
771 * Safe to call this multiple times - even if no pools are attached.
772 */
773 static void release_rx_pools(struct ibmvnic_adapter *adapter)
774 {
775 struct ibmvnic_rx_pool *rx_pool;
776 int i, j;
777
778 if (!adapter->rx_pool)
779 return;
780
781 for (i = 0; i < adapter->num_active_rx_pools; i++) {
782 rx_pool = &adapter->rx_pool[i];
783
784 netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
785
786 kfree(rx_pool->free_map);
787
788 free_ltb_set(adapter, &rx_pool->ltb_set);
789
790 if (!rx_pool->rx_buff)
791 continue;
792
793 for (j = 0; j < rx_pool->size; j++) {
794 if (rx_pool->rx_buff[j].skb) {
795 dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
796 rx_pool->rx_buff[j].skb = NULL;
797 }
798 }
799
800 kfree(rx_pool->rx_buff);
801 }
802
803 kfree(adapter->rx_pool);
804 adapter->rx_pool = NULL;
805 adapter->num_active_rx_pools = 0;
806 adapter->prev_rx_pool_size = 0;
807 }
808
809 /**
810 * reuse_rx_pools() - Check if the existing rx pools can be reused.
811 * @adapter: ibmvnic adapter
812 *
813 * Check if the existing rx pools in the adapter can be reused. The
814 * pools can be reused if the pool parameters (number of pools,
815 * number of buffers in the pool and size of each buffer) have not
816 * changed.
817 *
818 * NOTE: This assumes that all pools have the same number of buffers
819 * which is the case currently. If that changes, we must fix this.
820 *
821 * Return: true if the rx pools can be reused, false otherwise.
822 */
823 static bool reuse_rx_pools(struct ibmvnic_adapter *adapter)
824 {
825 u64 old_num_pools, new_num_pools;
826 u64 old_pool_size, new_pool_size;
827 u64 old_buff_size, new_buff_size;
828
829 if (!adapter->rx_pool)
830 return false;
831
832 old_num_pools = adapter->num_active_rx_pools;
833 new_num_pools = adapter->req_rx_queues;
834
835 old_pool_size = adapter->prev_rx_pool_size;
836 new_pool_size = adapter->req_rx_add_entries_per_subcrq;
837
838 old_buff_size = adapter->prev_rx_buf_sz;
839 new_buff_size = adapter->cur_rx_buf_sz;
840
841 if (old_buff_size != new_buff_size ||
842 old_num_pools != new_num_pools ||
843 old_pool_size != new_pool_size)
844 return false;
845
846 return true;
847 }
848
849 /**
850 * init_rx_pools(): Initialize the set of receiver pools in the adapter.
851 * @netdev: net device associated with the vnic interface
852 *
853 * Initialize the set of receiver pools in the ibmvnic adapter associated
854 * with the net_device @netdev. If possible, reuse the existing rx pools.
855 * Otherwise free any existing pools and allocate a new set of pools
856 * before initializing them.
857 *
858 * Return: 0 on success and negative value on error.
859 */
860 static int init_rx_pools(struct net_device *netdev)
861 {
862 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
863 struct device *dev = &adapter->vdev->dev;
864 struct ibmvnic_rx_pool *rx_pool;
865 u64 num_pools;
866 u64 pool_size; /* # of buffers in one pool */
867 u64 buff_size;
868 int i, j, rc;
869
870 pool_size = adapter->req_rx_add_entries_per_subcrq;
871 num_pools = adapter->req_rx_queues;
872 buff_size = adapter->cur_rx_buf_sz;
873
874 if (reuse_rx_pools(adapter)) {
875 dev_dbg(dev, "Reusing rx pools\n");
876 goto update_ltb;
877 }
878
879 /* Allocate/populate the pools. */
880 release_rx_pools(adapter);
881
882 adapter->rx_pool = kcalloc(num_pools,
883 sizeof(struct ibmvnic_rx_pool),
884 GFP_KERNEL);
885 if (!adapter->rx_pool) {
886 dev_err(dev, "Failed to allocate rx pools\n");
887 return -ENOMEM;
888 }
889
890 /* Set num_active_rx_pools early. If we fail below after partial
891 * allocation, release_rx_pools() will know how many to look for.
892 */
893 adapter->num_active_rx_pools = num_pools;
894
895 for (i = 0; i < num_pools; i++) {
896 rx_pool = &adapter->rx_pool[i];
897
898 netdev_dbg(adapter->netdev,
899 "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
900 i, pool_size, buff_size);
901
902 rx_pool->size = pool_size;
903 rx_pool->index = i;
904 rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
905
906 rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
907 GFP_KERNEL);
908 if (!rx_pool->free_map) {
909 dev_err(dev, "Couldn't alloc free_map %d\n", i);
910 rc = -ENOMEM;
911 goto out_release;
912 }
913
914 rx_pool->rx_buff = kcalloc(rx_pool->size,
915 sizeof(struct ibmvnic_rx_buff),
916 GFP_KERNEL);
917 if (!rx_pool->rx_buff) {
918 dev_err(dev, "Couldn't alloc rx buffers\n");
919 rc = -ENOMEM;
920 goto out_release;
921 }
922 }
923
924 adapter->prev_rx_pool_size = pool_size;
925 adapter->prev_rx_buf_sz = adapter->cur_rx_buf_sz;
926
927 update_ltb:
928 for (i = 0; i < num_pools; i++) {
929 rx_pool = &adapter->rx_pool[i];
930 dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n",
931 i, rx_pool->size, rx_pool->buff_size);
932
933 rc = alloc_ltb_set(adapter, &rx_pool->ltb_set,
934 rx_pool->size, rx_pool->buff_size);
935 if (rc)
936 goto out;
937
938 for (j = 0; j < rx_pool->size; ++j) {
939 struct ibmvnic_rx_buff *rx_buff;
940
941 rx_pool->free_map[j] = j;
942
943 /* NOTE: Don't clear rx_buff->skb here - will leak
944 * memory! replenish_rx_pool() will reuse skbs or
945 * allocate as necessary.
946 */
947 rx_buff = &rx_pool->rx_buff[j];
948 rx_buff->dma = 0;
949 rx_buff->data = 0;
950 rx_buff->size = 0;
951 rx_buff->pool_index = 0;
952 }
953
954 /* Mark pool "empty" so replenish_rx_pools() will
955 * update the LTB info for each buffer
956 */
957 atomic_set(&rx_pool->available, 0);
958 rx_pool->next_alloc = 0;
959 rx_pool->next_free = 0;
960 /* replenish_rx_pool() may have called deactivate_rx_pools()
961 * on failover. Ensure pool is active now.
962 */
963 rx_pool->active = 1;
964 }
965 return 0;
966 out_release:
967 release_rx_pools(adapter);
968 out:
969 /* We failed to allocate one or more LTBs or map them on the VIOS.
970 * Hold onto the pools and any LTBs that we did allocate/map.
971 */
972 return rc;
973 }
974
975 static void release_vpd_data(struct ibmvnic_adapter *adapter)
976 {
977 if (!adapter->vpd)
978 return;
979
980 kfree(adapter->vpd->buff);
981 kfree(adapter->vpd);
982
983 adapter->vpd = NULL;
984 }
985
986 static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
987 struct ibmvnic_tx_pool *tx_pool)
988 {
989 kfree(tx_pool->tx_buff);
990 kfree(tx_pool->free_map);
991 free_ltb_set(adapter, &tx_pool->ltb_set);
992 }
993
994 /**
995 * release_tx_pools() - Release any tx pools attached to @adapter.
996 * @adapter: ibmvnic adapter
997 *
998 * Safe to call this multiple times - even if no pools are attached.
999 */
1000 static void release_tx_pools(struct ibmvnic_adapter *adapter)
1001 {
1002 int i;
1003
1004 /* init_tx_pools() ensures that ->tx_pool and ->tso_pool are
1005 * both NULL or both non-NULL. So we only need to check one.
1006 */
1007 if (!adapter->tx_pool)
1008 return;
1009
1010 for (i = 0; i < adapter->num_active_tx_pools; i++) {
1011 release_one_tx_pool(adapter, &adapter->tx_pool[i]);
1012 release_one_tx_pool(adapter, &adapter->tso_pool[i]);
1013 }
1014
1015 kfree(adapter->tx_pool);
1016 adapter->tx_pool = NULL;
1017 kfree(adapter->tso_pool);
1018 adapter->tso_pool = NULL;
1019 adapter->num_active_tx_pools = 0;
1020 adapter->prev_tx_pool_size = 0;
1021 }
1022
1023 static int init_one_tx_pool(struct net_device *netdev,
1024 struct ibmvnic_tx_pool *tx_pool,
1025 int pool_size, int buf_size)
1026 {
1027 int i;
1028
1029 tx_pool->tx_buff = kcalloc(pool_size,
1030 sizeof(struct ibmvnic_tx_buff),
1031 GFP_KERNEL);
1032 if (!tx_pool->tx_buff)
1033 return -ENOMEM;
1034
1035 tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL);
1036 if (!tx_pool->free_map) {
1037 kfree(tx_pool->tx_buff);
1038 tx_pool->tx_buff = NULL;
1039 return -ENOMEM;
1040 }
1041
1042 for (i = 0; i < pool_size; i++)
1043 tx_pool->free_map[i] = i;
1044
1045 tx_pool->consumer_index = 0;
1046 tx_pool->producer_index = 0;
1047 tx_pool->num_buffers = pool_size;
1048 tx_pool->buf_size = buf_size;
1049
1050 return 0;
1051 }
1052
1053 /**
1054 * reuse_tx_pools() - Check if the existing tx pools can be reused.
1055 * @adapter: ibmvnic adapter
1056 *
1057 * Check if the existing tx pools in the adapter can be reused. The
1058 * pools can be reused if the pool parameters (number of pools,
1059 * number of buffers in the pool and mtu) have not changed.
1060 *
1061 * NOTE: This assumes that all pools have the same number of buffers
1062 * which is the case currently. If that changes, we must fix this.
1063 *
1064 * Return: true if the tx pools can be reused, false otherwise.
1065 */
1066 static bool reuse_tx_pools(struct ibmvnic_adapter *adapter)
1067 {
1068 u64 old_num_pools, new_num_pools;
1069 u64 old_pool_size, new_pool_size;
1070 u64 old_mtu, new_mtu;
1071
1072 if (!adapter->tx_pool)
1073 return false;
1074
1075 old_num_pools = adapter->num_active_tx_pools;
1076 new_num_pools = adapter->num_active_tx_scrqs;
1077 old_pool_size = adapter->prev_tx_pool_size;
1078 new_pool_size = adapter->req_tx_entries_per_subcrq;
1079 old_mtu = adapter->prev_mtu;
1080 new_mtu = adapter->req_mtu;
1081
1082 if (old_mtu != new_mtu ||
1083 old_num_pools != new_num_pools ||
1084 old_pool_size != new_pool_size)
1085 return false;
1086
1087 return true;
1088 }
1089
1090 /**
1091 * init_tx_pools(): Initialize the set of transmit pools in the adapter.
1092 * @netdev: net device associated with the vnic interface
1093 *
1094 * Initialize the set of transmit pools in the ibmvnic adapter associated
1095 * with the net_device @netdev. If possible, reuse the existing tx pools.
1096 * Otherwise free any existing pools and allocate a new set of pools
1097 * before initializing them.
1098 *
1099 * Return: 0 on success and negative value on error.
1100 */
1101 static int init_tx_pools(struct net_device *netdev)
1102 {
1103 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1104 struct device *dev = &adapter->vdev->dev;
1105 int num_pools;
1106 u64 pool_size; /* # of buffers in pool */
1107 u64 buff_size;
1108 int i, j, rc;
1109
1110 num_pools = adapter->req_tx_queues;
1111
1112 /* We must notify the VIOS about the LTB on all resets - but we only
1113 * need to alloc/populate pools if either the number of buffers or
1114 * size of each buffer in the pool has changed.
1115 */
1116 if (reuse_tx_pools(adapter)) {
1117 netdev_dbg(netdev, "Reusing tx pools\n");
1118 goto update_ltb;
1119 }
1120
1121 /* Allocate/populate the pools. */
1122 release_tx_pools(adapter);
1123
1124 pool_size = adapter->req_tx_entries_per_subcrq;
1125 num_pools = adapter->num_active_tx_scrqs;
1126
1127 adapter->tx_pool = kcalloc(num_pools,
1128 sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
1129 if (!adapter->tx_pool)
1130 return -ENOMEM;
1131
1132 adapter->tso_pool = kcalloc(num_pools,
1133 sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
1134 /* To simplify release_tx_pools() ensure that ->tx_pool and
1135 * ->tso_pool are either both NULL or both non-NULL.
1136 */
1137 if (!adapter->tso_pool) {
1138 kfree(adapter->tx_pool);
1139 adapter->tx_pool = NULL;
1140 return -ENOMEM;
1141 }
1142
1143 /* Set num_active_tx_pools early. If we fail below after partial
1144 * allocation, release_tx_pools() will know how many to look for.
1145 */
1146 adapter->num_active_tx_pools = num_pools;
1147
1148 buff_size = adapter->req_mtu + VLAN_HLEN;
1149 buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
1150
1151 for (i = 0; i < num_pools; i++) {
1152 dev_dbg(dev, "Init tx pool %d [%llu, %llu]\n",
1153 i, adapter->req_tx_entries_per_subcrq, buff_size);
1154
1155 rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
1156 pool_size, buff_size);
1157 if (rc)
1158 goto out_release;
1159
1160 rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
1161 IBMVNIC_TSO_BUFS,
1162 IBMVNIC_TSO_BUF_SZ);
1163 if (rc)
1164 goto out_release;
1165 }
1166
1167 adapter->prev_tx_pool_size = pool_size;
1168 adapter->prev_mtu = adapter->req_mtu;
1169
1170 update_ltb:
1171 /* NOTE: All tx_pools have the same number of buffers (which is
1172 * the same as pool_size). All tso_pools have IBMVNIC_TSO_BUFS
1173 * buffers (see the init_one_tx_pool() calls for these).
1174 * For consistency, we use tx_pool->num_buffers and
1175 * tso_pool->num_buffers below.
1176 */
1177 rc = -1;
1178 for (i = 0; i < num_pools; i++) {
1179 struct ibmvnic_tx_pool *tso_pool;
1180 struct ibmvnic_tx_pool *tx_pool;
1181
1182 tx_pool = &adapter->tx_pool[i];
1183
1184 dev_dbg(dev, "Updating LTB for tx pool %d [%d, %d]\n",
1185 i, tx_pool->num_buffers, tx_pool->buf_size);
1186
1187 rc = alloc_ltb_set(adapter, &tx_pool->ltb_set,
1188 tx_pool->num_buffers, tx_pool->buf_size);
1189 if (rc)
1190 goto out;
1191
1192 tx_pool->consumer_index = 0;
1193 tx_pool->producer_index = 0;
1194
1195 for (j = 0; j < tx_pool->num_buffers; j++)
1196 tx_pool->free_map[j] = j;
1197
1198 tso_pool = &adapter->tso_pool[i];
1199
1200 dev_dbg(dev, "Updating LTB for tso pool %d [%d, %d]\n",
1201 i, tso_pool->num_buffers, tso_pool->buf_size);
1202
1203 rc = alloc_ltb_set(adapter, &tso_pool->ltb_set,
1204 tso_pool->num_buffers, tso_pool->buf_size);
1205 if (rc)
1206 goto out;
1207
1208 tso_pool->consumer_index = 0;
1209 tso_pool->producer_index = 0;
1210
1211 for (j = 0; j < tso_pool->num_buffers; j++)
1212 tso_pool->free_map[j] = j;
1213 }
1214
1215 return 0;
1216 out_release:
1217 release_tx_pools(adapter);
1218 out:
1219 /* We failed to allocate one or more LTBs or map them on the VIOS.
1220 * Hold onto the pools and any LTBs that we did allocate/map.
1221 */
1222 return rc;
1223 }
1224
1225 static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
1226 {
1227 int i;
1228
1229 if (adapter->napi_enabled)
1230 return;
1231
1232 for (i = 0; i < adapter->req_rx_queues; i++)
1233 napi_enable(&adapter->napi[i]);
1234
1235 adapter->napi_enabled = true;
1236 }
1237
1238 static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
1239 {
1240 int i;
1241
1242 if (!adapter->napi_enabled)
1243 return;
1244
1245 for (i = 0; i < adapter->req_rx_queues; i++) {
1246 netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
1247 napi_disable(&adapter->napi[i]);
1248 }
1249
1250 adapter->napi_enabled = false;
1251 }
1252
1253 static int init_napi(struct ibmvnic_adapter *adapter)
1254 {
1255 int i;
1256
1257 adapter->napi = kcalloc(adapter->req_rx_queues,
1258 sizeof(struct napi_struct), GFP_KERNEL);
1259 if (!adapter->napi)
1260 return -ENOMEM;
1261
1262 for (i = 0; i < adapter->req_rx_queues; i++) {
1263 netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
1264 netif_napi_add(adapter->netdev, &adapter->napi[i],
1265 ibmvnic_poll);
1266 }
1267
1268 adapter->num_active_rx_napi = adapter->req_rx_queues;
1269 return 0;
1270 }
1271
1272 static void release_napi(struct ibmvnic_adapter *adapter)
1273 {
1274 int i;
1275
1276 if (!adapter->napi)
1277 return;
1278
1279 for (i = 0; i < adapter->num_active_rx_napi; i++) {
1280 netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
1281 netif_napi_del(&adapter->napi[i]);
1282 }
1283
1284 kfree(adapter->napi);
1285 adapter->napi = NULL;
1286 adapter->num_active_rx_napi = 0;
1287 adapter->napi_enabled = false;
1288 }
1289
1290 static const char *adapter_state_to_string(enum vnic_state state)
1291 {
1292 switch (state) {
1293 case VNIC_PROBING:
1294 return "PROBING";
1295 case VNIC_PROBED:
1296 return "PROBED";
1297 case VNIC_OPENING:
1298 return "OPENING";
1299 case VNIC_OPEN:
1300 return "OPEN";
1301 case VNIC_CLOSING:
1302 return "CLOSING";
1303 case VNIC_CLOSED:
1304 return "CLOSED";
1305 case VNIC_REMOVING:
1306 return "REMOVING";
1307 case VNIC_REMOVED:
1308 return "REMOVED";
1309 case VNIC_DOWN:
1310 return "DOWN";
1311 }
1312 return "UNKNOWN";
1313 }
1314
1315 static int ibmvnic_login(struct net_device *netdev)
1316 {
1317 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1318 unsigned long timeout = msecs_to_jiffies(20000);
1319 int retry_count = 0;
1320 int retries = 10;
1321 bool retry;
1322 int rc;
1323
1324 do {
1325 retry = false;
1326 if (retry_count > retries) {
1327 netdev_warn(netdev, "Login attempts exceeded\n");
1328 return -EACCES;
1329 }
1330
1331 adapter->init_done_rc = 0;
1332 reinit_completion(&adapter->init_done);
1333 rc = send_login(adapter);
1334 if (rc)
1335 return rc;
1336
1337 if (!wait_for_completion_timeout(&adapter->init_done,
1338 timeout)) {
1339 netdev_warn(netdev, "Login timed out, retrying...\n");
1340 retry = true;
1341 adapter->init_done_rc = 0;
1342 retry_count++;
1343 continue;
1344 }
1345
1346 if (adapter->init_done_rc == ABORTED) {
1347 netdev_warn(netdev, "Login aborted, retrying...\n");
1348 retry = true;
1349 adapter->init_done_rc = 0;
1350 retry_count++;
1351 /* FW or device may be busy, so
1352 * wait a bit before retrying login
1353 */
1354 msleep(500);
1355 } else if (adapter->init_done_rc == PARTIALSUCCESS) {
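/* Partial success from the server: renegotiate by dropping the
 * sub-CRQs, re-querying capabilities and rebuilding the sub-CRQs
 * before retrying the login.
 */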
1356 retry_count++;
1357 release_sub_crqs(adapter, 1);
1358
1359 retry = true;
1360 netdev_dbg(netdev,
1361 "Received partial success, retrying...\n");
1362 adapter->init_done_rc = 0;
1363 reinit_completion(&adapter->init_done);
1364 send_query_cap(adapter);
1365 if (!wait_for_completion_timeout(&adapter->init_done,
1366 timeout)) {
1367 netdev_warn(netdev,
1368 "Capabilities query timed out\n");
1369 return -ETIMEDOUT;
1370 }
1371
1372 rc = init_sub_crqs(adapter);
1373 if (rc) {
1374 netdev_warn(netdev,
1375 "SCRQ initialization failed\n");
1376 return rc;
1377 }
1378
1379 rc = init_sub_crq_irqs(adapter);
1380 if (rc) {
1381 netdev_warn(netdev,
1382 "SCRQ irq initialization failed\n");
1383 return rc;
1384 }
1385 } else if (adapter->init_done_rc) {
1386 netdev_warn(netdev, "Adapter login failed, init_done_rc = %d\n",
1387 adapter->init_done_rc);
1388 return -EIO;
1389 }
1390 } while (retry);
1391
1392 __ibmvnic_set_mac(netdev, adapter->mac_addr);
1393
1394 netdev_dbg(netdev, "[S:%s] Login succeeded\n", adapter_state_to_string(adapter->state));
1395 return 0;
1396 }
1397
1398 static void release_login_buffer(struct ibmvnic_adapter *adapter)
1399 {
1400 kfree(adapter->login_buf);
1401 adapter->login_buf = NULL;
1402 }
1403
1404 static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
1405 {
1406 kfree(adapter->login_rsp_buf);
1407 adapter->login_rsp_buf = NULL;
1408 }
1409
1410 static void release_resources(struct ibmvnic_adapter *adapter)
1411 {
1412 release_vpd_data(adapter);
1413
1414 release_napi(adapter);
1415 release_login_buffer(adapter);
1416 release_login_rsp_buffer(adapter);
1417 }
1418
1419 static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
1420 {
1421 struct net_device *netdev = adapter->netdev;
1422 unsigned long timeout = msecs_to_jiffies(20000);
1423 union ibmvnic_crq crq;
1424 bool resend;
1425 int rc;
1426
1427 netdev_dbg(netdev, "setting link state %d\n", link_state);
1428
1429 memset(&crq, 0, sizeof(crq));
1430 crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
1431 crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
1432 crq.logical_link_state.link_state = link_state;
1433
1434 do {
1435 resend = false;
1436
1437 reinit_completion(&adapter->init_done);
1438 rc = ibmvnic_send_crq(adapter, &crq);
1439 if (rc) {
1440 netdev_err(netdev, "Failed to set link state\n");
1441 return rc;
1442 }
1443
1444 if (!wait_for_completion_timeout(&adapter->init_done,
1445 timeout)) {
1446 netdev_err(netdev, "timeout setting link state\n");
1447 return -ETIMEDOUT;
1448 }
1449
1450 if (adapter->init_done_rc == PARTIALSUCCESS) {
1451 /* Partial success, delay and re-send */
1452 mdelay(1000);
1453 resend = true;
1454 } else if (adapter->init_done_rc) {
1455 netdev_warn(netdev, "Unable to set link state, rc=%d\n",
1456 adapter->init_done_rc);
1457 return adapter->init_done_rc;
1458 }
1459 } while (resend);
1460
1461 return 0;
1462 }
1463
1464 static int set_real_num_queues(struct net_device *netdev)
1465 {
1466 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1467 int rc;
1468
1469 netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
1470 adapter->req_tx_queues, adapter->req_rx_queues);
1471
1472 rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
1473 if (rc) {
1474 netdev_err(netdev, "failed to set the number of tx queues\n");
1475 return rc;
1476 }
1477
1478 rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
1479 if (rc)
1480 netdev_err(netdev, "failed to set the number of rx queues\n");
1481
1482 return rc;
1483 }
1484
1485 static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
1486 {
1487 struct device *dev = &adapter->vdev->dev;
1488 union ibmvnic_crq crq;
1489 int len = 0;
1490 int rc;
1491
1492 if (adapter->vpd->buff)
1493 len = adapter->vpd->len;
1494
1495 mutex_lock(&adapter->fw_lock);
1496 adapter->fw_done_rc = 0;
1497 reinit_completion(&adapter->fw_done);
1498
1499 crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
1500 crq.get_vpd_size.cmd = GET_VPD_SIZE;
1501 rc = ibmvnic_send_crq(adapter, &crq);
1502 if (rc) {
1503 mutex_unlock(&adapter->fw_lock);
1504 return rc;
1505 }
1506
1507 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
1508 if (rc) {
1509 dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
1510 mutex_unlock(&adapter->fw_lock);
1511 return rc;
1512 }
1513 mutex_unlock(&adapter->fw_lock);
1514
1515 if (!adapter->vpd->len)
1516 return -ENODATA;
1517
1518 if (!adapter->vpd->buff)
1519 adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
1520 else if (adapter->vpd->len != len)
1521 adapter->vpd->buff =
1522 krealloc(adapter->vpd->buff,
1523 adapter->vpd->len, GFP_KERNEL);
1524
1525 if (!adapter->vpd->buff) {
1526 dev_err(dev, "Could allocate VPD buffer\n");
1527 return -ENOMEM;
1528 }
1529
1530 adapter->vpd->dma_addr =
1531 dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
1532 DMA_FROM_DEVICE);
1533 if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
1534 dev_err(dev, "Could not map VPD buffer\n");
1535 kfree(adapter->vpd->buff);
1536 adapter->vpd->buff = NULL;
1537 return -ENOMEM;
1538 }
1539
1540 mutex_lock(&adapter->fw_lock);
1541 adapter->fw_done_rc = 0;
1542 reinit_completion(&adapter->fw_done);
1543
1544 crq.get_vpd.first = IBMVNIC_CRQ_CMD;
1545 crq.get_vpd.cmd = GET_VPD;
1546 crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
1547 crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
1548 rc = ibmvnic_send_crq(adapter, &crq);
1549 if (rc) {
1550 kfree(adapter->vpd->buff);
1551 adapter->vpd->buff = NULL;
1552 mutex_unlock(&adapter->fw_lock);
1553 return rc;
1554 }
1555
1556 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
1557 if (rc) {
1558 dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
1559 kfree(adapter->vpd->buff);
1560 adapter->vpd->buff = NULL;
1561 mutex_unlock(&adapter->fw_lock);
1562 return rc;
1563 }
1564
1565 mutex_unlock(&adapter->fw_lock);
1566 return 0;
1567 }
1568
1569 static int init_resources(struct ibmvnic_adapter *adapter)
1570 {
1571 struct net_device *netdev = adapter->netdev;
1572 int rc;
1573
1574 rc = set_real_num_queues(netdev);
1575 if (rc)
1576 return rc;
1577
1578 adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
1579 if (!adapter->vpd)
1580 return -ENOMEM;
1581
1582 /* Vital Product Data (VPD) */
1583 rc = ibmvnic_get_vpd(adapter);
1584 if (rc) {
1585 netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
1586 return rc;
1587 }
1588
1589 rc = init_napi(adapter);
1590 if (rc)
1591 return rc;
1592
1593 send_query_map(adapter);
1594
1595 rc = init_rx_pools(netdev);
1596 if (rc)
1597 return rc;
1598
1599 rc = init_tx_pools(netdev);
1600 return rc;
1601 }
1602
1603 static int __ibmvnic_open(struct net_device *netdev)
1604 {
1605 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1606 enum vnic_state prev_state = adapter->state;
1607 int i, rc;
1608
1609 adapter->state = VNIC_OPENING;
1610 replenish_pools(adapter);
1611 ibmvnic_napi_enable(adapter);
1612
1613 /* We're ready to receive frames, enable the sub-crq interrupts and
1614 * set the logical link state to up
1615 */
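/* If reopening from CLOSED, hard irqs were disabled via disable_irq()
 * in ibmvnic_disable_irqs(), so re-enable them here to keep the
 * enable_irq()/disable_irq() calls balanced.
 */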
1616 for (i = 0; i < adapter->req_rx_queues; i++) {
1617 netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
1618 if (prev_state == VNIC_CLOSED)
1619 enable_irq(adapter->rx_scrq[i]->irq);
1620 enable_scrq_irq(adapter, adapter->rx_scrq[i]);
1621 }
1622
1623 for (i = 0; i < adapter->req_tx_queues; i++) {
1624 netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
1625 if (prev_state == VNIC_CLOSED)
1626 enable_irq(adapter->tx_scrq[i]->irq);
1627 enable_scrq_irq(adapter, adapter->tx_scrq[i]);
1628 netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i));
1629 }
1630
1631 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
1632 if (rc) {
1633 ibmvnic_napi_disable(adapter);
1634 ibmvnic_disable_irqs(adapter);
1635 return rc;
1636 }
1637
1638 adapter->tx_queues_active = true;
1639
1640 /* Since queues were stopped until now, there shouldn't be
1641 * anyone in ibmvnic_complete_tx() or ibmvnic_xmit(), so maybe we
1642 * don't need the synchronize_rcu()? Leaving it for consistency
1643 * with setting ->tx_queues_active = false.
1644 */
1645 synchronize_rcu();
1646
1647 netif_tx_start_all_queues(netdev);
1648
1649 if (prev_state == VNIC_CLOSED) {
1650 for (i = 0; i < adapter->req_rx_queues; i++)
1651 napi_schedule(&adapter->napi[i]);
1652 }
1653
1654 adapter->state = VNIC_OPEN;
1655 return rc;
1656 }
1657
1658 static int ibmvnic_open(struct net_device *netdev)
1659 {
1660 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1661 int rc;
1662
1663 ASSERT_RTNL();
1664
1665 /* If device failover is pending or we are about to reset, just set
1666 * device state and return. Device operation will be handled by reset
1667 * routine.
1668 *
1669 * It should be safe to overwrite the adapter->state here. Since
1670 * we hold the rtnl, either the reset has not actually started or
1671 * the rtnl got dropped during the set_link_state() in do_reset().
1672 * In the former case, no one else is changing the state (again we
1673 * have the rtnl) and in the latter case, do_reset() will detect and
1674 * honor our setting below.
1675 */
1676 if (adapter->failover_pending || (test_bit(0, &adapter->resetting))) {
1677 netdev_dbg(netdev, "[S:%s FOP:%d] Resetting, deferring open\n",
1678 adapter_state_to_string(adapter->state),
1679 adapter->failover_pending);
1680 adapter->state = VNIC_OPEN;
1681 rc = 0;
1682 goto out;
1683 }
1684
1685 if (adapter->state != VNIC_CLOSED) {
1686 rc = ibmvnic_login(netdev);
1687 if (rc)
1688 goto out;
1689
1690 rc = init_resources(adapter);
1691 if (rc) {
1692 netdev_err(netdev, "failed to initialize resources\n");
1693 goto out;
1694 }
1695 }
1696
1697 rc = __ibmvnic_open(netdev);
1698
1699 out:
1700 /* If open failed and there is a pending failover or in-progress reset,
1701 * set device state and return. Device operation will be handled by
1702 * reset routine. See also comments above regarding rtnl.
1703 */
1704 if (rc &&
1705 (adapter->failover_pending || (test_bit(0, &adapter->resetting)))) {
1706 adapter->state = VNIC_OPEN;
1707 rc = 0;
1708 }
1709
1710 if (rc) {
1711 release_resources(adapter);
1712 release_rx_pools(adapter);
1713 release_tx_pools(adapter);
1714 }
1715
1716 return rc;
1717 }
1718
1719 static void clean_rx_pools(struct ibmvnic_adapter *adapter)
1720 {
1721 struct ibmvnic_rx_pool *rx_pool;
1722 struct ibmvnic_rx_buff *rx_buff;
1723 u64 rx_entries;
1724 int rx_scrqs;
1725 int i, j;
1726
1727 if (!adapter->rx_pool)
1728 return;
1729
1730 rx_scrqs = adapter->num_active_rx_pools;
1731 rx_entries = adapter->req_rx_add_entries_per_subcrq;
1732
1733 /* Free any remaining skbs in the rx buffer pools */
1734 for (i = 0; i < rx_scrqs; i++) {
1735 rx_pool = &adapter->rx_pool[i];
1736 if (!rx_pool || !rx_pool->rx_buff)
1737 continue;
1738
1739 netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
1740 for (j = 0; j < rx_entries; j++) {
1741 rx_buff = &rx_pool->rx_buff[j];
1742 if (rx_buff && rx_buff->skb) {
1743 dev_kfree_skb_any(rx_buff->skb);
1744 rx_buff->skb = NULL;
1745 }
1746 }
1747 }
1748 }
1749
1750 static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
1751 struct ibmvnic_tx_pool *tx_pool)
1752 {
1753 struct ibmvnic_tx_buff *tx_buff;
1754 u64 tx_entries;
1755 int i;
1756
1757 if (!tx_pool || !tx_pool->tx_buff)
1758 return;
1759
1760 tx_entries = tx_pool->num_buffers;
1761
1762 for (i = 0; i < tx_entries; i++) {
1763 tx_buff = &tx_pool->tx_buff[i];
1764 if (tx_buff && tx_buff->skb) {
1765 dev_kfree_skb_any(tx_buff->skb);
1766 tx_buff->skb = NULL;
1767 }
1768 }
1769 }
1770
clean_tx_pools(struct ibmvnic_adapter * adapter)1771 static void clean_tx_pools(struct ibmvnic_adapter *adapter)
1772 {
1773 int tx_scrqs;
1774 int i;
1775
1776 if (!adapter->tx_pool || !adapter->tso_pool)
1777 return;
1778
1779 tx_scrqs = adapter->num_active_tx_pools;
1780
1781 /* Free any remaining skbs in the tx buffer pools */
1782 for (i = 0; i < tx_scrqs; i++) {
1783 netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
1784 clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
1785 clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
1786 }
1787 }
1788
ibmvnic_disable_irqs(struct ibmvnic_adapter * adapter)1789 static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
1790 {
1791 struct net_device *netdev = adapter->netdev;
1792 int i;
1793
1794 if (adapter->tx_scrq) {
1795 for (i = 0; i < adapter->req_tx_queues; i++)
1796 if (adapter->tx_scrq[i]->irq) {
1797 netdev_dbg(netdev,
1798 "Disabling tx_scrq[%d] irq\n", i);
1799 disable_scrq_irq(adapter, adapter->tx_scrq[i]);
1800 disable_irq(adapter->tx_scrq[i]->irq);
1801 }
1802 }
1803
1804 if (adapter->rx_scrq) {
1805 for (i = 0; i < adapter->req_rx_queues; i++) {
1806 if (adapter->rx_scrq[i]->irq) {
1807 netdev_dbg(netdev,
1808 "Disabling rx_scrq[%d] irq\n", i);
1809 disable_scrq_irq(adapter, adapter->rx_scrq[i]);
1810 disable_irq(adapter->rx_scrq[i]->irq);
1811 }
1812 }
1813 }
1814 }
1815
ibmvnic_cleanup(struct net_device * netdev)1816 static void ibmvnic_cleanup(struct net_device *netdev)
1817 {
1818 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1819
1820 /* ensure that transmissions are stopped if called by do_reset */
1821
1822 adapter->tx_queues_active = false;
1823
1824 /* Ensure complete_tx() and ibmvnic_xmit() see ->tx_queues_active
1825 * update so they don't restart a queue after we stop it below.
1826 */
1827 synchronize_rcu();
1828
1829 if (test_bit(0, &adapter->resetting))
1830 netif_tx_disable(netdev);
1831 else
1832 netif_tx_stop_all_queues(netdev);
1833
1834 ibmvnic_napi_disable(adapter);
1835 ibmvnic_disable_irqs(adapter);
1836 }
1837
__ibmvnic_close(struct net_device * netdev)1838 static int __ibmvnic_close(struct net_device *netdev)
1839 {
1840 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1841 int rc = 0;
1842
1843 adapter->state = VNIC_CLOSING;
1844 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
1845 adapter->state = VNIC_CLOSED;
1846 return rc;
1847 }
1848
ibmvnic_close(struct net_device * netdev)1849 static int ibmvnic_close(struct net_device *netdev)
1850 {
1851 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1852 int rc;
1853
1854 netdev_dbg(netdev, "[S:%s FOP:%d FRR:%d] Closing\n",
1855 adapter_state_to_string(adapter->state),
1856 adapter->failover_pending,
1857 adapter->force_reset_recovery);
1858
1859 /* If device failover is pending, just set device state and return.
1860 * Device operation will be handled by reset routine.
1861 */
1862 if (adapter->failover_pending) {
1863 adapter->state = VNIC_CLOSED;
1864 return 0;
1865 }
1866
1867 rc = __ibmvnic_close(netdev);
1868 ibmvnic_cleanup(netdev);
1869 clean_rx_pools(adapter);
1870 clean_tx_pools(adapter);
1871
1872 return rc;
1873 }
1874
1875 /**
1876 * build_hdr_data - creates L2/L3/L4 header data buffer
1877 * @hdr_field: bitfield determining needed headers
1878 * @skb: socket buffer
1879 * @hdr_len: array of header lengths
1880 * @hdr_data: buffer to write the header to
1881 *
1882 * Reads hdr_field to determine which headers are needed by firmware.
1883 * Builds a buffer containing these headers. Saves individual header
1884 * lengths and total buffer length to be used to build descriptors.
1885 */
build_hdr_data(u8 hdr_field,struct sk_buff * skb,int * hdr_len,u8 * hdr_data)1886 static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
1887 int *hdr_len, u8 *hdr_data)
1888 {
1889 int len = 0;
1890 u8 *hdr;
1891
1892 if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
1893 hdr_len[0] = sizeof(struct vlan_ethhdr);
1894 else
1895 hdr_len[0] = sizeof(struct ethhdr);
1896
1897 if (skb->protocol == htons(ETH_P_IP)) {
1898 hdr_len[1] = ip_hdr(skb)->ihl * 4;
1899 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1900 hdr_len[2] = tcp_hdrlen(skb);
1901 else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1902 hdr_len[2] = sizeof(struct udphdr);
1903 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1904 hdr_len[1] = sizeof(struct ipv6hdr);
1905 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1906 hdr_len[2] = tcp_hdrlen(skb);
1907 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
1908 hdr_len[2] = sizeof(struct udphdr);
1909 } else if (skb->protocol == htons(ETH_P_ARP)) {
1910 hdr_len[1] = arp_hdr_len(skb->dev);
1911 hdr_len[2] = 0;
1912 }
1913
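	/* A note on the hdr_field layout, as used here and by the caller in
	 * ibmvnic_xmit(): bit 7 appears to mean "any header descriptors are
	 * needed at all", while bits 6, 5 and 4 select the L2, L3 and L4
	 * headers respectively (see the three checks below).
	 */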
	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}

/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field: bitfield determining needed headers
 * @hdr_data: buffer containing header data
 * @len: length of data buffer
 * @hdr_len: array of individual header lengths
 * @scrq_arr: descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */

static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			    union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	int num_descs = 0;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
		num_descs++;
	}

	return num_descs;
}
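
/* A worked example of the split above: for a 14-byte Ethernet header plus a
 * 20-byte IPv4 header and a 20-byte TCP header (54 bytes of header data in
 * total), the first descriptor carries 24 bytes and two extension
 * descriptors carry the remaining 29 + 1 bytes, so create_hdr_descs()
 * returns 3.
 */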

/**
 * build_hdr_descs_arr - build a header descriptor array
 * @skb: tx socket buffer
 * @indir_arr: indirect array
 * @num_entries: number of descriptors to be sent
 * @hdr_field: bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */

static void build_hdr_descs_arr(struct sk_buff *skb,
				union sub_crq *indir_arr,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	u8 hdr_data[140] = {0};
	int tot_len;

	tot_len = build_hdr_data(hdr_field, skb, hdr_len,
				 hdr_data);
	*num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
					 indir_arr + 1);
}

static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
				    struct net_device *netdev)
{
	/* For some backing devices, mishandling of small packets
	 * can result in a loss of connection or TX stall. Device
	 * architects recommend that no packet should be smaller
	 * than the minimum MTU value provided to the driver, so
	 * pad any packets to that length.
	 */
	if (skb->len < netdev->min_mtu)
		return skb_put_padto(skb, netdev->min_mtu);

	return 0;
}

static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
					 struct ibmvnic_sub_crq_queue *tx_scrq)
{
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_tx_buff *tx_buff;
	struct ibmvnic_tx_pool *tx_pool;
	union sub_crq tx_scrq_entry;
	int queue_num;
	int entries;
	int index;
	int i;

	ind_bufp = &tx_scrq->ind_buf;
	entries = (u64)ind_bufp->index;
	queue_num = tx_scrq->pool_index;

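	/* Walk the not-yet-submitted descriptors in reverse. The v1
	 * correlator carries the buffer index within the pool, with the
	 * IBMVNIC_TSO_POOL_MASK bit distinguishing TSO-pool buffers from
	 * regular tx-pool buffers (the same encoding used when the
	 * descriptor was built in ibmvnic_xmit()).
	 */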
	for (i = entries - 1; i >= 0; --i) {
		tx_scrq_entry = ind_bufp->indir_arr[i];
		if (tx_scrq_entry.v1.type != IBMVNIC_TX_DESC)
			continue;
		index = be32_to_cpu(tx_scrq_entry.v1.correlator);
		if (index & IBMVNIC_TSO_POOL_MASK) {
			tx_pool = &adapter->tso_pool[queue_num];
			index &= ~IBMVNIC_TSO_POOL_MASK;
		} else {
			tx_pool = &adapter->tx_pool[queue_num];
		}
		tx_pool->free_map[tx_pool->consumer_index] = index;
		tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
					  tx_pool->num_buffers - 1 :
					  tx_pool->consumer_index - 1;
		tx_buff = &tx_pool->tx_buff[index];
		adapter->netdev->stats.tx_packets--;
		adapter->netdev->stats.tx_bytes -= tx_buff->skb->len;
		adapter->tx_stats_buffers[queue_num].packets--;
		adapter->tx_stats_buffers[queue_num].bytes -=
			tx_buff->skb->len;
		dev_kfree_skb_any(tx_buff->skb);
		tx_buff->skb = NULL;
		adapter->netdev->stats.tx_dropped++;
	}

	ind_bufp->index = 0;

	if (atomic_sub_return(entries, &tx_scrq->used) <=
	    (adapter->req_tx_entries_per_subcrq / 2) &&
	    __netif_subqueue_stopped(adapter->netdev, queue_num)) {
		rcu_read_lock();

		if (adapter->tx_queues_active) {
			netif_wake_subqueue(adapter->netdev, queue_num);
			netdev_dbg(adapter->netdev, "Started queue %d\n",
				   queue_num);
		}

		rcu_read_unlock();
	}
}

static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
				 struct ibmvnic_sub_crq_queue *tx_scrq)
{
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	u64 dma_addr;
	u64 entries;
	u64 handle;
	int rc;

	ind_bufp = &tx_scrq->ind_buf;
	dma_addr = (u64)ind_bufp->indir_dma;
	entries = (u64)ind_bufp->index;
	handle = tx_scrq->handle;

	if (!entries)
		return 0;
	rc = send_subcrq_indirect(adapter, handle, dma_addr, entries);
	if (rc)
		ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
	else
		ind_bufp->index = 0;
	return 0;
}

static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_sub_crq_queue *tx_scrq;
	struct ibmvnic_long_term_buff *ltb;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	netdev_tx_t ret = NETDEV_TX_OK;
	unsigned int tx_map_failed = 0;
	union sub_crq indir_arr[16];
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	int bufidx = 0;
	u8 proto = 0;

	/* If a reset is in progress, drop the packet since
	 * the scrqs may get torn down. Otherwise use the
	 * rcu to ensure reset waits for us to complete.
	 */
	rcu_read_lock();
	if (!adapter->tx_queues_active) {
		dev_kfree_skb_any(skb);

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	tx_scrq = adapter->tx_scrq[queue_num];
	txq = netdev_get_tx_queue(netdev, queue_num);
	ind_bufp = &tx_scrq->ind_buf;

	if (ibmvnic_xmit_workarounds(skb, netdev)) {
		tx_dropped++;
		tx_send_failed++;
		ret = NETDEV_TX_OK;
		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		goto out;
	}

	if (skb_is_gso(skb))
		tx_pool = &adapter->tso_pool[queue_num];
	else
		tx_pool = &adapter->tx_pool[queue_num];

	bufidx = tx_pool->free_map[tx_pool->consumer_index];

	if (bufidx == IBMVNIC_INVALID_MAP) {
		dev_kfree_skb_any(skb);
		tx_send_failed++;
		tx_dropped++;
		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		ret = NETDEV_TX_OK;
		goto out;
	}

	tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;

	map_txpool_buf_to_ltb(tx_pool, bufidx, &ltb, &offset);

	dst = ltb->buff + offset;
	memset(dst, 0, tx_pool->buf_size);
	data_dma_addr = ltb->addr + offset;

	if (skb_shinfo(skb)->nr_frags) {
		int cur, i;

		/* Copy the head */
		skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
		cur = skb_headlen(skb);

		/* Copy the frags */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			memcpy(dst + cur, skb_frag_address(frag),
			       skb_frag_size(frag));
			cur += skb_frag_size(frag);
		}
	} else {
		skb_copy_from_linear_data(skb, dst, skb->len);
	}

	/* post changes to long_term_buff *dst before VIOS accessing it */
	dma_wmb();

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) % tx_pool->num_buffers;

	tx_buff = &tx_pool->tx_buff[bufidx];
	tx_buff->skb = skb;
	tx_buff->index = bufidx;
	tx_buff->pool_index = queue_num;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;

	if (skb_is_gso(skb))
		tx_crq.v1.correlator =
			cpu_to_be32(bufidx | IBMVNIC_TSO_POOL_MASK);
	else
		tx_crq.v1.correlator = cpu_to_be32(bufidx);
	tx_crq.v1.dma_reg = cpu_to_be16(ltb->map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		proto = ip_hdr(skb)->protocol;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
		proto = ipv6_hdr(skb)->nexthdr;
	}

	if (proto == IPPROTO_TCP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
	else if (proto == IPPROTO_UDP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	if (skb_is_gso(skb)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
		tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
		hdrs += 2;
	}

	if ((*hdrs >> 7) & 1)
		build_hdr_descs_arr(skb, indir_arr, &num_entries, *hdrs);

	tx_crq.v1.n_crq_elem = num_entries;
	tx_buff->num_entries = num_entries;
	/* flush buffer if current entry cannot fit */
	if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) {
		lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		if (lpar_rc != H_SUCCESS)
			goto tx_flush_err;
	}

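	/* Slot 0 of the staged batch is the TX descriptor itself; any header
	 * descriptors built by build_hdr_descs_arr() above already occupy
	 * indir_arr[1..num_entries - 1].
	 */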
	indir_arr[0] = tx_crq;
	memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0],
	       num_entries * sizeof(struct ibmvnic_generic_scrq));
	ind_bufp->index += num_entries;
	if (__netdev_tx_sent_queue(txq, skb->len,
				   netdev_xmit_more() &&
				   ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) {
		lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		if (lpar_rc != H_SUCCESS)
			goto tx_err;
	}

	if (atomic_add_return(num_entries, &tx_scrq->used)
	    >= adapter->req_tx_entries_per_subcrq) {
		netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
		netif_stop_subqueue(netdev, queue_num);
	}

	tx_packets++;
	tx_bytes += skb->len;
	txq_trans_cond_update(txq);
	ret = NETDEV_TX_OK;
	goto out;

tx_flush_err:
	dev_kfree_skb_any(skb);
	tx_buff->skb = NULL;
	tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
				  tx_pool->num_buffers - 1 :
				  tx_pool->consumer_index - 1;
	tx_dropped++;
tx_err:
	if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
		dev_err_ratelimited(dev, "tx: send failed\n");

	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable TX and report carrier off if queue is closed
		 * or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset or some other action.
		 */
		netif_tx_stop_all_queues(netdev);
		netif_carrier_off(netdev);
	}
out:
	rcu_read_unlock();
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;
	adapter->tx_stats_buffers[queue_num].packets += tx_packets;
	adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
	adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;

	return ret;
}

static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}

static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	union ibmvnic_crq crq;
	int rc;

	if (!is_valid_ether_addr(dev_addr)) {
		rc = -EADDRNOTAVAIL;
		goto err;
	}

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		rc = -EIO;
		mutex_unlock(&adapter->fw_lock);
		goto err;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	if (rc || adapter->fw_done_rc) {
		rc = -EIO;
		mutex_unlock(&adapter->fw_lock);
		goto err;
	}
	mutex_unlock(&adapter->fw_lock);
	return 0;
err:
	ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
	return rc;
}

static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int rc;

	rc = 0;
	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ether_addr_copy(adapter->mac_addr, addr->sa_data);
	if (adapter->state != VNIC_PROBED)
		rc = __ibmvnic_set_mac(netdev, addr->sa_data);

	return rc;
}

static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason)
{
	switch (reason) {
	case VNIC_RESET_FAILOVER:
		return "FAILOVER";
	case VNIC_RESET_MOBILITY:
		return "MOBILITY";
	case VNIC_RESET_FATAL:
		return "FATAL";
	case VNIC_RESET_NON_FATAL:
		return "NON_FATAL";
	case VNIC_RESET_TIMEOUT:
		return "TIMEOUT";
	case VNIC_RESET_CHANGE_PARAM:
		return "CHANGE_PARAM";
	case VNIC_RESET_PASSIVE_INIT:
		return "PASSIVE_INIT";
	}
	return "UNKNOWN";
}

/*
 * Initialize the init_done completion and return code values. We
 * can get a transport event just after registering the CRQ and the
 * tasklet will use this to communicate the transport event. To ensure
 * we don't miss the notification/error, initialize these _before_
 * registering the CRQ.
 */
static inline void reinit_init_done(struct ibmvnic_adapter *adapter)
{
	reinit_completion(&adapter->init_done);
	adapter->init_done_rc = 0;
}

/*
 * do_reset returns zero if we are able to keep processing reset events, or
 * non-zero if we hit a fatal error and must halt.
 */
static int do_reset(struct ibmvnic_adapter *adapter,
		    struct ibmvnic_rwi *rwi, u32 reset_state)
{
	struct net_device *netdev = adapter->netdev;
	u64 old_num_rx_queues, old_num_tx_queues;
	u64 old_num_rx_slots, old_num_tx_slots;
	int rc;

	netdev_dbg(adapter->netdev,
		   "[S:%s FOP:%d] Reset reason: %s, reset_state: %s\n",
		   adapter_state_to_string(adapter->state),
		   adapter->failover_pending,
		   reset_reason_to_string(rwi->reset_reason),
		   adapter_state_to_string(reset_state));

	adapter->reset_reason = rwi->reset_reason;
	/* requestor of VNIC_RESET_CHANGE_PARAM already has the rtnl lock */
	if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
		rtnl_lock();

	/* Now that we have the rtnl lock, clear any pending failover.
	 * This will ensure ibmvnic_open() has either completed or will
	 * block until failover is complete.
	 */
	if (rwi->reset_reason == VNIC_RESET_FAILOVER)
		adapter->failover_pending = false;

	/* read the state and check (again) after getting rtnl */
	reset_state = adapter->state;

	if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
		rc = -EBUSY;
		goto out;
	}

	netif_carrier_off(netdev);

	old_num_rx_queues = adapter->req_rx_queues;
	old_num_tx_queues = adapter->req_tx_queues;
	old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
	old_num_tx_slots = adapter->req_tx_entries_per_subcrq;

	ibmvnic_cleanup(netdev);

	if (reset_state == VNIC_OPEN &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
	    adapter->reset_reason != VNIC_RESET_FAILOVER) {
		if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
			rc = __ibmvnic_close(netdev);
			if (rc)
				goto out;
		} else {
			adapter->state = VNIC_CLOSING;

			/* Release the RTNL lock before link state change and
			 * re-acquire after the link state change to allow
			 * linkwatch_event to grab the RTNL lock and run during
			 * a reset.
			 */
			rtnl_unlock();
			rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
			rtnl_lock();
			if (rc)
				goto out;

			if (adapter->state == VNIC_OPEN) {
				/* When we dropped rtnl, ibmvnic_open() got
				 * it and noticed that we are resetting and
				 * set the adapter state to OPEN. Update our
				 * new "target" state, and resume the reset
				 * from VNIC_CLOSING state.
				 */
				netdev_dbg(netdev,
					   "Open changed state from %s, updating.\n",
					   adapter_state_to_string(reset_state));
				reset_state = VNIC_OPEN;
				adapter->state = VNIC_CLOSING;
			}

			if (adapter->state != VNIC_CLOSING) {
				/* If someone else changed the adapter state
				 * when we dropped the rtnl, fail the reset
				 */
				rc = -EAGAIN;
				goto out;
			}
			adapter->state = VNIC_CLOSED;
		}
	}

	if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
		release_resources(adapter);
		release_sub_crqs(adapter, 1);
		release_crq_queue(adapter);
	}

	if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
		/* remove the closed state so when we call open it appears
		 * we are coming from the probed state.
		 */
		adapter->state = VNIC_PROBED;

		reinit_init_done(adapter);

		if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
			rc = init_crq_queue(adapter);
		} else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
			rc = ibmvnic_reenable_crq_queue(adapter);
			release_sub_crqs(adapter, 1);
		} else {
			rc = ibmvnic_reset_crq(adapter);
			if (rc == H_CLOSED || rc == H_SUCCESS) {
				rc = vio_enable_interrupts(adapter->vdev);
				if (rc)
					netdev_err(adapter->netdev,
						   "Reset failed to enable interrupts. rc=%d\n",
						   rc);
			}
		}

		if (rc) {
			netdev_err(adapter->netdev,
				   "Reset couldn't initialize crq. rc=%d\n", rc);
			goto out;
		}

		rc = ibmvnic_reset_init(adapter, true);
		if (rc)
			goto out;

		/* If the adapter was in PROBE or DOWN state prior to the reset,
		 * exit here.
		 */
		if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN) {
			rc = 0;
			goto out;
		}

		rc = ibmvnic_login(netdev);
		if (rc)
			goto out;

		if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
			rc = init_resources(adapter);
			if (rc)
				goto out;
		} else if (adapter->req_rx_queues != old_num_rx_queues ||
			   adapter->req_tx_queues != old_num_tx_queues ||
			   adapter->req_rx_add_entries_per_subcrq !=
			   old_num_rx_slots ||
			   adapter->req_tx_entries_per_subcrq !=
			   old_num_tx_slots ||
			   !adapter->rx_pool ||
			   !adapter->tso_pool ||
			   !adapter->tx_pool) {
			release_napi(adapter);
			release_vpd_data(adapter);

			rc = init_resources(adapter);
			if (rc)
				goto out;

		} else {
			rc = init_tx_pools(netdev);
			if (rc) {
				netdev_dbg(netdev,
					   "init tx pools failed (%d)\n",
					   rc);
				goto out;
			}

			rc = init_rx_pools(netdev);
			if (rc) {
				netdev_dbg(netdev,
					   "init rx pools failed (%d)\n",
					   rc);
				goto out;
			}
		}
		ibmvnic_disable_irqs(adapter);
	}
	adapter->state = VNIC_CLOSED;

	if (reset_state == VNIC_CLOSED) {
		rc = 0;
		goto out;
	}

	rc = __ibmvnic_open(netdev);
	if (rc) {
		rc = IBMVNIC_OPEN_FAILED;
		goto out;
	}

	/* refresh device's multicast list */
	ibmvnic_set_multi(netdev);

	if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
	    adapter->reset_reason == VNIC_RESET_MOBILITY)
		__netdev_notify_peers(netdev);

	rc = 0;

out:
	/* restore the adapter state if reset failed */
	if (rc)
		adapter->state = reset_state;
	/* requestor of VNIC_RESET_CHANGE_PARAM should still hold the rtnl lock */
	if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
		rtnl_unlock();

	netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Reset done, rc %d\n",
		   adapter_state_to_string(adapter->state),
		   adapter->failover_pending, rc);
	return rc;
}

static int do_hard_reset(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rwi *rwi, u32 reset_state)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	netdev_dbg(adapter->netdev, "Hard resetting driver (%s)\n",
		   reset_reason_to_string(rwi->reset_reason));

	/* read the state and check (again) after getting rtnl */
	reset_state = adapter->state;

	if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
		rc = -EBUSY;
		goto out;
	}

	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	ibmvnic_cleanup(netdev);
	release_resources(adapter);
	release_sub_crqs(adapter, 0);
	release_crq_queue(adapter);

	/* remove the closed state so when we call open it appears
	 * we are coming from the probed state.
	 */
	adapter->state = VNIC_PROBED;

	reinit_init_done(adapter);

	rc = init_crq_queue(adapter);
	if (rc) {
		netdev_err(adapter->netdev,
			   "Couldn't initialize crq. rc=%d\n", rc);
		goto out;
	}

	rc = ibmvnic_reset_init(adapter, false);
	if (rc)
		goto out;

	/* If the adapter was in PROBE or DOWN state prior to the reset,
	 * exit here.
	 */
	if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN)
		goto out;

	rc = ibmvnic_login(netdev);
	if (rc)
		goto out;

	rc = init_resources(adapter);
	if (rc)
		goto out;

	ibmvnic_disable_irqs(adapter);
	adapter->state = VNIC_CLOSED;

	if (reset_state == VNIC_CLOSED)
		goto out;

	rc = __ibmvnic_open(netdev);
	if (rc) {
		rc = IBMVNIC_OPEN_FAILED;
		goto out;
	}

	__netdev_notify_peers(netdev);
out:
	/* restore adapter state if reset failed */
	if (rc)
		adapter->state = reset_state;
	netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Hard reset done, rc %d\n",
		   adapter_state_to_string(adapter->state),
		   adapter->failover_pending, rc);
	return rc;
}

static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rwi *rwi;
	unsigned long flags;

	spin_lock_irqsave(&adapter->rwi_lock, flags);

	if (!list_empty(&adapter->rwi_list)) {
		rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
				       list);
		list_del(&rwi->list);
	} else {
		rwi = NULL;
	}

	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
	return rwi;
}

/**
 * do_passive_init - complete probing when partner device is detected.
 * @adapter: ibmvnic_adapter struct
 *
 * If the ibmvnic device does not have a partner device to communicate with at boot
 * and that partner device comes online at a later time, this function is called
 * to complete the initialization process of ibmvnic device.
 * Caller is expected to hold rtnl_lock().
 *
 * Returns non-zero if sub-CRQs are not initialized properly, leaving the device
 * in the down state.
 * Returns 0 upon success and the device is in PROBED state.
 */

static int do_passive_init(struct ibmvnic_adapter *adapter)
{
	unsigned long timeout = msecs_to_jiffies(30000);
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	int rc;

	netdev_dbg(netdev, "Partner device found, probing.\n");

	adapter->state = VNIC_PROBING;
	reinit_completion(&adapter->init_done);
	adapter->init_done_rc = 0;
	adapter->crq.active = true;

	rc = send_crq_init_complete(adapter);
	if (rc)
		goto out;

	rc = send_version_xchg(adapter);
	if (rc)
		netdev_dbg(adapter->netdev, "send_version_xchg failed, rc=%d\n", rc);

	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		rc = -ETIMEDOUT;
		goto out;
	}

	rc = init_sub_crqs(adapter);
	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed, rc=%d\n", rc);
		goto out;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs, rc=%d\n", rc);
		goto init_failed;
	}

	netdev->mtu = adapter->req_mtu - ETH_HLEN;
	netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
	netdev->max_mtu = adapter->max_mtu - ETH_HLEN;

	adapter->state = VNIC_PROBED;
	netdev_dbg(netdev, "Probed successfully. Waiting for signal from partner device.\n");

	return 0;

init_failed:
	release_sub_crqs(adapter, 1);
out:
	adapter->state = VNIC_DOWN;
	return rc;
}

static void __ibmvnic_reset(struct work_struct *work)
{
	struct ibmvnic_adapter *adapter;
	unsigned int timeout = 5000;
	struct ibmvnic_rwi *tmprwi;
	bool saved_state = false;
	struct ibmvnic_rwi *rwi;
	unsigned long flags;
	struct device *dev;
	bool need_reset;
	int num_fails = 0;
	u32 reset_state;
	int rc = 0;

	adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
	dev = &adapter->vdev->dev;

	/* Wait for ibmvnic_probe() to complete. If probe is taking too long
	 * or if another reset is in progress, defer work for now. If probe
	 * eventually fails it will flush and terminate our work.
	 *
	 * Three possibilities here:
	 * 1. Adapter being removed - just return
	 * 2. Timed out on probe or another reset in progress - delay the work
	 * 3. Completed probe - perform any resets in queue
	 */
	if (adapter->state == VNIC_PROBING &&
	    !wait_for_completion_timeout(&adapter->probe_done, timeout)) {
		dev_err(dev, "Reset thread timed out on probe");
		queue_delayed_work(system_long_wq,
				   &adapter->ibmvnic_delayed_reset,
				   IBMVNIC_RESET_DELAY);
		return;
	}

	/* adapter is done with probe (i.e state is never VNIC_PROBING now) */
	if (adapter->state == VNIC_REMOVING)
		return;

	/* ->rwi_list is stable now (no one else is removing entries) */

	/* ibmvnic_probe() may have purged the reset queue after we were
	 * scheduled to process a reset, so there may be no resets to process.
	 * Before setting the ->resetting bit though, we have to make sure
	 * that there is in fact a reset to process. Otherwise we may race
	 * with ibmvnic_open() and end up leaving the vnic down:
	 *
	 *	__ibmvnic_reset()	    ibmvnic_open()
	 *	-----------------	    --------------
	 *
	 * set ->resetting bit
	 *				find ->resetting bit is set
	 *				set ->state to IBMVNIC_OPEN (i.e
	 *				assume reset will open device)
	 *				return
	 * find reset queue empty
	 * return
	 *
	 *	Neither performed vnic login/open and vnic stays down
	 *
	 * If we hold the lock and conditionally set the bit, either we
	 * or ibmvnic_open() will complete the open.
	 */
	need_reset = false;
	spin_lock(&adapter->rwi_lock);
	if (!list_empty(&adapter->rwi_list)) {
		if (test_and_set_bit_lock(0, &adapter->resetting)) {
			queue_delayed_work(system_long_wq,
					   &adapter->ibmvnic_delayed_reset,
					   IBMVNIC_RESET_DELAY);
		} else {
			need_reset = true;
		}
	}
	spin_unlock(&adapter->rwi_lock);

	if (!need_reset)
		return;

	rwi = get_next_rwi(adapter);
	while (rwi) {
		spin_lock_irqsave(&adapter->state_lock, flags);

		if (adapter->state == VNIC_REMOVING ||
		    adapter->state == VNIC_REMOVED) {
			spin_unlock_irqrestore(&adapter->state_lock, flags);
			kfree(rwi);
			rc = EBUSY;
			break;
		}

		if (!saved_state) {
			reset_state = adapter->state;
			saved_state = true;
		}
		spin_unlock_irqrestore(&adapter->state_lock, flags);

		if (rwi->reset_reason == VNIC_RESET_PASSIVE_INIT) {
			rtnl_lock();
			rc = do_passive_init(adapter);
			rtnl_unlock();
			if (!rc)
				netif_carrier_on(adapter->netdev);
		} else if (adapter->force_reset_recovery) {
			/* Since we are doing a hard reset now, clear the
			 * failover_pending flag so we don't ignore any
			 * future MOBILITY or other resets.
			 */
			adapter->failover_pending = false;

			/* Transport event occurred during previous reset */
			if (adapter->wait_for_reset) {
				/* Previous was CHANGE_PARAM; caller locked */
				adapter->force_reset_recovery = false;
				rc = do_hard_reset(adapter, rwi, reset_state);
			} else {
				rtnl_lock();
				adapter->force_reset_recovery = false;
				rc = do_hard_reset(adapter, rwi, reset_state);
				rtnl_unlock();
			}
			if (rc)
				num_fails++;
			else
				num_fails = 0;

			/* If auto-priority-failover is enabled we can get
			 * back to back failovers during resets, resulting
			 * in at least two failed resets (from high-priority
			 * backing device to low-priority one and then back).
			 * If resets continue to fail beyond that, give the
			 * adapter some time to settle down before retrying.
			 */
			if (num_fails >= 3) {
				netdev_dbg(adapter->netdev,
					   "[S:%s] Hard reset failed %d times, waiting 60 secs\n",
					   adapter_state_to_string(adapter->state),
					   num_fails);
				set_current_state(TASK_UNINTERRUPTIBLE);
				schedule_timeout(60 * HZ);
			}
		} else {
			rc = do_reset(adapter, rwi, reset_state);
		}
		tmprwi = rwi;
		adapter->last_reset_time = jiffies;

		if (rc)
			netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc);

		rwi = get_next_rwi(adapter);

		/*
		 * If there are no resets queued and the previous reset failed,
		 * the adapter would be in an undefined state. So retry the
		 * previous reset as a hard reset.
		 *
		 * Else, free the previous rwi and, if there is another reset
		 * queued, process the new reset even if previous reset failed
		 * (the previous reset could have failed because of a fail
		 * over for instance, so process the fail over).
		 */
		if (!rwi && rc)
			rwi = tmprwi;
		else
			kfree(tmprwi);

		if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
			    rwi->reset_reason == VNIC_RESET_MOBILITY || rc))
			adapter->force_reset_recovery = true;
	}

	if (adapter->wait_for_reset) {
		adapter->reset_done_rc = rc;
		complete(&adapter->reset_done);
	}

	clear_bit_unlock(0, &adapter->resetting);

	netdev_dbg(adapter->netdev,
		   "[S:%s FRR:%d WFR:%d] Done processing resets\n",
		   adapter_state_to_string(adapter->state),
		   adapter->force_reset_recovery,
		   adapter->wait_for_reset);
}

static void __ibmvnic_delayed_reset(struct work_struct *work)
{
	struct ibmvnic_adapter *adapter;

	adapter = container_of(work, struct ibmvnic_adapter,
			       ibmvnic_delayed_reset.work);
	__ibmvnic_reset(&adapter->ibmvnic_reset);
}

static void flush_reset_queue(struct ibmvnic_adapter *adapter)
{
	struct list_head *entry, *tmp_entry;

	if (!list_empty(&adapter->rwi_list)) {
		list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) {
			list_del(entry);
			kfree(list_entry(entry, struct ibmvnic_rwi, list));
		}
	}
}

static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
			 enum ibmvnic_reset_reason reason)
{
	struct net_device *netdev = adapter->netdev;
	struct ibmvnic_rwi *rwi, *tmp;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&adapter->rwi_lock, flags);

	/* If failover is pending don't schedule any other reset.
	 * Instead let the failover complete. If there is already a
	 * failover reset scheduled, we will detect and drop the
	 * duplicate reset when walking the ->rwi_list below.
	 */
	if (adapter->state == VNIC_REMOVING ||
	    adapter->state == VNIC_REMOVED ||
	    (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) {
		ret = EBUSY;
		netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
		goto err;
	}

	list_for_each_entry(tmp, &adapter->rwi_list, list) {
		if (tmp->reset_reason == reason) {
			netdev_dbg(netdev, "Skipping matching reset, reason=%s\n",
				   reset_reason_to_string(reason));
			ret = EBUSY;
			goto err;
		}
	}

	rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
	if (!rwi) {
		ret = ENOMEM;
		goto err;
	}
	/* if we just received a transport event,
	 * flush reset queue and process this reset
	 */
	if (adapter->force_reset_recovery)
		flush_reset_queue(adapter);

	rwi->reset_reason = reason;
	list_add_tail(&rwi->list, &adapter->rwi_list);
	netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n",
		   reset_reason_to_string(reason));
	queue_work(system_long_wq, &adapter->ibmvnic_reset);

	ret = 0;
err:
	/* ibmvnic_close() below can block, so drop the lock first */
	spin_unlock_irqrestore(&adapter->rwi_lock, flags);

	if (ret == ENOMEM)
		ibmvnic_close(netdev);

	return -ret;
}

static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);

	if (test_bit(0, &adapter->resetting)) {
		netdev_err(adapter->netdev,
			   "Adapter is resetting, skip timeout reset\n");
		return;
	}
	/* Don't queue a reset until at least 5 seconds (the default watchdog
	 * value) have passed since the last reset.
	 */
	if (time_before(jiffies, (adapter->last_reset_time + dev->watchdog_timeo))) {
		netdev_dbg(dev, "Not yet time to tx timeout.\n");
		return;
	}
	ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
}

static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_rx_buff *rx_buff)
{
	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];

	rx_buff->skb = NULL;

	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
	pool->next_alloc = (pool->next_alloc + 1) % pool->size;

	atomic_dec(&pool->available);
}

static int ibmvnic_poll(struct napi_struct *napi, int budget)
{
	struct ibmvnic_sub_crq_queue *rx_scrq;
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	int frames_processed;
	int scrq_num;

	netdev = napi->dev;
	adapter = netdev_priv(netdev);
	scrq_num = (int)(napi - adapter->napi);
	frames_processed = 0;
	rx_scrq = adapter->rx_scrq[scrq_num];

restart_poll:
	while (frames_processed < budget) {
		struct sk_buff *skb;
		struct ibmvnic_rx_buff *rx_buff;
		union sub_crq *next;
		u32 length;
		u16 offset;
		u8 flags = 0;

		if (unlikely(test_bit(0, &adapter->resetting) &&
			     adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
			enable_scrq_irq(adapter, rx_scrq);
			napi_complete_done(napi, frames_processed);
			return frames_processed;
		}

		if (!pending_scrq(adapter, rx_scrq))
			break;
		next = ibmvnic_next_scrq(adapter, rx_scrq);
		rx_buff = (struct ibmvnic_rx_buff *)
			  be64_to_cpu(next->rx_comp.correlator);
		/* do error checking */
		if (next->rx_comp.rc) {
			netdev_dbg(netdev, "rx buffer returned with rc %x\n",
				   be16_to_cpu(next->rx_comp.rc));
			/* free the entry */
			next->rx_comp.first = 0;
			dev_kfree_skb_any(rx_buff->skb);
			remove_buff_from_pool(adapter, rx_buff);
			continue;
		} else if (!rx_buff->skb) {
			/* free the entry */
			next->rx_comp.first = 0;
			remove_buff_from_pool(adapter, rx_buff);
			continue;
		}

		length = be32_to_cpu(next->rx_comp.len);
		offset = be16_to_cpu(next->rx_comp.off_frame_data);
		flags = next->rx_comp.flags;
		skb = rx_buff->skb;
		/* load long_term_buff before copying to skb */
		dma_rmb();
		skb_copy_to_linear_data(skb, rx_buff->data + offset,
					length);

		/* VLAN Header has been stripped by the system firmware and
		 * needs to be inserted by the driver
		 */
		if (adapter->rx_vlan_header_insertion &&
		    (flags & IBMVNIC_VLAN_STRIPPED))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       ntohs(next->rx_comp.vlan_tci));

		/* free the entry */
		next->rx_comp.first = 0;
		remove_buff_from_pool(adapter, rx_buff);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, scrq_num);

		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		length = skb->len;
		napi_gro_receive(napi, skb); /* send it up */
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += length;
		adapter->rx_stats_buffers[scrq_num].packets++;
		adapter->rx_stats_buffers[scrq_num].bytes += length;
		frames_processed++;
	}

	if (adapter->state != VNIC_CLOSING &&
	    ((atomic_read(&adapter->rx_pool[scrq_num].available) <
	      adapter->req_rx_add_entries_per_subcrq / 2) ||
	     frames_processed < budget))
		replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
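
	/* Budget not exhausted: complete NAPI, then re-check for completions
	 * that may have raced with re-enabling the sub-CRQ interrupt and,
	 * if any arrived, reschedule and keep polling.
	 */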
	if (frames_processed < budget) {
		if (napi_complete_done(napi, frames_processed)) {
			enable_scrq_irq(adapter, rx_scrq);
			if (pending_scrq(adapter, rx_scrq)) {
				if (napi_reschedule(napi)) {
					disable_scrq_irq(adapter, rx_scrq);
					goto restart_poll;
				}
			}
		}
	}
	return frames_processed;
}

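/* Snapshot the current ring/queue settings, request a CHANGE_PARAM reset
 * with the desired values and wait for it to finish. If that reset fails,
 * the saved values are restored as the desired settings and a second
 * CHANGE_PARAM reset is attempted to fall back to the old configuration.
 */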
static int wait_for_reset(struct ibmvnic_adapter *adapter)
{
	int rc, ret;

	adapter->fallback.mtu = adapter->req_mtu;
	adapter->fallback.rx_queues = adapter->req_rx_queues;
	adapter->fallback.tx_queues = adapter->req_tx_queues;
	adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
	adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;

	reinit_completion(&adapter->reset_done);
	adapter->wait_for_reset = true;
	rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);

	if (rc) {
		ret = rc;
		goto out;
	}
	rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
	if (rc) {
		ret = -ENODEV;
		goto out;
	}

	ret = 0;
	if (adapter->reset_done_rc) {
		ret = -EIO;
		adapter->desired.mtu = adapter->fallback.mtu;
		adapter->desired.rx_queues = adapter->fallback.rx_queues;
		adapter->desired.tx_queues = adapter->fallback.tx_queues;
		adapter->desired.rx_entries = adapter->fallback.rx_entries;
		adapter->desired.tx_entries = adapter->fallback.tx_entries;

		reinit_completion(&adapter->reset_done);
		adapter->wait_for_reset = true;
		rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
		if (rc) {
			ret = rc;
			goto out;
		}
		rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
						 60000);
		if (rc) {
			ret = -ENODEV;
			goto out;
		}
	}
out:
	adapter->wait_for_reset = false;

	return ret;
}

static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->desired.mtu = new_mtu + ETH_HLEN;

	return wait_for_reset(adapter);
}

static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
						struct net_device *dev,
						netdev_features_t features)
{
	/* Some backing hardware adapters cannot
	 * handle packets with an MSS less than 224
	 * or with only one segment.
	 */
	if (skb_is_gso(skb)) {
		if (skb_shinfo(skb)->gso_size < 224 ||
		    skb_shinfo(skb)->gso_segs == 1)
			features &= ~NETIF_F_GSO_MASK;
	}

	return features;
}

static const struct net_device_ops ibmvnic_netdev_ops = {
	.ndo_open		= ibmvnic_open,
	.ndo_stop		= ibmvnic_close,
	.ndo_start_xmit		= ibmvnic_xmit,
	.ndo_set_rx_mode	= ibmvnic_set_multi,
	.ndo_set_mac_address	= ibmvnic_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= ibmvnic_tx_timeout,
	.ndo_change_mtu		= ibmvnic_change_mtu,
	.ndo_features_check	= ibmvnic_features_check,
};

/* ethtool functions */

static int ibmvnic_get_link_ksettings(struct net_device *netdev,
				      struct ethtool_link_ksettings *cmd)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	rc = send_query_phys_parms(adapter);
	if (rc) {
		adapter->speed = SPEED_UNKNOWN;
		adapter->duplex = DUPLEX_UNKNOWN;
	}
	cmd->base.speed = adapter->speed;
	cmd->base.duplex = adapter->duplex;
	cmd->base.port = PORT_FIBRE;
	cmd->base.phy_address = 0;
	cmd->base.autoneg = AUTONEG_ENABLE;

	return 0;
}

static void ibmvnic_get_drvinfo(struct net_device *netdev,
				struct ethtool_drvinfo *info)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	strscpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
	strscpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
	strscpy(info->fw_version, adapter->fw_version,
		sizeof(info->fw_version));
}

static u32 ibmvnic_get_msglevel(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

static u32 ibmvnic_get_link(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* Don't need to send a query because we request a logical link up at
	 * init and then we wait for link state indications
	 */
	return adapter->logical_link_state;
}

static void ibmvnic_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring,
				  struct kernel_ethtool_ringparam *kernel_ring,
				  struct netlink_ext_ack *extack)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
	ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
	ring->tx_pending = adapter->req_tx_entries_per_subcrq;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

static int ibmvnic_set_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring,
				 struct kernel_ethtool_ringparam *kernel_ring,
				 struct netlink_ext_ack *extack)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq ||
	    ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
		netdev_err(netdev, "Invalid request.\n");
		netdev_err(netdev, "Max rx buffers = %llu\n",
			   adapter->max_rx_add_entries_per_subcrq);
		netdev_err(netdev, "Max tx buffers = %llu\n",
			   adapter->max_tx_entries_per_subcrq);
		return -EINVAL;
	}

	adapter->desired.rx_entries = ring->rx_pending;
	adapter->desired.tx_entries = ring->tx_pending;

	return wait_for_reset(adapter);
}

static void ibmvnic_get_channels(struct net_device *netdev,
				 struct ethtool_channels *channels)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	channels->max_rx = adapter->max_rx_queues;
	channels->max_tx = adapter->max_tx_queues;
	channels->max_other = 0;
	channels->max_combined = 0;
	channels->rx_count = adapter->req_rx_queues;
	channels->tx_count = adapter->req_tx_queues;
	channels->other_count = 0;
	channels->combined_count = 0;
}

static int ibmvnic_set_channels(struct net_device *netdev,
				struct ethtool_channels *channels)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->desired.rx_queues = channels->rx_count;
	adapter->desired.tx_queues = channels->tx_count;

	return wait_for_reset(adapter);
}

static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);

	for (i = 0; i < adapter->req_tx_queues; i++) {
		snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
		data += ETH_GSTRING_LEN;

		snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
		data += ETH_GSTRING_LEN;

		snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i);
		data += ETH_GSTRING_LEN;
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
		data += ETH_GSTRING_LEN;

		snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
		data += ETH_GSTRING_LEN;

		snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
		data += ETH_GSTRING_LEN;
	}
}

static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmvnic_stats) +
		       adapter->req_tx_queues * NUM_TX_STATS +
		       adapter->req_rx_queues * NUM_RX_STATS;
	default:
		return -EOPNOTSUPP;
	}
}
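
/* NUM_TX_STATS and NUM_RX_STATS must match the three per-TX-queue and three
 * per-RX-queue strings emitted by ibmvnic_get_strings() above; presumably
 * both are defined as 3 in ibmvnic.h.
 */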

static void ibmvnic_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	union ibmvnic_crq crq;
	int i, j;
	int rc;

	memset(&crq, 0, sizeof(crq));
	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
	crq.request_statistics.cmd = REQUEST_STATISTICS;
	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
	crq.request_statistics.len =
	    cpu_to_be32(sizeof(struct ibmvnic_statistics));

	/* Wait for data to be written */
	reinit_completion(&adapter->stats_done);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc)
		return;
	rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
	if (rc)
		return;

3551 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
3552 data[i] = be64_to_cpu(IBMVNIC_GET_STAT
3553 (adapter, ibmvnic_stats[i].offset));
3554
3555 for (j = 0; j < adapter->req_tx_queues; j++) {
3556 data[i] = adapter->tx_stats_buffers[j].packets;
3557 i++;
3558 data[i] = adapter->tx_stats_buffers[j].bytes;
3559 i++;
3560 data[i] = adapter->tx_stats_buffers[j].dropped_packets;
3561 i++;
3562 }
3563
3564 for (j = 0; j < adapter->req_rx_queues; j++) {
3565 data[i] = adapter->rx_stats_buffers[j].packets;
3566 i++;
3567 data[i] = adapter->rx_stats_buffers[j].bytes;
3568 i++;
3569 data[i] = adapter->rx_stats_buffers[j].interrupts;
3570 i++;
3571 }
3572 }
3573
3574 static const struct ethtool_ops ibmvnic_ethtool_ops = {
3575 .get_drvinfo = ibmvnic_get_drvinfo,
3576 .get_msglevel = ibmvnic_get_msglevel,
3577 .set_msglevel = ibmvnic_set_msglevel,
3578 .get_link = ibmvnic_get_link,
3579 .get_ringparam = ibmvnic_get_ringparam,
3580 .set_ringparam = ibmvnic_set_ringparam,
3581 .get_channels = ibmvnic_get_channels,
3582 .set_channels = ibmvnic_set_channels,
3583 .get_strings = ibmvnic_get_strings,
3584 .get_sset_count = ibmvnic_get_sset_count,
3585 .get_ethtool_stats = ibmvnic_get_ethtool_stats,
3586 .get_link_ksettings = ibmvnic_get_link_ksettings,
3587 };
3588
3589 /* Routines for managing CRQs/sCRQs */
3590
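/* Release the queue's irq, zero the 4-page message buffer and the
 * tracking state, then re-register the sub-CRQ with the hypervisor.
 * The existing DMA mapping (msg_token) is reused.
 */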
3591 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
3592 struct ibmvnic_sub_crq_queue *scrq)
3593 {
3594 int rc;
3595
3596 if (!scrq) {
3597 netdev_dbg(adapter->netdev, "Invalid scrq reset.\n");
3598 return -EINVAL;
3599 }
3600
3601 if (scrq->irq) {
3602 free_irq(scrq->irq, scrq);
3603 irq_dispose_mapping(scrq->irq);
3604 scrq->irq = 0;
3605 }
3606
3607 if (scrq->msgs) {
3608 memset(scrq->msgs, 0, 4 * PAGE_SIZE);
3609 atomic_set(&scrq->used, 0);
3610 scrq->cur = 0;
3611 scrq->ind_buf.index = 0;
3612 } else {
3613 netdev_dbg(adapter->netdev, "Invalid scrq reset\n");
3614 return -EINVAL;
3615 }
3616
3617 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
3618 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
3619 return rc;
3620 }
3621
3622 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
3623 {
3624 int i, rc;
3625
3626 if (!adapter->tx_scrq || !adapter->rx_scrq)
3627 return -EINVAL;
3628
3629 for (i = 0; i < adapter->req_tx_queues; i++) {
3630 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
3631 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
3632 if (rc)
3633 return rc;
3634 }
3635
3636 for (i = 0; i < adapter->req_rx_queues; i++) {
3637 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
3638 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
3639 if (rc)
3640 return rc;
3641 }
3642
3643 return rc;
3644 }
3645
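/* Tear down a single sub-CRQ: optionally ask the hypervisor to free it
 * (retrying while the hcall reports busy), then free the indirect
 * descriptor array, unmap and free the 4-page message buffer, and
 * free the queue structure itself.
 */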
3646 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
3647 struct ibmvnic_sub_crq_queue *scrq,
3648 bool do_h_free)
3649 {
3650 struct device *dev = &adapter->vdev->dev;
3651 long rc;
3652
3653 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
3654
3655 if (do_h_free) {
3656 /* Close the sub-crqs */
3657 do {
3658 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
3659 adapter->vdev->unit_address,
3660 scrq->crq_num);
3661 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3662
3663 if (rc) {
3664 netdev_err(adapter->netdev,
3665 "Failed to release sub-CRQ %16lx, rc = %ld\n",
3666 scrq->crq_num, rc);
3667 }
3668 }
3669
3670 dma_free_coherent(dev,
3671 IBMVNIC_IND_ARR_SZ,
3672 scrq->ind_buf.indir_arr,
3673 scrq->ind_buf.indir_dma);
3674
3675 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
3676 DMA_BIDIRECTIONAL);
3677 free_pages((unsigned long)scrq->msgs, 2);
3678 kfree(scrq);
3679 }
3680
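/* Allocate and register one sub-CRQ: a zeroed 4-page (order-2) message
 * buffer is DMA mapped and registered via h_reg_sub_crq, and a coherent
 * indirect descriptor array is allocated for batched descriptor
 * submission. Returns NULL on failure, unwinding any prior steps.
 */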
3681 static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
3682 *adapter)
3683 {
3684 struct device *dev = &adapter->vdev->dev;
3685 struct ibmvnic_sub_crq_queue *scrq;
3686 int rc;
3687
3688 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
3689 if (!scrq)
3690 return NULL;
3691
3692 scrq->msgs =
3693 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
3694 if (!scrq->msgs) {
3695 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
3696 goto zero_page_failed;
3697 }
3698
3699 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
3700 DMA_BIDIRECTIONAL);
3701 if (dma_mapping_error(dev, scrq->msg_token)) {
3702 dev_warn(dev, "Couldn't map crq queue messages page\n");
3703 goto map_failed;
3704 }
3705
3706 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
3707 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
3708
3709 if (rc == H_RESOURCE)
3710 rc = ibmvnic_reset_crq(adapter);
3711
3712 if (rc == H_CLOSED) {
3713 dev_warn(dev, "Partner adapter not ready, waiting.\n");
3714 } else if (rc) {
3715 dev_warn(dev, "Error %d registering sub-crq\n", rc);
3716 goto reg_failed;
3717 }
3718
3719 scrq->adapter = adapter;
3720 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
3721 scrq->ind_buf.index = 0;
3722
3723 scrq->ind_buf.indir_arr =
3724 dma_alloc_coherent(dev,
3725 IBMVNIC_IND_ARR_SZ,
3726 &scrq->ind_buf.indir_dma,
3727 GFP_KERNEL);
3728
3729 if (!scrq->ind_buf.indir_arr)
3730 goto indir_failed;
3731
3732 spin_lock_init(&scrq->lock);
3733
3734 netdev_dbg(adapter->netdev,
3735 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
3736 scrq->crq_num, scrq->hw_irq, scrq->irq);
3737
3738 return scrq;
3739
3740 indir_failed:
3741 do {
3742 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
3743 adapter->vdev->unit_address,
3744 scrq->crq_num);
3745 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3746 reg_failed:
3747 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
3748 DMA_BIDIRECTIONAL);
3749 map_failed:
3750 free_pages((unsigned long)scrq->msgs, 2);
3751 zero_page_failed:
3752 kfree(scrq);
3753
3754 return NULL;
3755 }
3756
3757 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
3758 {
3759 int i;
3760
3761 if (adapter->tx_scrq) {
3762 for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
3763 if (!adapter->tx_scrq[i])
3764 continue;
3765
3766 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
3767 i);
3768 ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
3769 if (adapter->tx_scrq[i]->irq) {
3770 free_irq(adapter->tx_scrq[i]->irq,
3771 adapter->tx_scrq[i]);
3772 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
3773 adapter->tx_scrq[i]->irq = 0;
3774 }
3775
3776 release_sub_crq_queue(adapter, adapter->tx_scrq[i],
3777 do_h_free);
3778 }
3779
3780 kfree(adapter->tx_scrq);
3781 adapter->tx_scrq = NULL;
3782 adapter->num_active_tx_scrqs = 0;
3783 }
3784
3785 if (adapter->rx_scrq) {
3786 for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
3787 if (!adapter->rx_scrq[i])
3788 continue;
3789
3790 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
3791 i);
3792 if (adapter->rx_scrq[i]->irq) {
3793 free_irq(adapter->rx_scrq[i]->irq,
3794 adapter->rx_scrq[i]);
3795 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
3796 adapter->rx_scrq[i]->irq = 0;
3797 }
3798
3799 release_sub_crq_queue(adapter, adapter->rx_scrq[i],
3800 do_h_free);
3801 }
3802
3803 kfree(adapter->rx_scrq);
3804 adapter->rx_scrq = NULL;
3805 adapter->num_active_rx_scrqs = 0;
3806 }
3807 }
3808
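/* Ask the hypervisor (H_VIOCTL/H_DISABLE_VIO_INTERRUPT) to stop
 * presenting interrupts for this sub-CRQ's hardware irq.
 */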
3809 static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
3810 struct ibmvnic_sub_crq_queue *scrq)
3811 {
3812 struct device *dev = &adapter->vdev->dev;
3813 unsigned long rc;
3814
3815 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3816 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3817 if (rc)
3818 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
3819 scrq->hw_irq, rc);
3820 return rc;
3821 }
3822
3823 /* We cannot use the IRQ chip EOI handler because that has the
3824 * unintended effect of changing the interrupt priority.
3825 */
3826 static void ibmvnic_xics_eoi(struct device *dev, struct ibmvnic_sub_crq_queue *scrq)
3827 {
3828 u64 val = 0xff000000 | scrq->hw_irq;
3829 unsigned long rc;
3830
3831 rc = plpar_hcall_norets(H_EOI, val);
3832 if (rc)
3833 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n", val, rc);
3834 }
3835
3836 /* Due to a firmware bug, the hypervisor can send an interrupt to a
3837 * transmit or receive queue just prior to a partition migration.
3838 * Force an EOI after migration.
3839 */
3840 static void ibmvnic_clear_pending_interrupt(struct device *dev,
3841 struct ibmvnic_sub_crq_queue *scrq)
3842 {
3843 if (!xive_enabled())
3844 ibmvnic_xics_eoi(dev, scrq);
3845 }
3846
3847 static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
3848 struct ibmvnic_sub_crq_queue *scrq)
3849 {
3850 struct device *dev = &adapter->vdev->dev;
3851 unsigned long rc;
3852
3853 if (scrq->hw_irq > 0x100000000ULL) {
3854 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
3855 return 1;
3856 }
3857
3858 if (test_bit(0, &adapter->resetting) &&
3859 adapter->reset_reason == VNIC_RESET_MOBILITY) {
3860 ibmvnic_clear_pending_interrupt(dev, scrq);
3861 }
3862
3863 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3864 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3865 if (rc)
3866 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
3867 scrq->hw_irq, rc);
3868 return rc;
3869 }
3870
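/* Process tx completions on a sub-CRQ: for each completed correlator,
 * account the packet, free or consume the skb, and return the buffer
 * index to the pool's free map. The subqueue is woken once the number
 * of in-flight entries drops to half the requested queue depth. The
 * irq is re-enabled before a final check for late arrivals, closing
 * the race between completion processing and interrupt delivery.
 */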
3871 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
3872 struct ibmvnic_sub_crq_queue *scrq)
3873 {
3874 struct device *dev = &adapter->vdev->dev;
3875 struct ibmvnic_tx_pool *tx_pool;
3876 struct ibmvnic_tx_buff *txbuff;
3877 struct netdev_queue *txq;
3878 union sub_crq *next;
3879 int index;
3880 int i;
3881
3882 restart_loop:
3883 while (pending_scrq(adapter, scrq)) {
3884 unsigned int pool = scrq->pool_index;
3885 int num_entries = 0;
3886 int total_bytes = 0;
3887 int num_packets = 0;
3888
3889 next = ibmvnic_next_scrq(adapter, scrq);
3890 for (i = 0; i < next->tx_comp.num_comps; i++) {
3891 index = be32_to_cpu(next->tx_comp.correlators[i]);
3892 if (index & IBMVNIC_TSO_POOL_MASK) {
3893 tx_pool = &adapter->tso_pool[pool];
3894 index &= ~IBMVNIC_TSO_POOL_MASK;
3895 } else {
3896 tx_pool = &adapter->tx_pool[pool];
3897 }
3898
3899 txbuff = &tx_pool->tx_buff[index];
3900 num_packets++;
3901 num_entries += txbuff->num_entries;
3902 if (txbuff->skb) {
3903 total_bytes += txbuff->skb->len;
3904 if (next->tx_comp.rcs[i]) {
3905 dev_err(dev, "tx error %x\n",
3906 next->tx_comp.rcs[i]);
3907 dev_kfree_skb_irq(txbuff->skb);
3908 } else {
3909 dev_consume_skb_irq(txbuff->skb);
3910 }
3911 txbuff->skb = NULL;
3912 } else {
3913 netdev_warn(adapter->netdev,
3914 "TX completion received with NULL socket buffer\n");
3915 }
3916 tx_pool->free_map[tx_pool->producer_index] = index;
3917 tx_pool->producer_index =
3918 (tx_pool->producer_index + 1) %
3919 tx_pool->num_buffers;
3920 }
3921 /* remove tx_comp scrq */
3922 next->tx_comp.first = 0;
3923
3924 txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index);
3925 netdev_tx_completed_queue(txq, num_packets, total_bytes);
3926
3927 if (atomic_sub_return(num_entries, &scrq->used) <=
3928 (adapter->req_tx_entries_per_subcrq / 2) &&
3929 __netif_subqueue_stopped(adapter->netdev,
3930 scrq->pool_index)) {
3931 rcu_read_lock();
3932 if (adapter->tx_queues_active) {
3933 netif_wake_subqueue(adapter->netdev,
3934 scrq->pool_index);
3935 netdev_dbg(adapter->netdev,
3936 "Started queue %d\n",
3937 scrq->pool_index);
3938 }
3939 rcu_read_unlock();
3940 }
3941 }
3942
3943 enable_scrq_irq(adapter, scrq);
3944
3945 if (pending_scrq(adapter, scrq)) {
3946 disable_scrq_irq(adapter, scrq);
3947 goto restart_loop;
3948 }
3949
3950 return 0;
3951 }
3952
3953 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
3954 {
3955 struct ibmvnic_sub_crq_queue *scrq = instance;
3956 struct ibmvnic_adapter *adapter = scrq->adapter;
3957
3958 disable_scrq_irq(adapter, scrq);
3959 ibmvnic_complete_tx(adapter, scrq);
3960
3961 return IRQ_HANDLED;
3962 }
3963
3964 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
3965 {
3966 struct ibmvnic_sub_crq_queue *scrq = instance;
3967 struct ibmvnic_adapter *adapter = scrq->adapter;
3968
3969 /* When booting a kdump kernel we can hit pending interrupts
3970 * prior to completing driver initialization.
3971 */
3972 if (unlikely(adapter->state != VNIC_OPEN))
3973 return IRQ_NONE;
3974
3975 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
3976
3977 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
3978 disable_scrq_irq(adapter, scrq);
3979 __napi_schedule(&adapter->napi[scrq->scrq_num]);
3980 }
3981
3982 return IRQ_HANDLED;
3983 }
3984
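/* Map each sub-CRQ's hardware irq to a Linux irq and install the tx/rx
 * interrupt handlers. On failure, previously requested irqs are freed
 * and all sub-CRQs are released.
 */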
3985 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
3986 {
3987 struct device *dev = &adapter->vdev->dev;
3988 struct ibmvnic_sub_crq_queue *scrq;
3989 int i = 0, j = 0;
3990 int rc = 0;
3991
3992 for (i = 0; i < adapter->req_tx_queues; i++) {
3993 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
3994 i);
3995 scrq = adapter->tx_scrq[i];
3996 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3997
3998 if (!scrq->irq) {
3999 rc = -EINVAL;
4000 dev_err(dev, "Error mapping irq\n");
4001 goto req_tx_irq_failed;
4002 }
4003
4004 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
4005 adapter->vdev->unit_address, i);
4006 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
4007 0, scrq->name, scrq);
4008
4009 if (rc) {
4010 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
4011 scrq->irq, rc);
4012 irq_dispose_mapping(scrq->irq);
4013 goto req_tx_irq_failed;
4014 }
4015 }
4016
4017 for (i = 0; i < adapter->req_rx_queues; i++) {
4018 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
4019 i);
4020 scrq = adapter->rx_scrq[i];
4021 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
4022 if (!scrq->irq) {
4023 rc = -EINVAL;
4024 dev_err(dev, "Error mapping irq\n");
4025 goto req_rx_irq_failed;
4026 }
4027 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
4028 adapter->vdev->unit_address, i);
4029 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
4030 0, scrq->name, scrq);
4031 if (rc) {
4032 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
4033 scrq->irq, rc);
4034 irq_dispose_mapping(scrq->irq);
4035 goto req_rx_irq_failed;
4036 }
4037 }
4038 return rc;
4039
4040 req_rx_irq_failed:
4041 for (j = 0; j < i; j++) {
4042 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
4043 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
4044 }
4045 i = adapter->req_tx_queues;
4046 req_tx_irq_failed:
4047 for (j = 0; j < i; j++) {
4048 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
4049 irq_dispose_mapping(adapter->tx_scrq[j]->irq);
4050 }
4051 release_sub_crqs(adapter, 1);
4052 return rc;
4053 }
4054
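/* Allocate the full set of requested tx + rx sub-CRQs. If fewer queues
 * could be registered than requested (but at least the advertised
 * minimums), the requested tx/rx counts are reduced to fit before the
 * queues are divided between the tx and rx arrays.
 */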
4055 static int init_sub_crqs(struct ibmvnic_adapter *adapter)
4056 {
4057 struct device *dev = &adapter->vdev->dev;
4058 struct ibmvnic_sub_crq_queue **allqueues;
4059 int registered_queues = 0;
4060 int total_queues;
4061 int more = 0;
4062 int i;
4063
4064 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
4065
4066 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
4067 if (!allqueues)
4068 return -ENOMEM;
4069
4070 for (i = 0; i < total_queues; i++) {
4071 allqueues[i] = init_sub_crq_queue(adapter);
4072 if (!allqueues[i]) {
4073 dev_warn(dev, "Couldn't allocate all sub-crqs\n");
4074 break;
4075 }
4076 registered_queues++;
4077 }
4078
4079 /* Make sure we were able to register the minimum number of queues */
4080 if (registered_queues <
4081 adapter->min_tx_queues + adapter->min_rx_queues) {
4082 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
4083 goto tx_failed;
4084 }
4085
4086 /* Distribute the shortfall from failed queue allocations across tx/rx */
4087 for (i = 0; i < total_queues - registered_queues + more ; i++) {
4088 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
4089 switch (i % 3) {
4090 case 0:
4091 if (adapter->req_rx_queues > adapter->min_rx_queues)
4092 adapter->req_rx_queues--;
4093 else
4094 more++;
4095 break;
4096 case 1:
4097 if (adapter->req_tx_queues > adapter->min_tx_queues)
4098 adapter->req_tx_queues--;
4099 else
4100 more++;
4101 break;
4102 }
4103 }
4104
4105 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
4106 sizeof(*adapter->tx_scrq), GFP_KERNEL);
4107 if (!adapter->tx_scrq)
4108 goto tx_failed;
4109
4110 for (i = 0; i < adapter->req_tx_queues; i++) {
4111 adapter->tx_scrq[i] = allqueues[i];
4112 adapter->tx_scrq[i]->pool_index = i;
4113 adapter->num_active_tx_scrqs++;
4114 }
4115
4116 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
4117 sizeof(*adapter->rx_scrq), GFP_KERNEL);
4118 if (!adapter->rx_scrq)
4119 goto rx_failed;
4120
4121 for (i = 0; i < adapter->req_rx_queues; i++) {
4122 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
4123 adapter->rx_scrq[i]->scrq_num = i;
4124 adapter->num_active_rx_scrqs++;
4125 }
4126
4127 kfree(allqueues);
4128 return 0;
4129
4130 rx_failed:
4131 kfree(adapter->tx_scrq);
4132 adapter->tx_scrq = NULL;
4133 tx_failed:
4134 for (i = 0; i < registered_queues; i++)
4135 release_sub_crq_queue(adapter, allqueues[i], 1);
4136 kfree(allqueues);
4137 return -ENOMEM;
4138 }
4139
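/* Send the REQUEST_CAPABILITY CRQs. On the first pass the requested
 * queue and entry counts are derived from the queried maxima, any
 * user-desired values, and the long-term-buffer size limit; on retry
 * the previously computed values are resent unchanged.
 */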
4140 static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
4141 {
4142 struct device *dev = &adapter->vdev->dev;
4143 union ibmvnic_crq crq;
4144 int max_entries;
4145 int cap_reqs;
4146
4147 /* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on
4148 * the PROMISC flag). Initialize this count upfront. When the tasklet
4149 * receives a response to all of these, it will send the next protocol
4150 * message (QUERY_IP_OFFLOAD).
4151 */
4152 if (!(adapter->netdev->flags & IFF_PROMISC) ||
4153 adapter->promisc_supported)
4154 cap_reqs = 7;
4155 else
4156 cap_reqs = 6;
4157
4158 if (!retry) {
4159 /* Sub-CRQ entries are 32 byte long */
4160 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
4161
4162 atomic_set(&adapter->running_cap_crqs, cap_reqs);
4163
4164 if (adapter->min_tx_entries_per_subcrq > entries_page ||
4165 adapter->min_rx_add_entries_per_subcrq > entries_page) {
4166 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
4167 return;
4168 }
4169
4170 if (adapter->desired.mtu)
4171 adapter->req_mtu = adapter->desired.mtu;
4172 else
4173 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
4174
4175 if (!adapter->desired.tx_entries)
4176 adapter->desired.tx_entries =
4177 adapter->max_tx_entries_per_subcrq;
4178 if (!adapter->desired.rx_entries)
4179 adapter->desired.rx_entries =
4180 adapter->max_rx_add_entries_per_subcrq;
4181
4182 max_entries = IBMVNIC_LTB_SET_SIZE /
4183 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
4184
4185 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
4186 adapter->desired.tx_entries > IBMVNIC_LTB_SET_SIZE) {
4187 adapter->desired.tx_entries = max_entries;
4188 }
4189
4190 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
4191 adapter->desired.rx_entries > IBMVNIC_LTB_SET_SIZE) {
4192 adapter->desired.rx_entries = max_entries;
4193 }
4194
4195 if (adapter->desired.tx_entries)
4196 adapter->req_tx_entries_per_subcrq =
4197 adapter->desired.tx_entries;
4198 else
4199 adapter->req_tx_entries_per_subcrq =
4200 adapter->max_tx_entries_per_subcrq;
4201
4202 if (adapter->desired.rx_entries)
4203 adapter->req_rx_add_entries_per_subcrq =
4204 adapter->desired.rx_entries;
4205 else
4206 adapter->req_rx_add_entries_per_subcrq =
4207 adapter->max_rx_add_entries_per_subcrq;
4208
4209 if (adapter->desired.tx_queues)
4210 adapter->req_tx_queues =
4211 adapter->desired.tx_queues;
4212 else
4213 adapter->req_tx_queues =
4214 adapter->opt_tx_comp_sub_queues;
4215
4216 if (adapter->desired.rx_queues)
4217 adapter->req_rx_queues =
4218 adapter->desired.rx_queues;
4219 else
4220 adapter->req_rx_queues =
4221 adapter->opt_rx_comp_queues;
4222
4223 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
4224 } else {
4225 atomic_add(cap_reqs, &adapter->running_cap_crqs);
4226 }
4227 memset(&crq, 0, sizeof(crq));
4228 crq.request_capability.first = IBMVNIC_CRQ_CMD;
4229 crq.request_capability.cmd = REQUEST_CAPABILITY;
4230
4231 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
4232 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
4233 cap_reqs--;
4234 ibmvnic_send_crq(adapter, &crq);
4235
4236 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
4237 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
4238 cap_reqs--;
4239 ibmvnic_send_crq(adapter, &crq);
4240
4241 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
4242 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
4243 cap_reqs--;
4244 ibmvnic_send_crq(adapter, &crq);
4245
4246 crq.request_capability.capability =
4247 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
4248 crq.request_capability.number =
4249 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
4250 cap_reqs--;
4251 ibmvnic_send_crq(adapter, &crq);
4252
4253 crq.request_capability.capability =
4254 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
4255 crq.request_capability.number =
4256 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
4257 cap_reqs--;
4258 ibmvnic_send_crq(adapter, &crq);
4259
4260 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
4261 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
4262 cap_reqs--;
4263 ibmvnic_send_crq(adapter, &crq);
4264
4265 if (adapter->netdev->flags & IFF_PROMISC) {
4266 if (adapter->promisc_supported) {
4267 crq.request_capability.capability =
4268 cpu_to_be16(PROMISC_REQUESTED);
4269 crq.request_capability.number = cpu_to_be64(1);
4270 cap_reqs--;
4271 ibmvnic_send_crq(adapter, &crq);
4272 }
4273 } else {
4274 crq.request_capability.capability =
4275 cpu_to_be16(PROMISC_REQUESTED);
4276 crq.request_capability.number = cpu_to_be64(0);
4277 cap_reqs--;
4278 ibmvnic_send_crq(adapter, &crq);
4279 }
4280
4281 /* Keep at end to catch any discrepancy between expected and actual
4282 * CRQs sent.
4283 */
4284 WARN_ON(cap_reqs != 0);
4285 }
4286
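/* Return true if the descriptor at the queue's current index has its
 * valid flag set, i.e. a completion is waiting to be processed.
 */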
4287 static int pending_scrq(struct ibmvnic_adapter *adapter,
4288 struct ibmvnic_sub_crq_queue *scrq)
4289 {
4290 union sub_crq *entry = &scrq->msgs[scrq->cur];
4291 int rc;
4292
4293 rc = !!(entry->generic.first & IBMVNIC_CRQ_CMD_RSP);
4294
4295 /* Ensure that the SCRQ valid flag is loaded prior to loading the
4296 * contents of the SCRQ descriptor
4297 */
4298 dma_rmb();
4299
4300 return rc;
4301 }
4302
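/* Consume and return the next valid descriptor on the sub-CRQ, or NULL
 * if none is pending; the cursor wraps at the end of the queue.
 */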
4303 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
4304 struct ibmvnic_sub_crq_queue *scrq)
4305 {
4306 union sub_crq *entry;
4307 unsigned long flags;
4308
4309 spin_lock_irqsave(&scrq->lock, flags);
4310 entry = &scrq->msgs[scrq->cur];
4311 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
4312 if (++scrq->cur == scrq->size)
4313 scrq->cur = 0;
4314 } else {
4315 entry = NULL;
4316 }
4317 spin_unlock_irqrestore(&scrq->lock, flags);
4318
4319 /* Ensure that the SCRQ valid flag is loaded prior to loading the
4320 * contents of the SCRQ descriptor
4321 */
4322 dma_rmb();
4323
4324 return entry;
4325 }
4326
4327 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
4328 {
4329 struct ibmvnic_crq_queue *queue = &adapter->crq;
4330 union ibmvnic_crq *crq;
4331
4332 crq = &queue->msgs[queue->cur];
4333 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
4334 if (++queue->cur == queue->size)
4335 queue->cur = 0;
4336 } else {
4337 crq = NULL;
4338 }
4339
4340 return crq;
4341 }
4342
4343 static void print_subcrq_error(struct device *dev, int rc, const char *func)
4344 {
4345 switch (rc) {
4346 case H_PARAMETER:
4347 dev_warn_ratelimited(dev,
4348 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
4349 func, rc);
4350 break;
4351 case H_CLOSED:
4352 dev_warn_ratelimited(dev,
4353 "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
4354 func, rc);
4355 break;
4356 default:
4357 dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
4358 break;
4359 }
4360 }
4361
4362 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
4363 u64 remote_handle, u64 ioba, u64 num_entries)
4364 {
4365 unsigned int ua = adapter->vdev->unit_address;
4366 struct device *dev = &adapter->vdev->dev;
4367 int rc;
4368
4369 /* Make sure the hypervisor sees the complete request */
4370 dma_wmb();
4371 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
4372 cpu_to_be64(remote_handle),
4373 ioba, num_entries);
4374
4375 if (rc)
4376 print_subcrq_error(dev, rc, __func__);
4377
4378 return rc;
4379 }
4380
4381 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
4382 union ibmvnic_crq *crq)
4383 {
4384 unsigned int ua = adapter->vdev->unit_address;
4385 struct device *dev = &adapter->vdev->dev;
4386 u64 *u64_crq = (u64 *)crq;
4387 int rc;
4388
4389 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
4390 (unsigned long)cpu_to_be64(u64_crq[0]),
4391 (unsigned long)cpu_to_be64(u64_crq[1]));
4392
4393 if (!adapter->crq.active &&
4394 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
4395 dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
4396 return -EINVAL;
4397 }
4398
4399 /* Make sure the hypervisor sees the complete request */
4400 dma_wmb();
4401
4402 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
4403 cpu_to_be64(u64_crq[0]),
4404 cpu_to_be64(u64_crq[1]));
4405
4406 if (rc) {
4407 if (rc == H_CLOSED) {
4408 dev_warn(dev, "CRQ Queue closed\n");
4409 /* do not reset, report the fail, wait for passive init from server */
4410 }
4411
4412 dev_warn(dev, "Send error (rc=%d)\n", rc);
4413 }
4414
4415 return rc;
4416 }
4417
4418 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
4419 {
4420 struct device *dev = &adapter->vdev->dev;
4421 union ibmvnic_crq crq;
4422 int retries = 100;
4423 int rc;
4424
4425 memset(&crq, 0, sizeof(crq));
4426 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
4427 crq.generic.cmd = IBMVNIC_CRQ_INIT;
4428 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
4429
4430 do {
4431 rc = ibmvnic_send_crq(adapter, &crq);
4432 if (rc != H_CLOSED)
4433 break;
4434 retries--;
4435 msleep(50);
4436
4437 } while (retries > 0);
4438
4439 if (rc) {
4440 dev_err(dev, "Failed to send init request, rc = %d\n", rc);
4441 return rc;
4442 }
4443
4444 return 0;
4445 }
4446
4447 struct vnic_login_client_data {
4448 u8 type;
4449 __be16 len;
4450 char name[];
4451 } __packed;
4452
4453 static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
4454 {
4455 int len;
4456
4457 /* Calculate the amount of buffer space needed for the
4458 * vnic client data in the login buffer. There are four entries,
4459 * OS name, LPAR name, device name, and a null last entry.
4460 */
4461 len = 4 * sizeof(struct vnic_login_client_data);
4462 len += 6; /* "Linux" plus NULL */
4463 len += strlen(utsname()->nodename) + 1;
4464 len += strlen(adapter->netdev->name) + 1;
4465
4466 return len;
4467 }
4468
4469 static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
4470 struct vnic_login_client_data *vlcd)
4471 {
4472 const char *os_name = "Linux";
4473 int len;
4474
4475 /* Type 1 - LPAR OS */
4476 vlcd->type = 1;
4477 len = strlen(os_name) + 1;
4478 vlcd->len = cpu_to_be16(len);
4479 strscpy(vlcd->name, os_name, len);
4480 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
4481
4482 /* Type 2 - LPAR name */
4483 vlcd->type = 2;
4484 len = strlen(utsname()->nodename) + 1;
4485 vlcd->len = cpu_to_be16(len);
4486 strscpy(vlcd->name, utsname()->nodename, len);
4487 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
4488
4489 /* Type 3 - device name */
4490 vlcd->type = 3;
4491 len = strlen(adapter->netdev->name) + 1;
4492 vlcd->len = cpu_to_be16(len);
4493 strscpy(vlcd->name, adapter->netdev->name, len);
4494 }
4495
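/* Build and send the LOGIN CRQ. The login buffer carries the requested
 * tx/rx queue counts, the CRQ numbers of each sub-CRQ, and the client
 * data (OS, LPAR and device names); a separate response buffer is
 * mapped for the server to fill in when the login response arrives.
 */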
4496 static int send_login(struct ibmvnic_adapter *adapter)
4497 {
4498 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
4499 struct ibmvnic_login_buffer *login_buffer;
4500 struct device *dev = &adapter->vdev->dev;
4501 struct vnic_login_client_data *vlcd;
4502 dma_addr_t rsp_buffer_token;
4503 dma_addr_t buffer_token;
4504 size_t rsp_buffer_size;
4505 union ibmvnic_crq crq;
4506 int client_data_len;
4507 size_t buffer_size;
4508 __be64 *tx_list_p;
4509 __be64 *rx_list_p;
4510 int rc;
4511 int i;
4512
4513 if (!adapter->tx_scrq || !adapter->rx_scrq) {
4514 netdev_err(adapter->netdev,
4515 "RX or TX queues are not allocated, device login failed\n");
4516 return -ENOMEM;
4517 }
4518
4519 release_login_buffer(adapter);
4520 release_login_rsp_buffer(adapter);
4521
4522 client_data_len = vnic_client_data_len(adapter);
4523
4524 buffer_size =
4525 sizeof(struct ibmvnic_login_buffer) +
4526 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
4527 client_data_len;
4528
4529 login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
4530 if (!login_buffer)
4531 goto buf_alloc_failed;
4532
4533 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
4534 DMA_TO_DEVICE);
4535 if (dma_mapping_error(dev, buffer_token)) {
4536 dev_err(dev, "Couldn't map login buffer\n");
4537 goto buf_map_failed;
4538 }
4539
4540 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
4541 sizeof(u64) * adapter->req_tx_queues +
4542 sizeof(u64) * adapter->req_rx_queues +
4543 sizeof(u64) * adapter->req_rx_queues +
4544 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
4545
4546 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
4547 if (!login_rsp_buffer)
4548 goto buf_rsp_alloc_failed;
4549
4550 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
4551 rsp_buffer_size, DMA_FROM_DEVICE);
4552 if (dma_mapping_error(dev, rsp_buffer_token)) {
4553 dev_err(dev, "Couldn't map login rsp buffer\n");
4554 goto buf_rsp_map_failed;
4555 }
4556
4557 adapter->login_buf = login_buffer;
4558 adapter->login_buf_token = buffer_token;
4559 adapter->login_buf_sz = buffer_size;
4560 adapter->login_rsp_buf = login_rsp_buffer;
4561 adapter->login_rsp_buf_token = rsp_buffer_token;
4562 adapter->login_rsp_buf_sz = rsp_buffer_size;
4563
4564 login_buffer->len = cpu_to_be32(buffer_size);
4565 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
4566 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
4567 login_buffer->off_txcomp_subcrqs =
4568 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
4569 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
4570 login_buffer->off_rxcomp_subcrqs =
4571 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
4572 sizeof(u64) * adapter->req_tx_queues);
4573 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
4574 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
4575
4576 tx_list_p = (__be64 *)((char *)login_buffer +
4577 sizeof(struct ibmvnic_login_buffer));
4578 rx_list_p = (__be64 *)((char *)login_buffer +
4579 sizeof(struct ibmvnic_login_buffer) +
4580 sizeof(u64) * adapter->req_tx_queues);
4581
4582 for (i = 0; i < adapter->req_tx_queues; i++) {
4583 if (adapter->tx_scrq[i]) {
4584 tx_list_p[i] =
4585 cpu_to_be64(adapter->tx_scrq[i]->crq_num);
4586 }
4587 }
4588
4589 for (i = 0; i < adapter->req_rx_queues; i++) {
4590 if (adapter->rx_scrq[i]) {
4591 rx_list_p[i] =
4592 cpu_to_be64(adapter->rx_scrq[i]->crq_num);
4593 }
4594 }
4595
4596 /* Insert vNIC login client data */
4597 vlcd = (struct vnic_login_client_data *)
4598 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
4599 login_buffer->client_data_offset =
4600 cpu_to_be32((char *)vlcd - (char *)login_buffer);
4601 login_buffer->client_data_len = cpu_to_be32(client_data_len);
4602
4603 vnic_add_client_data(adapter, vlcd);
4604
4605 netdev_dbg(adapter->netdev, "Login Buffer:\n");
4606 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
4607 netdev_dbg(adapter->netdev, "%016lx\n",
4608 ((unsigned long *)(adapter->login_buf))[i]);
4609 }
4610
4611 memset(&crq, 0, sizeof(crq));
4612 crq.login.first = IBMVNIC_CRQ_CMD;
4613 crq.login.cmd = LOGIN;
4614 crq.login.ioba = cpu_to_be32(buffer_token);
4615 crq.login.len = cpu_to_be32(buffer_size);
4616
4617 adapter->login_pending = true;
4618 rc = ibmvnic_send_crq(adapter, &crq);
4619 if (rc) {
4620 adapter->login_pending = false;
4621 netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc);
4622 goto buf_rsp_map_failed;
4623 }
4624
4625 return 0;
4626
4627 buf_rsp_map_failed:
4628 kfree(login_rsp_buffer);
4629 adapter->login_rsp_buf = NULL;
4630 buf_rsp_alloc_failed:
4631 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
4632 buf_map_failed:
4633 kfree(login_buffer);
4634 adapter->login_buf = NULL;
4635 buf_alloc_failed:
4636 return -ENOMEM;
4637 }
4638
4639 static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
4640 u32 len, u8 map_id)
4641 {
4642 union ibmvnic_crq crq;
4643
4644 memset(&crq, 0, sizeof(crq));
4645 crq.request_map.first = IBMVNIC_CRQ_CMD;
4646 crq.request_map.cmd = REQUEST_MAP;
4647 crq.request_map.map_id = map_id;
4648 crq.request_map.ioba = cpu_to_be32(addr);
4649 crq.request_map.len = cpu_to_be32(len);
4650 return ibmvnic_send_crq(adapter, &crq);
4651 }
4652
4653 static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
4654 {
4655 union ibmvnic_crq crq;
4656
4657 memset(&crq, 0, sizeof(crq));
4658 crq.request_unmap.first = IBMVNIC_CRQ_CMD;
4659 crq.request_unmap.cmd = REQUEST_UNMAP;
4660 crq.request_unmap.map_id = map_id;
4661 return ibmvnic_send_crq(adapter, &crq);
4662 }
4663
4664 static void send_query_map(struct ibmvnic_adapter *adapter)
4665 {
4666 union ibmvnic_crq crq;
4667
4668 memset(&crq, 0, sizeof(crq));
4669 crq.query_map.first = IBMVNIC_CRQ_CMD;
4670 crq.query_map.cmd = QUERY_MAP;
4671 ibmvnic_send_crq(adapter, &crq);
4672 }
4673
4674 /* Send a series of CRQs requesting various capabilities of the VNIC server */
4675 static void send_query_cap(struct ibmvnic_adapter *adapter)
4676 {
4677 union ibmvnic_crq crq;
4678 int cap_reqs;
4679
4680 /* We send out 25 QUERY_CAPABILITY CRQs below. Initialize this count
4681 * upfront. When the tasklet receives a response to all of these, it
4682 * can send out the next protocol message (REQUEST_CAPABILITY).
4683 */
4684 cap_reqs = 25;
4685
4686 atomic_set(&adapter->running_cap_crqs, cap_reqs);
4687
4688 memset(&crq, 0, sizeof(crq));
4689 crq.query_capability.first = IBMVNIC_CRQ_CMD;
4690 crq.query_capability.cmd = QUERY_CAPABILITY;
4691
4692 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
4693 ibmvnic_send_crq(adapter, &crq);
4694 cap_reqs--;
4695
4696 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
4697 ibmvnic_send_crq(adapter, &crq);
4698 cap_reqs--;
4699
4700 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
4701 ibmvnic_send_crq(adapter, &crq);
4702 cap_reqs--;
4703
4704 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
4705 ibmvnic_send_crq(adapter, &crq);
4706 cap_reqs--;
4707
4708 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
4709 ibmvnic_send_crq(adapter, &crq);
4710 cap_reqs--;
4711
4712 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
4713 ibmvnic_send_crq(adapter, &crq);
4714 cap_reqs--;
4715
4716 crq.query_capability.capability =
4717 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
4718 ibmvnic_send_crq(adapter, &crq);
4719 cap_reqs--;
4720
4721 crq.query_capability.capability =
4722 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
4723 ibmvnic_send_crq(adapter, &crq);
4724 cap_reqs--;
4725
4726 crq.query_capability.capability =
4727 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
4728 ibmvnic_send_crq(adapter, &crq);
4729 cap_reqs--;
4730
4731 crq.query_capability.capability =
4732 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
4733 ibmvnic_send_crq(adapter, &crq);
4734 cap_reqs--;
4735
4736 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
4737 ibmvnic_send_crq(adapter, &crq);
4738 cap_reqs--;
4739
4740 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
4741 ibmvnic_send_crq(adapter, &crq);
4742 cap_reqs--;
4743
4744 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
4745 ibmvnic_send_crq(adapter, &crq);
4746 cap_reqs--;
4747
4748 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
4749 ibmvnic_send_crq(adapter, &crq);
4750 cap_reqs--;
4751
4752 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
4753 ibmvnic_send_crq(adapter, &crq);
4754 cap_reqs--;
4755
4756 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
4757 ibmvnic_send_crq(adapter, &crq);
4758 cap_reqs--;
4759
4760 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
4761 ibmvnic_send_crq(adapter, &crq);
4762 cap_reqs--;
4763
4764 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
4765 ibmvnic_send_crq(adapter, &crq);
4766 cap_reqs--;
4767
4768 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
4769 ibmvnic_send_crq(adapter, &crq);
4770 cap_reqs--;
4771
4772 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
4773 ibmvnic_send_crq(adapter, &crq);
4774 cap_reqs--;
4775
4776 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
4777 ibmvnic_send_crq(adapter, &crq);
4778 cap_reqs--;
4779
4780 crq.query_capability.capability =
4781 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
4782 ibmvnic_send_crq(adapter, &crq);
4783 cap_reqs--;
4784
4785 crq.query_capability.capability =
4786 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
4787 ibmvnic_send_crq(adapter, &crq);
4788 cap_reqs--;
4789
4790 crq.query_capability.capability =
4791 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
4792 ibmvnic_send_crq(adapter, &crq);
4793 cap_reqs--;
4794
4795 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
4796
4797 ibmvnic_send_crq(adapter, &crq);
4798 cap_reqs--;
4799
4800 /* Keep at end to catch any discrepancy between expected and actual
4801 * CRQs sent.
4802 */
4803 WARN_ON(cap_reqs != 0);
4804 }
4805
4806 static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
4807 {
4808 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
4809 struct device *dev = &adapter->vdev->dev;
4810 union ibmvnic_crq crq;
4811
4812 adapter->ip_offload_tok =
4813 dma_map_single(dev,
4814 &adapter->ip_offload_buf,
4815 buf_sz,
4816 DMA_FROM_DEVICE);
4817
4818 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
4819 if (!firmware_has_feature(FW_FEATURE_CMO))
4820 dev_err(dev, "Couldn't map offload buffer\n");
4821 return;
4822 }
4823
4824 memset(&crq, 0, sizeof(crq));
4825 crq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
4826 crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
4827 crq.query_ip_offload.len = cpu_to_be32(buf_sz);
4828 crq.query_ip_offload.ioba =
4829 cpu_to_be32(adapter->ip_offload_tok);
4830
4831 ibmvnic_send_crq(adapter, &crq);
4832 }
4833
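/* Program the offloads accepted from the query response and update the
 * netdev feature flags to match: checksum offloads, TSO/TSO6, and GRO;
 * large receive stays disabled. Outside of probe, features the server
 * no longer supports are cleared and newly supported, wanted ones are
 * restored.
 */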
4834 static void send_control_ip_offload(struct ibmvnic_adapter *adapter)
4835 {
4836 struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl;
4837 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
4838 struct device *dev = &adapter->vdev->dev;
4839 netdev_features_t old_hw_features = 0;
4840 union ibmvnic_crq crq;
4841
4842 adapter->ip_offload_ctrl_tok =
4843 dma_map_single(dev,
4844 ctrl_buf,
4845 sizeof(adapter->ip_offload_ctrl),
4846 DMA_TO_DEVICE);
4847
4848 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
4849 dev_err(dev, "Couldn't map ip offload control buffer\n");
4850 return;
4851 }
4852
4853 ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4854 ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB);
4855 ctrl_buf->ipv4_chksum = buf->ipv4_chksum;
4856 ctrl_buf->ipv6_chksum = buf->ipv6_chksum;
4857 ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
4858 ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum;
4859 ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
4860 ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum;
4861 ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4;
4862 ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6;
4863
4864 /* large_rx disabled for now, additional features needed */
4865 ctrl_buf->large_rx_ipv4 = 0;
4866 ctrl_buf->large_rx_ipv6 = 0;
4867
4868 if (adapter->state != VNIC_PROBING) {
4869 old_hw_features = adapter->netdev->hw_features;
4870 adapter->netdev->hw_features = 0;
4871 }
4872
4873 adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
4874
4875 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
4876 adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
4877
4878 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
4879 adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
4880
4881 if ((adapter->netdev->features &
4882 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
4883 adapter->netdev->hw_features |= NETIF_F_RXCSUM;
4884
4885 if (buf->large_tx_ipv4)
4886 adapter->netdev->hw_features |= NETIF_F_TSO;
4887 if (buf->large_tx_ipv6)
4888 adapter->netdev->hw_features |= NETIF_F_TSO6;
4889
4890 if (adapter->state == VNIC_PROBING) {
4891 adapter->netdev->features |= adapter->netdev->hw_features;
4892 } else if (old_hw_features != adapter->netdev->hw_features) {
4893 netdev_features_t tmp = 0;
4894
4895 /* disable features no longer supported */
4896 adapter->netdev->features &= adapter->netdev->hw_features;
4897 /* turn on features now supported if previously enabled */
4898 tmp = (old_hw_features ^ adapter->netdev->hw_features) &
4899 adapter->netdev->hw_features;
4900 adapter->netdev->features |=
4901 tmp & adapter->netdev->wanted_features;
4902 }
4903
4904 memset(&crq, 0, sizeof(crq));
4905 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
4906 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
4907 crq.control_ip_offload.len =
4908 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4909 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
4910 ibmvnic_send_crq(adapter, &crq);
4911 }
4912
4913 static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
4914 struct ibmvnic_adapter *adapter)
4915 {
4916 struct device *dev = &adapter->vdev->dev;
4917
4918 if (crq->get_vpd_size_rsp.rc.code) {
4919 dev_err(dev, "Error retrieving VPD size, rc=%x\n",
4920 crq->get_vpd_size_rsp.rc.code);
4921 complete(&adapter->fw_done);
4922 return;
4923 }
4924
4925 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
4926 complete(&adapter->fw_done);
4927 }
4928
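/* Parse the VPD buffer returned by the server: locate the "RM" keyword,
 * read the one-byte length that follows it, and copy that many bytes of
 * firmware level into adapter->fw_version, bounds-checking each step.
 * Falls back to "N/A" if no level could be extracted.
 */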
4929 static void handle_vpd_rsp(union ibmvnic_crq *crq,
4930 struct ibmvnic_adapter *adapter)
4931 {
4932 struct device *dev = &adapter->vdev->dev;
4933 unsigned char *substr = NULL;
4934 u8 fw_level_len = 0;
4935
4936 memset(adapter->fw_version, 0, 32);
4937
4938 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
4939 DMA_FROM_DEVICE);
4940
4941 if (crq->get_vpd_rsp.rc.code) {
4942 dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
4943 crq->get_vpd_rsp.rc.code);
4944 goto complete;
4945 }
4946
4947 /* get the position of the firmware version info
4948 * located after the ASCII 'RM' substring in the buffer
4949 */
4950 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
4951 if (!substr) {
4952 dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
4953 goto complete;
4954 }
4955
4956 /* get length of firmware level ASCII substring */
4957 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
4958 fw_level_len = *(substr + 2);
4959 } else {
4960 dev_info(dev, "Length of FW substr extrapolated VDP buff\n");
4961 goto complete;
4962 }
4963
4964 /* copy firmware version string from vpd into adapter */
4965 if ((substr + 3 + fw_level_len) <
4966 (adapter->vpd->buff + adapter->vpd->len)) {
4967 strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
4968 } else {
4969 dev_info(dev, "FW substr extrapolated VPD buff\n");
4970 }
4971
4972 complete:
4973 if (adapter->fw_version[0] == '\0')
4974 strscpy((char *)adapter->fw_version, "N/A", sizeof(adapter->fw_version));
4975 complete(&adapter->fw_done);
4976 }
4977
4978 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
4979 {
4980 struct device *dev = &adapter->vdev->dev;
4981 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
4982 int i;
4983
4984 dma_unmap_single(dev, adapter->ip_offload_tok,
4985 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
4986
4987 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
4988 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
4989 netdev_dbg(adapter->netdev, "%016lx\n",
4990 ((unsigned long *)(buf))[i]);
4991
4992 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
4993 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
4994 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
4995 buf->tcp_ipv4_chksum);
4996 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
4997 buf->tcp_ipv6_chksum);
4998 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
4999 buf->udp_ipv4_chksum);
5000 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
5001 buf->udp_ipv6_chksum);
5002 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
5003 buf->large_tx_ipv4);
5004 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
5005 buf->large_tx_ipv6);
5006 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
5007 buf->large_rx_ipv4);
5008 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
5009 buf->large_rx_ipv6);
5010 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
5011 buf->max_ipv4_header_size);
5012 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
5013 buf->max_ipv6_header_size);
5014 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
5015 buf->max_tcp_header_size);
5016 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
5017 buf->max_udp_header_size);
5018 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
5019 buf->max_large_tx_size);
5020 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
5021 buf->max_large_rx_size);
5022 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
5023 buf->ipv6_extension_header);
5024 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
5025 buf->tcp_pseudosum_req);
5026 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
5027 buf->num_ipv6_ext_headers);
5028 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
5029 buf->off_ipv6_ext_headers);
5030
5031 send_control_ip_offload(adapter);
5032 }
5033
5034 static const char *ibmvnic_fw_err_cause(u16 cause)
5035 {
5036 switch (cause) {
5037 case ADAPTER_PROBLEM:
5038 return "adapter problem";
5039 case BUS_PROBLEM:
5040 return "bus problem";
5041 case FW_PROBLEM:
5042 return "firmware problem";
5043 case DD_PROBLEM:
5044 return "device driver problem";
5045 case EEH_RECOVERY:
5046 return "EEH recovery";
5047 case FW_UPDATED:
5048 return "firmware updated";
5049 case LOW_MEMORY:
5050 return "low Memory";
5051 default:
5052 return "unknown";
5053 }
5054 }
5055
5056 static void handle_error_indication(union ibmvnic_crq *crq,
5057 struct ibmvnic_adapter *adapter)
5058 {
5059 struct device *dev = &adapter->vdev->dev;
5060 u16 cause;
5061
5062 cause = be16_to_cpu(crq->error_indication.error_cause);
5063
5064 dev_warn_ratelimited(dev,
5065 "Firmware reports %serror, cause: %s. Starting recovery...\n",
5066 crq->error_indication.flags
5067 & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
5068 ibmvnic_fw_err_cause(cause));
5069
5070 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
5071 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
5072 else
5073 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
5074 }
5075
5076 static int handle_change_mac_rsp(union ibmvnic_crq *crq,
5077 struct ibmvnic_adapter *adapter)
5078 {
5079 struct net_device *netdev = adapter->netdev;
5080 struct device *dev = &adapter->vdev->dev;
5081 long rc;
5082
5083 rc = crq->change_mac_addr_rsp.rc.code;
5084 if (rc) {
5085 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
5086 goto out;
5087 }
5088 /* crq->change_mac_addr.mac_addr is the requested one
5089 * crq->change_mac_addr_rsp.mac_addr is the returned valid one.
5090 */
5091 eth_hw_addr_set(netdev, &crq->change_mac_addr_rsp.mac_addr[0]);
5092 ether_addr_copy(adapter->mac_addr,
5093 &crq->change_mac_addr_rsp.mac_addr[0]);
5094 out:
5095 complete(&adapter->fw_done);
5096 return rc;
5097 }
5098
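/* Handle a REQUEST_CAPABILITY response. On PARTIALSUCCESS the server's
 * counter-offer (or the fallback MTU) is adopted and the full set of
 * capability requests is resent. Once all outstanding responses have
 * arrived, the next protocol step (QUERY_IP_OFFLOAD) is issued.
 */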
5099 static void handle_request_cap_rsp(union ibmvnic_crq *crq,
5100 struct ibmvnic_adapter *adapter)
5101 {
5102 struct device *dev = &adapter->vdev->dev;
5103 u64 *req_value;
5104 char *name;
5105
5106 atomic_dec(&adapter->running_cap_crqs);
5107 netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n",
5108 atomic_read(&adapter->running_cap_crqs));
5109 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
5110 case REQ_TX_QUEUES:
5111 req_value = &adapter->req_tx_queues;
5112 name = "tx";
5113 break;
5114 case REQ_RX_QUEUES:
5115 req_value = &adapter->req_rx_queues;
5116 name = "rx";
5117 break;
5118 case REQ_RX_ADD_QUEUES:
5119 req_value = &adapter->req_rx_add_queues;
5120 name = "rx_add";
5121 break;
5122 case REQ_TX_ENTRIES_PER_SUBCRQ:
5123 req_value = &adapter->req_tx_entries_per_subcrq;
5124 name = "tx_entries_per_subcrq";
5125 break;
5126 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
5127 req_value = &adapter->req_rx_add_entries_per_subcrq;
5128 name = "rx_add_entries_per_subcrq";
5129 break;
5130 case REQ_MTU:
5131 req_value = &adapter->req_mtu;
5132 name = "mtu";
5133 break;
5134 case PROMISC_REQUESTED:
5135 req_value = &adapter->promisc;
5136 name = "promisc";
5137 break;
5138 default:
5139 dev_err(dev, "Got invalid cap request rsp %d\n",
5140 crq->request_capability.capability);
5141 return;
5142 }
5143
5144 switch (crq->request_capability_rsp.rc.code) {
5145 case SUCCESS:
5146 break;
5147 case PARTIALSUCCESS:
5148 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
5149 *req_value,
5150 (long)be64_to_cpu(crq->request_capability_rsp.number),
5151 name);
5152
5153 if (be16_to_cpu(crq->request_capability_rsp.capability) ==
5154 REQ_MTU) {
5155 pr_err("mtu of %llu is not supported. Reverting.\n",
5156 *req_value);
5157 *req_value = adapter->fallback.mtu;
5158 } else {
5159 *req_value =
5160 be64_to_cpu(crq->request_capability_rsp.number);
5161 }
5162
5163 send_request_cap(adapter, 1);
5164 return;
5165 default:
5166 dev_err(dev, "Error %d in request cap rsp\n",
5167 crq->request_capability_rsp.rc.code);
5168 return;
5169 }
5170
5171 /* Done receiving requested capabilities, query IP offload support */
5172 if (atomic_read(&adapter->running_cap_crqs) == 0)
5173 send_query_ip_offload(adapter);
5174 }
5175
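/* Handle the LOGIN response: unmap the login buffers, sanity-check the
 * response against the request, record the negotiated rx buffer size
 * and the tx/rx sub-CRQ handles, then complete init_done. A non-zero
 * response code or a pending failover is propagated via init_done_rc.
 */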
5176 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
5177 struct ibmvnic_adapter *adapter)
5178 {
5179 struct device *dev = &adapter->vdev->dev;
5180 struct net_device *netdev = adapter->netdev;
5181 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
5182 struct ibmvnic_login_buffer *login = adapter->login_buf;
5183 u64 *tx_handle_array;
5184 u64 *rx_handle_array;
5185 int num_tx_pools;
5186 int num_rx_pools;
5187 u64 *size_array;
5188 int i;
5189
5190 /* CHECK: Test/set of login_pending does not need to be atomic
5191 * because only ibmvnic_tasklet tests/clears this.
5192 */
5193 if (!adapter->login_pending) {
5194 netdev_warn(netdev, "Ignoring unexpected login response\n");
5195 return 0;
5196 }
5197 adapter->login_pending = false;
5198
5199 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
5200 DMA_TO_DEVICE);
5201 dma_unmap_single(dev, adapter->login_rsp_buf_token,
5202 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
5203
5204 /* If the number of queues requested can't be allocated by the
5205 * server, the login response will return with code 1. We will need
5206 * to resend the login buffer with fewer queues requested.
5207 */
5208 if (login_rsp_crq->generic.rc.code) {
5209 adapter->init_done_rc = login_rsp_crq->generic.rc.code;
5210 complete(&adapter->init_done);
5211 return 0;
5212 }
5213
5214 if (adapter->failover_pending) {
5215 adapter->init_done_rc = -EAGAIN;
5216 netdev_dbg(netdev, "Failover pending, ignoring login response\n");
5217 complete(&adapter->init_done);
5218 /* login response buffer will be released on reset */
5219 return 0;
5220 }
5221
5222 netdev->mtu = adapter->req_mtu - ETH_HLEN;
5223
5224 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
5225 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
5226 netdev_dbg(adapter->netdev, "%016lx\n",
5227 ((unsigned long *)(adapter->login_rsp_buf))[i]);
5228 }
5229
5230 /* Sanity checks */
5231 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
5232 (be32_to_cpu(login->num_rxcomp_subcrqs) *
5233 adapter->req_rx_add_queues !=
5234 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
5235 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
5236 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
5237 return -EIO;
5238 }
5239 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
5240 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
5241 /* variable buffer sizes are not supported, so just read the
5242 * first entry.
5243 */
5244 adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]);
5245
5246 num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
5247 num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
5248
5249 tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
5250 be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
5251 rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
5252 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));
5253
5254 for (i = 0; i < num_tx_pools; i++)
5255 adapter->tx_scrq[i]->handle = tx_handle_array[i];
5256
5257 for (i = 0; i < num_rx_pools; i++)
5258 adapter->rx_scrq[i]->handle = rx_handle_array[i];
5259
5260 adapter->num_active_tx_scrqs = num_tx_pools;
5261 adapter->num_active_rx_scrqs = num_rx_pools;
5262 release_login_rsp_buffer(adapter);
5263 release_login_buffer(adapter);
5264 complete(&adapter->init_done);
5265
5266 return 0;
5267 }
5268
5269 static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
5270 struct ibmvnic_adapter *adapter)
5271 {
5272 struct device *dev = &adapter->vdev->dev;
5273 long rc;
5274
5275 rc = crq->request_unmap_rsp.rc.code;
5276 if (rc)
5277 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
5278 }
5279
handle_query_map_rsp(union ibmvnic_crq * crq,struct ibmvnic_adapter * adapter)5280 static void handle_query_map_rsp(union ibmvnic_crq *crq,
5281 struct ibmvnic_adapter *adapter)
5282 {
5283 struct net_device *netdev = adapter->netdev;
5284 struct device *dev = &adapter->vdev->dev;
5285 long rc;
5286
5287 rc = crq->query_map_rsp.rc.code;
5288 if (rc) {
5289 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
5290 return;
5291 }
5292 netdev_dbg(netdev, "page_size = %d\ntot_pages = %u\nfree_pages = %u\n",
5293 crq->query_map_rsp.page_size,
5294 __be32_to_cpu(crq->query_map_rsp.tot_pages),
5295 __be32_to_cpu(crq->query_map_rsp.free_pages));
5296 }
5297
handle_query_cap_rsp(union ibmvnic_crq * crq,struct ibmvnic_adapter * adapter)5298 static void handle_query_cap_rsp(union ibmvnic_crq *crq,
5299 struct ibmvnic_adapter *adapter)
5300 {
5301 struct net_device *netdev = adapter->netdev;
5302 struct device *dev = &adapter->vdev->dev;
5303 long rc;
5304
5305 atomic_dec(&adapter->running_cap_crqs);
5306 netdev_dbg(netdev, "Outstanding queries: %d\n",
5307 atomic_read(&adapter->running_cap_crqs));
5308 rc = crq->query_capability.rc.code;
5309 if (rc) {
5310 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
5311 goto out;
5312 }
5313
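	/* Capability values arrive as big-endian u64s; each case below
	 * caches the CPU-endian value. TX_RX_DESC_REQ is the exception:
	 * it is stored raw (big-endian), hence the %llx debug print.
	 */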
	switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
		adapter->min_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_queues = %lld\n",
			   adapter->min_tx_queues);
		break;
	case MIN_RX_QUEUES:
		adapter->min_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_queues = %lld\n",
			   adapter->min_rx_queues);
		break;
	case MIN_RX_ADD_QUEUES:
		adapter->min_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
			   adapter->min_rx_add_queues);
		break;
	case MAX_TX_QUEUES:
		adapter->max_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_queues = %lld\n",
			   adapter->max_tx_queues);
		break;
	case MAX_RX_QUEUES:
		adapter->max_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_queues = %lld\n",
			   adapter->max_rx_queues);
		break;
	case MAX_RX_ADD_QUEUES:
		adapter->max_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
			   adapter->max_rx_add_queues);
		break;
	case MIN_TX_ENTRIES_PER_SUBCRQ:
		adapter->min_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
			   adapter->min_tx_entries_per_subcrq);
		break;
	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->min_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->min_rx_add_entries_per_subcrq);
		break;
	case MAX_TX_ENTRIES_PER_SUBCRQ:
		adapter->max_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
			   adapter->max_tx_entries_per_subcrq);
		break;
	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->max_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->max_rx_add_entries_per_subcrq);
		break;
	case TCP_IP_OFFLOAD:
		adapter->tcp_ip_offload =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
			   adapter->tcp_ip_offload);
		break;
	case PROMISC_SUPPORTED:
		adapter->promisc_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "promisc_supported = %lld\n",
			   adapter->promisc_supported);
		break;
	case MIN_MTU:
		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
		break;
	case MAX_MTU:
		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
		break;
	case MAX_MULTICAST_FILTERS:
		adapter->max_multicast_filters =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
			   adapter->max_multicast_filters);
		break;
	case VLAN_HEADER_INSERTION:
		adapter->vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		if (adapter->vlan_header_insertion)
			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
			   adapter->vlan_header_insertion);
		break;
	case RX_VLAN_HEADER_INSERTION:
		adapter->rx_vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
			   adapter->rx_vlan_header_insertion);
		break;
	case MAX_TX_SG_ENTRIES:
		adapter->max_tx_sg_entries =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
			   adapter->max_tx_sg_entries);
		break;
	case RX_SG_SUPPORTED:
		adapter->rx_sg_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
			   adapter->rx_sg_supported);
		break;
	case OPT_TX_COMP_SUB_QUEUES:
		adapter->opt_tx_comp_sub_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
			   adapter->opt_tx_comp_sub_queues);
		break;
	case OPT_RX_COMP_QUEUES:
		adapter->opt_rx_comp_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
			   adapter->opt_rx_comp_queues);
		break;
	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;

	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   crq->query_capability.capability);
	}

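	/* When the last outstanding query response has been counted off,
	 * move on to requesting the capability values we want to use.
	 */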
out:
	if (atomic_read(&adapter->running_cap_crqs) == 0)
		send_request_cap(adapter, 0);
}

static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;
	int rc;

	memset(&crq, 0, sizeof(crq));
	crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
	crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;

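	/* fw_lock serializes commands that share the single fw_done
	 * completion; reinitialize it before sending so a stale
	 * completion cannot satisfy the wait below.
	 */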
	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	mutex_unlock(&adapter->fw_lock);
	return adapter->fw_done_rc ? -EIO : 0;
}

static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
				       struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;
	__be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed);

	rc = crq->query_phys_parms_rsp.rc.code;
	if (rc) {
		netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
		return rc;
	}
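	/* Map the adapter-reported link speed onto the ethtool SPEED_*
	 * constants used for link queries.
	 */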
	switch (rspeed) {
	case IBMVNIC_10MBPS:
		adapter->speed = SPEED_10;
		break;
	case IBMVNIC_100MBPS:
		adapter->speed = SPEED_100;
		break;
	case IBMVNIC_1GBPS:
		adapter->speed = SPEED_1000;
		break;
	case IBMVNIC_10GBPS:
		adapter->speed = SPEED_10000;
		break;
	case IBMVNIC_25GBPS:
		adapter->speed = SPEED_25000;
		break;
	case IBMVNIC_40GBPS:
		adapter->speed = SPEED_40000;
		break;
	case IBMVNIC_50GBPS:
		adapter->speed = SPEED_50000;
		break;
	case IBMVNIC_100GBPS:
		adapter->speed = SPEED_100000;
		break;
	case IBMVNIC_200GBPS:
		adapter->speed = SPEED_200000;
		break;
	default:
		if (netif_carrier_ok(netdev))
			netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
		adapter->speed = SPEED_UNKNOWN;
	}
	if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
		adapter->duplex = DUPLEX_FULL;
	else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
		adapter->duplex = DUPLEX_HALF;
	else
		adapter->duplex = DUPLEX_UNKNOWN;

	return rc;
}

static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   (unsigned long)cpu_to_be64(u64_crq[0]),
		   (unsigned long)cpu_to_be64(u64_crq[1]));
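	/* The first byte of a CRQ entry distinguishes transport-level
	 * events (init handshake, transport errors) from command
	 * responses, which are dispatched on the cmd byte further below.
	 */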
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			adapter->from_passive_init = true;
			/* Discard any stale login responses from prev reset.
			 * CHECK: should we clear even on INIT_COMPLETE?
			 */
			adapter->login_pending = false;

			if (adapter->state == VNIC_DOWN)
				rc = ibmvnic_reset(adapter, VNIC_RESET_PASSIVE_INIT);
			else
				rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);

			if (rc && rc != -EBUSY) {
				/* We were unable to schedule the failover
				 * reset either because the adapter was still
				 * probing (eg: during kexec) or we could not
				 * allocate memory. Clear the failover_pending
				 * flag since no one else will. We ignore
				 * EBUSY because it means either FAILOVER reset
				 * is already scheduled or the adapter is
				 * being removed.
				 */
				netdev_err(netdev,
					   "Error %ld scheduling failover reset\n",
					   rc);
				adapter->failover_pending = false;
			}

			if (!completion_done(&adapter->init_done)) {
				if (!adapter->init_done_rc)
					adapter->init_done_rc = -EAGAIN;
				complete(&adapter->init_done);
			}

			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			adapter->crq.active = true;
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		netif_carrier_off(netdev);
		adapter->crq.active = false;
		/* terminate any thread waiting for a response
		 * from the device
		 */
		if (!completion_done(&adapter->fw_done)) {
			adapter->fw_done_rc = -EIO;
			complete(&adapter->fw_done);
		}

		/* if we got here during crq-init, retry crq-init */
		if (!completion_done(&adapter->init_done)) {
			adapter->init_done_rc = -EAGAIN;
			complete(&adapter->init_done);
		}

		if (!completion_done(&adapter->stats_done))
			complete(&adapter->stats_done);
		if (test_bit(0, &adapter->resetting))
			adapter->force_reset_recovery = true;
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Migrated, re-enabling adapter\n");
			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			adapter->failover_pending = true;
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

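	/* gen_crq->first was IBMVNIC_CRQ_CMD_RSP: dispatch on the command
	 * opcode echoed back by the server.
	 */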
	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		ibmvnic_version =
		    be16_to_cpu(crq->version_exchange_rsp.version);
		dev_info(dev, "Partner protocol version is %d\n",
			 ibmvnic_version);
		send_query_cap(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		adapter->fw_done_rc = crq->request_map_rsp.rc.code;
		complete(&adapter->fw_done);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev,
			   "Got Logical Link State Response, state: %d rc: %d\n",
			   crq->logical_link_state_rsp.link_state,
			   crq->logical_link_state_rsp.rc.code);
		adapter->logical_link_state =
		    crq->logical_link_state_rsp.link_state;
		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
		complete(&adapter->init_done);
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
		    crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
		    crq->link_state_indication.logical_link_state;
		if (adapter->phys_link_state && adapter->logical_link_state)
			netif_carrier_on(netdev);
		else
			netif_carrier_off(netdev);
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		complete(&adapter->init_done);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	case GET_VPD_SIZE_RSP:
		handle_vpd_size_rsp(crq, adapter);
		break;
	case GET_VPD_RSP:
		handle_vpd_rsp(crq, adapter);
		break;
	case QUERY_PHYS_PARMS_RSP:
		adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
		complete(&adapter->fw_done);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}

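/* Hard IRQ handler: all CRQ processing is deferred to the tasklet. */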
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;

	tasklet_schedule(&adapter->tasklet);
	return IRQ_HANDLED;
}

static void ibmvnic_tasklet(struct tasklet_struct *t)
{
	struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;
	unsigned long flags;

	spin_lock_irqsave(&queue->lock, flags);

	/* Pull all the valid messages off the CRQ */
	while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
		/* This barrier makes sure ibmvnic_next_crq()'s
		 * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
		 * before ibmvnic_handle_crq()'s
		 * switch(gen_crq->first) and switch(gen_crq->cmd).
		 */
		dma_rmb();
		ibmvnic_handle_crq(crq, adapter);
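		/* Mark the entry consumed so ibmvnic_next_crq() will not
		 * return it again.
		 */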
		crq->generic.first = 0;
	}

	spin_unlock_irqrestore(&queue->lock, flags);
}

static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}

static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	if (!crq->msgs)
		return -EINVAL;

	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;
	crq->active = false;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}

static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	crq->active = false;
}

static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */

	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_setup(&adapter->tasklet, (void *)ibmvnic_tasklet);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
		 adapter->vdev->unit_address);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		/* propagate the failure; retrc was already reset to 0 */
		retrc = rc;
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		retrc = rc;
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	/* process any CRQs that were queued before we enabled interrupts */
	tasklet_schedule(&adapter->tasklet);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}

static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(20000);
	u64 old_num_rx_queues = adapter->req_rx_queues;
	u64 old_num_tx_queues = adapter->req_tx_queues;
	int rc;

	adapter->from_passive_init = false;

	rc = ibmvnic_send_crq_init(adapter);
	if (rc) {
		dev_err(dev, "Send crq init failed with error %d\n", rc);
		return rc;
	}

	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -ETIMEDOUT;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		dev_err(dev, "CRQ-init failed, %d\n", adapter->init_done_rc);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		dev_err(dev, "CRQ-init failed, passive-init\n");
		return -EINVAL;
	}

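	/* On a soft reset with unchanged queue counts the existing sub-CRQs
	 * can be reused; otherwise tear them down and reallocate.
	 */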
	if (reset &&
	    test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY) {
		if (adapter->req_rx_queues != old_num_rx_queues ||
		    adapter->req_tx_queues != old_num_tx_queues) {
			release_sub_crqs(adapter, 0);
			rc = init_sub_crqs(adapter);
		} else {
			/* no need to reinitialize completely, but we do
			 * need to clean up transmits that were in flight
			 * when we processed the reset. Failure to do so
			 * will confound the upper layer, usually TCP, by
			 * creating the illusion of transmits that are
			 * awaiting completion.
			 */
			clean_tx_pools(adapter);

			rc = reset_sub_crq_queues(adapter);
		}
	} else {
		rc = init_sub_crqs(adapter);
	}

	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}

static struct device_attribute dev_attr_failover;

static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	unsigned long flags;
	bool init_success;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	adapter->state = VNIC_PROBING;
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->login_pending = false;
	memset(&adapter->map_ids, 0, sizeof(adapter->map_ids));
	/* map_ids start at 1, so ensure map_id 0 is always "in-use" */
	bitmap_set(adapter->map_ids, 0, 1);

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	eth_hw_addr_set(netdev, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
	INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
			  __ibmvnic_delayed_reset);
	INIT_LIST_HEAD(&adapter->rwi_list);
	spin_lock_init(&adapter->rwi_lock);
	spin_lock_init(&adapter->state_lock);
	mutex_init(&adapter->fw_lock);
	init_completion(&adapter->probe_done);
	init_completion(&adapter->init_done);
	init_completion(&adapter->fw_done);
	init_completion(&adapter->reset_done);
	init_completion(&adapter->stats_done);
	clear_bit(0, &adapter->resetting);
	adapter->prev_rx_buf_sz = 0;
	adapter->prev_mtu = 0;

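	/* Retry the full CRQ bring-up while the partner reports -EAGAIN,
	 * e.g. when a failover or transport event races with probe.
	 */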
	init_success = false;
	do {
		reinit_init_done(adapter);

		/* clear any failovers we got in the previous pass
		 * since we are reinitializing the CRQ
		 */
		adapter->failover_pending = false;

		/* If we had already initialized CRQ, we may have one or
		 * more resets queued already. Discard those and release
		 * the CRQ before initializing the CRQ again.
		 */
		release_crq_queue(adapter);

		/* Since we are still in PROBING state, __ibmvnic_reset()
		 * will not access the ->rwi_list and since we released CRQ,
		 * we won't get _new_ transport events. But there may be an
		 * ongoing ibmvnic_reset() call. So serialize access to
		 * rwi_list. If we win the race, ibmvnic_reset() could add
		 * a reset after we purged but that's ok - we just may end
		 * up with an extra reset (i.e. similar to having two or more
		 * resets in the queue at once).
		 * CHECK.
		 */
		spin_lock_irqsave(&adapter->rwi_lock, flags);
		flush_reset_queue(adapter);
		spin_unlock_irqrestore(&adapter->rwi_lock, flags);

		rc = init_crq_queue(adapter);
		if (rc) {
			dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
				rc);
			goto ibmvnic_init_fail;
		}

		rc = ibmvnic_reset_init(adapter, false);
	} while (rc == -EAGAIN);

	/* We are ignoring the error from ibmvnic_reset_init() assuming that the
	 * partner is not ready. CRQ is not active. When the partner becomes
	 * ready, we will do the passive init reset.
	 */

	if (!rc)
		init_success = true;

	rc = init_stats_buffers(adapter);
	if (rc)
		goto ibmvnic_init_fail;

	rc = init_stats_token(adapter);
	if (rc)
		goto ibmvnic_stats_fail;

	rc = device_create_file(&dev->dev, &dev_attr_failover);
	if (rc)
		goto ibmvnic_dev_file_err;

	netif_carrier_off(netdev);

	if (init_success) {
		adapter->state = VNIC_PROBED;
		netdev->mtu = adapter->req_mtu - ETH_HLEN;
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
	} else {
		adapter->state = VNIC_DOWN;
	}

	adapter->wait_for_reset = false;
	adapter->last_reset_time = jiffies;

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto ibmvnic_register_fail;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	complete(&adapter->probe_done);

	return 0;

ibmvnic_register_fail:
	device_remove_file(&dev->dev, &dev_attr_failover);

ibmvnic_dev_file_err:
	release_stats_token(adapter);

ibmvnic_stats_fail:
	release_stats_buffers(adapter);

ibmvnic_init_fail:
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	/* cleanup worker thread after releasing CRQ so we don't get
	 * transport events (i.e. new work items for the worker thread).
	 */
	adapter->state = VNIC_REMOVING;
	complete(&adapter->probe_done);
	flush_work(&adapter->ibmvnic_reset);
	flush_delayed_work(&adapter->ibmvnic_delayed_reset);

	flush_reset_queue(adapter);

	mutex_destroy(&adapter->fw_lock);
	free_netdev(netdev);

	return rc;
}

static void ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&adapter->state_lock, flags);

	/* If ibmvnic_reset() is scheduling a reset, wait for it to
	 * finish. Then, set the state to REMOVING to prevent it from
	 * scheduling any more work and to have reset functions ignore
	 * any resets that have already been scheduled. Drop the lock
	 * after setting state, so __ibmvnic_reset() which is called
	 * from the flush_work() below, can make progress.
	 */
	spin_lock(&adapter->rwi_lock);
	adapter->state = VNIC_REMOVING;
	spin_unlock(&adapter->rwi_lock);

	spin_unlock_irqrestore(&adapter->state_lock, flags);

	flush_work(&adapter->ibmvnic_reset);
	flush_delayed_work(&adapter->ibmvnic_delayed_reset);

	rtnl_lock();
	unregister_netdevice(netdev);

	release_resources(adapter);
	release_rx_pools(adapter);
	release_tx_pools(adapter);
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	release_stats_token(adapter);
	release_stats_buffers(adapter);

	adapter->state = VNIC_REMOVED;

	rtnl_unlock();
	mutex_destroy(&adapter->fw_lock);
	device_remove_file(&dev->dev, &dev_attr_failover);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);
}

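/* sysfs 'failover' attribute: writing "1" asks the hypervisor to raise a
 * session error against this adapter, which triggers a client failover.
 * If the H_VIOCTL calls fail, fall back to scheduling a local failover
 * reset through the usual reset machinery.
 */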
static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	__be64 session_token;
	long rc;

	if (!sysfs_streq(buf, "1"))
		return -EINVAL;

	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
			 H_GET_SESSION_TOKEN, 0, 0, 0);
	if (rc) {
		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
			   rc);
		goto last_resort;
	}

	session_token = (__be64)retbuf[0];
	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
		   be64_to_cpu(session_token));
	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_SESSION_ERR_DETECTED, session_token, 0, 0);
	if (rc) {
		netdev_err(netdev,
			   "H_VIOCTL initiated failover failed, rc %ld\n",
			   rc);
		goto last_resort;
	}

	return count;

last_resort:
	netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n");
	ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);

	return count;
}
static DEVICE_ATTR_WO(failover);

static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}

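/* PM resume: kick the tasklet to drain any CRQs that arrived while the
 * partition was suspended.
 */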
static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->state != VNIC_OPEN)
		return 0;

	tasklet_schedule(&adapter->tasklet);

	return 0;
}

static const struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table       = ibmvnic_device_table,
	.probe          = ibmvnic_probe,
	.remove         = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name           = ibmvnic_driver_name,
	.pm             = &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);
