/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/mlx5_ifc_vdpa.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"

/* intf dev list mutex */
static DEFINE_MUTEX(mlx5_intf_mutex);
static DEFINE_IDA(mlx5_adev_ida);

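/* Ethernet representors require an embedded switch: the eswitch manager
 * capability and switchdev mode must both be present.
 */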
static bool is_eth_rep_supported(struct mlx5_core_dev *dev)
{
        if (!IS_ENABLED(CONFIG_MLX5_ESWITCH))
                return false;

        if (!MLX5_ESWITCH_MANAGER(dev))
                return false;

        if (!is_mdev_switchdev_mode(dev))
                return false;

        return true;
}

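/* Check every device capability the mlx5e netdev driver relies on.
 * Missing mandatory capabilities fail the check with a warning; missing
 * optional ones (self-loopback prevention, CQ moderation) only warn.
 */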
bool mlx5_eth_supported(struct mlx5_core_dev *dev)
{
        if (!IS_ENABLED(CONFIG_MLX5_CORE_EN))
                return false;

        if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
                return false;

        if (!MLX5_CAP_GEN(dev, eth_net_offloads)) {
                mlx5_core_warn(dev, "Missing eth_net_offloads capability\n");
                return false;
        }

        if (!MLX5_CAP_GEN(dev, nic_flow_table)) {
                mlx5_core_warn(dev, "Missing nic_flow_table capability\n");
                return false;
        }

        if (!MLX5_CAP_ETH(dev, csum_cap)) {
                mlx5_core_warn(dev, "Missing csum_cap capability\n");
                return false;
        }

        if (!MLX5_CAP_ETH(dev, max_lso_cap)) {
                mlx5_core_warn(dev, "Missing max_lso_cap capability\n");
                return false;
        }

        if (!MLX5_CAP_ETH(dev, vlan_cap)) {
                mlx5_core_warn(dev, "Missing vlan_cap capability\n");
                return false;
        }

        if (!MLX5_CAP_ETH(dev, rss_ind_tbl_cap)) {
                mlx5_core_warn(dev, "Missing rss_ind_tbl_cap capability\n");
                return false;
        }

        if (MLX5_CAP_FLOWTABLE(dev,
                               flow_table_properties_nic_receive.max_ft_level) < 3) {
                mlx5_core_warn(dev, "max_ft_level < 3\n");
                return false;
        }

        if (!MLX5_CAP_ETH(dev, self_lb_en_modifiable))
                mlx5_core_warn(dev, "Self loop back prevention is not supported\n");
        if (!MLX5_CAP_GEN(dev, cq_moderation))
                mlx5_core_warn(dev, "CQ moderation is not supported\n");

        return true;
}

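/* Read the driverinit value of the generic devlink "enable_eth"
 * parameter; if it cannot be read, treat the interface as disabled.
 */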
static bool is_eth_enabled(struct mlx5_core_dev *dev)
{
        union devlink_param_value val;
        int err;

        err = devlink_param_driverinit_value_get(priv_to_devlink(dev),
                                                 DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
                                                 &val);
        return err ? false : val.vbool;
}

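/* vDPA net devices are never exposed on the PF and require virtio net
 * queue objects, QP event mode and ethernet frame offload support.
 */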
bool mlx5_vnet_supported(struct mlx5_core_dev *dev)
{
        if (!IS_ENABLED(CONFIG_MLX5_VDPA_NET))
                return false;

        if (mlx5_core_is_pf(dev))
                return false;

        if (!(MLX5_CAP_GEN_64(dev, general_obj_types) &
              MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q))
                return false;

        if (!(MLX5_CAP_DEV_VDPA_EMULATION(dev, event_mode) &
              MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE))
                return false;

        if (!MLX5_CAP_DEV_VDPA_EMULATION(dev, eth_frame_offload_type))
                return false;

        return true;
}

static bool is_vnet_enabled(struct mlx5_core_dev *dev)
{
        union devlink_param_value val;
        int err;

        err = devlink_param_driverinit_value_get(priv_to_devlink(dev),
                                                 DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
                                                 &val);
        return err ? false : val.vbool;
}

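/* RDMA representors need the same embedded-switch setup as ethernet
 * representors and are not created while multi-port mode is enabled.
 */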
static bool is_ib_rep_supported(struct mlx5_core_dev *dev)
{
        if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
                return false;

        if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
                return false;

        if (!is_eth_rep_supported(dev))
                return false;

        if (!MLX5_ESWITCH_MANAGER(dev))
                return false;

        if (!is_mdev_switchdev_mode(dev))
                return false;

        if (mlx5_core_mp_enabled(dev))
                return false;

        return true;
}

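/* The multi-port IB (MPIB) device is created only on a multi-port
 * slave; the master port gets the regular RDMA device instead.
 */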
static bool is_mp_supported(struct mlx5_core_dev *dev)
{
        if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
                return false;

        if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
                return false;

        if (is_ib_rep_supported(dev))
                return false;

        if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
                return false;

        if (!mlx5_core_is_mp_slave(dev))
                return false;

        return true;
}

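/* Plain RDMA is the fallback IB personality: it applies only when
 * neither the representor nor the multi-port variant does.
 */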
bool mlx5_rdma_supported(struct mlx5_core_dev *dev)
{
        if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
                return false;

        if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
                return false;

        if (is_ib_rep_supported(dev))
                return false;

        if (is_mp_supported(dev))
                return false;

        return true;
}

static bool is_ib_enabled(struct mlx5_core_dev *dev)
{
        union devlink_param_value val;
        int err;

        err = devlink_param_driverinit_value_get(priv_to_devlink(dev),
                                                 DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
                                                 &val);
        return err ? false : val.vbool;
}

enum {
        MLX5_INTERFACE_PROTOCOL_ETH,
        MLX5_INTERFACE_PROTOCOL_ETH_REP,

        MLX5_INTERFACE_PROTOCOL_IB,
        MLX5_INTERFACE_PROTOCOL_IB_REP,
        MLX5_INTERFACE_PROTOCOL_MPIB,

        MLX5_INTERFACE_PROTOCOL_VNET,
};

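/* Registry of all auxiliary devices mlx5_core may spawn. The auxiliary
 * bus names devices "<module>.<name>.<id>", so for example the first
 * ethernet instance shows up as "mlx5_core.eth.0". .is_supported gates
 * creation on hardware capabilities; the optional .is_enabled adds a
 * devlink on/off switch on top.
 */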
static const struct mlx5_adev_device {
        const char *suffix;
        bool (*is_supported)(struct mlx5_core_dev *dev);
        bool (*is_enabled)(struct mlx5_core_dev *dev);
} mlx5_adev_devices[] = {
        [MLX5_INTERFACE_PROTOCOL_VNET] = { .suffix = "vnet",
                                           .is_supported = &mlx5_vnet_supported,
                                           .is_enabled = &is_vnet_enabled },
        [MLX5_INTERFACE_PROTOCOL_IB] = { .suffix = "rdma",
                                         .is_supported = &mlx5_rdma_supported,
                                         .is_enabled = &is_ib_enabled },
        [MLX5_INTERFACE_PROTOCOL_ETH] = { .suffix = "eth",
                                          .is_supported = &mlx5_eth_supported,
                                          .is_enabled = &is_eth_enabled },
        [MLX5_INTERFACE_PROTOCOL_ETH_REP] = { .suffix = "eth-rep",
                                              .is_supported = &is_eth_rep_supported },
        [MLX5_INTERFACE_PROTOCOL_IB_REP] = { .suffix = "rdma-rep",
                                             .is_supported = &is_ib_rep_supported },
        [MLX5_INTERFACE_PROTOCOL_MPIB] = { .suffix = "multiport",
                                           .is_supported = &is_mp_supported },
};

int mlx5_adev_idx_alloc(void)
{
        return ida_alloc(&mlx5_adev_ida, GFP_KERNEL);
}

void mlx5_adev_idx_free(int idx)
{
        ida_free(&mlx5_adev_ida, idx);
}

int mlx5_adev_init(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;

        priv->adev = kcalloc(ARRAY_SIZE(mlx5_adev_devices),
                             sizeof(struct mlx5_adev *), GFP_KERNEL);
        if (!priv->adev)
                return -ENOMEM;

        return 0;
}

void mlx5_adev_cleanup(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;

        kfree(priv->adev);
}

static void adev_release(struct device *dev)
{
        struct mlx5_adev *mlx5_adev =
                container_of(dev, struct mlx5_adev, adev.dev);
        struct mlx5_priv *priv = &mlx5_adev->mdev->priv;
        int idx = mlx5_adev->idx;

        kfree(mlx5_adev);
        priv->adev[idx] = NULL;
}

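/* Allocate and register one auxiliary device. Once auxiliary_device_init()
 * has succeeded, the memory is owned by the device core and must be
 * released through auxiliary_device_uninit(), which ends in adev_release().
 */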
static struct mlx5_adev *add_adev(struct mlx5_core_dev *dev, int idx)
{
        const char *suffix = mlx5_adev_devices[idx].suffix;
        struct auxiliary_device *adev;
        struct mlx5_adev *madev;
        int ret;

        madev = kzalloc(sizeof(*madev), GFP_KERNEL);
        if (!madev)
                return ERR_PTR(-ENOMEM);

        adev = &madev->adev;
        adev->id = dev->priv.adev_idx;
        adev->name = suffix;
        adev->dev.parent = dev->device;
        adev->dev.release = adev_release;
        madev->mdev = dev;
        madev->idx = idx;

        ret = auxiliary_device_init(adev);
        if (ret) {
                kfree(madev);
                return ERR_PTR(ret);
        }

        ret = auxiliary_device_add(adev);
        if (ret) {
                auxiliary_device_uninit(adev);
                return ERR_PTR(ret);
        }
        return madev;
}

static void del_adev(struct auxiliary_device *adev)
{
        auxiliary_device_delete(adev);
        auxiliary_device_uninit(adev);
}

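/* Create or resume all applicable auxiliary devices. Devices that do
 * not exist yet are created; devices kept alive by a previous
 * mlx5_detach_device() are resumed via their driver's resume() callback.
 */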
int mlx5_attach_device(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;
        struct auxiliary_device *adev;
        struct auxiliary_driver *adrv;
        int ret = 0, i;

        mutex_lock(&mlx5_intf_mutex);
        priv->flags &= ~MLX5_PRIV_FLAGS_DETACH;
        for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
                if (!priv->adev[i]) {
                        bool is_supported = false;

                        if (mlx5_adev_devices[i].is_enabled) {
                                bool enabled;

                                enabled = mlx5_adev_devices[i].is_enabled(dev);
                                if (!enabled)
                                        continue;
                        }

                        if (mlx5_adev_devices[i].is_supported)
                                is_supported = mlx5_adev_devices[i].is_supported(dev);

                        if (!is_supported)
                                continue;

                        priv->adev[i] = add_adev(dev, i);
                        if (IS_ERR(priv->adev[i])) {
                                ret = PTR_ERR(priv->adev[i]);
                                priv->adev[i] = NULL;
                        }
                } else {
                        adev = &priv->adev[i]->adev;

                        /* Note that the driver checked here is the
                         * auxiliary driver bound to the adev, not the PCI
                         * driver that mlx5_core_dev is bound to.
                         *
                         * Module unload can race with devlink reload here,
                         * but no extra lock is needed because we hold the
                         * global mlx5_intf_mutex.
                         */
                        if (!adev->dev.driver)
                                continue;
                        adrv = to_auxiliary_drv(adev->dev.driver);

                        if (adrv->resume)
                                ret = adrv->resume(adev);
                }
                if (ret) {
                        mlx5_core_warn(dev, "Device[%d] (%s) failed to load\n",
                                       i, mlx5_adev_devices[i].suffix);

                        break;
                }
        }
        mutex_unlock(&mlx5_intf_mutex);
        return ret;
}

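/* Quiesce the auxiliary devices, preferring suspend() over removal so a
 * later mlx5_attach_device() can resume them cheaply. Devices disabled
 * via devlink, unbound from their driver, or lacking a suspend()
 * callback are deleted instead.
 */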
void mlx5_detach_device(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;
        struct auxiliary_device *adev;
        struct auxiliary_driver *adrv;
        pm_message_t pm = {};
        int i;

        mutex_lock(&mlx5_intf_mutex);
        for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) {
                if (!priv->adev[i])
                        continue;

                if (mlx5_adev_devices[i].is_enabled) {
                        bool enabled;

                        enabled = mlx5_adev_devices[i].is_enabled(dev);
                        if (!enabled)
                                goto skip_suspend;
                }

                adev = &priv->adev[i]->adev;
                /* The auxiliary driver may have been unbound manually
                 * through sysfs.
                 */
                if (!adev->dev.driver)
                        goto skip_suspend;

                adrv = to_auxiliary_drv(adev->dev.driver);

                if (adrv->suspend) {
                        adrv->suspend(adev, pm);
                        continue;
                }

skip_suspend:
                del_adev(&priv->adev[i]->adev);
                priv->adev[i] = NULL;
        }
        priv->flags |= MLX5_PRIV_FLAGS_DETACH;
        mutex_unlock(&mlx5_intf_mutex);
}

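/* Allow all auxiliary device types on this core device and rescan; on
 * failure roll back through mlx5_unregister_device(), which forbids
 * them all again.
 */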
int mlx5_register_device(struct mlx5_core_dev *dev)
{
        int ret;

        mutex_lock(&mlx5_intf_mutex);
        dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
        ret = mlx5_rescan_drivers_locked(dev);
        mutex_unlock(&mlx5_intf_mutex);
        if (ret)
                mlx5_unregister_device(dev);

        return ret;
}

void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
        mutex_lock(&mlx5_intf_mutex);
        dev->priv.flags = MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
        mlx5_rescan_drivers_locked(dev);
        mutex_unlock(&mlx5_intf_mutex);
}

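/* Create every missing auxiliary device whose hardware support check
 * passes.
 */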
static int add_drivers(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;
        int i, ret = 0;

        for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
                bool is_supported = false;

                if (priv->adev[i])
                        continue;

                if (mlx5_adev_devices[i].is_supported)
                        is_supported = mlx5_adev_devices[i].is_supported(dev);

                if (!is_supported)
                        continue;

                priv->adev[i] = add_adev(dev, i);
                if (IS_ERR(priv->adev[i])) {
                        mlx5_core_warn(dev, "Device[%d] (%s) failed to load\n",
                                       i, mlx5_adev_devices[i].suffix);
                        /* Keep rescanning the remaining drivers and let the
                         * caller decide whether to release everything or to
                         * continue.
                         */
                        ret = PTR_ERR(priv->adev[i]);
                        priv->adev[i] = NULL;
                }
        }
        return ret;
}

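/* Delete auxiliary devices that were disabled via devlink, became
 * unsupported after reconfiguration, or all of them when
 * MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV is set.
 */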
static void delete_drivers(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;
        bool delete_all;
        int i;

        delete_all = priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;

        for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) {
                bool is_supported = false;

                if (!priv->adev[i])
                        continue;

                if (mlx5_adev_devices[i].is_enabled) {
                        bool enabled;

                        enabled = mlx5_adev_devices[i].is_enabled(dev);
                        if (!enabled)
                                goto del_adev;
                }

                if (mlx5_adev_devices[i].is_supported && !delete_all)
                        is_supported = mlx5_adev_devices[i].is_supported(dev);

                if (is_supported)
                        continue;

del_adev:
                del_adev(&priv->adev[i]->adev);
                priv->adev[i] = NULL;
        }
}

/* Re-evaluate which auxiliary drivers should be active after
 * mlx5_core_dev has been reconfigured: delete the ones that no longer
 * apply, then create the ones that newly do.
 */
int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;

        lockdep_assert_held(&mlx5_intf_mutex);
        if (priv->flags & MLX5_PRIV_FLAGS_DETACH)
                return 0;

        delete_drivers(dev);
        if (priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
                return 0;

        return add_drivers(dev);
}

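/* Two devices are instances of the same physical HCA iff both report
 * the same non-zero system image GUID.
 */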
bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev)
{
        u64 fsystem_guid, psystem_guid;

        fsystem_guid = mlx5_query_nic_system_image_guid(dev);
        psystem_guid = mlx5_query_nic_system_image_guid(peer_dev);

        return (fsystem_guid && psystem_guid && fsystem_guid == psystem_guid);
}

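/* Fold PCI domain, bus and slot into a single ID. The function number
 * is left out so that functions sharing a slot (e.g. the two PFs of a
 * dual-port device) compare equal.
 */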
static u32 mlx5_gen_pci_id(const struct mlx5_core_dev *dev)
{
        return (u32)((pci_domain_nr(dev->pdev->bus) << 16) |
                     (dev->pdev->bus->number << 8) |
                     PCI_SLOT(dev->pdev->devfn));
}

static int _next_phys_dev(struct mlx5_core_dev *mdev,
                          const struct mlx5_core_dev *curr)
{
        if (!mlx5_core_is_pf(mdev))
                return 0;

        if (mdev == curr)
                return 0;

        if (!mlx5_same_hw_devs(mdev, (struct mlx5_core_dev *)curr) &&
            mlx5_gen_pci_id(mdev) != mlx5_gen_pci_id(curr))
                return 0;

        return 1;
}

static void *pci_get_other_drvdata(struct device *this, struct device *other)
{
        if (this->driver != other->driver)
                return NULL;

        return pci_get_drvdata(to_pci_dev(other));
}

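/* bus_find_device() match callback: accept only other mlx5 PFs that are
 * LAG capable (vport group manager, lag_master, a sane num_lag_ports)
 * and that share either the system image GUID or the PCI slot with the
 * device we started from.
 */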
static int next_phys_dev_lag(struct device *dev, const void *data)
{
        struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data;

        mdev = pci_get_other_drvdata(this->device, dev);
        if (!mdev)
                return 0;

        if (!MLX5_CAP_GEN(mdev, vport_group_manager) ||
            !MLX5_CAP_GEN(mdev, lag_master) ||
            (MLX5_CAP_GEN(mdev, num_lag_ports) > MLX5_MAX_PORTS ||
             MLX5_CAP_GEN(mdev, num_lag_ports) <= 1))
                return 0;

        return _next_phys_dev(mdev, data);
}

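/* Walk the PCI bus for a device accepted by @match. bus_find_device()
 * returns with a reference held on the match; drop it straight away and
 * rely on the caller holding mlx5_intf_mutex to keep the device alive
 * while the returned drvdata is in use.
 */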
static struct mlx5_core_dev *mlx5_get_next_dev(struct mlx5_core_dev *dev,
                                               int (*match)(struct device *dev, const void *data))
{
        struct device *next;

        if (!mlx5_core_is_pf(dev))
                return NULL;

        next = bus_find_device(&pci_bus_type, NULL, dev, match);
        if (!next)
                return NULL;

        put_device(next);
        return pci_get_drvdata(to_pci_dev(next));
}

/* Must be called with intf_mutex held */
struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev)
{
        lockdep_assert_held(&mlx5_intf_mutex);
        return mlx5_get_next_dev(dev, &next_phys_dev_lag);
}

void mlx5_dev_list_lock(void)
{
        mutex_lock(&mlx5_intf_mutex);
}

void mlx5_dev_list_unlock(void)
{
        mutex_unlock(&mlx5_intf_mutex);
}

int mlx5_dev_list_trylock(void)
{
        return mutex_trylock(&mlx5_intf_mutex);
}