1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * vDPA bus.
4 *
5 * Copyright (c) 2020, Red Hat. All rights reserved.
6 * Author: Jason Wang <jasowang@redhat.com>
7 *
8 */
9
10 #include <linux/module.h>
11 #include <linux/idr.h>
12 #include <linux/slab.h>
13 #include <linux/vdpa.h>
14 #include <uapi/linux/vdpa.h>
15 #include <net/genetlink.h>
16 #include <linux/mod_devicetable.h>
17 #include <linux/virtio_ids.h>
18
/* List of registered vdpa management devices; protected by vdpa_dev_lock. */
static LIST_HEAD(mdev_head);
/* A global mutex that protects vdpa management device and device level operations. */
static DECLARE_RWSEM(vdpa_dev_lock);
/* Allocator for unique device indexes, used for default "vdpa%u" names. */
static DEFINE_IDA(vdpa_index_ida);
23
/*
 * vdpa_set_status - set the device status byte under the config lock
 * @vdev: vdpa device to operate on
 * @status: new status value
 *
 * Takes cf_lock for writing so the status change is serialized against
 * concurrent config-space accessors.
 */
void vdpa_set_status(struct vdpa_device *vdev, u8 status)
{
	down_write(&vdev->cf_lock);
	vdev->config->set_status(vdev, status);
	up_write(&vdev->cf_lock);
}
EXPORT_SYMBOL(vdpa_set_status);
31
32 static struct genl_family vdpa_nl_family;
33
vdpa_dev_probe(struct device * d)34 static int vdpa_dev_probe(struct device *d)
35 {
36 struct vdpa_device *vdev = dev_to_vdpa(d);
37 struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);
38 const struct vdpa_config_ops *ops = vdev->config;
39 u32 max_num, min_num = 1;
40 int ret = 0;
41
42 max_num = ops->get_vq_num_max(vdev);
43 if (ops->get_vq_num_min)
44 min_num = ops->get_vq_num_min(vdev);
45 if (max_num < min_num)
46 return -EINVAL;
47
48 if (drv && drv->probe)
49 ret = drv->probe(vdev);
50
51 return ret;
52 }
53
/*
 * Bus remove callback: forward to the bound vDPA bus driver's remove
 * hook, which is optional.
 */
static void vdpa_dev_remove(struct device *d)
{
	struct vdpa_device *vdev = dev_to_vdpa(d);
	struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);

	if (drv && drv->remove)
		drv->remove(vdev);
}
62
vdpa_dev_match(struct device * dev,struct device_driver * drv)63 static int vdpa_dev_match(struct device *dev, struct device_driver *drv)
64 {
65 struct vdpa_device *vdev = dev_to_vdpa(dev);
66
67 /* Check override first, and if set, only use the named driver */
68 if (vdev->driver_override)
69 return strcmp(vdev->driver_override, drv->name) == 0;
70
71 /* Currently devices must be supported by all vDPA bus drivers */
72 return 1;
73 }
74
/*
 * sysfs write handler for driver_override: driver_set_override() takes
 * care of allocation, locking and freeing of the old string.
 */
static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct vdpa_device *vdev = dev_to_vdpa(dev);
	int ret;

	ret = driver_set_override(dev, &vdev->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}
88
driver_override_show(struct device * dev,struct device_attribute * attr,char * buf)89 static ssize_t driver_override_show(struct device *dev,
90 struct device_attribute *attr, char *buf)
91 {
92 struct vdpa_device *vdev = dev_to_vdpa(dev);
93 ssize_t len;
94
95 device_lock(dev);
96 len = snprintf(buf, PAGE_SIZE, "%s\n", vdev->driver_override);
97 device_unlock(dev);
98
99 return len;
100 }
static DEVICE_ATTR_RW(driver_override);

/* Default sysfs attributes exposed for every vdpa device. */
static struct attribute *vdpa_dev_attrs[] = {
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group vdpa_dev_group = {
	.attrs = vdpa_dev_attrs,
};
__ATTRIBUTE_GROUPS(vdpa_dev);
112
/* The vDPA bus: matches devices to drivers and drives probe/remove. */
static struct bus_type vdpa_bus = {
	.name  = "vdpa",
	.dev_groups = vdpa_dev_groups,
	.match = vdpa_dev_match,
	.probe = vdpa_dev_probe,
	.remove = vdpa_dev_remove,
};
120
vdpa_release_dev(struct device * d)121 static void vdpa_release_dev(struct device *d)
122 {
123 struct vdpa_device *vdev = dev_to_vdpa(d);
124 const struct vdpa_config_ops *ops = vdev->config;
125
126 if (ops->free)
127 ops->free(vdev);
128
129 ida_simple_remove(&vdpa_index_ida, vdev->index);
130 kfree(vdev->driver_override);
131 kfree(vdev);
132 }
133
134 /**
 * __vdpa_alloc_device - allocate and initialize a vDPA device
 * This allows the driver to do some preparation after the device is
 * initialized but before it is registered.
138 * @parent: the parent device
139 * @config: the bus operations that is supported by this device
140 * @ngroups: number of groups supported by this device
141 * @nas: number of address spaces supported by this device
142 * @size: size of the parent structure that contains private data
143 * @name: name of the vdpa device; optional.
144 * @use_va: indicate whether virtual address must be used by this device
145 *
146 * Driver should use vdpa_alloc_device() wrapper macro instead of
147 * using this directly.
148 *
149 * Return: Returns an error when parent/config/dma_dev is not set or fail to get
150 * ida.
151 */
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
					const struct vdpa_config_ops *config,
					unsigned int ngroups, unsigned int nas,
					size_t size, const char *name,
					bool use_va)
{
	struct vdpa_device *vdev;
	int err = -EINVAL;

	if (!config)
		goto err;

	/* dma_map and dma_unmap must be provided (or omitted) together. */
	if (!!config->dma_map != !!config->dma_unmap)
		goto err;

	/* It should only work for the device that use on-chip IOMMU */
	if (use_va && !(config->dma_map || config->set_map))
		goto err;

	err = -ENOMEM;
	vdev = kzalloc(size, GFP_KERNEL);
	if (!vdev)
		goto err;

	/* On success the returned value is the newly allocated index. */
	err = ida_alloc(&vdpa_index_ida, GFP_KERNEL);
	if (err < 0)
		goto err_ida;

	vdev->dev.bus = &vdpa_bus;
	vdev->dev.parent = parent;
	vdev->dev.release = vdpa_release_dev;
	vdev->index = err;
	vdev->config = config;
	vdev->features_valid = false;
	vdev->use_va = use_va;
	vdev->ngroups = ngroups;
	vdev->nas = nas;

	if (name)
		err = dev_set_name(&vdev->dev, "%s", name);
	else
		err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index);
	if (err)
		goto err_name;

	init_rwsem(&vdev->cf_lock);
	device_initialize(&vdev->dev);

	return vdev;

err_name:
	/* Pair with ida_alloc() above instead of deprecated ida_simple_remove(). */
	ida_free(&vdpa_index_ida, vdev->index);
err_ida:
	kfree(vdev);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(__vdpa_alloc_device);
209 EXPORT_SYMBOL_GPL(__vdpa_alloc_device);
210
vdpa_name_match(struct device * dev,const void * data)211 static int vdpa_name_match(struct device *dev, const void *data)
212 {
213 struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
214
215 return (strcmp(dev_name(&vdev->dev), data) == 0);
216 }
217
/*
 * Add @vdev to the vDPA bus, refusing duplicate device names.
 * Caller must hold vdpa_dev_lock.
 */
static int __vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
{
	struct device *dev;

	vdev->nvqs = nvqs;

	lockdep_assert_held(&vdpa_dev_lock);
	/* Device names must be unique on the bus. */
	dev = bus_find_device(&vdpa_bus, NULL, dev_name(&vdev->dev), vdpa_name_match);
	if (dev) {
		/* Drop the reference taken by bus_find_device(). */
		put_device(dev);
		return -EEXIST;
	}
	return device_add(&vdev->dev);
}
232
233 /**
234 * _vdpa_register_device - register a vDPA device with vdpa lock held
 * Caller must have a successful call of vdpa_alloc_device() before.
236 * Caller must invoke this routine in the management device dev_add()
237 * callback after setting up valid mgmtdev for this vdpa device.
238 * @vdev: the vdpa device to be registered to vDPA bus
239 * @nvqs: number of virtqueues supported by this device
240 *
241 * Return: Returns an error when fail to add device to vDPA bus
242 */
int _vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
{
	/* This path is reserved for devices created by a management device. */
	if (!vdev->mdev)
		return -EINVAL;

	return __vdpa_register_device(vdev, nvqs);
}
EXPORT_SYMBOL_GPL(_vdpa_register_device);
250 EXPORT_SYMBOL_GPL(_vdpa_register_device);
251
252 /**
253 * vdpa_register_device - register a vDPA device
 * Callers must have a successful call of vdpa_alloc_device() before.
255 * @vdev: the vdpa device to be registered to vDPA bus
256 * @nvqs: number of virtqueues supported by this device
257 *
258 * Return: Returns an error when fail to add to vDPA bus
259 */
int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
{
	int err;

	/* Serialize against management-device and other device operations. */
	down_write(&vdpa_dev_lock);
	err = __vdpa_register_device(vdev, nvqs);
	up_write(&vdpa_dev_lock);
	return err;
}
EXPORT_SYMBOL_GPL(vdpa_register_device);
269 EXPORT_SYMBOL_GPL(vdpa_register_device);
270
271 /**
272 * _vdpa_unregister_device - unregister a vDPA device
273 * Caller must invoke this routine as part of management device dev_del()
274 * callback.
 * @vdev: the vdpa device to be unregistered from vDPA bus
276 */
void _vdpa_unregister_device(struct vdpa_device *vdev)
{
	lockdep_assert_held(&vdpa_dev_lock);
	/* Only management-device-created devices may use this path. */
	WARN_ON(!vdev->mdev);
	device_unregister(&vdev->dev);
}
EXPORT_SYMBOL_GPL(_vdpa_unregister_device);
283 EXPORT_SYMBOL_GPL(_vdpa_unregister_device);
284
285 /**
286 * vdpa_unregister_device - unregister a vDPA device
 * @vdev: the vdpa device to be unregistered from vDPA bus
288 */
void vdpa_unregister_device(struct vdpa_device *vdev)
{
	/* Serialize against management-device and other device operations. */
	down_write(&vdpa_dev_lock);
	device_unregister(&vdev->dev);
	up_write(&vdpa_dev_lock);
}
EXPORT_SYMBOL_GPL(vdpa_unregister_device);
295 EXPORT_SYMBOL_GPL(vdpa_unregister_device);
296
297 /**
298 * __vdpa_register_driver - register a vDPA device driver
299 * @drv: the vdpa device driver to be registered
300 * @owner: module owner of the driver
301 *
302 * Return: Returns an err when fail to do the registration
303 */
int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner)
{
	/* Attach the driver to the vDPA bus before handing it to the core. */
	drv->driver.bus = &vdpa_bus;
	drv->driver.owner = owner;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(__vdpa_register_driver);
311 EXPORT_SYMBOL_GPL(__vdpa_register_driver);
312
313 /**
314 * vdpa_unregister_driver - unregister a vDPA device driver
315 * @drv: the vdpa device driver to be unregistered
316 */
void vdpa_unregister_driver(struct vdpa_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(vdpa_unregister_driver);
321 EXPORT_SYMBOL_GPL(vdpa_unregister_driver);
322
323 /**
324 * vdpa_mgmtdev_register - register a vdpa management device
325 *
326 * @mdev: Pointer to vdpa management device
 * vdpa_mgmtdev_register() registers a vdpa management device which supports
328 * vdpa device management.
329 * Return: Returns 0 on success or failure when required callback ops are not
330 * initialized.
331 */
int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev)
{
	/* dev_add/dev_del ops are mandatory for device lifecycle management. */
	if (!mdev->device || !mdev->ops || !mdev->ops->dev_add || !mdev->ops->dev_del)
		return -EINVAL;

	INIT_LIST_HEAD(&mdev->list);
	down_write(&vdpa_dev_lock);
	list_add_tail(&mdev->list, &mdev_head);
	up_write(&vdpa_dev_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_register);
343 EXPORT_SYMBOL_GPL(vdpa_mgmtdev_register);
344
/*
 * Bus iterator callback: delete @dev through its owning management
 * device when that owner is the management device passed in @data.
 */
static int vdpa_match_remove(struct device *dev, void *data)
{
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
	struct vdpa_mgmt_dev *mdev = vdev->mdev;

	if (mdev == data)
		mdev->ops->dev_del(mdev, vdev);
	return 0;
}
354
/**
 * vdpa_mgmtdev_unregister - unregister a vdpa management device
 * @mdev: Pointer to the vdpa management device
 *
 * Also deletes every vdpa device on the bus that was created by @mdev.
 */
void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev)
{
	down_write(&vdpa_dev_lock);

	list_del(&mdev->list);

	/* Filter out all the entries belong to this management device and delete it. */
	bus_for_each_dev(&vdpa_bus, NULL, mdev, vdpa_match_remove);

	up_write(&vdpa_dev_lock);
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister);
366 EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister);
367
/*
 * Read device config space without taking cf_lock; callers are expected
 * to hold cf_lock themselves (see vdpa_get_config()).
 */
static void vdpa_get_config_unlocked(struct vdpa_device *vdev,
				     unsigned int offset,
				     void *buf, unsigned int len)
{
	const struct vdpa_config_ops *ops = vdev->config;

	/*
	 * Config accesses aren't supposed to trigger before features are set.
	 * If it does happen we assume a legacy guest.
	 */
	if (!vdev->features_valid)
		vdpa_set_features_unlocked(vdev, 0);
	ops->get_config(vdev, offset, buf, len);
}
382
383 /**
384 * vdpa_get_config - Get one or more device configuration fields.
385 * @vdev: vdpa device to operate on
386 * @offset: starting byte offset of the field
387 * @buf: buffer pointer to read to
388 * @len: length of the configuration fields in bytes
389 */
void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
		     void *buf, unsigned int len)
{
	/* Readers may run concurrently; writers are excluded by cf_lock. */
	down_read(&vdev->cf_lock);
	vdpa_get_config_unlocked(vdev, offset, buf, len);
	up_read(&vdev->cf_lock);
}
EXPORT_SYMBOL_GPL(vdpa_get_config);
397 EXPORT_SYMBOL_GPL(vdpa_get_config);
398
399 /**
400 * vdpa_set_config - Set one or more device configuration fields.
401 * @vdev: vdpa device to operate on
402 * @offset: starting byte offset of the field
403 * @buf: buffer pointer to read from
404 * @length: length of the configuration fields in bytes
405 */
void vdpa_set_config(struct vdpa_device *vdev, unsigned int offset,
		     const void *buf, unsigned int length)
{
	/* Exclusive lock: config writes must not race with readers. */
	down_write(&vdev->cf_lock);
	vdev->config->set_config(vdev, offset, buf, length);
	up_write(&vdev->cf_lock);
}
EXPORT_SYMBOL_GPL(vdpa_set_config);
413 EXPORT_SYMBOL_GPL(vdpa_set_config);
414
mgmtdev_handle_match(const struct vdpa_mgmt_dev * mdev,const char * busname,const char * devname)415 static bool mgmtdev_handle_match(const struct vdpa_mgmt_dev *mdev,
416 const char *busname, const char *devname)
417 {
418 /* Bus name is optional for simulated management device, so ignore the
419 * device with bus if bus attribute is provided.
420 */
421 if ((busname && !mdev->device->bus) || (!busname && mdev->device->bus))
422 return false;
423
424 if (!busname && strcmp(dev_name(mdev->device), devname) == 0)
425 return true;
426
427 if (busname && (strcmp(mdev->device->bus->name, busname) == 0) &&
428 (strcmp(dev_name(mdev->device), devname) == 0))
429 return true;
430
431 return false;
432 }
433
/*
 * Resolve a management device from the VDPA_ATTR_MGMTDEV_{BUS_NAME,
 * DEV_NAME} netlink attributes. Walks mdev_head, so the caller must
 * hold vdpa_dev_lock.
 */
static struct vdpa_mgmt_dev *vdpa_mgmtdev_get_from_attr(struct nlattr **attrs)
{
	struct vdpa_mgmt_dev *mdev;
	const char *busname = NULL;
	const char *devname;

	/* The device name is mandatory; the bus name is optional. */
	if (!attrs[VDPA_ATTR_MGMTDEV_DEV_NAME])
		return ERR_PTR(-EINVAL);
	devname = nla_data(attrs[VDPA_ATTR_MGMTDEV_DEV_NAME]);
	if (attrs[VDPA_ATTR_MGMTDEV_BUS_NAME])
		busname = nla_data(attrs[VDPA_ATTR_MGMTDEV_BUS_NAME]);

	list_for_each_entry(mdev, &mdev_head, list) {
		if (mgmtdev_handle_match(mdev, busname, devname))
			return mdev;
	}
	return ERR_PTR(-ENODEV);
}
452
vdpa_nl_mgmtdev_handle_fill(struct sk_buff * msg,const struct vdpa_mgmt_dev * mdev)453 static int vdpa_nl_mgmtdev_handle_fill(struct sk_buff *msg, const struct vdpa_mgmt_dev *mdev)
454 {
455 if (mdev->device->bus &&
456 nla_put_string(msg, VDPA_ATTR_MGMTDEV_BUS_NAME, mdev->device->bus->name))
457 return -EMSGSIZE;
458 if (nla_put_string(msg, VDPA_ATTR_MGMTDEV_DEV_NAME, dev_name(mdev->device)))
459 return -EMSGSIZE;
460 return 0;
461 }
462
/*
 * Fill one VDPA_CMD_MGMTDEV_NEW message describing @mdev: its handle,
 * the bitmap of supported virtio device classes, the maximum number of
 * virtqueues and the supported feature bits.
 */
static int vdpa_mgmtdev_fill(const struct vdpa_mgmt_dev *mdev, struct sk_buff *msg,
			     u32 portid, u32 seq, int flags)
{
	u64 classes = 0;
	void *hdr;
	int i;
	int err;

	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_MGMTDEV_NEW);
	if (!hdr)
		return -EMSGSIZE;

	err = vdpa_nl_mgmtdev_handle_fill(msg, mdev);
	if (err)
		goto msg_err;

	/* Fold the id table into a bitmap of supported virtio classes. */
	for (i = 0; mdev->id_table[i].device; i++) {
		if (mdev->id_table[i].device <= 63)
			classes |= BIT_ULL(mdev->id_table[i].device);
	}

	err = -EMSGSIZE;
	if (nla_put_u64_64bit(msg, VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES,
			      classes, VDPA_ATTR_UNSPEC))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_MGMTDEV_MAX_VQS,
			mdev->max_supported_vqs))
		goto msg_err;
	if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_SUPPORTED_FEATURES,
			      mdev->supported_features, VDPA_ATTR_PAD))
		goto msg_err;

	genlmsg_end(msg, hdr);
	return 0;

msg_err:
	genlmsg_cancel(msg, hdr);
	return err;
}
507
/*
 * VDPA_CMD_MGMTDEV_GET doit handler: look up one management device by
 * its (bus name, device name) handle and reply with its description.
 */
static int vdpa_nl_cmd_mgmtdev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_mgmt_dev *mdev;
	struct sk_buff *msg;
	int err;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	down_read(&vdpa_dev_lock);
	mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
	if (IS_ERR(mdev)) {
		up_read(&vdpa_dev_lock);
		NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified mgmt device");
		err = PTR_ERR(mdev);
		goto out;
	}

	err = vdpa_mgmtdev_fill(mdev, msg, info->snd_portid, info->snd_seq, 0);
	up_read(&vdpa_dev_lock);
	if (err)
		goto out;
	/* genlmsg_reply() consumes msg, so no nlmsg_free() on this path. */
	err = genlmsg_reply(msg, info);
	return err;

out:
	nlmsg_free(msg);
	return err;
}
538
/*
 * VDPA_CMD_MGMTDEV_GET dumpit handler: walk mdev_head and emit one
 * message per management device, resuming from cb->args[0].
 */
static int
vdpa_nl_cmd_mgmtdev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct vdpa_mgmt_dev *mdev;
	int start = cb->args[0];
	int idx = 0;
	int err;

	down_read(&vdpa_dev_lock);
	list_for_each_entry(mdev, &mdev_head, list) {
		/* Skip entries already sent in a previous dump chunk. */
		if (idx < start) {
			idx++;
			continue;
		}
		err = vdpa_mgmtdev_fill(mdev, msg, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, NLM_F_MULTI);
		if (err)
			goto out;
		idx++;
	}
out:
	up_read(&vdpa_dev_lock);
	/* Record the resume point for the next dump invocation. */
	cb->args[0] = idx;
	return msg->len;
}
564
/* Net config attributes whose configuration requires CAP_NET_ADMIN. */
#define VDPA_DEV_NET_ATTRS_MASK (BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR) | \
				 BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU) | \
				 BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP))
568
/*
 * VDPA_CMD_DEV_NEW doit handler: parse optional net config attributes,
 * verify capability and management-device support for them, then ask
 * the management device to create a vdpa device named
 * VDPA_ATTR_DEV_NAME.
 */
static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_dev_set_config config = {};
	struct nlattr **nl_attrs = info->attrs;
	struct vdpa_mgmt_dev *mdev;
	const u8 *macaddr;
	const char *name;
	int err = 0;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;

	name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);

	/* Collect the provided attributes; config.mask records which. */
	if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]) {
		macaddr = nla_data(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]);
		memcpy(config.net.mac, macaddr, sizeof(config.net.mac));
		config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR);
	}
	if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]) {
		config.net.mtu =
			nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]);
		config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU);
	}
	if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]) {
		config.net.max_vq_pairs =
			nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]);
		if (!config.net.max_vq_pairs) {
			NL_SET_ERR_MSG_MOD(info->extack,
					   "At least one pair of VQs is required");
			return -EINVAL;
		}
		config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
	}

	/* Skip checking capability if user didn't prefer to configure any
	 * device networking attributes. It is likely that user might have used
	 * a device specific method to configure such attributes or using device
	 * default attributes.
	 */
	if ((config.mask & VDPA_DEV_NET_ATTRS_MASK) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	down_write(&vdpa_dev_lock);
	mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
	if (IS_ERR(mdev)) {
		NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified management device");
		err = PTR_ERR(mdev);
		goto err;
	}
	/* Every requested attribute must be supported by the mgmt device. */
	if ((config.mask & mdev->config_attr_mask) != config.mask) {
		NL_SET_ERR_MSG_MOD(info->extack,
				   "All provided attributes are not supported");
		err = -EOPNOTSUPP;
		goto err;
	}

	err = mdev->ops->dev_add(mdev, name, &config);
err:
	up_write(&vdpa_dev_lock);
	return err;
}
632
/*
 * VDPA_CMD_DEV_DEL doit handler: delete a user-created vdpa device
 * through its owning management device.
 */
static int vdpa_nl_cmd_dev_del_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_mgmt_dev *mdev;
	struct vdpa_device *vdev;
	struct device *dev;
	const char *name;
	int err = 0;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;
	name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);

	down_write(&vdpa_dev_lock);
	dev = bus_find_device(&vdpa_bus, NULL, name, vdpa_name_match);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
		err = -ENODEV;
		goto dev_err;
	}
	vdev = container_of(dev, struct vdpa_device, dev);
	/* Devices without a management device were not created by the user. */
	if (!vdev->mdev) {
		NL_SET_ERR_MSG_MOD(info->extack, "Only user created device can be deleted by user");
		err = -EINVAL;
		goto mdev_err;
	}
	mdev = vdev->mdev;
	mdev->ops->dev_del(mdev, vdev);
mdev_err:
	/* Drop the reference taken by bus_find_device(). */
	put_device(dev);
dev_err:
	up_write(&vdpa_dev_lock);
	return err;
}
666
/*
 * Fill one VDPA_CMD_DEV_NEW message describing @vdev: its management
 * device handle, device/vendor ids and virtqueue count/size limits.
 */
static int
vdpa_dev_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
	      int flags, struct netlink_ext_ack *extack)
{
	u16 max_vq_size;
	u16 min_vq_size = 1;
	u32 device_id;
	u32 vendor_id;
	void *hdr;
	int err;

	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_DEV_NEW);
	if (!hdr)
		return -EMSGSIZE;

	err = vdpa_nl_mgmtdev_handle_fill(msg, vdev->mdev);
	if (err)
		goto msg_err;

	device_id = vdev->config->get_device_id(vdev);
	vendor_id = vdev->config->get_vendor_id(vdev);
	max_vq_size = vdev->config->get_vq_num_max(vdev);
	/* get_vq_num_min is optional; 1 is the implied lower bound. */
	if (vdev->config->get_vq_num_min)
		min_vq_size = vdev->config->get_vq_num_min(vdev);

	err = -EMSGSIZE;
	if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev)))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_VENDOR_ID, vendor_id))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_MAX_VQS, vdev->nvqs))
		goto msg_err;
	if (nla_put_u16(msg, VDPA_ATTR_DEV_MAX_VQ_SIZE, max_vq_size))
		goto msg_err;
	if (nla_put_u16(msg, VDPA_ATTR_DEV_MIN_VQ_SIZE, min_vq_size))
		goto msg_err;

	genlmsg_end(msg, hdr);
	return 0;

msg_err:
	genlmsg_cancel(msg, hdr);
	return err;
}
713
/*
 * VDPA_CMD_DEV_GET doit handler: describe one managed vdpa device found
 * by name.
 */
static int vdpa_nl_cmd_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_device *vdev;
	struct sk_buff *msg;
	const char *devname;
	struct device *dev;
	int err;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;
	devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	down_read(&vdpa_dev_lock);
	dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
		err = -ENODEV;
		goto err;
	}
	vdev = container_of(dev, struct vdpa_device, dev);
	/* Only devices with a management device can be described. */
	if (!vdev->mdev) {
		err = -EINVAL;
		goto mdev_err;
	}
	err = vdpa_dev_fill(vdev, msg, info->snd_portid, info->snd_seq, 0, info->extack);
	if (err)
		goto mdev_err;

	/* genlmsg_reply() consumes msg; drop the bus_find_device() ref. */
	err = genlmsg_reply(msg, info);
	put_device(dev);
	up_read(&vdpa_dev_lock);
	return err;

mdev_err:
	put_device(dev);
err:
	up_read(&vdpa_dev_lock);
	nlmsg_free(msg);
	return err;
}
757
/* Cursor state shared by the bus_for_each_dev() dump callbacks. */
struct vdpa_dev_dump_info {
	struct sk_buff *msg;		/* dump message being filled */
	struct netlink_callback *cb;	/* netlink dump context */
	int start_idx;			/* first index to emit (resume point) */
	int idx;			/* current device index */
};
764
vdpa_dev_dump(struct device * dev,void * data)765 static int vdpa_dev_dump(struct device *dev, void *data)
766 {
767 struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
768 struct vdpa_dev_dump_info *info = data;
769 int err;
770
771 if (!vdev->mdev)
772 return 0;
773 if (info->idx < info->start_idx) {
774 info->idx++;
775 return 0;
776 }
777 err = vdpa_dev_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid,
778 info->cb->nlh->nlmsg_seq, NLM_F_MULTI, info->cb->extack);
779 if (err)
780 return err;
781
782 info->idx++;
783 return 0;
784 }
785
/*
 * VDPA_CMD_DEV_GET dumpit handler: iterate the bus with vdpa_dev_dump(),
 * resuming from and updating cb->args[0].
 */
static int vdpa_nl_cmd_dev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct vdpa_dev_dump_info info;

	info.msg = msg;
	info.cb = cb;
	info.start_idx = cb->args[0];
	info.idx = 0;

	down_read(&vdpa_dev_lock);
	bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_dump);
	up_read(&vdpa_dev_lock);
	cb->args[0] = info.idx;
	return msg->len;
}
801
/*
 * Report max_virtqueue_pairs, but only when the MQ feature was
 * negotiated — the field is not meaningful otherwise.
 */
static int vdpa_dev_net_mq_config_fill(struct vdpa_device *vdev,
				       struct sk_buff *msg, u64 features,
				       const struct virtio_net_config *config)
{
	u16 max_vqp;

	if (!(features & BIT_ULL(VIRTIO_NET_F_MQ)))
		return 0;

	max_vqp = le16_to_cpu(config->max_virtqueue_pairs);
	return nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, max_vqp);
}
814
/*
 * Fill net-class config attributes (mac, status, mtu, negotiated
 * features, max vq pairs). Caller holds cf_lock, hence the _unlocked
 * config read.
 */
static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *msg)
{
	struct virtio_net_config config = {};
	u64 features;
	u16 val_u16;

	vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config));

	if (nla_put(msg, VDPA_ATTR_DEV_NET_CFG_MACADDR, sizeof(config.mac),
		    config.mac))
		return -EMSGSIZE;

	val_u16 = le16_to_cpu(config.status);
	if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_STATUS, val_u16))
		return -EMSGSIZE;

	val_u16 = le16_to_cpu(config.mtu);
	if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MTU, val_u16))
		return -EMSGSIZE;

	features = vdev->config->get_driver_features(vdev);
	if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES, features,
			      VDPA_ATTR_PAD))
		return -EMSGSIZE;

	return vdpa_dev_net_mq_config_fill(vdev, msg, features, &config);
}
842
/*
 * Fill one VDPA_CMD_DEV_CONFIG_GET message for @vdev. Requires that
 * feature negotiation has completed (FEATURES_OK), otherwise returns
 * -EAGAIN. Holds cf_lock across all config reads for a consistent
 * snapshot.
 */
static int
vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
		     int flags, struct netlink_ext_ack *extack)
{
	u32 device_id;
	void *hdr;
	u8 status;
	int err;

	down_read(&vdev->cf_lock);
	status = vdev->config->get_status(vdev);
	if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
		NL_SET_ERR_MSG_MOD(extack, "Features negotiation not completed");
		err = -EAGAIN;
		goto out;
	}

	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
			  VDPA_CMD_DEV_CONFIG_GET);
	if (!hdr) {
		err = -EMSGSIZE;
		goto out;
	}

	if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) {
		err = -EMSGSIZE;
		goto msg_err;
	}

	device_id = vdev->config->get_device_id(vdev);
	if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) {
		err = -EMSGSIZE;
		goto msg_err;
	}

	/* Only net-class devices have per-class config reporting so far. */
	switch (device_id) {
	case VIRTIO_ID_NET:
		err = vdpa_dev_net_config_fill(vdev, msg);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	if (err)
		goto msg_err;

	up_read(&vdev->cf_lock);
	genlmsg_end(msg, hdr);
	return 0;

msg_err:
	genlmsg_cancel(msg, hdr);
out:
	up_read(&vdev->cf_lock);
	return err;
}
899
/*
 * Fill one vendor-stats record for virtqueue @index: max vq pairs,
 * negotiated features, queue index and the driver's vendor counters.
 * Caller holds cf_lock (see vendor_stats_fill()).
 */
static int vdpa_fill_stats_rec(struct vdpa_device *vdev, struct sk_buff *msg,
			       struct genl_info *info, u32 index)
{
	struct virtio_net_config config = {};
	u64 features;
	u16 max_vqp;
	u8 status;

	status = vdev->config->get_status(vdev);
	if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
		NL_SET_ERR_MSG_MOD(info->extack, "feature negotiation not complete");
		return -EAGAIN;
	}

	vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config));

	max_vqp = le16_to_cpu(config.max_virtqueue_pairs);
	if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, max_vqp))
		return -EMSGSIZE;

	features = vdev->config->get_driver_features(vdev);
	if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES,
			      features, VDPA_ATTR_PAD))
		return -EMSGSIZE;

	if (nla_put_u32(msg, VDPA_ATTR_DEV_QUEUE_INDEX, index))
		return -EMSGSIZE;

	/* Delegate the per-queue counters to the parent driver. */
	return vdev->config->get_vendor_vq_stats(vdev, index, msg, info->extack);
}
934
/*
 * Emit vendor virtqueue stats for queue @index under cf_lock. Fails
 * with -EOPNOTSUPP when the parent driver does not implement
 * get_vendor_vq_stats.
 */
static int vendor_stats_fill(struct vdpa_device *vdev, struct sk_buff *msg,
			     struct genl_info *info, u32 index)
{
	int err;

	down_read(&vdev->cf_lock);
	if (!vdev->config->get_vendor_vq_stats) {
		err = -EOPNOTSUPP;
		goto out;
	}

	err = vdpa_fill_stats_rec(vdev, msg, info, index);
out:
	up_read(&vdev->cf_lock);
	return err;
}
951
/*
 * Fill one VDPA_CMD_DEV_VSTATS_GET message: device name, device id and
 * (for net devices) the vendor stats of virtqueue @index.
 */
static int vdpa_dev_vendor_stats_fill(struct vdpa_device *vdev,
				      struct sk_buff *msg,
				      struct genl_info *info, u32 index)
{
	u32 device_id;
	void *hdr;
	int err;
	u32 portid = info->snd_portid;
	u32 seq = info->snd_seq;
	u32 flags = 0;

	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
			  VDPA_CMD_DEV_VSTATS_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) {
		err = -EMSGSIZE;
		goto undo_msg;
	}

	device_id = vdev->config->get_device_id(vdev);
	if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) {
		err = -EMSGSIZE;
		goto undo_msg;
	}

	switch (device_id) {
	case VIRTIO_ID_NET:
		if (index > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX) {
			NL_SET_ERR_MSG_MOD(info->extack, "queue index excceeds max value");
			err = -ERANGE;
			break;
		}

		err = vendor_stats_fill(vdev, msg, info, index);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	/*
	 * NOTE(review): a stats-filling error still ends (rather than
	 * cancels) the message here; only name/id put errors cancel it.
	 * Looks intentional since err is propagated anyway — confirm.
	 */
	genlmsg_end(msg, hdr);

	return err;

undo_msg:
	genlmsg_cancel(msg, hdr);
	return err;
}
1001
/*
 * VDPA_CMD_DEV_CONFIG_GET doit handler: report the config space of one
 * managed vdpa device found by name.
 */
static int vdpa_nl_cmd_dev_config_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_device *vdev;
	struct sk_buff *msg;
	const char *devname;
	struct device *dev;
	int err;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;
	devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	down_read(&vdpa_dev_lock);
	dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
		err = -ENODEV;
		goto dev_err;
	}
	vdev = container_of(dev, struct vdpa_device, dev);
	if (!vdev->mdev) {
		NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device");
		err = -EINVAL;
		goto mdev_err;
	}
	err = vdpa_dev_config_fill(vdev, msg, info->snd_portid, info->snd_seq,
				   0, info->extack);
	if (!err)
		err = genlmsg_reply(msg, info);

mdev_err:
	/* Drop the reference taken by bus_find_device(). */
	put_device(dev);
dev_err:
	up_read(&vdpa_dev_lock);
	/* On success genlmsg_reply() consumed msg; free it otherwise. */
	if (err)
		nlmsg_free(msg);
	return err;
}
1043
vdpa_dev_config_dump(struct device * dev,void * data)1044 static int vdpa_dev_config_dump(struct device *dev, void *data)
1045 {
1046 struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
1047 struct vdpa_dev_dump_info *info = data;
1048 int err;
1049
1050 if (!vdev->mdev)
1051 return 0;
1052 if (info->idx < info->start_idx) {
1053 info->idx++;
1054 return 0;
1055 }
1056 err = vdpa_dev_config_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid,
1057 info->cb->nlh->nlmsg_seq, NLM_F_MULTI,
1058 info->cb->extack);
1059 if (err)
1060 return err;
1061
1062 info->idx++;
1063 return 0;
1064 }
1065
1066 static int
vdpa_nl_cmd_dev_config_get_dumpit(struct sk_buff * msg,struct netlink_callback * cb)1067 vdpa_nl_cmd_dev_config_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
1068 {
1069 struct vdpa_dev_dump_info info;
1070
1071 info.msg = msg;
1072 info.cb = cb;
1073 info.start_idx = cb->args[0];
1074 info.idx = 0;
1075
1076 down_read(&vdpa_dev_lock);
1077 bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_config_dump);
1078 up_read(&vdpa_dev_lock);
1079 cb->args[0] = info.idx;
1080 return msg->len;
1081 }
1082
/* Handle VDPA_CMD_DEV_VSTATS_GET: reply with vendor statistics for one
 * queue (VDPA_ATTR_DEV_QUEUE_INDEX) of the vdpa device named by
 * VDPA_ATTR_DEV_NAME.
 *
 * Returns 0 on success or a negative errno (-EINVAL on missing attribute
 * or unmanaged device, -ENOMEM, -ENODEV, or a fill/reply error).
 */
static int vdpa_nl_cmd_dev_stats_get_doit(struct sk_buff *skb,
					  struct genl_info *info)
{
	struct vdpa_device *vdev;
	struct sk_buff *msg;
	const char *devname;
	struct device *dev;
	u32 index;
	int err;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;

	if (!info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX])
		return -EINVAL;

	devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	/* NOTE(review): this op sets GENL_DONT_VALIDATE_STRICT and the policy
	 * table has no entry for VDPA_ATTR_DEV_QUEUE_INDEX, so a short
	 * attribute may not be length-checked before nla_get_u32() — verify
	 * the policy covers it.
	 */
	index = nla_get_u32(info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX]);
	/* Hold the global device lock so the device cannot be unregistered
	 * while its stats are being queried.
	 */
	down_read(&vdpa_dev_lock);
	dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
		err = -ENODEV;
		goto dev_err;
	}
	vdev = container_of(dev, struct vdpa_device, dev);
	if (!vdev->mdev) {
		NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device");
		err = -EINVAL;
		goto mdev_err;
	}
	err = vdpa_dev_vendor_stats_fill(vdev, msg, info, index);
	if (err)
		goto mdev_err;

	/* genlmsg_reply() consumes msg; the success path must not free it,
	 * hence the separate unwind below instead of falling through.
	 */
	err = genlmsg_reply(msg, info);

	put_device(dev);
	up_read(&vdpa_dev_lock);

	return err;

mdev_err:
	put_device(dev);
dev_err:
	nlmsg_free(msg);
	up_read(&vdpa_dev_lock);
	return err;
}
1136
1137 static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = {
1138 [VDPA_ATTR_MGMTDEV_BUS_NAME] = { .type = NLA_NUL_STRING },
1139 [VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING },
1140 [VDPA_ATTR_DEV_NAME] = { .type = NLA_STRING },
1141 [VDPA_ATTR_DEV_NET_CFG_MACADDR] = NLA_POLICY_ETH_ADDR,
1142 /* virtio spec 1.1 section 5.1.4.1 for valid MTU range */
1143 [VDPA_ATTR_DEV_NET_CFG_MTU] = NLA_POLICY_MIN(NLA_U16, 68),
1144 };
1145
/* Generic netlink operations of the vdpa family. All ops opt out of strict
 * attribute/dump validation (GENL_DONT_VALIDATE_STRICT |
 * GENL_DONT_VALIDATE_DUMP); state-changing and stats ops additionally
 * require CAP_NET_ADMIN via GENL_ADMIN_PERM.
 */
static const struct genl_ops vdpa_nl_ops[] = {
	{
		/* Query management devices (single or dump). */
		.cmd = VDPA_CMD_MGMTDEV_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_mgmtdev_get_doit,
		.dumpit = vdpa_nl_cmd_mgmtdev_get_dumpit,
	},
	{
		/* Create a vdpa device on a management device. */
		.cmd = VDPA_CMD_DEV_NEW,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_dev_add_set_doit,
		.flags = GENL_ADMIN_PERM,
	},
	{
		/* Delete a vdpa device. */
		.cmd = VDPA_CMD_DEV_DEL,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_dev_del_set_doit,
		.flags = GENL_ADMIN_PERM,
	},
	{
		/* Query vdpa devices (single or dump). */
		.cmd = VDPA_CMD_DEV_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_dev_get_doit,
		.dumpit = vdpa_nl_cmd_dev_get_dumpit,
	},
	{
		/* Query device config space (single or dump). */
		.cmd = VDPA_CMD_DEV_CONFIG_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_dev_config_get_doit,
		.dumpit = vdpa_nl_cmd_dev_config_get_dumpit,
	},
	{
		/* Query per-queue vendor statistics. */
		.cmd = VDPA_CMD_DEV_VSTATS_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_dev_stats_get_doit,
		.flags = GENL_ADMIN_PERM,
	},
};
1184
/* The vdpa generic netlink family; registered in vdpa_init(). Not
 * network-namespace aware (netnsok = false).
 */
static struct genl_family vdpa_nl_family __ro_after_init = {
	.name = VDPA_GENL_NAME,
	.version = VDPA_GENL_VERSION,
	.maxattr = VDPA_ATTR_MAX,
	.policy = vdpa_nl_policy,
	.netnsok = false,
	.module = THIS_MODULE,
	.ops = vdpa_nl_ops,
	.n_ops = ARRAY_SIZE(vdpa_nl_ops),
};
1195
vdpa_init(void)1196 static int vdpa_init(void)
1197 {
1198 int err;
1199
1200 err = bus_register(&vdpa_bus);
1201 if (err)
1202 return err;
1203 err = genl_register_family(&vdpa_nl_family);
1204 if (err)
1205 goto err;
1206 return 0;
1207
1208 err:
1209 bus_unregister(&vdpa_bus);
1210 return err;
1211 }
1212
/* Module exit: tear down in reverse registration order — netlink family
 * first so no new requests arrive, then the bus, then release any indices
 * still held in the IDA.
 */
static void __exit vdpa_exit(void)
{
	genl_unregister_family(&vdpa_nl_family);
	bus_unregister(&vdpa_bus);
	ida_destroy(&vdpa_index_ida);
}
/* Register at core_initcall time so the vdpa bus exists before built-in
 * vdpa parent drivers try to register devices on it.
 */
core_initcall(vdpa_init);
module_exit(vdpa_exit);

MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
MODULE_LICENSE("GPL v2");