// SPDX-License-Identifier: GPL-2.0-only
/*
 * vDPA bridge driver for modern virtio-pci device
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 * Author: Jason Wang <jasowang@redhat.com>
 *
 * Based on virtio_pci_modern.c.
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/vdpa.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>
#include <linux/virtio_pci_modern.h>

#define VP_VDPA_QUEUE_MAX	256
#define VP_VDPA_DRIVER_NAME	"vp_vdpa"
#define VP_VDPA_NAME_SIZE	256

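/*
 * Per-virtqueue state: the mapped notify (doorbell) address and its
 * physical address, the MSI-X interrupt and its name, and the callback
 * installed by the vDPA bus driver.
 */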
struct vp_vring {
	void __iomem *notify;
	char msix_name[VP_VDPA_NAME_SIZE];
	struct vdpa_callback cb;
	resource_size_t notify_pa;
	int irq;
};

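/*
 * Device context: the vDPA device is embedded at the head so that
 * container_of() can recover it from a struct vdpa_device pointer,
 * followed by the underlying modern virtio-pci device and the
 * per-virtqueue state array.
 */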
struct vp_vdpa {
	struct vdpa_device vdpa;
	struct virtio_pci_modern_device *mdev;
	struct vp_vring *vring;
	struct vdpa_callback config_cb;
	char msix_name[VP_VDPA_NAME_SIZE];
	int config_irq;
	int queues;
	int vectors;
};

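/* Management device: one per bound PCI function, owning the vDPA device. */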
struct vp_vdpa_mgmtdev {
	struct vdpa_mgmt_dev mgtdev;
	struct virtio_pci_modern_device *mdev;
	struct vp_vdpa *vp_vdpa;
};

static struct vp_vdpa *vdpa_to_vp(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct vp_vdpa, vdpa);
}

static struct virtio_pci_modern_device *vdpa_to_mdev(struct vdpa_device *vdpa)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);

	return vp_vdpa->mdev;
}

static struct virtio_pci_modern_device *vp_vdpa_to_mdev(struct vp_vdpa *vp_vdpa)
{
	return vp_vdpa->mdev;
}

static u64 vp_vdpa_get_device_features(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return vp_modern_get_features(mdev);
}

static int vp_vdpa_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	vp_modern_set_features(mdev, features);

	return 0;
}

static u64 vp_vdpa_get_driver_features(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return vp_modern_get_driver_features(mdev);
}

static u8 vp_vdpa_get_status(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return vp_modern_get_status(mdev);
}

static int vp_vdpa_get_vq_irq(struct vdpa_device *vdpa, u16 idx)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	int irq = vp_vdpa->vring[idx].irq;

	if (irq == VIRTIO_MSI_NO_VECTOR)
		return -EINVAL;

	return irq;
}

static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa)
{
	struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
	struct pci_dev *pdev = mdev->pci_dev;
	int i;

	for (i = 0; i < vp_vdpa->queues; i++) {
		if (vp_vdpa->vring[i].irq != VIRTIO_MSI_NO_VECTOR) {
			vp_modern_queue_vector(mdev, i, VIRTIO_MSI_NO_VECTOR);
			devm_free_irq(&pdev->dev, vp_vdpa->vring[i].irq,
				      &vp_vdpa->vring[i]);
			vp_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
		}
	}

	if (vp_vdpa->config_irq != VIRTIO_MSI_NO_VECTOR) {
		vp_modern_config_vector(mdev, VIRTIO_MSI_NO_VECTOR);
		devm_free_irq(&pdev->dev, vp_vdpa->config_irq, vp_vdpa);
		vp_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;
	}

	if (vp_vdpa->vectors) {
		pci_free_irq_vectors(pdev);
		vp_vdpa->vectors = 0;
	}
}

static irqreturn_t vp_vdpa_vq_handler(int irq, void *arg)
{
	struct vp_vring *vring = arg;

	if (vring->cb.callback)
		return vring->cb.callback(vring->cb.private);

	return IRQ_HANDLED;
}

static irqreturn_t vp_vdpa_config_handler(int irq, void *arg)
{
	struct vp_vdpa *vp_vdpa = arg;

	if (vp_vdpa->config_cb.callback)
		return vp_vdpa->config_cb.callback(vp_vdpa->config_cb.private);

	return IRQ_HANDLED;
}

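/*
 * Allocate one MSI-X vector per virtqueue plus one for config changes:
 * vector i services vq i and vector 'queues' services the config
 * interrupt. The assignments are programmed into the device via the
 * queue and config vector registers of the common config structure.
 */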
static int vp_vdpa_request_irq(struct vp_vdpa *vp_vdpa)
{
	struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
	struct pci_dev *pdev = mdev->pci_dev;
	int i, ret, irq;
	int queues = vp_vdpa->queues;
	int vectors = queues + 1;

	ret = pci_alloc_irq_vectors(pdev, vectors, vectors, PCI_IRQ_MSIX);
	if (ret != vectors) {
		dev_err(&pdev->dev,
			"vp_vdpa: failed to allocate irq vectors, want %d but got %d\n",
			vectors, ret);
		return ret;
	}

	vp_vdpa->vectors = vectors;

	for (i = 0; i < queues; i++) {
		snprintf(vp_vdpa->vring[i].msix_name, VP_VDPA_NAME_SIZE,
			 "vp-vdpa[%s]-%d", pci_name(pdev), i);
		irq = pci_irq_vector(pdev, i);
		ret = devm_request_irq(&pdev->dev, irq,
				       vp_vdpa_vq_handler,
				       0, vp_vdpa->vring[i].msix_name,
				       &vp_vdpa->vring[i]);
		if (ret) {
			dev_err(&pdev->dev,
				"vp_vdpa: failed to request irq for vq %d\n", i);
			goto err;
		}
		vp_modern_queue_vector(mdev, i, i);
		vp_vdpa->vring[i].irq = irq;
	}

	snprintf(vp_vdpa->msix_name, VP_VDPA_NAME_SIZE, "vp-vdpa[%s]-config",
		 pci_name(pdev));
	irq = pci_irq_vector(pdev, queues);
	ret = devm_request_irq(&pdev->dev, irq, vp_vdpa_config_handler, 0,
			       vp_vdpa->msix_name, vp_vdpa);
	if (ret) {
		dev_err(&pdev->dev,
			"vp_vdpa: failed to request irq for config\n");
		goto err;
	}
	vp_modern_config_vector(mdev, queues);
	vp_vdpa->config_irq = irq;

	return 0;
err:
	vp_vdpa_free_irq(vp_vdpa);
	return ret;
}

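/*
 * Interrupts are requested lazily, on the transition to DRIVER_OK,
 * and released again on reset.
 */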
static void vp_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
	u8 s = vp_vdpa_get_status(vdpa);

	if (status & VIRTIO_CONFIG_S_DRIVER_OK &&
	    !(s & VIRTIO_CONFIG_S_DRIVER_OK)) {
		vp_vdpa_request_irq(vp_vdpa);
	}

	vp_modern_set_status(mdev, status);
}

static int vp_vdpa_reset(struct vdpa_device *vdpa)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
	u8 s = vp_vdpa_get_status(vdpa);

	vp_modern_set_status(mdev, 0);

	if (s & VIRTIO_CONFIG_S_DRIVER_OK)
		vp_vdpa_free_irq(vp_vdpa);

	return 0;
}

static u16 vp_vdpa_get_vq_num_max(struct vdpa_device *vdpa)
{
	return VP_VDPA_QUEUE_MAX;
}

static int vp_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 qid,
				struct vdpa_vq_state *state)
{
	/* Reading back virtqueue state is not supported by the virtio
	 * specification, so return -EOPNOTSUPP. This means we cannot
	 * support live migration or vhost device start/stop.
	 */
	return -EOPNOTSUPP;
}

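/*
 * Setting virtqueue state is likewise unsupported, but a freshly reset
 * device is in a well-known initial state: for a split ring the avail
 * index is 0, and for a packed ring both wrap counters are 1 and both
 * ring indices are 0. Requests matching that state can be accepted as
 * no-ops.
 */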
static int vp_vdpa_set_vq_state_split(struct vdpa_device *vdpa,
				      const struct vdpa_vq_state *state)
{
	const struct vdpa_vq_state_split *split = &state->split;

	if (split->avail_index == 0)
		return 0;

	return -EOPNOTSUPP;
}

static int vp_vdpa_set_vq_state_packed(struct vdpa_device *vdpa,
				       const struct vdpa_vq_state *state)
{
	const struct vdpa_vq_state_packed *packed = &state->packed;

	if (packed->last_avail_counter == 1 &&
	    packed->last_avail_idx == 0 &&
	    packed->last_used_counter == 1 &&
	    packed->last_used_idx == 0)
		return 0;

	return -EOPNOTSUPP;
}

static int vp_vdpa_set_vq_state(struct vdpa_device *vdpa, u16 qid,
				const struct vdpa_vq_state *state)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	/* Setting virtqueue state is not supported by the virtio
	 * specification, but if the requested state happens to equal
	 * the device's initial state, we can let it go.
	 */
	if ((vp_modern_get_status(mdev) & VIRTIO_CONFIG_S_FEATURES_OK) &&
	    !vp_modern_get_queue_enable(mdev, qid)) {
		if (vp_modern_get_driver_features(mdev) &
		    BIT_ULL(VIRTIO_F_RING_PACKED))
			return vp_vdpa_set_vq_state_packed(vdpa, state);
		else
			return vp_vdpa_set_vq_state_split(vdpa, state);
	}

	return -EOPNOTSUPP;
}

static void vp_vdpa_set_vq_cb(struct vdpa_device *vdpa, u16 qid,
			      struct vdpa_callback *cb)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);

	vp_vdpa->vring[qid].cb = *cb;
}

static void vp_vdpa_set_vq_ready(struct vdpa_device *vdpa,
				 u16 qid, bool ready)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	vp_modern_set_queue_enable(mdev, qid, ready);
}

static bool vp_vdpa_get_vq_ready(struct vdpa_device *vdpa, u16 qid)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return vp_modern_get_queue_enable(mdev, qid);
}

static void vp_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 qid,
			       u32 num)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	vp_modern_set_queue_size(mdev, qid, num);
}

static int vp_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 qid,
				  u64 desc_area, u64 driver_area,
				  u64 device_area)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	vp_modern_queue_address(mdev, qid, desc_area,
				driver_area, device_area);

	return 0;
}

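/*
 * Kick the virtqueue by writing its index to the queue's notify
 * address, as defined by the modern virtio-pci notification
 * capability.
 */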
static void vp_vdpa_kick_vq(struct vdpa_device *vdpa, u16 qid)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);

	vp_iowrite16(qid, vp_vdpa->vring[qid].notify);
}

static u32 vp_vdpa_get_generation(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return vp_modern_generation(mdev);
}

static u32 vp_vdpa_get_device_id(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return mdev->id.device;
}

static u32 vp_vdpa_get_vendor_id(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return mdev->id.vendor;
}

static u32 vp_vdpa_get_vq_align(struct vdpa_device *vdpa)
{
	return PAGE_SIZE;
}

static size_t vp_vdpa_get_config_size(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return mdev->device_len;
}

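/*
 * Read the device config space byte by byte, retrying until the
 * config generation counter is stable across the whole read. This
 * avoids a torn read if the device updates its configuration midway
 * through.
 */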
static void vp_vdpa_get_config(struct vdpa_device *vdpa,
			       unsigned int offset,
			       void *buf, unsigned int len)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
	u8 old, new;
	u8 *p;
	int i;

	do {
		old = vp_ioread8(&mdev->common->config_generation);
		p = buf;
		for (i = 0; i < len; i++)
			*p++ = vp_ioread8(mdev->device + offset + i);

		new = vp_ioread8(&mdev->common->config_generation);
	} while (old != new);
}

static void vp_vdpa_set_config(struct vdpa_device *vdpa,
			       unsigned int offset, const void *buf,
			       unsigned int len)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
	const u8 *p = buf;
	int i;

	for (i = 0; i < len; i++)
		vp_iowrite8(*p++, mdev->device + offset + i);
}

static void vp_vdpa_set_config_cb(struct vdpa_device *vdpa,
				  struct vdpa_callback *cb)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);

	vp_vdpa->config_cb = *cb;
}

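/*
 * Report the physical address and size of the queue's doorbell so
 * that the vDPA bus driver (e.g. vhost-vdpa) can map it directly
 * into userspace for notification bypass.
 */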
static struct vdpa_notification_area
vp_vdpa_get_vq_notification(struct vdpa_device *vdpa, u16 qid)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
	struct vdpa_notification_area notify;

	notify.addr = vp_vdpa->vring[qid].notify_pa;
	notify.size = mdev->notify_offset_multiplier;

	return notify;
}

static const struct vdpa_config_ops vp_vdpa_ops = {
	.get_device_features = vp_vdpa_get_device_features,
	.set_driver_features = vp_vdpa_set_driver_features,
	.get_driver_features = vp_vdpa_get_driver_features,
	.get_status	= vp_vdpa_get_status,
	.set_status	= vp_vdpa_set_status,
	.reset		= vp_vdpa_reset,
	.get_vq_num_max	= vp_vdpa_get_vq_num_max,
	.get_vq_state	= vp_vdpa_get_vq_state,
	.get_vq_notification = vp_vdpa_get_vq_notification,
	.set_vq_state	= vp_vdpa_set_vq_state,
	.set_vq_cb	= vp_vdpa_set_vq_cb,
	.set_vq_ready	= vp_vdpa_set_vq_ready,
	.get_vq_ready	= vp_vdpa_get_vq_ready,
	.set_vq_num	= vp_vdpa_set_vq_num,
	.set_vq_address	= vp_vdpa_set_vq_address,
	.kick_vq	= vp_vdpa_kick_vq,
	.get_generation	= vp_vdpa_get_generation,
	.get_device_id	= vp_vdpa_get_device_id,
	.get_vendor_id	= vp_vdpa_get_vendor_id,
	.get_vq_align	= vp_vdpa_get_vq_align,
	.get_config_size = vp_vdpa_get_config_size,
	.get_config	= vp_vdpa_get_config,
	.set_config	= vp_vdpa_set_config,
	.set_config_cb	= vp_vdpa_set_config_cb,
	.get_vq_irq	= vp_vdpa_get_vq_irq,
};

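/*
 * Example usage (the PCI address below is purely illustrative): since
 * this driver has no static id table, rebind a modern virtio-pci
 * function to it via driver_override, then create a vDPA device on
 * top of it with the iproute2 "vdpa" tool:
 *
 *   echo 0000:00:04.0 > /sys/bus/pci/drivers/virtio-pci/unbind
 *   echo vp-vdpa > /sys/bus/pci/devices/0000:00:04.0/driver_override
 *   echo 0000:00:04.0 > /sys/bus/pci/drivers_probe
 *   vdpa dev add name vdpa0 mgmtdev pci/0000:00:04.0
 */
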
static void vp_vdpa_free_irq_vectors(void *data)
{
	pci_free_irq_vectors(data);
}

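/*
 * Management op: allocate a vDPA device on top of the probed
 * virtio-pci device, map each virtqueue's notify area and register
 * the device with the vDPA bus. Invoked via "vdpa dev add".
 */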
static int vp_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
			   const struct vdpa_dev_set_config *add_config)
{
	struct vp_vdpa_mgmtdev *vp_vdpa_mgtdev =
		container_of(v_mdev, struct vp_vdpa_mgmtdev, mgtdev);

	struct virtio_pci_modern_device *mdev = vp_vdpa_mgtdev->mdev;
	struct pci_dev *pdev = mdev->pci_dev;
	struct device *dev = &pdev->dev;
	struct vp_vdpa *vp_vdpa = NULL;
	int ret, i;

	vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa,
				    dev, &vp_vdpa_ops, 1, 1, name, false);

	if (IS_ERR(vp_vdpa)) {
		dev_err(dev, "vp_vdpa: Failed to allocate vDPA structure\n");
		return PTR_ERR(vp_vdpa);
	}

	vp_vdpa_mgtdev->vp_vdpa = vp_vdpa;

	vp_vdpa->vdpa.dma_dev = &pdev->dev;
	vp_vdpa->queues = vp_modern_get_num_queues(mdev);
	vp_vdpa->mdev = mdev;

	ret = devm_add_action_or_reset(dev, vp_vdpa_free_irq_vectors, pdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Failed to add devres for freeing irq vectors\n");
		goto err;
	}

	vp_vdpa->vring = devm_kcalloc(&pdev->dev, vp_vdpa->queues,
				      sizeof(*vp_vdpa->vring),
				      GFP_KERNEL);
	if (!vp_vdpa->vring) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "Failed to allocate virtqueues\n");
		goto err;
	}

	for (i = 0; i < vp_vdpa->queues; i++) {
		vp_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
		vp_vdpa->vring[i].notify =
			vp_modern_map_vq_notify(mdev, i,
						&vp_vdpa->vring[i].notify_pa);
		if (!vp_vdpa->vring[i].notify) {
			ret = -EINVAL;
			dev_warn(&pdev->dev, "Failed to map vq notify %d\n", i);
			goto err;
		}
	}
	vp_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;

	vp_vdpa->vdpa.mdev = &vp_vdpa_mgtdev->mgtdev;
	ret = _vdpa_register_device(&vp_vdpa->vdpa, vp_vdpa->queues);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register to vdpa bus\n");
		goto err;
	}

	return 0;

err:
	put_device(&vp_vdpa->vdpa.dev);
	return ret;
}

static void vp_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev,
			    struct vdpa_device *dev)
{
	struct vp_vdpa_mgmtdev *vp_vdpa_mgtdev =
		container_of(v_mdev, struct vp_vdpa_mgmtdev, mgtdev);

	struct vp_vdpa *vp_vdpa = vp_vdpa_mgtdev->vp_vdpa;

	_vdpa_unregister_device(&vp_vdpa->vdpa);
	vp_vdpa_mgtdev->vp_vdpa = NULL;
}

static const struct vdpa_mgmtdev_ops vp_vdpa_mdev_ops = {
	.dev_add = vp_vdpa_dev_add,
	.dev_del = vp_vdpa_dev_del,
};

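/*
 * PCI probe: set up the modern virtio-pci device and expose it as a
 * vDPA management device. The vDPA device itself is created later, on
 * demand, through vp_vdpa_dev_add().
 */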
static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct vp_vdpa_mgmtdev *vp_vdpa_mgtdev = NULL;
	struct vdpa_mgmt_dev *mgtdev;
	struct device *dev = &pdev->dev;
	struct virtio_pci_modern_device *mdev = NULL;
	struct virtio_device_id *mdev_id = NULL;
	int err;

	vp_vdpa_mgtdev = kzalloc(sizeof(*vp_vdpa_mgtdev), GFP_KERNEL);
	if (!vp_vdpa_mgtdev)
		return -ENOMEM;

	mgtdev = &vp_vdpa_mgtdev->mgtdev;
	mgtdev->ops = &vp_vdpa_mdev_ops;
	mgtdev->device = dev;

	mdev = kzalloc(sizeof(struct virtio_pci_modern_device), GFP_KERNEL);
	if (!mdev) {
		err = -ENOMEM;
		goto mdev_err;
	}

	mdev_id = kzalloc(sizeof(struct virtio_device_id), GFP_KERNEL);
	if (!mdev_id) {
		err = -ENOMEM;
		goto mdev_id_err;
	}

	vp_vdpa_mgtdev->mdev = mdev;
	mdev->pci_dev = pdev;

	err = pcim_enable_device(pdev);
	if (err)
		goto probe_err;

	err = vp_modern_probe(mdev);
	if (err) {
		dev_err(&pdev->dev, "Failed to probe modern PCI device\n");
		goto probe_err;
	}

	mdev_id->device = mdev->id.device;
	mdev_id->vendor = mdev->id.vendor;
	mgtdev->id_table = mdev_id;
	mgtdev->max_supported_vqs = vp_modern_get_num_queues(mdev);
	mgtdev->supported_features = vp_modern_get_features(mdev);
	pci_set_master(pdev);
	pci_set_drvdata(pdev, vp_vdpa_mgtdev);

	err = vdpa_mgmtdev_register(mgtdev);
	if (err) {
		dev_err(&pdev->dev, "Failed to register vdpa mgmtdev device\n");
		goto register_err;
	}

	return 0;

register_err:
	vp_modern_remove(vp_vdpa_mgtdev->mdev);
probe_err:
	kfree(mdev_id);
mdev_id_err:
	kfree(mdev);
mdev_err:
	kfree(vp_vdpa_mgtdev);
	return err;
}

static void vp_vdpa_remove(struct pci_dev *pdev)
{
	struct vp_vdpa_mgmtdev *vp_vdpa_mgtdev = pci_get_drvdata(pdev);
	struct virtio_pci_modern_device *mdev = NULL;

	mdev = vp_vdpa_mgtdev->mdev;
	vdpa_mgmtdev_unregister(&vp_vdpa_mgtdev->mgtdev);
	vp_modern_remove(mdev);
	kfree(vp_vdpa_mgtdev->mgtdev.id_table);
	kfree(mdev);
	kfree(vp_vdpa_mgtdev);
}

static struct pci_driver vp_vdpa_driver = {
	.name		= "vp-vdpa",
	.id_table	= NULL, /* only dynamic ids */
	.probe		= vp_vdpa_probe,
	.remove		= vp_vdpa_remove,
};

module_pci_driver(vp_vdpa_driver);

MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
MODULE_DESCRIPTION("vp-vdpa");
MODULE_LICENSE("GPL");
MODULE_VERSION("1");