// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDPA device simulator core.
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 * Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/dma-map-ops.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <linux/vhost_iotlb.h>
#include <linux/iova.h>
#include <uapi/linux/vdpa.h>

#include "vdpa_sim.h"

#define DRV_VERSION "0.1"
#define DRV_AUTHOR "Jason Wang <jasowang@redhat.com>"
#define DRV_DESC "vDPA Device Simulator core"
#define DRV_LICENSE "GPL v2"

static int batch_mapping = 1;
module_param(batch_mapping, int, 0444);
MODULE_PARM_DESC(batch_mapping, "Batched mapping: 1 - Enable; 0 - Disable");
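
/*
 * batch_mapping selects which mapping interface the simulated device
 * exposes: with batching (the default) mappings arrive as a whole IOTLB
 * via .set_map; without it (e.g. loading with batch_mapping=0) they
 * arrive incrementally via .dma_map/.dma_unmap. See the two
 * vdpa_config_ops tables at the bottom of this file.
 */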

static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
		 "Maximum number of iotlb entries for each address space. 0 means unlimited. (default: 2048)");

#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
#define VDPASIM_QUEUE_MAX 256
#define VDPASIM_VENDOR_ID 0

static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct vdpasim, vdpa);
}

static struct vdpasim *dev_to_sim(struct device *dev)
{
	struct vdpa_device *vdpa = dev_to_vdpa(dev);

	return vdpa_to_sim(vdpa);
}

static void vdpasim_vq_notify(struct vringh *vring)
{
	struct vdpasim_virtqueue *vq =
		container_of(vring, struct vdpasim_virtqueue, vring);

	if (!vq->cb)
		return;

	vq->cb(vq->private);
}
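
/*
 * Make a virtqueue live: vringh takes the guest-provided ring addresses
 * as-is here; they are only translated through the queue's IOTLB when
 * the ring is actually accessed, which is why the raw u64 addresses can
 * simply be cast to vring pointers.
 */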
static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
{
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vringh_init_iotlb(&vq->vring, vdpasim->features, vq->num, false,
			  (struct vring_desc *)(uintptr_t)vq->desc_addr,
			  (struct vring_avail *)
			  (uintptr_t)vq->driver_addr,
			  (struct vring_used *)
			  (uintptr_t)vq->device_addr);

	vq->vring.notify = vdpasim_vq_notify;
}

static void vdpasim_vq_reset(struct vdpasim *vdpasim,
			     struct vdpasim_virtqueue *vq)
{
	vq->ready = false;
	vq->desc_addr = 0;
	vq->driver_addr = 0;
	vq->device_addr = 0;
	vq->cb = NULL;
	vq->private = NULL;
	vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
			  VDPASIM_QUEUE_MAX, false, NULL, NULL, NULL);

	vq->vring.notify = NULL;
}
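
/*
 * Device reset: every virtqueue is re-bound to the IOTLB of address
 * space 0 and all per-ASID IOTLBs are flushed, so any binding set up
 * earlier via .set_group_asid is implicitly undone.
 */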
static void vdpasim_do_reset(struct vdpasim *vdpasim)
{
	int i;

	spin_lock(&vdpasim->iommu_lock);

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
		vdpasim_vq_reset(vdpasim, &vdpasim->vqs[i]);
		vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
				 &vdpasim->iommu_lock);
	}

	for (i = 0; i < vdpasim->dev_attr.nas; i++)
		vhost_iotlb_reset(&vdpasim->iommu[i]);

	vdpasim->running = true;
	spin_unlock(&vdpasim->iommu_lock);

	vdpasim->features = 0;
	vdpasim->status = 0;
	++vdpasim->generation;
}
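
/*
 * Translate a DMA API direction into vhost IOTLB permissions from the
 * device's point of view: DMA_FROM_DEVICE means the device writes into
 * the buffer (write-only mapping), DMA_TO_DEVICE means it only reads.
 */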
static int dir_to_perm(enum dma_data_direction dir)
{
	int perm = -EFAULT;

	switch (dir) {
	case DMA_FROM_DEVICE:
		perm = VHOST_MAP_WO;
		break;
	case DMA_TO_DEVICE:
		perm = VHOST_MAP_RO;
		break;
	case DMA_BIDIRECTIONAL:
		perm = VHOST_MAP_RW;
		break;
	default:
		break;
	}

	return perm;
}
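
/*
 * Allocate an IOVA range and install a dma_addr -> paddr translation in
 * the address space 0 IOTLB. The simulator acts as its own IOMMU here:
 * the returned dma_addr is resolved back through vdpasim->iommu[0]
 * whenever the simulated device touches the buffer.
 */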
static dma_addr_t vdpasim_map_range(struct vdpasim *vdpasim, phys_addr_t paddr,
				    size_t size, unsigned int perm)
{
	struct iova *iova;
	dma_addr_t dma_addr;
	int ret;

	/* We set the limit_pfn to the maximum (ULONG_MAX - 1) */
	iova = alloc_iova(&vdpasim->iova, size >> iova_shift(&vdpasim->iova),
			  ULONG_MAX - 1, true);
	if (!iova)
		return DMA_MAPPING_ERROR;

	dma_addr = iova_dma_addr(&vdpasim->iova, iova);

	spin_lock(&vdpasim->iommu_lock);
	ret = vhost_iotlb_add_range(&vdpasim->iommu[0], (u64)dma_addr,
				    (u64)dma_addr + size - 1, (u64)paddr, perm);
	spin_unlock(&vdpasim->iommu_lock);

	if (ret) {
		__free_iova(&vdpasim->iova, iova);
		return DMA_MAPPING_ERROR;
	}

	return dma_addr;
}

static void vdpasim_unmap_range(struct vdpasim *vdpasim, dma_addr_t dma_addr,
				size_t size)
{
	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(&vdpasim->iommu[0], (u64)dma_addr,
			      (u64)dma_addr + size - 1);
	spin_unlock(&vdpasim->iommu_lock);

	free_iova(&vdpasim->iova, iova_pfn(&vdpasim->iova, dma_addr));
}

static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	phys_addr_t paddr = page_to_phys(page) + offset;
	int perm = dir_to_perm(dir);

	if (perm < 0)
		return DMA_MAPPING_ERROR;

	return vdpasim_map_range(vdpasim, paddr, size, perm);
}

static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);

	vdpasim_unmap_range(vdpasim, dma_addr, size);
}

static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
				    dma_addr_t *dma_addr, gfp_t flag,
				    unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	phys_addr_t paddr;
	void *addr;

	addr = kmalloc(size, flag);
	if (!addr) {
		*dma_addr = DMA_MAPPING_ERROR;
		return NULL;
	}

	paddr = virt_to_phys(addr);

	*dma_addr = vdpasim_map_range(vdpasim, paddr, size, VHOST_MAP_RW);
	if (*dma_addr == DMA_MAPPING_ERROR) {
		kfree(addr);
		return NULL;
	}

	return addr;
}

static void vdpasim_free_coherent(struct device *dev, size_t size,
				  void *vaddr, dma_addr_t dma_addr,
				  unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);

	vdpasim_unmap_range(vdpasim, dma_addr, size);

	kfree(vaddr);
}
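
/*
 * These dma_map_ops are installed on the vdpa device itself in
 * vdpasim_create() below, so DMA performed on behalf of the simulated
 * device goes through the IOVA allocator and IOTLB above rather than a
 * real IOMMU.
 */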
static const struct dma_map_ops vdpasim_dma_ops = {
	.map_page = vdpasim_map_page,
	.unmap_page = vdpasim_unmap_page,
	.alloc = vdpasim_alloc_coherent,
	.free = vdpasim_free_coherent,
};

static const struct vdpa_config_ops vdpasim_config_ops;
static const struct vdpa_config_ops vdpasim_batch_config_ops;
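
/*
 * vdpasim_create() is the entry point for the device simulators built
 * on top of this core. A minimal, hypothetical usage sketch (a real
 * caller such as vdpa_sim_net fills these in from its management
 * device's dev_add() callback; MY_SIM_FEATURES and my_sim_work are
 * illustrative names only):
 *
 *	struct vdpasim_dev_attr dev_attr = {};
 *	struct vdpasim *simdev;
 *
 *	dev_attr.mgmt_dev = mdev;
 *	dev_attr.name = name;
 *	dev_attr.id = VIRTIO_ID_NET;
 *	dev_attr.supported_features = MY_SIM_FEATURES;
 *	dev_attr.ngroups = 2;
 *	dev_attr.nas = 2;
 *	dev_attr.nvqs = 3;
 *	dev_attr.config_size = sizeof(struct virtio_net_config);
 *	dev_attr.buffer_size = PAGE_SIZE;
 *	dev_attr.work_fn = my_sim_work;
 *
 *	simdev = vdpasim_create(&dev_attr, config);
 *	if (IS_ERR(simdev))
 *		return PTR_ERR(simdev);
 */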
struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr,
			       const struct vdpa_dev_set_config *config)
{
	const struct vdpa_config_ops *ops;
	struct vdpasim *vdpasim;
	struct device *dev;
	int i, ret = -ENOMEM;

	if (config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
		if (config->device_features &
		    ~dev_attr->supported_features)
			return ERR_PTR(-EINVAL);
		dev_attr->supported_features =
			config->device_features;
	}

	if (batch_mapping)
		ops = &vdpasim_batch_config_ops;
	else
		ops = &vdpasim_config_ops;

	vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops,
				    dev_attr->ngroups, dev_attr->nas,
				    dev_attr->name, false);
	if (IS_ERR(vdpasim)) {
		ret = PTR_ERR(vdpasim);
		goto err_alloc;
	}

	vdpasim->dev_attr = *dev_attr;
	INIT_WORK(&vdpasim->work, dev_attr->work_fn);
	spin_lock_init(&vdpasim->lock);
	spin_lock_init(&vdpasim->iommu_lock);

	dev = &vdpasim->vdpa.dev;
	dev->dma_mask = &dev->coherent_dma_mask;
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		goto err_iommu;
	set_dma_ops(dev, &vdpasim_dma_ops);
	vdpasim->vdpa.mdev = dev_attr->mgmt_dev;

	vdpasim->config = kzalloc(dev_attr->config_size, GFP_KERNEL);
	if (!vdpasim->config)
		goto err_iommu;

	vdpasim->vqs = kcalloc(dev_attr->nvqs, sizeof(struct vdpasim_virtqueue),
			       GFP_KERNEL);
	if (!vdpasim->vqs)
		goto err_iommu;

	vdpasim->iommu = kmalloc_array(vdpasim->dev_attr.nas,
				       sizeof(*vdpasim->iommu), GFP_KERNEL);
	if (!vdpasim->iommu)
		goto err_iommu;

	for (i = 0; i < vdpasim->dev_attr.nas; i++)
		vhost_iotlb_init(&vdpasim->iommu[i], max_iotlb_entries, 0);

	vdpasim->buffer = kvmalloc(dev_attr->buffer_size, GFP_KERNEL);
	if (!vdpasim->buffer)
		goto err_iommu;

	for (i = 0; i < dev_attr->nvqs; i++)
		vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
				 &vdpasim->iommu_lock);

	ret = iova_cache_get();
	if (ret)
		goto err_iommu;

	/* For simplicity we use an IOVA allocator with byte granularity */
	init_iova_domain(&vdpasim->iova, 1, 0);

	vdpasim->vdpa.dma_dev = dev;

	return vdpasim;

err_iommu:
	put_device(dev);
err_alloc:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(vdpasim_create);

static int vdpasim_set_vq_address(struct vdpa_device *vdpa, u16 idx,
				  u64 desc_area, u64 driver_area,
				  u64 device_area)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->desc_addr = desc_area;
	vq->driver_addr = driver_area;
	vq->device_addr = device_area;

	return 0;
}

static void vdpasim_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->num = num;
}

static void vdpasim_kick_vq(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	if (vq->ready)
		schedule_work(&vdpasim->work);
}

static void vdpasim_set_vq_cb(struct vdpa_device *vdpa, u16 idx,
			      struct vdpa_callback *cb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->cb = cb->callback;
	vq->private = cb->private;
}

static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	bool old_ready;

	spin_lock(&vdpasim->lock);
	old_ready = vq->ready;
	vq->ready = ready;
	if (vq->ready && !old_ready)
		vdpasim_queue_ready(vdpasim, idx);
	spin_unlock(&vdpasim->lock);
}

static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	return vq->ready;
}

static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
				const struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	spin_lock(&vdpasim->lock);
	vrh->last_avail_idx = state->split.avail_index;
	spin_unlock(&vdpasim->lock);

	return 0;
}

static int vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx,
				struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	state->split.avail_index = vrh->last_avail_idx;
	return 0;
}

static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_ALIGN;
}

static u32 vdpasim_get_vq_group(struct vdpa_device *vdpa, u16 idx)
{
	/* RX and TX belong to group 0, CVQ belongs to group 1 */
	if (idx == 2)
		return 1;
	else
		return 0;
}

static u64 vdpasim_get_device_features(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.supported_features;
}

static int vdpasim_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	/* DMA mapping must be done by driver */
	if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
		return -EINVAL;

	vdpasim->features = features & vdpasim->dev_attr.supported_features;

	return 0;
}

static u64 vdpasim_get_driver_features(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->features;
}

static void vdpasim_set_config_cb(struct vdpa_device *vdpa,
				  struct vdpa_callback *cb)
{
	/* We don't support config interrupt */
}

static u16 vdpasim_get_vq_num_max(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_MAX;
}

static u32 vdpasim_get_device_id(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.id;
}

static u32 vdpasim_get_vendor_id(struct vdpa_device *vdpa)
{
	return VDPASIM_VENDOR_ID;
}

static u8 vdpasim_get_status(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	u8 status;

	spin_lock(&vdpasim->lock);
	status = vdpasim->status;
	spin_unlock(&vdpasim->lock);

	return status;
}

static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->lock);
	vdpasim->status = status;
	spin_unlock(&vdpasim->lock);
}

static int vdpasim_reset(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->lock);
	vdpasim->status = 0;
	vdpasim_do_reset(vdpasim);
	spin_unlock(&vdpasim->lock);

	return 0;
}
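
/*
 * Suspend only stops the datapath: status and ring state are preserved,
 * and the device-specific work_fn is expected to check vdpasim->running
 * before processing the rings. vdpasim->running is set again by the
 * next reset (see vdpasim_do_reset()).
 */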
static int vdpasim_suspend(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->lock);
	vdpasim->running = false;
	spin_unlock(&vdpasim->lock);

	return 0;
}

static size_t vdpasim_get_config_size(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.config_size;
}

static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
			       void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len > vdpasim->dev_attr.config_size)
		return;

	if (vdpasim->dev_attr.get_config)
		vdpasim->dev_attr.get_config(vdpasim, vdpasim->config);

	memcpy(buf, vdpasim->config + offset, len);
}

static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset,
			       const void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len > vdpasim->dev_attr.config_size)
		return;

	memcpy(vdpasim->config + offset, buf, len);

	if (vdpasim->dev_attr.set_config)
		vdpasim->dev_attr.set_config(vdpasim, vdpasim->config);
}

static u32 vdpasim_get_generation(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->generation;
}

static struct vdpa_iova_range vdpasim_get_iova_range(struct vdpa_device *vdpa)
{
	struct vdpa_iova_range range = {
		.first = 0ULL,
		.last = ULLONG_MAX,
	};

	return range;
}

static int vdpasim_set_group_asid(struct vdpa_device *vdpa, unsigned int group,
				  unsigned int asid)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vhost_iotlb *iommu;
	int i;

	if (group >= vdpasim->dev_attr.ngroups)
		return -EINVAL;

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	iommu = &vdpasim->iommu[asid];

	spin_lock(&vdpasim->lock);

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
		if (vdpasim_get_vq_group(vdpa, i) == group)
			vringh_set_iotlb(&vdpasim->vqs[i].vring, iommu,
					 &vdpasim->iommu_lock);

	spin_unlock(&vdpasim->lock);

	return 0;
}
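
/*
 * Batched mapping interface: replace the whole IOTLB of one address
 * space with the caller's copy. This is the counterpart of the
 * incremental vdpasim_dma_map()/vdpasim_dma_unmap() below and is only
 * wired up in vdpasim_batch_config_ops.
 */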
static int vdpasim_set_map(struct vdpa_device *vdpa, unsigned int asid,
			   struct vhost_iotlb *iotlb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vhost_iotlb_map *map;
	struct vhost_iotlb *iommu;
	u64 start = 0ULL, last = ULLONG_MAX;
	int ret;

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	spin_lock(&vdpasim->iommu_lock);

	iommu = &vdpasim->iommu[asid];
	vhost_iotlb_reset(iommu);

	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		ret = vhost_iotlb_add_range(iommu, map->start,
					    map->last, map->addr, map->perm);
		if (ret)
			goto err;
	}
	spin_unlock(&vdpasim->iommu_lock);
	return 0;

err:
	vhost_iotlb_reset(iommu);
	spin_unlock(&vdpasim->iommu_lock);
	return ret;
}

static int vdpasim_dma_map(struct vdpa_device *vdpa, unsigned int asid,
			   u64 iova, u64 size,
			   u64 pa, u32 perm, void *opaque)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int ret;

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	spin_lock(&vdpasim->iommu_lock);
	ret = vhost_iotlb_add_range_ctx(&vdpasim->iommu[asid], iova,
					iova + size - 1, pa, perm, opaque);
	spin_unlock(&vdpasim->iommu_lock);

	return ret;
}

static int vdpasim_dma_unmap(struct vdpa_device *vdpa, unsigned int asid,
			     u64 iova, u64 size)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(&vdpasim->iommu[asid], iova, iova + size - 1);
	spin_unlock(&vdpasim->iommu_lock);

	return 0;
}

static void vdpasim_free(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int i;

	cancel_work_sync(&vdpasim->work);

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
		vringh_kiov_cleanup(&vdpasim->vqs[i].out_iov);
		vringh_kiov_cleanup(&vdpasim->vqs[i].in_iov);
	}

	if (vdpa_get_dma_dev(vdpa)) {
		put_iova_domain(&vdpasim->iova);
		iova_cache_put();
	}

	kvfree(vdpasim->buffer);
	for (i = 0; i < vdpasim->dev_attr.nas; i++)
		vhost_iotlb_reset(&vdpasim->iommu[i]);
	kfree(vdpasim->iommu);
	kfree(vdpasim->vqs);
	kfree(vdpasim->config);
}
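
/*
 * The two ops tables below differ only in the mapping interface:
 * vdpasim_config_ops exposes incremental .dma_map/.dma_unmap, while
 * vdpasim_batch_config_ops exposes whole-IOTLB updates via .set_map
 * (selected with the batch_mapping module parameter).
 */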
static const struct vdpa_config_ops vdpasim_config_ops = {
	.set_vq_address = vdpasim_set_vq_address,
	.set_vq_num = vdpasim_set_vq_num,
	.kick_vq = vdpasim_kick_vq,
	.set_vq_cb = vdpasim_set_vq_cb,
	.set_vq_ready = vdpasim_set_vq_ready,
	.get_vq_ready = vdpasim_get_vq_ready,
	.set_vq_state = vdpasim_set_vq_state,
	.get_vq_state = vdpasim_get_vq_state,
	.get_vq_align = vdpasim_get_vq_align,
	.get_vq_group = vdpasim_get_vq_group,
	.get_device_features = vdpasim_get_device_features,
	.set_driver_features = vdpasim_set_driver_features,
	.get_driver_features = vdpasim_get_driver_features,
	.set_config_cb = vdpasim_set_config_cb,
	.get_vq_num_max = vdpasim_get_vq_num_max,
	.get_device_id = vdpasim_get_device_id,
	.get_vendor_id = vdpasim_get_vendor_id,
	.get_status = vdpasim_get_status,
	.set_status = vdpasim_set_status,
	.reset = vdpasim_reset,
	.suspend = vdpasim_suspend,
	.get_config_size = vdpasim_get_config_size,
	.get_config = vdpasim_get_config,
	.set_config = vdpasim_set_config,
	.get_generation = vdpasim_get_generation,
	.get_iova_range = vdpasim_get_iova_range,
	.set_group_asid = vdpasim_set_group_asid,
	.dma_map = vdpasim_dma_map,
	.dma_unmap = vdpasim_dma_unmap,
	.free = vdpasim_free,
};

static const struct vdpa_config_ops vdpasim_batch_config_ops = {
	.set_vq_address = vdpasim_set_vq_address,
	.set_vq_num = vdpasim_set_vq_num,
	.kick_vq = vdpasim_kick_vq,
	.set_vq_cb = vdpasim_set_vq_cb,
	.set_vq_ready = vdpasim_set_vq_ready,
	.get_vq_ready = vdpasim_get_vq_ready,
	.set_vq_state = vdpasim_set_vq_state,
	.get_vq_state = vdpasim_get_vq_state,
	.get_vq_align = vdpasim_get_vq_align,
	.get_vq_group = vdpasim_get_vq_group,
	.get_device_features = vdpasim_get_device_features,
	.set_driver_features = vdpasim_set_driver_features,
	.get_driver_features = vdpasim_get_driver_features,
	.set_config_cb = vdpasim_set_config_cb,
	.get_vq_num_max = vdpasim_get_vq_num_max,
	.get_device_id = vdpasim_get_device_id,
	.get_vendor_id = vdpasim_get_vendor_id,
	.get_status = vdpasim_get_status,
	.set_status = vdpasim_set_status,
	.reset = vdpasim_reset,
	.suspend = vdpasim_suspend,
	.get_config_size = vdpasim_get_config_size,
	.get_config = vdpasim_get_config,
	.set_config = vdpasim_set_config,
	.get_generation = vdpasim_get_generation,
	.get_iova_range = vdpasim_get_iova_range,
	.set_group_asid = vdpasim_set_group_asid,
	.set_map = vdpasim_set_map,
	.free = vdpasim_free,
};

MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);