// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDPA device simulator core.
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 * Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/dma-map-ops.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <linux/vhost_iotlb.h>
#include <linux/iova.h>

#include "vdpa_sim.h"

#define DRV_VERSION  "0.1"
#define DRV_AUTHOR   "Jason Wang <jasowang@redhat.com>"
#define DRV_DESC     "vDPA Device Simulator core"
#define DRV_LICENSE  "GPL v2"

static int batch_mapping = 1;
module_param(batch_mapping, int, 0444);
MODULE_PARM_DESC(batch_mapping, "Batched mapping: 1 - Enable; 0 - Disable");

static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
		 "Maximum number of iotlb entries for each address space. 0 means unlimited. (default: 2048)");

#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
#define VDPASIM_QUEUE_MAX 256
#define VDPASIM_VENDOR_ID 0

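/*
 * Helpers to convert the generic vdpa / struct device handles back to the
 * simulator instance that embeds them.
 */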
static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct vdpasim, vdpa);
}

static struct vdpasim *dev_to_sim(struct device *dev)
{
	struct vdpa_device *vdpa = dev_to_vdpa(dev);

	return vdpa_to_sim(vdpa);
}

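/*
 * vringh notify callback: forward a used-buffer notification to the
 * driver's virtqueue callback, which stands in for the device interrupt.
 */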
static void vdpasim_vq_notify(struct vringh *vring)
{
	struct vdpasim_virtqueue *vq =
		container_of(vring, struct vdpasim_virtqueue, vring);

	if (!vq->cb)
		return;

	vq->cb(vq->private);
}

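/*
 * (Re)initialize the vringh instance of a virtqueue from the
 * driver-supplied descriptor/driver/device area addresses; all ring
 * accesses are then translated through the attached IOTLB.
 */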
static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
{
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
			  VDPASIM_QUEUE_MAX, false,
			  (struct vring_desc *)(uintptr_t)vq->desc_addr,
			  (struct vring_avail *)
			  (uintptr_t)vq->driver_addr,
			  (struct vring_used *)
			  (uintptr_t)vq->device_addr);

	vq->vring.notify = vdpasim_vq_notify;
}

static void vdpasim_vq_reset(struct vdpasim *vdpasim,
			     struct vdpasim_virtqueue *vq)
{
	vq->ready = false;
	vq->desc_addr = 0;
	vq->driver_addr = 0;
	vq->device_addr = 0;
	vq->cb = NULL;
	vq->private = NULL;
	vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
			  VDPASIM_QUEUE_MAX, false, NULL, NULL, NULL);

	vq->vring.notify = NULL;
}

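/*
 * Device-level reset: point every virtqueue back at the address space 0
 * IOTLB, clear all address spaces, and drop the negotiated features and
 * status.
 */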
static void vdpasim_do_reset(struct vdpasim *vdpasim)
{
	int i;

	spin_lock(&vdpasim->iommu_lock);

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
		vdpasim_vq_reset(vdpasim, &vdpasim->vqs[i]);
		vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
				 &vdpasim->iommu_lock);
	}

	for (i = 0; i < vdpasim->dev_attr.nas; i++)
		vhost_iotlb_reset(&vdpasim->iommu[i]);

	spin_unlock(&vdpasim->iommu_lock);

	vdpasim->features = 0;
	vdpasim->status = 0;
	++vdpasim->generation;
}

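/*
 * Translate a DMA direction into vhost IOTLB permissions.  Note the
 * inversion: DMA_FROM_DEVICE means the device writes into the buffer, so
 * the mapping is write-only from the device's point of view, and vice
 * versa for DMA_TO_DEVICE.
 */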
static int dir_to_perm(enum dma_data_direction dir)
{
	int perm = -EFAULT;

	switch (dir) {
	case DMA_FROM_DEVICE:
		perm = VHOST_MAP_WO;
		break;
	case DMA_TO_DEVICE:
		perm = VHOST_MAP_RO;
		break;
	case DMA_BIDIRECTIONAL:
		perm = VHOST_MAP_RW;
		break;
	default:
		break;
	}

	return perm;
}

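/*
 * Allocate an IOVA for the range and record the IOVA -> physical address
 * translation in the address space 0 IOTLB, so that vringh lookups made on
 * behalf of the simulated device can resolve it.
 */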
static dma_addr_t vdpasim_map_range(struct vdpasim *vdpasim, phys_addr_t paddr,
				    size_t size, unsigned int perm)
{
	struct iova *iova;
	dma_addr_t dma_addr;
	int ret;

	/* We set the limit_pfn to the maximum (ULONG_MAX - 1) */
	iova = alloc_iova(&vdpasim->iova, size >> iova_shift(&vdpasim->iova),
			  ULONG_MAX - 1, true);
	if (!iova)
		return DMA_MAPPING_ERROR;

	dma_addr = iova_dma_addr(&vdpasim->iova, iova);

	spin_lock(&vdpasim->iommu_lock);
	ret = vhost_iotlb_add_range(&vdpasim->iommu[0], (u64)dma_addr,
				    (u64)dma_addr + size - 1, (u64)paddr, perm);
	spin_unlock(&vdpasim->iommu_lock);

	if (ret) {
		__free_iova(&vdpasim->iova, iova);
		return DMA_MAPPING_ERROR;
	}

	return dma_addr;
}

static void vdpasim_unmap_range(struct vdpasim *vdpasim, dma_addr_t dma_addr,
				size_t size)
{
	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(&vdpasim->iommu[0], (u64)dma_addr,
			      (u64)dma_addr + size - 1);
	spin_unlock(&vdpasim->iommu_lock);

	free_iova(&vdpasim->iova, iova_pfn(&vdpasim->iova, dma_addr));
}

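/*
 * dma_map_ops backing the simulator's DMA device.  When a bus driver (e.g.
 * virtio_vdpa) maps memory for the "hardware", these hooks turn the request
 * into an IOVA allocation plus an IOTLB entry in address space 0.
 */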
static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	phys_addr_t paddr = page_to_phys(page) + offset;
	int perm = dir_to_perm(dir);

	if (perm < 0)
		return DMA_MAPPING_ERROR;

	return vdpasim_map_range(vdpasim, paddr, size, perm);
}

static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);

	vdpasim_unmap_range(vdpasim, dma_addr, size);
}

static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
				    dma_addr_t *dma_addr, gfp_t flag,
				    unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	phys_addr_t paddr;
	void *addr;

	addr = kmalloc(size, flag);
	if (!addr) {
		*dma_addr = DMA_MAPPING_ERROR;
		return NULL;
	}

	paddr = virt_to_phys(addr);

	*dma_addr = vdpasim_map_range(vdpasim, paddr, size, VHOST_MAP_RW);
	if (*dma_addr == DMA_MAPPING_ERROR) {
		kfree(addr);
		return NULL;
	}

	return addr;
}

static void vdpasim_free_coherent(struct device *dev, size_t size,
				  void *vaddr, dma_addr_t dma_addr,
				  unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);

	vdpasim_unmap_range(vdpasim, dma_addr, size);

	kfree(vaddr);
}

static const struct dma_map_ops vdpasim_dma_ops = {
	.map_page = vdpasim_map_page,
	.unmap_page = vdpasim_unmap_page,
	.alloc = vdpasim_alloc_coherent,
	.free = vdpasim_free_coherent,
};

static const struct vdpa_config_ops vdpasim_config_ops;
static const struct vdpa_config_ops vdpasim_batch_config_ops;

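/*
 * vdpasim_create() - allocate and initialize a simulated vDPA device from
 * the attributes supplied by a device-type module.
 *
 * A minimal usage sketch; the attribute values and the work function name
 * are illustrative, not taken from this file:
 *
 *	static void my_sim_work(struct work_struct *work);
 *
 *	struct vdpasim_dev_attr attr = {
 *		.name		= "vdpasim_foo",
 *		.id		= 1,
 *		.ngroups	= 1,
 *		.nas		= 1,
 *		.nvqs		= 2,
 *		.config_size	= 8,
 *		.buffer_size	= PAGE_SIZE,
 *		.work_fn	= my_sim_work,
 *	};
 *	struct vdpasim *sim = vdpasim_create(&attr);
 *
 *	if (IS_ERR(sim))
 *		return PTR_ERR(sim);
 *
 * Returns a valid pointer on success, or an ERR_PTR() on failure.
 */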
struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
{
	const struct vdpa_config_ops *ops;
	struct vdpasim *vdpasim;
	struct device *dev;
	int i, ret = -ENOMEM;

	if (batch_mapping)
		ops = &vdpasim_batch_config_ops;
	else
		ops = &vdpasim_config_ops;

	vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops,
				    dev_attr->ngroups, dev_attr->nas,
				    dev_attr->name, false);
	if (IS_ERR(vdpasim)) {
		ret = PTR_ERR(vdpasim);
		goto err_alloc;
	}

	vdpasim->dev_attr = *dev_attr;
	INIT_WORK(&vdpasim->work, dev_attr->work_fn);
	spin_lock_init(&vdpasim->lock);
	spin_lock_init(&vdpasim->iommu_lock);

	dev = &vdpasim->vdpa.dev;
	dev->dma_mask = &dev->coherent_dma_mask;
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		goto err_iommu;
	set_dma_ops(dev, &vdpasim_dma_ops);
	vdpasim->vdpa.mdev = dev_attr->mgmt_dev;

	vdpasim->config = kzalloc(dev_attr->config_size, GFP_KERNEL);
	if (!vdpasim->config)
		goto err_iommu;

	vdpasim->vqs = kcalloc(dev_attr->nvqs, sizeof(struct vdpasim_virtqueue),
			       GFP_KERNEL);
	if (!vdpasim->vqs)
		goto err_iommu;

	vdpasim->iommu = kmalloc_array(vdpasim->dev_attr.nas,
				       sizeof(*vdpasim->iommu), GFP_KERNEL);
	if (!vdpasim->iommu)
		goto err_iommu;

	for (i = 0; i < vdpasim->dev_attr.nas; i++)
		vhost_iotlb_init(&vdpasim->iommu[i], max_iotlb_entries, 0);

	vdpasim->buffer = kvmalloc(dev_attr->buffer_size, GFP_KERNEL);
	if (!vdpasim->buffer)
		goto err_iommu;

	for (i = 0; i < dev_attr->nvqs; i++)
		vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
				 &vdpasim->iommu_lock);

	ret = iova_cache_get();
	if (ret)
		goto err_iommu;

	/* For simplicity we use an IOVA allocator with byte granularity */
	init_iova_domain(&vdpasim->iova, 1, 0);

	vdpasim->vdpa.dma_dev = dev;

	return vdpasim;

err_iommu:
	put_device(dev);
err_alloc:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(vdpasim_create);

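/*
 * vdpa_config_ops implementation: the entry points below are invoked by the
 * vDPA core on behalf of a bus driver (vhost-vdpa or virtio-vdpa).
 */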
static int vdpasim_set_vq_address(struct vdpa_device *vdpa, u16 idx,
				  u64 desc_area, u64 driver_area,
				  u64 device_area)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->desc_addr = desc_area;
	vq->driver_addr = driver_area;
	vq->device_addr = device_area;

	return 0;
}

static void vdpasim_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->num = num;
}

static void vdpasim_kick_vq(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	if (vq->ready)
		schedule_work(&vdpasim->work);
}

static void vdpasim_set_vq_cb(struct vdpa_device *vdpa, u16 idx,
			      struct vdpa_callback *cb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->cb = cb->callback;
	vq->private = cb->private;
}

static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	bool old_ready;

	spin_lock(&vdpasim->lock);
	old_ready = vq->ready;
	vq->ready = ready;
	if (vq->ready && !old_ready) {
		vdpasim_queue_ready(vdpasim, idx);
	}
	spin_unlock(&vdpasim->lock);
}

static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	return vq->ready;
}

static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
				const struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	spin_lock(&vdpasim->lock);
	vrh->last_avail_idx = state->split.avail_index;
	spin_unlock(&vdpasim->lock);

	return 0;
}

static int vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx,
				struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	state->split.avail_index = vrh->last_avail_idx;
	return 0;
}

static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_ALIGN;
}

static u32 vdpasim_get_vq_group(struct vdpa_device *vdpa, u16 idx)
{
	/* RX and TX belong to group 0, CVQ belongs to group 1 */
	if (idx == 2)
		return 1;
	else
		return 0;
}

static u64 vdpasim_get_device_features(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.supported_features;
}

static int vdpasim_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	/* DMA mapping must be done by driver */
	if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
		return -EINVAL;

	vdpasim->features = features & vdpasim->dev_attr.supported_features;

	return 0;
}

static u64 vdpasim_get_driver_features(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->features;
}

static void vdpasim_set_config_cb(struct vdpa_device *vdpa,
				  struct vdpa_callback *cb)
{
	/* We don't support config interrupt */
}

static u16 vdpasim_get_vq_num_max(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_MAX;
}

static u32 vdpasim_get_device_id(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.id;
}

static u32 vdpasim_get_vendor_id(struct vdpa_device *vdpa)
{
	return VDPASIM_VENDOR_ID;
}

static u8 vdpasim_get_status(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	u8 status;

	spin_lock(&vdpasim->lock);
	status = vdpasim->status;
	spin_unlock(&vdpasim->lock);

	return status;
}

static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->lock);
	vdpasim->status = status;
	spin_unlock(&vdpasim->lock);
}

static int vdpasim_reset(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->lock);
	vdpasim->status = 0;
	vdpasim_do_reset(vdpasim);
	spin_unlock(&vdpasim->lock);

	return 0;
}

static size_t vdpasim_get_config_size(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.config_size;
}

static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
			       void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len > vdpasim->dev_attr.config_size)
		return;

	if (vdpasim->dev_attr.get_config)
		vdpasim->dev_attr.get_config(vdpasim, vdpasim->config);

	memcpy(buf, vdpasim->config + offset, len);
}

static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset,
			       const void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len > vdpasim->dev_attr.config_size)
		return;

	memcpy(vdpasim->config + offset, buf, len);

	if (vdpasim->dev_attr.set_config)
		vdpasim->dev_attr.set_config(vdpasim, vdpasim->config);
}

static u32 vdpasim_get_generation(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->generation;
}

static struct vdpa_iova_range vdpasim_get_iova_range(struct vdpa_device *vdpa)
{
	struct vdpa_iova_range range = {
		.first = 0ULL,
		.last = ULLONG_MAX,
	};

	return range;
}

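/*
 * Bind a virtqueue group to an address space: every virtqueue in the group
 * is re-pointed at the IOTLB of the requested ASID.
 */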
static int vdpasim_set_group_asid(struct vdpa_device *vdpa, unsigned int group,
				  unsigned int asid)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vhost_iotlb *iommu;
	int i;

	if (group > vdpasim->dev_attr.ngroups)
		return -EINVAL;

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	iommu = &vdpasim->iommu[asid];

	spin_lock(&vdpasim->lock);

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
		if (vdpasim_get_vq_group(vdpa, i) == group)
			vringh_set_iotlb(&vdpasim->vqs[i].vring, iommu,
					 &vdpasim->iommu_lock);

	spin_unlock(&vdpasim->lock);

	return 0;
}

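/*
 * Batched-mapping path: replace the whole IOTLB of an address space with
 * the translations supplied by the caller, rolling back on failure.
 */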
static int vdpasim_set_map(struct vdpa_device *vdpa, unsigned int asid,
			   struct vhost_iotlb *iotlb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vhost_iotlb_map *map;
	struct vhost_iotlb *iommu;
	u64 start = 0ULL, last = 0ULL - 1;
	int ret;

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	spin_lock(&vdpasim->iommu_lock);

	iommu = &vdpasim->iommu[asid];
	vhost_iotlb_reset(iommu);

	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		ret = vhost_iotlb_add_range(iommu, map->start,
					    map->last, map->addr, map->perm);
		if (ret)
			goto err;
	}
	spin_unlock(&vdpasim->iommu_lock);
	return 0;

err:
	vhost_iotlb_reset(iommu);
	spin_unlock(&vdpasim->iommu_lock);
	return ret;
}

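/*
 * Incremental path: add or remove a single translation in the given
 * address space.
 */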
static int vdpasim_dma_map(struct vdpa_device *vdpa, unsigned int asid,
			   u64 iova, u64 size,
			   u64 pa, u32 perm, void *opaque)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int ret;

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	spin_lock(&vdpasim->iommu_lock);
	ret = vhost_iotlb_add_range_ctx(&vdpasim->iommu[asid], iova,
					iova + size - 1, pa, perm, opaque);
	spin_unlock(&vdpasim->iommu_lock);

	return ret;
}

static int vdpasim_dma_unmap(struct vdpa_device *vdpa, unsigned int asid,
			     u64 iova, u64 size)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(&vdpasim->iommu[asid], iova, iova + size - 1);
	spin_unlock(&vdpasim->iommu_lock);

	return 0;
}

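/*
 * Release everything vdpasim_create() set up; called by the vDPA core when
 * the device's reference count drops to zero.
 */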
static void vdpasim_free(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int i;

	cancel_work_sync(&vdpasim->work);

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
		vringh_kiov_cleanup(&vdpasim->vqs[i].out_iov);
		vringh_kiov_cleanup(&vdpasim->vqs[i].in_iov);
	}

	if (vdpa_get_dma_dev(vdpa)) {
		put_iova_domain(&vdpasim->iova);
		iova_cache_put();
	}

	kvfree(vdpasim->buffer);
	vhost_iotlb_free(vdpasim->iommu);
	kfree(vdpasim->vqs);
	kfree(vdpasim->config);
}

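/*
 * Two flavours of the same ops table: the default one exposes dma_map() and
 * dma_unmap() for incremental updates, while the batch variant (selected by
 * the batch_mapping module parameter) exposes set_map() instead.
 */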
static const struct vdpa_config_ops vdpasim_config_ops = {
	.set_vq_address = vdpasim_set_vq_address,
	.set_vq_num = vdpasim_set_vq_num,
	.kick_vq = vdpasim_kick_vq,
	.set_vq_cb = vdpasim_set_vq_cb,
	.set_vq_ready = vdpasim_set_vq_ready,
	.get_vq_ready = vdpasim_get_vq_ready,
	.set_vq_state = vdpasim_set_vq_state,
	.get_vq_state = vdpasim_get_vq_state,
	.get_vq_align = vdpasim_get_vq_align,
	.get_vq_group = vdpasim_get_vq_group,
	.get_device_features = vdpasim_get_device_features,
	.set_driver_features = vdpasim_set_driver_features,
	.get_driver_features = vdpasim_get_driver_features,
	.set_config_cb = vdpasim_set_config_cb,
	.get_vq_num_max = vdpasim_get_vq_num_max,
	.get_device_id = vdpasim_get_device_id,
	.get_vendor_id = vdpasim_get_vendor_id,
	.get_status = vdpasim_get_status,
	.set_status = vdpasim_set_status,
	.reset = vdpasim_reset,
	.get_config_size = vdpasim_get_config_size,
	.get_config = vdpasim_get_config,
	.set_config = vdpasim_set_config,
	.get_generation = vdpasim_get_generation,
	.get_iova_range = vdpasim_get_iova_range,
	.set_group_asid = vdpasim_set_group_asid,
	.dma_map = vdpasim_dma_map,
	.dma_unmap = vdpasim_dma_unmap,
	.free = vdpasim_free,
};

static const struct vdpa_config_ops vdpasim_batch_config_ops = {
	.set_vq_address = vdpasim_set_vq_address,
	.set_vq_num = vdpasim_set_vq_num,
	.kick_vq = vdpasim_kick_vq,
	.set_vq_cb = vdpasim_set_vq_cb,
	.set_vq_ready = vdpasim_set_vq_ready,
	.get_vq_ready = vdpasim_get_vq_ready,
	.set_vq_state = vdpasim_set_vq_state,
	.get_vq_state = vdpasim_get_vq_state,
	.get_vq_align = vdpasim_get_vq_align,
	.get_vq_group = vdpasim_get_vq_group,
	.get_device_features = vdpasim_get_device_features,
	.set_driver_features = vdpasim_set_driver_features,
	.get_driver_features = vdpasim_get_driver_features,
	.set_config_cb = vdpasim_set_config_cb,
	.get_vq_num_max = vdpasim_get_vq_num_max,
	.get_device_id = vdpasim_get_device_id,
	.get_vendor_id = vdpasim_get_vendor_id,
	.get_status = vdpasim_get_status,
	.set_status = vdpasim_set_status,
	.reset = vdpasim_reset,
	.get_config_size = vdpasim_get_config_size,
	.get_config = vdpasim_get_config,
	.set_config = vdpasim_set_config,
	.get_generation = vdpasim_get_generation,
	.get_iova_range = vdpasim_get_iova_range,
	.set_group_asid = vdpasim_set_group_asid,
	.set_map = vdpasim_set_map,
	.free = vdpasim_free,
};

MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);