// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGIC: KVM DEVICE API
 *
 * Copyright (C) 2015 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <linux/uaccess.h>
#include <asm/kvm_mmu.h>
#include <asm/cputype.h>
#include "vgic.h"

/* common helpers */

int vgic_check_iorange(struct kvm *kvm, phys_addr_t ioaddr,
                       phys_addr_t addr, phys_addr_t alignment,
                       phys_addr_t size)
{
        if (!IS_VGIC_ADDR_UNDEF(ioaddr))
                return -EEXIST;

        if (!IS_ALIGNED(addr, alignment) || !IS_ALIGNED(size, alignment))
                return -EINVAL;

        if (addr + size < addr)
                return -EINVAL;

        if (addr & ~kvm_phys_mask(kvm) || addr + size > kvm_phys_size(kvm))
                return -E2BIG;

        return 0;
}
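
/*
 * Illustrative example (not from the source): on a VM with a 40-bit IPA
 * space, kvm_phys_size() is 1TB, so a 64KB-aligned region ending exactly
 * at the 1TB boundary passes the checks above, while one starting at or
 * beyond that boundary fails with -E2BIG, and a region whose end wraps
 * around the phys_addr_t range trips "addr + size < addr" and fails with
 * -EINVAL.
 */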

static int vgic_check_type(struct kvm *kvm, int type_needed)
{
        if (kvm->arch.vgic.vgic_model != type_needed)
                return -ENODEV;
        else
                return 0;
}

int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev_addr)
{
        struct vgic_dist *vgic = &kvm->arch.vgic;
        int r;

        mutex_lock(&kvm->lock);
        switch (FIELD_GET(KVM_ARM_DEVICE_TYPE_MASK, dev_addr->id)) {
        case KVM_VGIC_V2_ADDR_TYPE_DIST:
                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
                if (!r)
                        r = vgic_check_iorange(kvm, vgic->vgic_dist_base, dev_addr->addr,
                                               SZ_4K, KVM_VGIC_V2_DIST_SIZE);
                if (!r)
                        vgic->vgic_dist_base = dev_addr->addr;
                break;
        case KVM_VGIC_V2_ADDR_TYPE_CPU:
                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
                if (!r)
                        r = vgic_check_iorange(kvm, vgic->vgic_cpu_base, dev_addr->addr,
                                               SZ_4K, KVM_VGIC_V2_CPU_SIZE);
                if (!r)
                        vgic->vgic_cpu_base = dev_addr->addr;
                break;
        default:
                r = -ENODEV;
        }

        mutex_unlock(&kvm->lock);

        return r;
}

/**
 * kvm_vgic_addr - set or get vgic VM base addresses
 * @kvm: pointer to the vm struct
 * @attr: pointer to the attribute being retrieved/updated
 * @write: if true set the address in the VM address space, if false read the
 *         address
 *
 * Set or get the vgic base addresses for the distributor and the virtual CPU
 * interface in the VM physical address space. These addresses are properties
 * of the emulated core/SoC and therefore user space initially knows this
 * information.
 * Check them for sanity (alignment, double assignment). We can't check for
 * overlapping regions in case of a virtual GICv3 here, since we don't know
 * the number of VCPUs yet, so we defer this check to map_resources().
 */
static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool write)
{
        u64 __user *uaddr = (u64 __user *)attr->addr;
        struct vgic_dist *vgic = &kvm->arch.vgic;
        phys_addr_t *addr_ptr, alignment, size;
        u64 undef_value = VGIC_ADDR_UNDEF;
        u64 addr;
        int r;

        /* Reading a redistributor region addr implies getting the index */
        if (write || attr->attr == KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION)
                if (get_user(addr, uaddr))
                        return -EFAULT;

        mutex_lock(&kvm->lock);
        switch (attr->attr) {
        case KVM_VGIC_V2_ADDR_TYPE_DIST:
                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
                addr_ptr = &vgic->vgic_dist_base;
                alignment = SZ_4K;
                size = KVM_VGIC_V2_DIST_SIZE;
                break;
        case KVM_VGIC_V2_ADDR_TYPE_CPU:
                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
                addr_ptr = &vgic->vgic_cpu_base;
                alignment = SZ_4K;
                size = KVM_VGIC_V2_CPU_SIZE;
                break;
        case KVM_VGIC_V3_ADDR_TYPE_DIST:
                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
                addr_ptr = &vgic->vgic_dist_base;
                alignment = SZ_64K;
                size = KVM_VGIC_V3_DIST_SIZE;
                break;
        case KVM_VGIC_V3_ADDR_TYPE_REDIST: {
                struct vgic_redist_region *rdreg;

                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
                if (r)
                        break;
                if (write) {
                        r = vgic_v3_set_redist_base(kvm, 0, addr, 0);
                        goto out;
                }
                rdreg = list_first_entry_or_null(&vgic->rd_regions,
                                                 struct vgic_redist_region, list);
                if (!rdreg)
                        addr_ptr = &undef_value;
                else
                        addr_ptr = &rdreg->base;
                break;
        }
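        /*
         * Sketch of the 64-bit REDIST_REGION attribute layout assumed by the
         * mask names used below (KVM_VGIC_V3_RDIST_*_MASK in the uapi
         * headers): bits [63:52] count, bits [51:16] base, bits [15:12]
         * flags, bits [11:0] index.
         */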
        case KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION:
        {
                struct vgic_redist_region *rdreg;
                u8 index;

                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
                if (r)
                        break;

                index = addr & KVM_VGIC_V3_RDIST_INDEX_MASK;

                if (write) {
                        gpa_t base = addr & KVM_VGIC_V3_RDIST_BASE_MASK;
                        u32 count = FIELD_GET(KVM_VGIC_V3_RDIST_COUNT_MASK, addr);
                        u8 flags = FIELD_GET(KVM_VGIC_V3_RDIST_FLAGS_MASK, addr);

                        if (!count || flags)
                                r = -EINVAL;
                        else
                                r = vgic_v3_set_redist_base(kvm, index,
                                                            base, count);
                        goto out;
                }

                rdreg = vgic_v3_rdist_region_from_index(kvm, index);
                if (!rdreg) {
                        r = -ENOENT;
                        goto out;
                }

                addr = index;
                addr |= rdreg->base;
                addr |= (u64)rdreg->count << KVM_VGIC_V3_RDIST_COUNT_SHIFT;
                goto out;
        }
        default:
                r = -ENODEV;
        }

        if (r)
                goto out;

        if (write) {
                r = vgic_check_iorange(kvm, *addr_ptr, addr, alignment, size);
                if (!r)
                        *addr_ptr = addr;
        } else {
                addr = *addr_ptr;
        }

out:
        mutex_unlock(&kvm->lock);

        if (!r && !write)
                r = put_user(addr, uaddr);

        return r;
}
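
/*
 * A minimal userspace sketch (illustrative only, not part of this file):
 * programming the GICv3 distributor base through the handler above. Here
 * "vgic_fd" is assumed to come from KVM_CREATE_DEVICE with
 * KVM_DEV_TYPE_ARM_VGIC_V3, and "dist_base" is a 64KB-aligned guest
 * physical address:
 *
 *	__u64 dist_base = 0x8000000;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_ADDR,
 *		.attr  = KVM_VGIC_V3_ADDR_TYPE_DIST,
 *		.addr  = (__u64)&dist_base,  // pointer to the GPA, not the GPA
 *	};
 *
 *	if (ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr))
 *		perror("KVM_SET_DEVICE_ATTR");
 */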

static int vgic_set_common_attr(struct kvm_device *dev,
                                struct kvm_device_attr *attr)
{
        int r;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR:
                r = kvm_vgic_addr(dev->kvm, attr, true);
                return (r == -ENODEV) ? -ENXIO : r;
        case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
                u32 val;
                int ret = 0;

                if (get_user(val, uaddr))
                        return -EFAULT;

                /*
                 * We require:
                 * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
                 * - at most 1024 interrupts
                 * - a multiple of 32 interrupts
                 */
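                /*
                 * Worked example (illustrative): val = 96 satisfies all
                 * three constraints and yields nr_spis = 96 - 32 = 64 below.
                 */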
                if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
                    val > VGIC_MAX_RESERVED ||
                    (val & 31))
                        return -EINVAL;

                mutex_lock(&dev->kvm->lock);

                if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis)
                        ret = -EBUSY;
                else
                        dev->kvm->arch.vgic.nr_spis =
                                val - VGIC_NR_PRIVATE_IRQS;

                mutex_unlock(&dev->kvm->lock);

                return ret;
        }
        case KVM_DEV_ARM_VGIC_GRP_CTRL: {
                switch (attr->attr) {
                case KVM_DEV_ARM_VGIC_CTRL_INIT:
                        mutex_lock(&dev->kvm->lock);
                        r = vgic_init(dev->kvm);
                        mutex_unlock(&dev->kvm->lock);
                        return r;
                case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
                        /*
                         * OK, this one isn't common at all, but we
                         * want to handle all control group attributes
                         * in a single place.
                         */
                        if (vgic_check_type(dev->kvm, KVM_DEV_TYPE_ARM_VGIC_V3))
                                return -ENXIO;
                        mutex_lock(&dev->kvm->lock);

                        if (!lock_all_vcpus(dev->kvm)) {
                                mutex_unlock(&dev->kvm->lock);
                                return -EBUSY;
                        }
                        r = vgic_v3_save_pending_tables(dev->kvm);
                        unlock_all_vcpus(dev->kvm);
                        mutex_unlock(&dev->kvm->lock);
                        return r;
                }
                break;
        }
        }

        return -ENXIO;
}

static int vgic_get_common_attr(struct kvm_device *dev,
                                struct kvm_device_attr *attr)
{
        int r = -ENXIO;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR:
                r = kvm_vgic_addr(dev->kvm, attr, false);
                return (r == -ENODEV) ? -ENXIO : r;
        case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;

                r = put_user(dev->kvm->arch.vgic.nr_spis +
                             VGIC_NR_PRIVATE_IRQS, uaddr);
                break;
        }
        }

        return r;
}

static int vgic_create(struct kvm_device *dev, u32 type)
{
        return kvm_vgic_create(dev->kvm, type);
}

static void vgic_destroy(struct kvm_device *dev)
{
        kfree(dev);
}

int kvm_register_vgic_device(unsigned long type)
{
        int ret = -ENODEV;

        switch (type) {
        case KVM_DEV_TYPE_ARM_VGIC_V2:
                ret = kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
                                              KVM_DEV_TYPE_ARM_VGIC_V2);
                break;
        case KVM_DEV_TYPE_ARM_VGIC_V3:
                ret = kvm_register_device_ops(&kvm_arm_vgic_v3_ops,
                                              KVM_DEV_TYPE_ARM_VGIC_V3);

                if (ret)
                        break;
                ret = kvm_vgic_register_its_device();
                break;
        }

        return ret;
}

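/*
 * Sketch of the GICv2 attr encoding parsed below, assuming the uapi mask
 * definitions: bits [39:32] hold the vcpu index
 * (KVM_DEV_ARM_VGIC_CPUID_MASK) and bits [31:0] the register offset
 * (KVM_DEV_ARM_VGIC_OFFSET_MASK).
 */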
int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
                       struct vgic_reg_attr *reg_attr)
{
        int cpuid;

        cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
                KVM_DEV_ARM_VGIC_CPUID_SHIFT;

        if (cpuid >= atomic_read(&dev->kvm->online_vcpus))
                return -EINVAL;

        reg_attr->vcpu = kvm_get_vcpu(dev->kvm, cpuid);
        reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;

        return 0;
}

/* unlocks vcpus from @vcpu_lock_idx and smaller */
static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
{
        struct kvm_vcpu *tmp_vcpu;

        for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
                tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
                mutex_unlock(&tmp_vcpu->mutex);
        }
}

void unlock_all_vcpus(struct kvm *kvm)
{
        unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
}

/* Returns true if all vcpus were locked, false otherwise */
bool lock_all_vcpus(struct kvm *kvm)
{
        struct kvm_vcpu *tmp_vcpu;
        unsigned long c;

        /*
         * Any time a vcpu is run, vcpu_load is called which tries to grab the
         * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
         * that no other VCPUs are run and fiddle with the vgic state while we
         * access it.
         */
        kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
                if (!mutex_trylock(&tmp_vcpu->mutex)) {
                        unlock_vcpus(kvm, c - 1);
                        return false;
                }
        }

        return true;
}

/**
 * vgic_v2_attr_regs_access - allows user space to access VGIC v2 state
 *
 * @dev: kvm device handle
 * @attr: kvm device attribute
 * @is_write: true if userspace is writing a register
 */
static int vgic_v2_attr_regs_access(struct kvm_device *dev,
                                    struct kvm_device_attr *attr,
                                    bool is_write)
{
        u32 __user *uaddr = (u32 __user *)(unsigned long)attr->addr;
        struct vgic_reg_attr reg_attr;
        gpa_t addr;
        struct kvm_vcpu *vcpu;
        int ret;
        u32 val;

        ret = vgic_v2_parse_attr(dev, attr, &reg_attr);
        if (ret)
                return ret;

        vcpu = reg_attr.vcpu;
        addr = reg_attr.addr;

        if (is_write)
                if (get_user(val, uaddr))
                        return -EFAULT;

        mutex_lock(&dev->kvm->lock);

        ret = vgic_init(dev->kvm);
        if (ret)
                goto out;

        if (!lock_all_vcpus(dev->kvm)) {
                ret = -EBUSY;
                goto out;
        }

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, &val);
                break;
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
                ret = vgic_v2_dist_uaccess(vcpu, is_write, addr, &val);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        unlock_all_vcpus(dev->kvm);
out:
        mutex_unlock(&dev->kvm->lock);

        if (!ret && !is_write)
                ret = put_user(val, uaddr);

        return ret;
}

static int vgic_v2_set_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                return vgic_v2_attr_regs_access(dev, attr, true);
        default:
                return vgic_set_common_attr(dev, attr);
        }
}

static int vgic_v2_get_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                return vgic_v2_attr_regs_access(dev, attr, false);
        default:
                return vgic_get_common_attr(dev, attr);
        }
}

static int vgic_v2_has_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR:
                switch (attr->attr) {
                case KVM_VGIC_V2_ADDR_TYPE_DIST:
                case KVM_VGIC_V2_ADDR_TYPE_CPU:
                        return 0;
                }
                break;
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                return vgic_v2_has_attr_regs(dev, attr);
        case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
                return 0;
        case KVM_DEV_ARM_VGIC_GRP_CTRL:
                switch (attr->attr) {
                case KVM_DEV_ARM_VGIC_CTRL_INIT:
                        return 0;
                }
        }
        return -ENXIO;
}

struct kvm_device_ops kvm_arm_vgic_v2_ops = {
        .name = "kvm-arm-vgic-v2",
        .create = vgic_create,
        .destroy = vgic_destroy,
        .set_attr = vgic_v2_set_attr,
        .get_attr = vgic_v2_get_attr,
        .has_attr = vgic_v2_has_attr,
};

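/*
 * Sketch of the GICv3 attr encoding parsed below, assuming the uapi mask
 * definitions: for the redistributor and CPU sysreg groups, bits [63:32]
 * carry the target vCPU's MPIDR affinity fields and bits [31:0] the
 * register offset or instruction encoding; the distributor group carries
 * no MPIDR, hence the vcpu0 fallback.
 */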
int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
                       struct vgic_reg_attr *reg_attr)
{
        unsigned long vgic_mpidr, mpidr_reg;

        /*
         * For KVM_DEV_ARM_VGIC_GRP_DIST_REGS group,
         * attr might not hold MPIDR. Hence assume vcpu0.
         */
        if (attr->group != KVM_DEV_ARM_VGIC_GRP_DIST_REGS) {
                vgic_mpidr = (attr->attr & KVM_DEV_ARM_VGIC_V3_MPIDR_MASK) >>
                             KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT;

                mpidr_reg = VGIC_TO_MPIDR(vgic_mpidr);
                reg_attr->vcpu = kvm_mpidr_to_vcpu(dev->kvm, mpidr_reg);
        } else {
                reg_attr->vcpu = kvm_get_vcpu(dev->kvm, 0);
        }

        if (!reg_attr->vcpu)
                return -EINVAL;

        reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;

        return 0;
}

/*
 * vgic_v3_attr_regs_access - allows user space to access VGIC v3 state
 *
 * @dev: kvm device handle
 * @attr: kvm device attribute
 * @is_write: true if userspace is writing a register
 */
static int vgic_v3_attr_regs_access(struct kvm_device *dev,
                                    struct kvm_device_attr *attr,
                                    bool is_write)
{
        struct vgic_reg_attr reg_attr;
        gpa_t addr;
        struct kvm_vcpu *vcpu;
        bool uaccess;
        u32 val;
        int ret;

        ret = vgic_v3_parse_attr(dev, attr, &reg_attr);
        if (ret)
                return ret;

        vcpu = reg_attr.vcpu;
        addr = reg_attr.addr;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
                /* Sysregs uaccess is performed by the sysreg handling code */
                uaccess = false;
                break;
        default:
                uaccess = true;
        }

        if (uaccess && is_write) {
                u32 __user *uaddr = (u32 __user *)(unsigned long)attr->addr;

                if (get_user(val, uaddr))
                        return -EFAULT;
        }

        mutex_lock(&dev->kvm->lock);

        if (unlikely(!vgic_initialized(dev->kvm))) {
                ret = -EBUSY;
                goto out;
        }

        if (!lock_all_vcpus(dev->kvm)) {
                ret = -EBUSY;
                goto out;
        }

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
                ret = vgic_v3_dist_uaccess(vcpu, is_write, addr, &val);
                break;
        case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
                ret = vgic_v3_redist_uaccess(vcpu, is_write, addr, &val);
                break;
        case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
                ret = vgic_v3_cpu_sysregs_uaccess(vcpu, attr, is_write);
                break;
        case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
                unsigned int info, intid;

                info = (attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
                        KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT;
                if (info == VGIC_LEVEL_INFO_LINE_LEVEL) {
                        intid = attr->attr &
                                KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK;
                        ret = vgic_v3_line_level_info_uaccess(vcpu, is_write,
                                                              intid, &val);
                } else {
                        ret = -EINVAL;
                }
                break;
        }
        default:
                ret = -EINVAL;
                break;
        }

        unlock_all_vcpus(dev->kvm);
out:
        mutex_unlock(&dev->kvm->lock);

        if (!ret && uaccess && !is_write) {
                u32 __user *uaddr = (u32 __user *)(unsigned long)attr->addr;

                ret = put_user(val, uaddr);
        }

        return ret;
}

static int vgic_v3_set_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
        case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO:
                return vgic_v3_attr_regs_access(dev, attr, true);
        default:
                return vgic_set_common_attr(dev, attr);
        }
}

static int vgic_v3_get_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
        case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO:
                return vgic_v3_attr_regs_access(dev, attr, false);
        default:
                return vgic_get_common_attr(dev, attr);
        }
}

static int vgic_v3_has_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR:
                switch (attr->attr) {
                case KVM_VGIC_V3_ADDR_TYPE_DIST:
                case KVM_VGIC_V3_ADDR_TYPE_REDIST:
                case KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION:
                        return 0;
                }
                break;
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
                return vgic_v3_has_attr_regs(dev, attr);
        case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
                return 0;
        case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
                if (((attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
                      KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) ==
                      VGIC_LEVEL_INFO_LINE_LEVEL)
                        return 0;
                break;
        }
        case KVM_DEV_ARM_VGIC_GRP_CTRL:
                switch (attr->attr) {
                case KVM_DEV_ARM_VGIC_CTRL_INIT:
                        return 0;
                case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
                        return 0;
                }
        }
        return -ENXIO;
}

struct kvm_device_ops kvm_arm_vgic_v3_ops = {
        .name = "kvm-arm-vgic-v3",
        .create = vgic_create,
        .destroy = vgic_destroy,
        .set_attr = vgic_v3_set_attr,
        .get_attr = vgic_v3_get_attr,
        .has_attr = vgic_v3_has_attr,
};