GNU Linux-libre 4.19.286-gnu1
virt/kvm/arm/vgic/vgic-mmio-v2.c
/*
 * VGICv2 MMIO handling functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
13
14 #include <linux/irqchip/arm-gic.h>
15 #include <linux/kvm.h>
16 #include <linux/kvm_host.h>
17 #include <linux/nospec.h>
18
19 #include <kvm/iodev.h>
20 #include <kvm/arm_vgic.h>
21
22 #include "vgic.h"
23 #include "vgic-mmio.h"
24
/*
 * The Revision field in the IIDR has the following meanings:
 *
 * Revision 1: Report GICv2 interrupts as group 0 instead of group 1
 * Revision 2: Interrupt groups are guest-configurable and signaled using
 *             their configured groups.
 */

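/*
 * Handle reads of GICD_CTLR, GICD_TYPER and GICD_IIDR. GICD_TYPER
 * encodes both the number of implemented interrupt lines (in units of
 * 32) and the number of vCPUs, each field as "value minus one".
 */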
static unsigned long vgic_mmio_read_v2_misc(struct kvm_vcpu *vcpu,
                                            gpa_t addr, unsigned int len)
{
        struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
        u32 value;

        switch (addr & 0x0c) {
        case GIC_DIST_CTRL:
                value = vgic->enabled ? GICD_ENABLE : 0;
                break;
        case GIC_DIST_CTR:
                value = vgic->nr_spis + VGIC_NR_PRIVATE_IRQS;
                value = (value >> 5) - 1;
                value |= (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
                break;
        case GIC_DIST_IIDR:
                value = (PRODUCT_ID_KVM << GICD_IIDR_PRODUCT_ID_SHIFT) |
                        (vgic->implementation_rev << GICD_IIDR_REVISION_SHIFT) |
                        (IMPLEMENTER_ARM << GICD_IIDR_IMPLEMENTER_SHIFT);
                break;
        default:
                return 0;
        }

        return value;
}

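/*
 * Handle guest writes to the same register range: only GICD_CTLR is
 * writable. Enabling the distributor kicks all vCPUs so that any
 * interrupts that became deliverable get injected.
 */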
static void vgic_mmio_write_v2_misc(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len,
                                    unsigned long val)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        bool was_enabled = dist->enabled;

        switch (addr & 0x0c) {
        case GIC_DIST_CTRL:
                dist->enabled = val & GICD_ENABLE;
                if (!was_enabled && dist->enabled)
                        vgic_kick_vcpus(vcpu->kvm);
                break;
        case GIC_DIST_CTR:
        case GIC_DIST_IIDR:
                /* Nothing to do */
                return;
        }
}

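/*
 * Userspace variant of the misc-register write handler. Writing back
 * the exact GICD_IIDR value that we reported acts as an opt-in for
 * user-writable interrupt groups, as explained in the comment below.
 */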
static int vgic_mmio_uaccess_write_v2_misc(struct kvm_vcpu *vcpu,
                                           gpa_t addr, unsigned int len,
                                           unsigned long val)
{
        switch (addr & 0x0c) {
        case GIC_DIST_IIDR:
                if (val != vgic_mmio_read_v2_misc(vcpu, addr, len))
                        return -EINVAL;

                /*
                 * If we observe a write to GICD_IIDR we know that userspace
                 * has been updated and has had a chance to cope with older
                 * kernels (VGICv2 IIDR.Revision == 0) incorrectly reporting
                 * interrupts as group 1, and therefore we now allow groups to
                 * be user writable.  Doing this by default would break
                 * migration from old kernels to new kernels with legacy
                 * userspace.
                 */
                vcpu->kvm->arch.vgic.v2_groups_user_writable = true;
                return 0;
        }

        vgic_mmio_write_v2_misc(vcpu, addr, len, val);
        return 0;
}

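/*
 * GICD_IGROUPR writes from userspace only take effect once userspace
 * has opted in via GICD_IIDR above; otherwise they are ignored.
 */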
static int vgic_mmio_uaccess_write_v2_group(struct kvm_vcpu *vcpu,
                                            gpa_t addr, unsigned int len,
                                            unsigned long val)
{
        if (vcpu->kvm->arch.vgic.v2_groups_user_writable)
                vgic_mmio_write_group(vcpu, addr, len, val);

        return 0;
}

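/*
 * Handle writes to GICD_SGIR: decode the target list filter and the
 * CPU target list, then mark the SGI pending on each targeted vCPU,
 * recording the requesting vCPU in the per-SGI source bitmap.
 */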
static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
                                 gpa_t addr, unsigned int len,
                                 unsigned long val)
{
        int nr_vcpus = atomic_read(&source_vcpu->kvm->online_vcpus);
        int intid = val & 0xf;
        int targets = (val >> 16) & 0xff;
        int mode = (val >> 24) & 0x03;
        int c;
        struct kvm_vcpu *vcpu;
        unsigned long flags;

        switch (mode) {
        case 0x0:               /* as specified by targets */
                break;
        case 0x1:
                targets = (1U << nr_vcpus) - 1;                 /* all, ... */
                targets &= ~(1U << source_vcpu->vcpu_id);       /* but self */
                break;
        case 0x2:               /* this very vCPU only */
                targets = (1U << source_vcpu->vcpu_id);
                break;
        case 0x3:               /* reserved */
                return;
        }

        kvm_for_each_vcpu(c, vcpu, source_vcpu->kvm) {
                struct vgic_irq *irq;

                if (!(targets & (1U << c)))
                        continue;

                irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);

                spin_lock_irqsave(&irq->irq_lock, flags);
                irq->pending_latch = true;
                irq->source |= 1U << source_vcpu->vcpu_id;

                vgic_queue_irq_unlock(source_vcpu->kvm, irq, flags);
                vgic_put_irq(source_vcpu->kvm, irq);
        }
}

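/* Handle reads of GICD_ITARGETSRn: one byte of target bits per interrupt. */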
static unsigned long vgic_mmio_read_target(struct kvm_vcpu *vcpu,
                                           gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
        int i;
        u64 val = 0;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                val |= (u64)irq->targets << (i * 8);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return val;
}

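/*
 * Handle writes to GICD_ITARGETSRn for SPIs: the written byte is
 * masked against the set of online vCPUs, and the interrupt is routed
 * to the lowest-numbered vCPU remaining in the target mask.
 */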
static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
                                   gpa_t addr, unsigned int len,
                                   unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
        u8 cpu_mask = GENMASK(atomic_read(&vcpu->kvm->online_vcpus) - 1, 0);
        int i;
        unsigned long flags;

        /* GICD_ITARGETSR[0-7] are read-only */
        if (intid < VGIC_NR_PRIVATE_IRQS)
                return;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
                int target;

                spin_lock_irqsave(&irq->irq_lock, flags);

                irq->targets = (val >> (i * 8)) & cpu_mask;
                target = irq->targets ? __ffs(irq->targets) : 0;
                irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);

                spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

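/* Handle reads of GICD_(S|C)PENDSGIRn: one source byte per SGI. */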
static unsigned long vgic_mmio_read_sgipend(struct kvm_vcpu *vcpu,
                                            gpa_t addr, unsigned int len)
{
        u32 intid = addr & 0x0f;
        int i;
        u64 val = 0;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                val |= (u64)irq->source << (i * 8);

                vgic_put_irq(vcpu->kvm, irq);
        }
        return val;
}

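/*
 * Handle writes to GICD_CPENDSGIRn: clear the written per-source
 * pending bits, and drop the pending latch once no source is left.
 */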
static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len,
                                     unsigned long val)
{
        u32 intid = addr & 0x0f;
        int i;
        unsigned long flags;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                spin_lock_irqsave(&irq->irq_lock, flags);

                irq->source &= ~((val >> (i * 8)) & 0xff);
                if (!irq->source)
                        irq->pending_latch = false;

                spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

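/*
 * Handle writes to GICD_SPENDSGIRn: set the written per-source pending
 * bits and queue the SGI if it now has at least one pending source.
 */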
static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len,
                                     unsigned long val)
{
        u32 intid = addr & 0x0f;
        int i;
        unsigned long flags;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                spin_lock_irqsave(&irq->irq_lock, flags);

                irq->source |= (val >> (i * 8)) & 0xff;

                if (irq->source) {
                        irq->pending_latch = true;
                        vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                } else {
                        spin_unlock_irqrestore(&irq->irq_lock, flags);
                }
                vgic_put_irq(vcpu->kvm, irq);
        }
}

#define GICC_ARCH_VERSION_V2    0x2

/* These are for userland accesses only, there is no guest-facing emulation. */
static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
                                           gpa_t addr, unsigned int len)
{
        struct vgic_vmcr vmcr;
        u32 val;

        vgic_get_vmcr(vcpu, &vmcr);

        switch (addr & 0xff) {
        case GIC_CPU_CTRL:
                val = vmcr.grpen0 << GIC_CPU_CTRL_EnableGrp0_SHIFT;
                val |= vmcr.grpen1 << GIC_CPU_CTRL_EnableGrp1_SHIFT;
                val |= vmcr.ackctl << GIC_CPU_CTRL_AckCtl_SHIFT;
                val |= vmcr.fiqen << GIC_CPU_CTRL_FIQEn_SHIFT;
                val |= vmcr.cbpr << GIC_CPU_CTRL_CBPR_SHIFT;
                val |= vmcr.eoim << GIC_CPU_CTRL_EOImodeNS_SHIFT;

                break;
        case GIC_CPU_PRIMASK:
                /*
                 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
                 * PMR field as GICH_VMCR.VMPriMask rather than
                 * GICC_PMR.Priority, so we expose the upper five bits of
                 * the priority mask to userspace using the lower bits of
                 * the unsigned long.
                 */
                val = (vmcr.pmr & GICV_PMR_PRIORITY_MASK) >>
                        GICV_PMR_PRIORITY_SHIFT;
                break;
        case GIC_CPU_BINPOINT:
                val = vmcr.bpr;
                break;
        case GIC_CPU_ALIAS_BINPOINT:
                val = vmcr.abpr;
                break;
        case GIC_CPU_IDENT:
                val = ((PRODUCT_ID_KVM << 20) |
                       (GICC_ARCH_VERSION_V2 << 16) |
                       IMPLEMENTER_ARM);
                break;
        default:
                return 0;
        }

        return val;
}

static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
                                   gpa_t addr, unsigned int len,
                                   unsigned long val)
{
        struct vgic_vmcr vmcr;

        vgic_get_vmcr(vcpu, &vmcr);

        switch (addr & 0xff) {
        case GIC_CPU_CTRL:
                vmcr.grpen0 = !!(val & GIC_CPU_CTRL_EnableGrp0);
                vmcr.grpen1 = !!(val & GIC_CPU_CTRL_EnableGrp1);
                vmcr.ackctl = !!(val & GIC_CPU_CTRL_AckCtl);
                vmcr.fiqen = !!(val & GIC_CPU_CTRL_FIQEn);
                vmcr.cbpr = !!(val & GIC_CPU_CTRL_CBPR);
                vmcr.eoim = !!(val & GIC_CPU_CTRL_EOImodeNS);

                break;
        case GIC_CPU_PRIMASK:
                /*
                 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
                 * PMR field as GICH_VMCR.VMPriMask rather than
                 * GICC_PMR.Priority, so we expose the upper five bits of
                 * the priority mask to userspace using the lower bits of
                 * the unsigned long.
                 */
                vmcr.pmr = (val << GICV_PMR_PRIORITY_SHIFT) &
                        GICV_PMR_PRIORITY_MASK;
                break;
        case GIC_CPU_BINPOINT:
                vmcr.bpr = val;
                break;
        case GIC_CPU_ALIAS_BINPOINT:
                vmcr.abpr = val;
                break;
        }

        vgic_set_vmcr(vcpu, &vmcr);
}

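/*
 * Handle userspace accesses to GICC_APRn. On a GICv2 host this is the
 * single GICH_APR; on a GICv3 host running a GICv2 guest it is backed
 * by ICH_AP1Rn, with the index sanitised against speculative misuse.
 */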
static unsigned long vgic_mmio_read_apr(struct kvm_vcpu *vcpu,
                                        gpa_t addr, unsigned int len)
{
        int n; /* which APRn is this */

        n = (addr >> 2) & 0x3;

        if (kvm_vgic_global_state.type == VGIC_V2) {
                /* GICv2 hardware systems support max. 32 groups */
                if (n != 0)
                        return 0;
                return vcpu->arch.vgic_cpu.vgic_v2.vgic_apr;
        } else {
                struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

                if (n > vgic_v3_max_apr_idx(vcpu))
                        return 0;

                n = array_index_nospec(n, 4);

                /* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
                return vgicv3->vgic_ap1r[n];
        }
}

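/* The write side of the GICC_APRn userspace accessor above. */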
static void vgic_mmio_write_apr(struct kvm_vcpu *vcpu,
                                gpa_t addr, unsigned int len,
                                unsigned long val)
{
        int n; /* which APRn is this */

        n = (addr >> 2) & 0x3;

        if (kvm_vgic_global_state.type == VGIC_V2) {
                /* GICv2 hardware systems support max. 32 groups */
                if (n != 0)
                        return;
                vcpu->arch.vgic_cpu.vgic_v2.vgic_apr = val;
        } else {
                struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

                if (n > vgic_v3_max_apr_idx(vcpu))
                        return;

                n = array_index_nospec(n, 4);

                /* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
                vgicv3->vgic_ap1r[n] = val;
        }
}

static const struct vgic_register_region vgic_v2_dist_registers[] = {
        REGISTER_DESC_WITH_LENGTH_UACCESS(GIC_DIST_CTRL,
                vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc,
                NULL, vgic_mmio_uaccess_write_v2_misc,
                12, VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_IGROUP,
                vgic_mmio_read_group, vgic_mmio_write_group,
                NULL, vgic_mmio_uaccess_write_v2_group, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_SET,
                vgic_mmio_read_enable, vgic_mmio_write_senable, NULL, NULL, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_CLEAR,
                vgic_mmio_read_enable, vgic_mmio_write_cenable, NULL, NULL, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET,
                vgic_mmio_read_pending, vgic_mmio_write_spending, NULL, NULL, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR,
                vgic_mmio_read_pending, vgic_mmio_write_cpending, NULL, NULL, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
                vgic_mmio_read_active, vgic_mmio_write_sactive,
                NULL, vgic_mmio_uaccess_write_sactive, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR,
                vgic_mmio_read_active, vgic_mmio_write_cactive,
                NULL, vgic_mmio_uaccess_write_cactive, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI,
                vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
                8, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_TARGET,
                vgic_mmio_read_target, vgic_mmio_write_target, NULL, NULL, 8,
                VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_CONFIG,
                vgic_mmio_read_config, vgic_mmio_write_config, NULL, NULL, 2,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_DIST_SOFTINT,
                vgic_mmio_read_raz, vgic_mmio_write_sgir, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_CLEAR,
                vgic_mmio_read_sgipend, vgic_mmio_write_sgipendc, 16,
                VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
        REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_SET,
                vgic_mmio_read_sgipend, vgic_mmio_write_sgipends, 16,
                VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
};

static const struct vgic_register_region vgic_v2_cpu_registers[] = {
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_CTRL,
                vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_PRIMASK,
                vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_BINPOINT,
                vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_ALIAS_BINPOINT,
                vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_ACTIVEPRIO,
                vgic_mmio_read_apr, vgic_mmio_write_apr, 16,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_IDENT,
                vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
                VGIC_ACCESS_32bit),
};

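/*
 * Initialise the distributor MMIO device and return the size of its
 * register frame (4K).
 */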
unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev)
{
        dev->regions = vgic_v2_dist_registers;
        dev->nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);

        kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);

        return SZ_4K;
}

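/*
 * Check that a device attribute targets a valid, 32-bit aligned offset
 * within the distributor or CPU interface register space.
 */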
int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
{
        const struct vgic_register_region *region;
        struct vgic_io_device iodev;
        struct vgic_reg_attr reg_attr;
        struct kvm_vcpu *vcpu;
        gpa_t addr;
        int ret;

        ret = vgic_v2_parse_attr(dev, attr, &reg_attr);
        if (ret)
                return ret;

        vcpu = reg_attr.vcpu;
        addr = reg_attr.addr;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
                iodev.regions = vgic_v2_dist_registers;
                iodev.nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);
                iodev.base_addr = 0;
                break;
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                iodev.regions = vgic_v2_cpu_registers;
                iodev.nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers);
                iodev.base_addr = 0;
                break;
        default:
                return -ENXIO;
        }

        /* We only support aligned 32-bit accesses. */
        if (addr & 3)
                return -ENXIO;

        region = vgic_get_mmio_region(vcpu, &iodev, addr, sizeof(u32));
        if (!region)
                return -ENXIO;

        return 0;
}

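/* Userspace access to CPU interface state, routed through a local iodev. */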
int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write,
                          int offset, u32 *val)
{
        struct vgic_io_device dev = {
                .regions = vgic_v2_cpu_registers,
                .nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers),
                .iodev_type = IODEV_CPUIF,
        };

        return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}

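/* Userspace access to distributor state, routed through a local iodev. */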
int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
                         int offset, u32 *val)
{
        struct vgic_io_device dev = {
                .regions = vgic_v2_dist_registers,
                .nr_regions = ARRAY_SIZE(vgic_v2_dist_registers),
                .iodev_type = IODEV_DIST,
        };

        return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}