/*
 * GICv3 ITS emulation
 *
 * Copyright (C) 2015,2016 ARM Ltd.
 * Author: Andre Przywara <andre.przywara@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/list_sort.h>

#include <linux/irqchip/arm-gic-v3.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
#include "vgic-mmio.h"
static int vgic_its_save_tables_v0(struct vgic_its *its);
static int vgic_its_restore_tables_v0(struct vgic_its *its);
static int vgic_its_commit_v0(struct vgic_its *its);
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
			     struct kvm_vcpu *filter_vcpu, bool needs_inv);
/*
 * Creates a new (reference to a) struct vgic_irq for a given LPI.
 * If this LPI is already mapped on another ITS, we increase its refcount
 * and return a pointer to the existing structure.
 * If this is a "new" LPI, we allocate and initialize a new struct vgic_irq.
 * This function returns a pointer to the _unlocked_ structure.
 */
static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
				     struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
	unsigned long flags;
	int ret;

	/* In this case there is no put, since we keep the reference. */
	if (irq)
		return irq;

	irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL);
	if (!irq)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&irq->lpi_list);
	INIT_LIST_HEAD(&irq->ap_list);
	spin_lock_init(&irq->irq_lock);

	irq->config = VGIC_CONFIG_EDGE;
	kref_init(&irq->refcount);
	irq->intid = intid;
	irq->target_vcpu = vcpu;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

	/*
	 * There could be a race with another vgic_add_lpi(), so we need to
	 * check that we don't add a second list entry with the same LPI.
	 */
	list_for_each_entry(oldirq, &dist->lpi_list_head, lpi_list) {
		if (oldirq->intid != intid)
			continue;

		/* Someone was faster with adding this LPI, lets use that. */
		kfree(irq);
		irq = oldirq;

		/*
		 * This increases the refcount, the caller is expected to
		 * call vgic_put_irq() on the returned pointer once it's
		 * finished with the IRQ.
		 */
		vgic_get_irq_kref(irq);

		goto out_unlock;
	}

	list_add_tail(&irq->lpi_list, &dist->lpi_list_head);
	dist->lpi_list_count++;

out_unlock:
	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

	/*
	 * We "cache" the configuration table entries in our struct vgic_irq's.
	 * However we only have those structs for mapped IRQs, so we read in
	 * the respective config data from memory here upon mapping the LPI.
	 *
	 * Should any of these fail, behave as if we couldn't create the LPI
	 * by dropping the refcount and returning the error.
	 */
	ret = update_lpi_config(kvm, irq, NULL, false);
	if (ret) {
		vgic_put_irq(kvm, irq);
		return ERR_PTR(ret);
	}

	ret = vgic_v3_lpi_sync_pending_status(kvm, irq);
	if (ret) {
		vgic_put_irq(kvm, irq);
		return ERR_PTR(ret);
	}

	return irq;
}
struct its_device {
	struct list_head dev_list;

	/* the head for the list of ITTEs */
	struct list_head itt_head;
	u32 num_eventid_bits;
	gpa_t itt_addr;
	u32 device_id;
};

#define COLLECTION_NOT_MAPPED ((u32)~0)

struct its_collection {
	struct list_head coll_list;

	u32 collection_id;
	u32 target_addr;
};

#define its_is_collection_mapped(coll) ((coll) && \
	((coll)->target_addr != COLLECTION_NOT_MAPPED))

struct its_ite {
	struct list_head ite_list;

	struct vgic_irq *irq;
	struct its_collection *collection;
	u32 event_id;
};
/**
 * struct vgic_its_abi - ITS abi ops and settings
 * @cte_esz: collection table entry size
 * @dte_esz: device table entry size
 * @ite_esz: interrupt translation table entry size
 * @save_tables: save the ITS tables into guest RAM
 * @restore_tables: restore the ITS internal structs from tables
 *  stored in guest RAM
 * @commit: initialize the registers which expose the ABI settings,
 *  especially the entry sizes
 */
struct vgic_its_abi {
	int cte_esz;
	int dte_esz;
	int ite_esz;

	int (*save_tables)(struct vgic_its *its);
	int (*restore_tables)(struct vgic_its *its);
	int (*commit)(struct vgic_its *its);
};

#define ABI_0_ESZ	8
#define ESZ_MAX		ABI_0_ESZ
static const struct vgic_its_abi its_table_abi_versions[] = {
	[0] = {
	 .cte_esz = ABI_0_ESZ,
	 .dte_esz = ABI_0_ESZ,
	 .ite_esz = ABI_0_ESZ,
	 .save_tables = vgic_its_save_tables_v0,
	 .restore_tables = vgic_its_restore_tables_v0,
	 .commit = vgic_its_commit_v0,
	},
};

#define NR_ITS_ABIS	ARRAY_SIZE(its_table_abi_versions)

inline const struct vgic_its_abi *vgic_its_get_abi(struct vgic_its *its)
{
	return &its_table_abi_versions[its->abi_rev];
}
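
/*
 * Note: revision 0 of the ABI (the only one so far) uses 8-byte entries
 * (ABI_0_ESZ) for the device, collection and translation tables alike.
 * The commit() callback propagates these sizes into the read-only fields
 * of GITS_TYPER and GITS_BASER<n> that the guest observes.
 */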
static int vgic_its_set_abi(struct vgic_its *its, u32 rev)
{
	const struct vgic_its_abi *abi;

	its->abi_rev = rev;
	abi = vgic_its_get_abi(its);
	return abi->commit(its);
}
/*
 * Finds and returns a device in the device table for an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_device *find_its_device(struct vgic_its *its, u32 device_id)
{
	struct its_device *device;

	list_for_each_entry(device, &its->device_list, dev_list)
		if (device_id == device->device_id)
			return device;

	return NULL;
}
/*
 * Finds and returns an interrupt translation table entry (ITTE) for a given
 * Device ID/Event ID pair on an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_ite *find_ite(struct vgic_its *its, u32 device_id,
				u32 event_id)
{
	struct its_device *device;
	struct its_ite *ite;

	device = find_its_device(its, device_id);
	if (device == NULL)
		return NULL;

	list_for_each_entry(ite, &device->itt_head, ite_list)
		if (ite->event_id == event_id)
			return ite;

	return NULL;
}
/* To be used as an iterator this macro misses the enclosing parentheses */
#define for_each_lpi_its(dev, ite, its) \
	list_for_each_entry(dev, &(its)->device_list, dev_list) \
		list_for_each_entry(ite, &(dev)->itt_head, ite_list)
/*
 * We only implement 48 bits of PA at the moment, although the ITS
 * supports more. Let's be restrictive here.
 */
#define BASER_ADDRESS(x)	((x) & GENMASK_ULL(47, 16))
#define CBASER_ADDRESS(x)	((x) & GENMASK_ULL(47, 12))

#define GIC_LPI_OFFSET 8192

#define VITS_TYPER_IDBITS 16
#define VITS_TYPER_DEVBITS 16
#define VITS_DTE_MAX_DEVID_OFFSET	(BIT(14) - 1)
#define VITS_ITE_MAX_EVENTID_OFFSET	(BIT(16) - 1)
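
/*
 * For illustration: LPIs start at INTID 8192 (GIC_LPI_OFFSET), so the
 * configuration byte for INTID n lives at offset (n - GIC_LPI_OFFSET)
 * from the PROPBASER address, which is how update_lpi_config() below
 * indexes the property table.
 */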
/*
 * Finds and returns a collection in the ITS collection table.
 * Must be called with the its_lock mutex held.
 */
static struct its_collection *find_collection(struct vgic_its *its, int coll_id)
{
	struct its_collection *collection;

	list_for_each_entry(collection, &its->collection_list, coll_list) {
		if (coll_id == collection->collection_id)
			return collection;
	}

	return NULL;
}
#define LPI_PROP_ENABLE_BIT(p)	((p) & LPI_PROP_ENABLED)
#define LPI_PROP_PRIORITY(p)	((p) & 0xfc)
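
/*
 * Per the GICv3 architecture, an LPI configuration byte holds the
 * priority in bits [7:2] (hence the 0xfc mask) and the enable bit in
 * bit [0] (LPI_PROP_ENABLED); bit [1] is reserved.
 */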
/*
 * Reads the configuration data for a given LPI from guest memory and
 * updates the fields in struct vgic_irq.
 * If filter_vcpu is not NULL, applies only if the IRQ is targeting this
 * VCPU. Unconditionally applies if filter_vcpu is NULL.
 */
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
			     struct kvm_vcpu *filter_vcpu, bool needs_inv)
{
	u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
	u8 prop;
	int ret;
	unsigned long flags;

	ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
				  &prop, 1);
	if (ret)
		return ret;

	spin_lock_irqsave(&irq->irq_lock, flags);

	if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
		irq->priority = LPI_PROP_PRIORITY(prop);
		irq->enabled = LPI_PROP_ENABLE_BIT(prop);

		if (!irq->hw) {
			vgic_queue_irq_unlock(kvm, irq, flags);
			return 0;
		}
	}

	spin_unlock_irqrestore(&irq->irq_lock, flags);

	if (irq->hw)
		return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);

	return 0;
}
/*
 * Create a snapshot of the current LPIs targeting @vcpu, so that we can
 * enumerate those LPIs without holding any lock.
 * Returns their number and puts the kmalloc'ed array into intid_ptr.
 */
int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq;
	unsigned long flags;
	u32 *intids;
	int irq_count, i = 0;

	/*
	 * There is an obvious race between allocating the array and LPIs
	 * being mapped/unmapped. If we ended up here as a result of a
	 * command, we're safe (locks are held, preventing another
	 * command). If coming from another path (such as enabling LPIs),
	 * we must be careful not to overrun the array.
	 */
	irq_count = READ_ONCE(dist->lpi_list_count);
	intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL);
	if (!intids)
		return -ENOMEM;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		if (i == irq_count)
			break;
		/* We don't need to "get" the IRQ, as we hold the list lock. */
		if (vcpu && irq->target_vcpu != vcpu)
			continue;
		intids[i++] = irq->intid;
	}
	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

	*intid_ptr = intids;
	return i;
}
static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&irq->irq_lock, flags);
	irq->target_vcpu = vcpu;
	spin_unlock_irqrestore(&irq->irq_lock, flags);

	if (irq->hw) {
		struct its_vlpi_map map;

		ret = its_get_vlpi(irq->host_irq, &map);
		if (ret)
			return ret;

		map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

		ret = its_map_vlpi(irq->host_irq, &map);
	}

	return ret;
}
/*
 * Promotes the ITS view of affinity of an ITTE (which redistributor this LPI
 * is targeting) to the VGIC's view, which deals with target VCPUs.
 * Needs to be called whenever either the collection for an LPI has
 * changed or the collection itself got retargeted.
 */
static void update_affinity_ite(struct kvm *kvm, struct its_ite *ite)
{
	struct kvm_vcpu *vcpu;

	if (!its_is_collection_mapped(ite->collection))
		return;

	vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
	update_affinity(ite->irq, vcpu);
}
/*
 * Updates the target VCPU for every LPI targeting this collection.
 * Must be called with the its_lock mutex held.
 */
static void update_affinity_collection(struct kvm *kvm, struct vgic_its *its,
				       struct its_collection *coll)
{
	struct its_device *device;
	struct its_ite *ite;

	for_each_lpi_its(device, ite, its) {
		if (!ite->collection || coll != ite->collection)
			continue;

		update_affinity_ite(kvm, ite);
	}
}
static u32 max_lpis_propbaser(u64 propbaser)
{
	int nr_idbits = (propbaser & 0x1f) + 1;

	return 1U << min(nr_idbits, INTERRUPT_ID_BITS_ITS);
}
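
/*
 * Worked example: a PROPBASER IDbits field of 13 encodes 14 ID bits,
 * i.e. 16384 INTIDs, of which those at or above GIC_LPI_OFFSET are
 * LPIs. The result is capped at INTERRUPT_ID_BITS_ITS (16) bits, so a
 * guest can never claim more than 65536 INTIDs.
 */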
/*
 * Sync the pending table pending bit of LPIs targeting @vcpu
 * with our own data structures. This relies on the LPI being
 * mapped before.
 */
static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
{
	gpa_t pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
	struct vgic_irq *irq;
	int last_byte_offset = -1;
	int ret = 0;
	u32 *intids;
	int nr_irqs, i;
	unsigned long flags;
	u8 pendmask;

	nr_irqs = vgic_copy_lpi_list(vcpu->kvm, vcpu, &intids);
	if (nr_irqs < 0)
		return nr_irqs;

	for (i = 0; i < nr_irqs; i++) {
		int byte_offset, bit_nr;

		byte_offset = intids[i] / BITS_PER_BYTE;
		bit_nr = intids[i] % BITS_PER_BYTE;

		/*
		 * For contiguously allocated LPIs chances are we just read
		 * this very same byte in the last iteration. Reuse that.
		 */
		if (byte_offset != last_byte_offset) {
			ret = kvm_read_guest_lock(vcpu->kvm,
						  pendbase + byte_offset,
						  &pendmask, 1);
			if (ret) {
				kfree(intids);
				return ret;
			}
			last_byte_offset = byte_offset;
		}

		irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
		spin_lock_irqsave(&irq->irq_lock, flags);
		irq->pending_latch = pendmask & (1U << bit_nr);
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}

	kfree(intids);

	return ret;
}
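
/*
 * For illustration: the pending table holds one bit per INTID, so byte
 * n covers INTIDs 8n..8n+7. INTID 8195 therefore maps to byte_offset
 * 1024 and bit_nr 3 in the loop above.
 */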
static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 reg = GITS_TYPER_PLPIS;

	/*
	 * We use linear CPU numbers for redistributor addressing,
	 * so GITS_TYPER.PTA is 0.
	 * Also we force all PROPBASER registers to be the same, so
	 * CommonLPIAff is 0 as well.
	 * To avoid memory waste in the guest, we keep the number of IDBits and
	 * DevBits low - at least for the time being.
	 */
	reg |= GIC_ENCODE_SZ(VITS_TYPER_DEVBITS, 5) << GITS_TYPER_DEVBITS_SHIFT;
	reg |= GIC_ENCODE_SZ(VITS_TYPER_IDBITS, 5) << GITS_TYPER_IDBITS_SHIFT;
	reg |= GIC_ENCODE_SZ(abi->ite_esz, 4) << GITS_TYPER_ITT_ENTRY_SIZE_SHIFT;

	return extract_bytes(reg, addr & 7, len);
}
static unsigned long vgic_mmio_read_its_iidr(struct kvm *kvm,
					     struct vgic_its *its,
					     gpa_t addr, unsigned int len)
{
	u32 val;

	val = (its->abi_rev << GITS_IIDR_REV_SHIFT) & GITS_IIDR_REV_MASK;
	val |= (PRODUCT_ID_KVM << GITS_IIDR_PRODUCTID_SHIFT) | IMPLEMENTER_ARM;
	return val;
}
static int vgic_mmio_uaccess_write_its_iidr(struct kvm *kvm,
					    struct vgic_its *its,
					    gpa_t addr, unsigned int len,
					    unsigned long val)
{
	u32 rev = GITS_IIDR_REV(val);

	if (rev >= NR_ITS_ABIS)
		return -EINVAL;
	return vgic_its_set_abi(its, rev);
}
static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	switch (addr & 0xffff) {
	case GITS_PIDR0:
		return 0x92;	/* part number, bits[7:0] */
	case GITS_PIDR1:
		return 0xb4;	/* part number, bits[11:8] */
	case GITS_PIDR2:
		return GIC_PIDR2_ARCH_GICv3 | 0x0b;
	case GITS_PIDR4:
		return 0x40;	/* This is a 64K software visible page */
	/* The following are the ID registers for (any) GIC. */
	case GITS_CIDR0:
		return 0x0d;
	case GITS_CIDR1:
		return 0xf0;
	case GITS_CIDR2:
		return 0x05;
	case GITS_CIDR3:
		return 0xb1;
	}

	return 0;
}
int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
			 u32 devid, u32 eventid, struct vgic_irq **irq)
{
	struct kvm_vcpu *vcpu;
	struct its_ite *ite;

	if (!its->enabled)
		return -EBUSY;

	ite = find_ite(its, devid, eventid);
	if (!ite || !its_is_collection_mapped(ite->collection))
		return E_ITS_INT_UNMAPPED_INTERRUPT;

	vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
	if (!vcpu)
		return E_ITS_INT_UNMAPPED_INTERRUPT;

	if (!vcpu->arch.vgic_cpu.lpis_enabled)
		return -EBUSY;

	*irq = ite->irq;
	return 0;
}
struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi)
{
	u64 address;
	struct kvm_io_device *kvm_io_dev;
	struct vgic_io_device *iodev;

	if (!vgic_has_its(kvm))
		return ERR_PTR(-ENODEV);

	if (!(msi->flags & KVM_MSI_VALID_DEVID))
		return ERR_PTR(-EINVAL);

	address = (u64)msi->address_hi << 32 | msi->address_lo;

	kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
	if (!kvm_io_dev)
		return ERR_PTR(-EINVAL);

	if (kvm_io_dev->ops != &kvm_io_gic_ops)
		return ERR_PTR(-EINVAL);

	iodev = container_of(kvm_io_dev, struct vgic_io_device, dev);
	if (iodev->iodev_type != IODEV_ITS)
		return ERR_PTR(-EINVAL);

	return iodev->its;
}
/*
 * Find the target VCPU and the LPI number for a given devid/eventid pair
 * and make this IRQ pending, possibly injecting it.
 * Must be called with the its_lock mutex held.
 * Returns 0 on success, a positive error value for any ITS mapping
 * related errors and negative error values for generic errors.
 */
static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
				u32 devid, u32 eventid)
{
	struct vgic_irq *irq = NULL;
	unsigned long flags;
	int err;

	err = vgic_its_resolve_lpi(kvm, its, devid, eventid, &irq);
	if (err)
		return err;

	if (irq->hw)
		return irq_set_irqchip_state(irq->host_irq,
					     IRQCHIP_STATE_PENDING, true);

	spin_lock_irqsave(&irq->irq_lock, flags);
	irq->pending_latch = true;
	vgic_queue_irq_unlock(kvm, irq, flags);

	return 0;
}
/*
 * Queries the KVM IO bus framework to get the ITS pointer from the given
 * doorbell address.
 * We then call vgic_its_trigger_msi() with the decoded data.
 * According to the KVM_SIGNAL_MSI API description returns 1 on success.
 */
int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
{
	struct vgic_its *its;
	int ret;

	its = vgic_msi_to_its(kvm, msi);
	if (IS_ERR(its))
		return PTR_ERR(its);

	mutex_lock(&its->its_lock);
	ret = vgic_its_trigger_msi(kvm, its, msi->devid, msi->data);
	mutex_unlock(&its->its_lock);

	if (ret < 0)
		return ret;

	/*
	 * KVM_SIGNAL_MSI demands a return value > 0 for success and 0
	 * if the guest has blocked the MSI. So we map any LPI mapping
	 * related error to that.
	 */
	if (ret)
		return 0;
	else
		return 1;
}
/* Requires the its_lock to be held. */
static void its_free_ite(struct kvm *kvm, struct its_ite *ite)
{
	list_del(&ite->ite_list);

	/* This put matches the get in vgic_add_lpi. */
	if (ite->irq) {
		if (ite->irq->hw)
			WARN_ON(its_unmap_vlpi(ite->irq->host_irq));

		vgic_put_irq(kvm, ite->irq);
	}

	kfree(ite);
}
static u64 its_cmd_mask_field(u64 *its_cmd, int word, int shift, int size)
{
	return (le64_to_cpu(its_cmd[word]) >> shift) & (BIT_ULL(size) - 1);
}

#define its_cmd_get_command(cmd)	its_cmd_mask_field(cmd, 0,  0,  8)
#define its_cmd_get_deviceid(cmd)	its_cmd_mask_field(cmd, 0, 32, 32)
#define its_cmd_get_size(cmd)		(its_cmd_mask_field(cmd, 1,  0,  5) + 1)
#define its_cmd_get_id(cmd)		its_cmd_mask_field(cmd, 1,  0, 32)
#define its_cmd_get_physical_id(cmd)	its_cmd_mask_field(cmd, 1, 32, 32)
#define its_cmd_get_collection(cmd)	its_cmd_mask_field(cmd, 2,  0, 16)
#define its_cmd_get_ittaddr(cmd)	(its_cmd_mask_field(cmd, 2,  8, 44) << 8)
#define its_cmd_get_target_addr(cmd)	its_cmd_mask_field(cmd, 2, 16, 32)
#define its_cmd_get_validbit(cmd)	its_cmd_mask_field(cmd, 2, 63,  1)
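
/*
 * For illustration (per the GICv3 spec, to which these accessors
 * correspond): a MAPTI command encodes its opcode (0x0a) in DW0[7:0]
 * and the DeviceID in DW0[63:32]; the EventID in DW1[31:0] and the
 * physical INTID in DW1[63:32]; and the collection ID in DW2[15:0].
 */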
/*
 * The DISCARD command frees an Interrupt Translation Table Entry (ITTE).
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
				       u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_ite *ite;

	ite = find_ite(its, device_id, event_id);
	if (ite && ite->collection) {
		/*
		 * Though the spec talks about removing the pending state, we
		 * don't bother here since we clear the ITTE anyway and the
		 * pending state is a property of the ITTE struct.
		 */
		its_free_ite(kvm, ite);
		return 0;
	}

	return E_ITS_DISCARD_UNMAPPED_INTERRUPT;
}
/*
 * The MOVI command moves an ITTE to a different collection.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct kvm_vcpu *vcpu;
	struct its_ite *ite;
	struct its_collection *collection;

	ite = find_ite(its, device_id, event_id);
	if (!ite)
		return E_ITS_MOVI_UNMAPPED_INTERRUPT;

	if (!its_is_collection_mapped(ite->collection))
		return E_ITS_MOVI_UNMAPPED_COLLECTION;

	collection = find_collection(its, coll_id);
	if (!its_is_collection_mapped(collection))
		return E_ITS_MOVI_UNMAPPED_COLLECTION;

	ite->collection = collection;
	vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	return update_affinity(ite->irq, vcpu);
}
/*
 * Check whether an ID can be stored into the corresponding guest table.
 * For a direct table this is pretty easy, but gets a bit nasty for
 * indirect tables. We check whether the resulting guest physical address
 * is actually valid (covered by a memslot and guest accessible).
 * For this we have to read the respective first level entry.
 */
static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
			      gpa_t *eaddr)
{
	int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
	u64 indirect_ptr, type = GITS_BASER_TYPE(baser);
	int esz = GITS_BASER_ENTRY_SIZE(baser);
	int index, idx;
	gfn_t gfn;
	bool ret;

	switch (type) {
	case GITS_BASER_TYPE_DEVICE:
		if (id >= BIT_ULL(VITS_TYPER_DEVBITS))
			return false;
		break;
	case GITS_BASER_TYPE_COLLECTION:
		/* as GITS_TYPER.CIL == 0, ITS supports 16-bit collection ID */
		if (id >= BIT_ULL(16))
			return false;
		break;
	default:
		return false;
	}

	if (!(baser & GITS_BASER_INDIRECT)) {
		phys_addr_t addr;

		if (id >= (l1_tbl_size / esz))
			return false;

		addr = BASER_ADDRESS(baser) + id * esz;
		gfn = addr >> PAGE_SHIFT;

		if (eaddr)
			*eaddr = addr;

		goto out;
	}

	/* calculate and check the index into the 1st level */
	index = id / (SZ_64K / esz);
	if (index >= (l1_tbl_size / sizeof(u64)))
		return false;

	/* Each 1st level entry is represented by a 64-bit value. */
	if (kvm_read_guest_lock(its->dev->kvm,
			   BASER_ADDRESS(baser) + index * sizeof(indirect_ptr),
			   &indirect_ptr, sizeof(indirect_ptr)))
		return false;

	indirect_ptr = le64_to_cpu(indirect_ptr);

	/* check the valid bit of the first level entry */
	if (!(indirect_ptr & BIT_ULL(63)))
		return false;

	/*
	 * Mask the guest physical address and calculate the frame number.
	 * Any address beyond our supported 48 bits of PA will be caught
	 * by the actual check in the final step.
	 */
	indirect_ptr &= GENMASK_ULL(51, 16);

	/* Find the address of the actual entry */
	index = id % (SZ_64K / esz);
	indirect_ptr += index * esz;
	gfn = indirect_ptr >> PAGE_SHIFT;

	if (eaddr)
		*eaddr = indirect_ptr;

out:
	idx = srcu_read_lock(&its->dev->kvm->srcu);
	ret = kvm_is_visible_gfn(its->dev->kvm, gfn);
	srcu_read_unlock(&its->dev->kvm->srcu, idx);
	return ret;
}
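
/*
 * Worked example, assuming ABI v0's 8-byte device table entries: a 64K
 * page holds SZ_64K / 8 = 8192 entries, so device ID 20000 resolves to
 * L1 index 20000 / 8192 = 2 and entry 20000 % 8192 = 3616 within the
 * L2 page that the L1 entry points to.
 */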
static int vgic_its_alloc_collection(struct vgic_its *its,
				     struct its_collection **colp,
				     u32 coll_id)
{
	struct its_collection *collection;

	if (!vgic_its_check_id(its, its->baser_coll_table, coll_id, NULL))
		return E_ITS_MAPC_COLLECTION_OOR;

	collection = kzalloc(sizeof(*collection), GFP_KERNEL);
	if (!collection)
		return -ENOMEM;

	collection->collection_id = coll_id;
	collection->target_addr = COLLECTION_NOT_MAPPED;

	list_add_tail(&collection->coll_list, &its->collection_list);
	*colp = collection;

	return 0;
}
static void vgic_its_free_collection(struct vgic_its *its, u32 coll_id)
{
	struct its_collection *collection;
	struct its_device *device;
	struct its_ite *ite;

	/*
	 * Clearing the mapping for that collection ID removes the
	 * entry from the list. If there wasn't any before, we can
	 * go home early.
	 */
	collection = find_collection(its, coll_id);
	if (!collection)
		return;

	for_each_lpi_its(device, ite, its)
		if (ite->collection &&
		    ite->collection->collection_id == coll_id)
			ite->collection = NULL;

	list_del(&collection->coll_list);
	kfree(collection);
}
/* Must be called with its_lock mutex held */
static struct its_ite *vgic_its_alloc_ite(struct its_device *device,
					  struct its_collection *collection,
					  u32 event_id)
{
	struct its_ite *ite;

	ite = kzalloc(sizeof(*ite), GFP_KERNEL);
	if (!ite)
		return ERR_PTR(-ENOMEM);

	ite->event_id	= event_id;
	ite->collection = collection;

	list_add_tail(&ite->ite_list, &device->itt_head);
	return ite;
}
/*
 * The MAPTI and MAPI commands map LPIs to ITTEs.
 * Must be called with its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct its_ite *ite;
	struct kvm_vcpu *vcpu = NULL;
	struct its_device *device;
	struct its_collection *collection, *new_coll = NULL;
	struct vgic_irq *irq;
	int lpi_nr;

	device = find_its_device(its, device_id);
	if (!device)
		return E_ITS_MAPTI_UNMAPPED_DEVICE;

	if (event_id >= BIT_ULL(device->num_eventid_bits))
		return E_ITS_MAPTI_ID_OOR;

	if (its_cmd_get_command(its_cmd) == GITS_CMD_MAPTI)
		lpi_nr = its_cmd_get_physical_id(its_cmd);
	else
		lpi_nr = event_id;
	if (lpi_nr < GIC_LPI_OFFSET ||
	    lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
		return E_ITS_MAPTI_PHYSICALID_OOR;

	/* If there is an existing mapping, behavior is UNPREDICTABLE. */
	if (find_ite(its, device_id, event_id))
		return 0;

	collection = find_collection(its, coll_id);
	if (!collection) {
		int ret = vgic_its_alloc_collection(its, &collection, coll_id);
		if (ret)
			return ret;
		new_coll = collection;
	}

	ite = vgic_its_alloc_ite(device, collection, event_id);
	if (IS_ERR(ite)) {
		if (new_coll)
			vgic_its_free_collection(its, coll_id);
		return PTR_ERR(ite);
	}

	if (its_is_collection_mapped(collection))
		vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	irq = vgic_add_lpi(kvm, lpi_nr, vcpu);
	if (IS_ERR(irq)) {
		if (new_coll)
			vgic_its_free_collection(its, coll_id);
		its_free_ite(kvm, ite);
		return PTR_ERR(irq);
	}
	ite->irq = irq;

	return 0;
}
/* Requires the its_lock to be held. */
static void vgic_its_free_device(struct kvm *kvm, struct its_device *device)
{
	struct its_ite *ite, *temp;

	/*
	 * The spec says that unmapping a device with still valid
	 * ITTEs associated is UNPREDICTABLE. We remove all ITTEs,
	 * since we cannot leave the memory unreferenced.
	 */
	list_for_each_entry_safe(ite, temp, &device->itt_head, ite_list)
		its_free_ite(kvm, ite);

	list_del(&device->dev_list);
	kfree(device);
}
/* its lock must be held */
static void vgic_its_free_device_list(struct kvm *kvm, struct vgic_its *its)
{
	struct its_device *cur, *temp;

	list_for_each_entry_safe(cur, temp, &its->device_list, dev_list)
		vgic_its_free_device(kvm, cur);
}

/* its lock must be held */
static void vgic_its_free_collection_list(struct kvm *kvm, struct vgic_its *its)
{
	struct its_collection *cur, *temp;

	list_for_each_entry_safe(cur, temp, &its->collection_list, coll_list)
		vgic_its_free_collection(its, cur->collection_id);
}
/* Must be called with its_lock mutex held */
static struct its_device *vgic_its_alloc_device(struct vgic_its *its,
						u32 device_id, gpa_t itt_addr,
						u8 num_eventid_bits)
{
	struct its_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return ERR_PTR(-ENOMEM);

	device->device_id = device_id;
	device->itt_addr = itt_addr;
	device->num_eventid_bits = num_eventid_bits;
	INIT_LIST_HEAD(&device->itt_head);

	list_add_tail(&device->dev_list, &its->device_list);
	return device;
}
/*
 * MAPD maps or unmaps a device ID to Interrupt Translation Tables (ITTs).
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	bool valid = its_cmd_get_validbit(its_cmd);
	u8 num_eventid_bits = its_cmd_get_size(its_cmd);
	gpa_t itt_addr = its_cmd_get_ittaddr(its_cmd);
	struct its_device *device;

	if (!vgic_its_check_id(its, its->baser_device_table, device_id, NULL))
		return E_ITS_MAPD_DEVICE_OOR;

	if (valid && num_eventid_bits > VITS_TYPER_IDBITS)
		return E_ITS_MAPD_ITTSIZE_OOR;

	device = find_its_device(its, device_id);

	/*
	 * The spec says that calling MAPD on an already mapped device
	 * invalidates all cached data for this device. We implement this
	 * by removing the mapping and re-establishing it.
	 */
	if (device)
		vgic_its_free_device(kvm, device);

	/*
	 * The spec does not say whether unmapping a not-mapped device
	 * is an error, so we are done in any case.
	 */
	if (!valid)
		return 0;

	device = vgic_its_alloc_device(its, device_id, itt_addr,
				       num_eventid_bits);

	return PTR_ERR_OR_ZERO(device);
}
/*
 * The MAPC command maps collection IDs to redistributors.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u16 coll_id;
	u32 target_addr;
	struct its_collection *collection;
	bool valid;

	valid = its_cmd_get_validbit(its_cmd);
	coll_id = its_cmd_get_collection(its_cmd);
	target_addr = its_cmd_get_target_addr(its_cmd);

	if (target_addr >= atomic_read(&kvm->online_vcpus))
		return E_ITS_MAPC_PROCNUM_OOR;

	if (!valid) {
		vgic_its_free_collection(its, coll_id);
	} else {
		collection = find_collection(its, coll_id);

		if (!collection) {
			int ret;

			ret = vgic_its_alloc_collection(its, &collection,
							coll_id);
			if (ret)
				return ret;
			collection->target_addr = target_addr;
		} else {
			collection->target_addr = target_addr;
			update_affinity_collection(kvm, its, collection);
		}
	}

	return 0;
}
/*
 * The CLEAR command removes the pending state for a particular LPI.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its,
				     u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_ite *ite;

	ite = find_ite(its, device_id, event_id);
	if (!ite)
		return E_ITS_CLEAR_UNMAPPED_INTERRUPT;

	ite->irq->pending_latch = false;

	if (ite->irq->hw)
		return irq_set_irqchip_state(ite->irq->host_irq,
					     IRQCHIP_STATE_PENDING, false);

	return 0;
}
/*
 * The INV command syncs the configuration bits from the memory table.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_ite *ite;

	ite = find_ite(its, device_id, event_id);
	if (!ite)
		return E_ITS_INV_UNMAPPED_INTERRUPT;

	return update_lpi_config(kvm, ite->irq, NULL, true);
}
/*
 * The INVALL command requests flushing of all IRQ data in this collection.
 * Find the VCPU mapped to that collection, then iterate over the VM's list
 * of mapped LPIs and update the configuration for each IRQ which targets
 * the specified vcpu. The configuration will be read from the in-memory
 * configuration table.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
				      u64 *its_cmd)
{
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct its_collection *collection;
	struct kvm_vcpu *vcpu;
	struct vgic_irq *irq;
	u32 *intids;
	int irq_count, i;

	collection = find_collection(its, coll_id);
	if (!its_is_collection_mapped(collection))
		return E_ITS_INVALL_UNMAPPED_COLLECTION;

	vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	irq_count = vgic_copy_lpi_list(kvm, vcpu, &intids);
	if (irq_count < 0)
		return irq_count;

	for (i = 0; i < irq_count; i++) {
		irq = vgic_get_irq(kvm, NULL, intids[i]);
		if (!irq)
			continue;
		update_lpi_config(kvm, irq, vcpu, false);
		vgic_put_irq(kvm, irq);
	}

	kfree(intids);

	if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.its_vm)
		its_invall_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe);

	return 0;
}
/*
 * The MOVALL command moves the pending state of all IRQs targeting one
 * redistributor to another. We don't hold the pending state in the VCPUs,
 * but in the IRQs instead, so there is really not much to do for us here.
 * However the spec says that no IRQ must target the old redistributor
 * afterwards, so we make sure that no LPI is using the associated target_vcpu.
 * This command affects all LPIs in the system that target that redistributor.
 */
static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
				      u64 *its_cmd)
{
	u32 target1_addr = its_cmd_get_target_addr(its_cmd);
	u32 target2_addr = its_cmd_mask_field(its_cmd, 3, 16, 32);
	struct kvm_vcpu *vcpu1, *vcpu2;
	struct vgic_irq *irq;
	u32 *intids;
	int irq_count, i;

	if (target1_addr >= atomic_read(&kvm->online_vcpus) ||
	    target2_addr >= atomic_read(&kvm->online_vcpus))
		return E_ITS_MOVALL_PROCNUM_OOR;

	if (target1_addr == target2_addr)
		return 0;

	vcpu1 = kvm_get_vcpu(kvm, target1_addr);
	vcpu2 = kvm_get_vcpu(kvm, target2_addr);

	irq_count = vgic_copy_lpi_list(kvm, vcpu1, &intids);
	if (irq_count < 0)
		return irq_count;

	for (i = 0; i < irq_count; i++) {
		irq = vgic_get_irq(kvm, NULL, intids[i]);

		update_affinity(irq, vcpu2);

		vgic_put_irq(kvm, irq);
	}

	kfree(intids);

	return 0;
}
/*
 * The INT command injects the LPI associated with that DevID/EvID pair.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_int(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	u32 msi_data = its_cmd_get_id(its_cmd);
	u64 msi_devid = its_cmd_get_deviceid(its_cmd);

	return vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
}
/*
 * This function is called with the its_cmd lock held, but the ITS data
 * structure lock dropped.
 */
static int vgic_its_handle_command(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	int ret = -ENODEV;

	mutex_lock(&its->its_lock);
	switch (its_cmd_get_command(its_cmd)) {
	case GITS_CMD_MAPD:
		ret = vgic_its_cmd_handle_mapd(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPC:
		ret = vgic_its_cmd_handle_mapc(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPI:
		ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPTI:
		ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
		break;
	case GITS_CMD_MOVI:
		ret = vgic_its_cmd_handle_movi(kvm, its, its_cmd);
		break;
	case GITS_CMD_DISCARD:
		ret = vgic_its_cmd_handle_discard(kvm, its, its_cmd);
		break;
	case GITS_CMD_CLEAR:
		ret = vgic_its_cmd_handle_clear(kvm, its, its_cmd);
		break;
	case GITS_CMD_MOVALL:
		ret = vgic_its_cmd_handle_movall(kvm, its, its_cmd);
		break;
	case GITS_CMD_INT:
		ret = vgic_its_cmd_handle_int(kvm, its, its_cmd);
		break;
	case GITS_CMD_INV:
		ret = vgic_its_cmd_handle_inv(kvm, its, its_cmd);
		break;
	case GITS_CMD_INVALL:
		ret = vgic_its_cmd_handle_invall(kvm, its, its_cmd);
		break;
	case GITS_CMD_SYNC:
		/* we ignore this command: we are in sync all of the time */
		ret = 0;
		break;
	}
	mutex_unlock(&its->its_lock);

	return ret;
}
static u64 vgic_sanitise_its_baser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GITS_BASER_SHAREABILITY_MASK,
				  GITS_BASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GITS_BASER_INNER_CACHEABILITY_MASK,
				  GITS_BASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GITS_BASER_OUTER_CACHEABILITY_MASK,
				  GITS_BASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	/* Bits 15:12 contain bits 51:48 of the PA, which we don't support. */
	reg &= ~GENMASK_ULL(15, 12);

	/* We support only one (ITS) page size: 64K */
	reg = (reg & ~GITS_BASER_PAGE_SIZE_MASK) | GITS_BASER_PAGE_SIZE_64K;

	return reg;
}
static u64 vgic_sanitise_its_cbaser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GITS_CBASER_SHAREABILITY_MASK,
				  GITS_CBASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GITS_CBASER_INNER_CACHEABILITY_MASK,
				  GITS_CBASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GITS_CBASER_OUTER_CACHEABILITY_MASK,
				  GITS_CBASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	/*
	 * Sanitise the physical address to be 64k aligned.
	 * Also limit the physical addresses to 48 bits.
	 */
	reg &= ~(GENMASK_ULL(51, 48) | GENMASK_ULL(15, 12));

	return reg;
}
static unsigned long vgic_mmio_read_its_cbaser(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	return extract_bytes(its->cbaser, addr & 7, len);
}

static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its,
				       gpa_t addr, unsigned int len,
				       unsigned long val)
{
	/* When GITS_CTLR.Enable is 1, this register is RO. */
	if (its->enabled)
		return;

	mutex_lock(&its->cmd_lock);
	its->cbaser = update_64bit_reg(its->cbaser, addr & 7, len, val);
	its->cbaser = vgic_sanitise_its_cbaser(its->cbaser);
	its->creadr = 0;
	/*
	 * CWRITER is architecturally UNKNOWN on reset, but we need to reset
	 * it to CREADR to make sure we start with an empty command buffer.
	 */
	its->cwriter = its->creadr;
	mutex_unlock(&its->cmd_lock);
}
#define ITS_CMD_BUFFER_SIZE(baser)	((((baser) & 0xff) + 1) << 12)
#define ITS_CMD_SIZE			32
#define ITS_CMD_OFFSET(reg)		((reg) & GENMASK(19, 5))
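
/*
 * For illustration: CBASER[7:0] holds the number of 4K pages minus one,
 * so a Size field of 0 yields a 4KB ring holding 128 commands of
 * ITS_CMD_SIZE (32) bytes each. CREADR/CWRITER offsets are 32-byte
 * aligned, which is why ITS_CMD_OFFSET() masks bits [19:5].
 */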
/* Must be called with the cmd_lock held. */
static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
{
	gpa_t cbaser;
	u64 cmd_buf[4];

	/* Commands are only processed when the ITS is enabled. */
	if (!its->enabled)
		return;

	cbaser = CBASER_ADDRESS(its->cbaser);

	while (its->cwriter != its->creadr) {
		int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr,
					      cmd_buf, ITS_CMD_SIZE);
		/*
		 * If kvm_read_guest() fails, this could be due to the guest
		 * programming a bogus value in CBASER or something else going
		 * wrong from which we cannot easily recover.
		 * According to section 6.3.2 in the GICv3 spec we can just
		 * ignore that command then.
		 */
		if (!ret)
			vgic_its_handle_command(kvm, its, cmd_buf);

		its->creadr += ITS_CMD_SIZE;
		if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser))
			its->creadr = 0;
	}
}
/*
 * By writing to CWRITER the guest announces new commands to be processed.
 * To avoid any races in the first place, we take the its_cmd lock, which
 * protects our ring buffer variables, so that there is only one user
 * per ITS handling commands at a given time.
 */
static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
					gpa_t addr, unsigned int len,
					unsigned long val)
{
	u64 reg;

	if (!its)
		return;

	mutex_lock(&its->cmd_lock);

	reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
	reg = ITS_CMD_OFFSET(reg);
	if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
		mutex_unlock(&its->cmd_lock);
		return;
	}
	its->cwriter = reg;

	vgic_its_process_commands(kvm, its);

	mutex_unlock(&its->cmd_lock);
}
static unsigned long vgic_mmio_read_its_cwriter(struct kvm *kvm,
						struct vgic_its *its,
						gpa_t addr, unsigned int len)
{
	return extract_bytes(its->cwriter, addr & 0x7, len);
}

static unsigned long vgic_mmio_read_its_creadr(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	return extract_bytes(its->creadr, addr & 0x7, len);
}
static int vgic_mmio_uaccess_write_its_creadr(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len,
					      unsigned long val)
{
	u32 cmd_offset;
	int ret = 0;

	mutex_lock(&its->cmd_lock);

	if (its->enabled) {
		ret = -EBUSY;
		goto out;
	}

	cmd_offset = ITS_CMD_OFFSET(val);
	if (cmd_offset >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
		ret = -EINVAL;
		goto out;
	}

	its->creadr = cmd_offset;
out:
	mutex_unlock(&its->cmd_lock);
	return ret;
}
#define BASER_INDEX(addr) (((addr) / sizeof(u64)) & 0x7)
static unsigned long vgic_mmio_read_its_baser(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len)
{
	u64 reg;

	switch (BASER_INDEX(addr)) {
	case 0:
		reg = its->baser_device_table;
		break;
	case 1:
		reg = its->baser_coll_table;
		break;
	default:
		reg = 0;
		break;
	}

	return extract_bytes(reg, addr & 7, len);
}
#define GITS_BASER_RO_MASK	(GENMASK_ULL(52, 48) | GENMASK_ULL(58, 56))
static void vgic_mmio_write_its_baser(struct kvm *kvm,
				      struct vgic_its *its,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 entry_size, table_type;
	u64 reg, *regptr, clearbits = 0;

	/* When GITS_CTLR.Enable is 1, we ignore write accesses. */
	if (its->enabled)
		return;

	switch (BASER_INDEX(addr)) {
	case 0:
		regptr = &its->baser_device_table;
		entry_size = abi->dte_esz;
		table_type = GITS_BASER_TYPE_DEVICE;
		break;
	case 1:
		regptr = &its->baser_coll_table;
		entry_size = abi->cte_esz;
		table_type = GITS_BASER_TYPE_COLLECTION;
		clearbits = GITS_BASER_INDIRECT;
		break;
	default:
		return;
	}

	reg = update_64bit_reg(*regptr, addr & 7, len, val);
	reg &= ~GITS_BASER_RO_MASK;
	reg &= ~clearbits;

	reg |= (entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT;
	reg |= table_type << GITS_BASER_TYPE_SHIFT;
	reg = vgic_sanitise_its_baser(reg);

	*regptr = reg;

	if (!(reg & GITS_BASER_VALID)) {
		/* Take the its_lock to prevent a race with a save/restore */
		mutex_lock(&its->its_lock);
		switch (table_type) {
		case GITS_BASER_TYPE_DEVICE:
			vgic_its_free_device_list(kvm, its);
			break;
		case GITS_BASER_TYPE_COLLECTION:
			vgic_its_free_collection_list(kvm, its);
			break;
		}
		mutex_unlock(&its->its_lock);
	}
}
static unsigned long vgic_mmio_read_its_ctlr(struct kvm *kvm,
					     struct vgic_its *its,
					     gpa_t addr, unsigned int len)
{
	u32 reg = 0;

	mutex_lock(&its->cmd_lock);
	if (its->creadr == its->cwriter)
		reg |= GITS_CTLR_QUIESCENT;
	if (its->enabled)
		reg |= GITS_CTLR_ENABLE;
	mutex_unlock(&its->cmd_lock);

	return reg;
}
static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	mutex_lock(&its->cmd_lock);

	/*
	 * It is UNPREDICTABLE to enable the ITS if any of the CBASER or
	 * device/collection BASER are invalid
	 */
	if (!its->enabled && (val & GITS_CTLR_ENABLE) &&
		(!(its->baser_device_table & GITS_BASER_VALID) ||
		 !(its->baser_coll_table & GITS_BASER_VALID) ||
		 !(its->cbaser & GITS_CBASER_VALID)))
		goto out;

	its->enabled = !!(val & GITS_CTLR_ENABLE);

	/*
	 * Try to process any pending commands. This function bails out early
	 * if the ITS is disabled or no commands have been queued.
	 */
	vgic_its_process_commands(kvm, its);

out:
	mutex_unlock(&its->cmd_lock);
}
#define REGISTER_ITS_DESC(off, rd, wr, length, acc)		\
{								\
	.reg_offset = off,					\
	.len = length,						\
	.access_flags = acc,					\
	.its_read = rd,						\
	.its_write = wr,					\
}

#define REGISTER_ITS_DESC_UACCESS(off, rd, wr, uwr, length, acc)\
{								\
	.reg_offset = off,					\
	.len = length,						\
	.access_flags = acc,					\
	.its_read = rd,						\
	.its_write = wr,					\
	.uaccess_its_write = uwr,				\
}
static void its_mmio_write_wi(struct kvm *kvm, struct vgic_its *its,
			      gpa_t addr, unsigned int len, unsigned long val)
{
	/* Ignore */
}

static struct vgic_register_region its_registers[] = {
	REGISTER_ITS_DESC(GITS_CTLR,
		vgic_mmio_read_its_ctlr, vgic_mmio_write_its_ctlr, 4,
		VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC_UACCESS(GITS_IIDR,
		vgic_mmio_read_its_iidr, its_mmio_write_wi,
		vgic_mmio_uaccess_write_its_iidr, 4,
		VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_TYPER,
		vgic_mmio_read_its_typer, its_mmio_write_wi, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_CBASER,
		vgic_mmio_read_its_cbaser, vgic_mmio_write_its_cbaser, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_CWRITER,
		vgic_mmio_read_its_cwriter, vgic_mmio_write_its_cwriter, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC_UACCESS(GITS_CREADR,
		vgic_mmio_read_its_creadr, its_mmio_write_wi,
		vgic_mmio_uaccess_write_its_creadr, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_BASER,
		vgic_mmio_read_its_baser, vgic_mmio_write_its_baser, 0x40,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_IDREGS_BASE,
		vgic_mmio_read_its_idregs, its_mmio_write_wi, 0x30,
		VGIC_ACCESS_32bit),
};
/* This is called on setting the LPI enable bit in the redistributor. */
void vgic_enable_lpis(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.vgic_cpu.pendbaser & GICR_PENDBASER_PTZ))
		its_sync_lpi_pending_table(vcpu);
}
static int vgic_register_its_iodev(struct kvm *kvm, struct vgic_its *its,
				   u64 addr)
{
	struct vgic_io_device *iodev = &its->iodev;
	int ret;

	mutex_lock(&kvm->slots_lock);
	if (!IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
		ret = -EBUSY;
		goto out;
	}

	its->vgic_its_base = addr;
	iodev->regions = its_registers;
	iodev->nr_regions = ARRAY_SIZE(its_registers);
	kvm_iodevice_init(&iodev->dev, &kvm_io_gic_ops);

	iodev->base_addr = its->vgic_its_base;
	iodev->iodev_type = IODEV_ITS;
	iodev->its = its;
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, iodev->base_addr,
				      KVM_VGIC_V3_ITS_SIZE, &iodev->dev);
out:
	mutex_unlock(&kvm->slots_lock);

	return ret;
}
#define INITIAL_BASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb)		| \
	 GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, SameAsInner)	| \
	 GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)		| \
	 GITS_BASER_PAGE_SIZE_64K)

#define INITIAL_PROPBASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb)		| \
	 GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, SameAsInner)	| \
	 GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable))
static int vgic_its_create(struct kvm_device *dev, u32 type)
{
	struct vgic_its *its;

	if (type != KVM_DEV_TYPE_ARM_VGIC_ITS)
		return -ENODEV;

	its = kzalloc(sizeof(struct vgic_its), GFP_KERNEL);
	if (!its)
		return -ENOMEM;

	if (vgic_initialized(dev->kvm)) {
		int ret = vgic_v4_init(dev->kvm);
		if (ret < 0) {
			kfree(its);
			return ret;
		}
	}

	mutex_init(&its->its_lock);
	mutex_init(&its->cmd_lock);

	its->vgic_its_base = VGIC_ADDR_UNDEF;

	INIT_LIST_HEAD(&its->device_list);
	INIT_LIST_HEAD(&its->collection_list);

	dev->kvm->arch.vgic.msis_require_devid = true;
	dev->kvm->arch.vgic.has_its = true;
	its->enabled = false;
	its->dev = dev;

	its->baser_device_table = INITIAL_BASER_VALUE			|
		((u64)GITS_BASER_TYPE_DEVICE << GITS_BASER_TYPE_SHIFT);
	its->baser_coll_table = INITIAL_BASER_VALUE |
		((u64)GITS_BASER_TYPE_COLLECTION << GITS_BASER_TYPE_SHIFT);
	dev->kvm->arch.vgic.propbaser = INITIAL_PROPBASER_VALUE;

	dev->private = its;

	return vgic_its_set_abi(its, NR_ITS_ABIS - 1);
}
static void vgic_its_destroy(struct kvm_device *kvm_dev)
{
	struct kvm *kvm = kvm_dev->kvm;
	struct vgic_its *its = kvm_dev->private;

	mutex_lock(&its->its_lock);

	vgic_its_free_device_list(kvm, its);
	vgic_its_free_collection_list(kvm, its);

	mutex_unlock(&its->its_lock);
	kfree(its);
	kfree(kvm_dev);/* alloc by kvm_ioctl_create_device, free by .destroy */
}
int vgic_its_has_attr_regs(struct kvm_device *dev,
			   struct kvm_device_attr *attr)
{
	const struct vgic_register_region *region;
	gpa_t offset = attr->attr;
	int align;

	align = (offset < GITS_TYPER) || (offset >= GITS_PIDR4) ? 0x3 : 0x7;

	if (offset & align)
		return -EINVAL;

	region = vgic_find_mmio_region(its_registers,
				       ARRAY_SIZE(its_registers),
				       offset);
	if (!region)
		return -ENXIO;

	return 0;
}
int vgic_its_attr_regs_access(struct kvm_device *dev,
			      struct kvm_device_attr *attr,
			      u64 *reg, bool is_write)
{
	const struct vgic_register_region *region;
	struct vgic_its *its;
	gpa_t addr, offset;
	unsigned int len;
	int align, ret = 0;

	its = dev->private;
	offset = attr->attr;

	/*
	 * Although the spec supports upper/lower 32-bit accesses to
	 * 64-bit ITS registers, the userspace ABI requires 64-bit
	 * accesses to all 64-bit wide registers. We therefore only
	 * support 32-bit accesses to GITS_CTLR, GITS_IIDR and GITS ID
	 * registers
	 */
	if ((offset < GITS_TYPER) || (offset >= GITS_PIDR4))
		align = 0x3;
	else
		align = 0x7;

	if (offset & align)
		return -EINVAL;

	mutex_lock(&dev->kvm->lock);

	if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
		ret = -ENXIO;
		goto out;
	}

	region = vgic_find_mmio_region(its_registers,
				       ARRAY_SIZE(its_registers),
				       offset);
	if (!region) {
		ret = -ENXIO;
		goto out;
	}

	if (!lock_all_vcpus(dev->kvm)) {
		ret = -EBUSY;
		goto out;
	}

	addr = its->vgic_its_base + offset;

	len = region->access_flags & VGIC_ACCESS_64bit ? 8 : 4;

	if (is_write) {
		if (region->uaccess_its_write)
			ret = region->uaccess_its_write(dev->kvm, its, addr,
							len, *reg);
		else
			region->its_write(dev->kvm, its, addr, len, *reg);
	} else {
		*reg = region->its_read(dev->kvm, its, addr, len);
	}
	unlock_all_vcpus(dev->kvm);
out:
	mutex_unlock(&dev->kvm->lock);
	return ret;
}
static u32 compute_next_devid_offset(struct list_head *h,
				     struct its_device *dev)
{
	struct its_device *next;
	u32 next_offset;

	if (list_is_last(&dev->dev_list, h))
		return 0;
	next = list_next_entry(dev, dev_list);
	next_offset = next->device_id - dev->device_id;

	return min_t(u32, next_offset, VITS_DTE_MAX_DEVID_OFFSET);
}

static u32 compute_next_eventid_offset(struct list_head *h, struct its_ite *ite)
{
	struct its_ite *next;
	u32 next_offset;

	if (list_is_last(&ite->ite_list, h))
		return 0;
	next = list_next_entry(ite, ite_list);
	next_offset = next->event_id - ite->event_id;

	return min_t(u32, next_offset, VITS_ITE_MAX_EVENTID_OFFSET);
}
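
/*
 * For illustration: for devices with IDs 0 and 3 adjacent in the
 * (sorted) list, the saved "next" field is 3. A gap larger than
 * VITS_DTE_MAX_DEVID_OFFSET (16383) is capped to that maximum, since
 * the table encoding cannot express a bigger distance.
 */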
/**
 * entry_fn_t - Callback called on a table entry restore path
 * @its: its handle
 * @id: id of the entry
 * @entry: pointer to the entry
 * @opaque: pointer to an opaque data
 *
 * Return: < 0 on error, 0 if last element was identified, id offset to next
 * element otherwise
 */
typedef int (*entry_fn_t)(struct vgic_its *its, u32 id, void *entry,
			  void *opaque);
/**
 * scan_its_table - Scan a contiguous table in guest RAM and applies a function
 * to each entry
 *
 * @its: its handle
 * @base: base gpa of the table
 * @size: size of the table in bytes
 * @esz: entry size in bytes
 * @start_id: the ID of the first entry in the table
 * (non zero for 2d level tables)
 * @fn: function to apply on each entry
 *
 * Return: < 0 on error, 0 if last element was identified, 1 otherwise
 * (the last element may not be found on second level tables)
 */
static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
			  int start_id, entry_fn_t fn, void *opaque)
{
	struct kvm *kvm = its->dev->kvm;
	unsigned long len = size;
	int id = start_id;
	gpa_t gpa = base;
	char entry[ESZ_MAX];
	int ret;

	memset(entry, 0, esz);

	while (len > 0) {
		int next_offset;
		size_t byte_offset;

		ret = kvm_read_guest_lock(kvm, gpa, entry, esz);
		if (ret)
			return ret;

		next_offset = fn(its, id, entry, opaque);
		if (next_offset <= 0)
			return next_offset;

		byte_offset = next_offset * esz;
		if (byte_offset >= len)
			break;

		id += next_offset;
		gpa += byte_offset;
		len -= byte_offset;
	}
	return 1;
}
/**
 * vgic_its_save_ite - Save an interrupt translation entry at @gpa
 */
static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
			     struct its_ite *ite, gpa_t gpa, int ite_esz)
{
	struct kvm *kvm = its->dev->kvm;
	u32 next_offset;
	u64 val;

	next_offset = compute_next_eventid_offset(&dev->itt_head, ite);
	val = ((u64)next_offset << KVM_ITS_ITE_NEXT_SHIFT) |
	       ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
		ite->collection->collection_id;
	val = cpu_to_le64(val);
	return kvm_write_guest_lock(kvm, gpa, &val, ite_esz);
}
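
/*
 * Per the KVM ITS v0 table ABI (see the arm-vgic-its device
 * documentation), an ITE is laid out as: bits[63:48] the offset to the
 * next event ID, bits[47:16] the physical INTID, bits[15:0] the
 * collection ID.
 */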
/**
 * vgic_its_restore_ite - restore an interrupt translation entry
 * @event_id: id used for indexing
 * @ptr: pointer to the ITE entry
 * @opaque: pointer to the its_device
 */
static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id,
				void *ptr, void *opaque)
{
	struct its_device *dev = (struct its_device *)opaque;
	struct its_collection *collection;
	struct kvm *kvm = its->dev->kvm;
	struct kvm_vcpu *vcpu = NULL;
	u64 val;
	u64 *p = (u64 *)ptr;
	struct vgic_irq *irq;
	u32 coll_id, lpi_id;
	struct its_ite *ite;
	u32 offset;

	val = *p;

	val = le64_to_cpu(val);

	coll_id = val & KVM_ITS_ITE_ICID_MASK;
	lpi_id = (val & KVM_ITS_ITE_PINTID_MASK) >> KVM_ITS_ITE_PINTID_SHIFT;

	if (!lpi_id)
		return 1; /* invalid entry, no choice but to scan next entry */

	if (lpi_id < VGIC_MIN_LPI)
		return -EINVAL;

	offset = val >> KVM_ITS_ITE_NEXT_SHIFT;
	if (event_id + offset >= BIT_ULL(dev->num_eventid_bits))
		return -EINVAL;

	collection = find_collection(its, coll_id);
	if (!collection)
		return -EINVAL;

	ite = vgic_its_alloc_ite(dev, collection, event_id);
	if (IS_ERR(ite))
		return PTR_ERR(ite);

	if (its_is_collection_mapped(collection))
		vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	irq = vgic_add_lpi(kvm, lpi_id, vcpu);
	if (IS_ERR(irq))
		return PTR_ERR(irq);
	ite->irq = irq;

	return offset;
}
static int vgic_its_ite_cmp(void *priv, struct list_head *a,
			    struct list_head *b)
{
	struct its_ite *itea = container_of(a, struct its_ite, ite_list);
	struct its_ite *iteb = container_of(b, struct its_ite, ite_list);

	if (itea->event_id < iteb->event_id)
		return -1;
	else
		return 1;
}
static int vgic_its_save_itt(struct vgic_its *its, struct its_device *device)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	gpa_t base = device->itt_addr;
	struct its_ite *ite;
	int ret;
	int ite_esz = abi->ite_esz;

	list_sort(NULL, &device->itt_head, vgic_its_ite_cmp);

	list_for_each_entry(ite, &device->itt_head, ite_list) {
		gpa_t gpa = base + ite->event_id * ite_esz;

		/*
		 * If an LPI carries the HW bit, this means that this
		 * interrupt is controlled by GICv4, and we do not
		 * have direct access to that state. Let's simply fail
		 * the save operation...
		 */
		if (ite->irq->hw)
			return -EACCES;

		ret = vgic_its_save_ite(its, device, ite, gpa, ite_esz);
		if (ret)
			return ret;
	}
	return 0;
}
/**
 * vgic_its_restore_itt - restore the ITT of a device
 *
 * @its: its handle
 * @dev: device handle
 *
 * Return 0 on success, < 0 on error
 */
static int vgic_its_restore_itt(struct vgic_its *its, struct its_device *dev)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	gpa_t base = dev->itt_addr;
	int ret;
	int ite_esz = abi->ite_esz;
	size_t max_size = BIT_ULL(dev->num_eventid_bits) * ite_esz;

	ret = scan_its_table(its, base, max_size, ite_esz, 0,
			     vgic_its_restore_ite, dev);

	/* scan_its_table returns +1 if all ITEs are invalid */
	if (ret > 0)
		ret = 0;

	return ret;
}
/**
 * vgic_its_save_dte - Save a device table entry at a given GPA
 *
 * @its: ITS handle
 * @dev: ITS device
 * @ptr: GPA
 * @dte_esz: device table entry size
 */
static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
			     gpa_t ptr, int dte_esz)
{
	struct kvm *kvm = its->dev->kvm;
	u64 val, itt_addr_field;
	u32 next_offset;

	itt_addr_field = dev->itt_addr >> 8;
	next_offset = compute_next_devid_offset(&its->device_list, dev);
	val = (1ULL << KVM_ITS_DTE_VALID_SHIFT |
	       ((u64)next_offset << KVM_ITS_DTE_NEXT_SHIFT) |
	       (itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) |
		(dev->num_eventid_bits - 1));
	val = cpu_to_le64(val);
	return kvm_write_guest_lock(kvm, ptr, &val, dte_esz);
}
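
/*
 * Per the KVM ITS v0 table ABI, a DTE is laid out as: bit[63] valid,
 * bits[62:49] the offset to the next device ID, bits[48:5] the ITT
 * address shifted right by 8, bits[4:0] the number of event ID bits
 * minus one.
 */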
/**
 * vgic_its_restore_dte - restore a device table entry
 *
 * @its: its handle
 * @id: device id the DTE corresponds to
 * @ptr: kernel VA where the 8 byte DTE is located
 * @opaque: unused
 *
 * Return: < 0 on error, 0 if the dte is the last one, id offset to the
 * next dte otherwise
 */
static int vgic_its_restore_dte(struct vgic_its *its, u32 id,
				void *ptr, void *opaque)
{
	struct its_device *dev;
	gpa_t itt_addr;
	u8 num_eventid_bits;
	u64 entry = *(u64 *)ptr;
	bool valid;
	u32 offset;
	int ret;

	entry = le64_to_cpu(entry);

	valid = entry >> KVM_ITS_DTE_VALID_SHIFT;
	num_eventid_bits = (entry & KVM_ITS_DTE_SIZE_MASK) + 1;
	itt_addr = ((entry & KVM_ITS_DTE_ITTADDR_MASK)
			>> KVM_ITS_DTE_ITTADDR_SHIFT) << 8;

	if (!valid)
		return 1;

	/* dte entry is valid */
	offset = (entry & KVM_ITS_DTE_NEXT_MASK) >> KVM_ITS_DTE_NEXT_SHIFT;

	dev = vgic_its_alloc_device(its, id, itt_addr, num_eventid_bits);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	ret = vgic_its_restore_itt(its, dev);
	if (ret) {
		vgic_its_free_device(its->dev->kvm, dev);
		return ret;
	}

	return offset;
}
static int vgic_its_device_cmp(void *priv, struct list_head *a,
			       struct list_head *b)
{
	struct its_device *deva = container_of(a, struct its_device, dev_list);
	struct its_device *devb = container_of(b, struct its_device, dev_list);

	if (deva->device_id < devb->device_id)
		return -1;
	else
		return 1;
}
/**
 * vgic_its_save_device_tables - Save the device table and all ITT
 * into guest RAM
 *
 * L1/L2 handling is hidden by vgic_its_check_id() helper which directly
 * returns the GPA of the device entry
 */
static int vgic_its_save_device_tables(struct vgic_its *its)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 baser = its->baser_device_table;
	struct its_device *dev;
	int dte_esz = abi->dte_esz;

	if (!(baser & GITS_BASER_VALID))
		return 0;

	list_sort(NULL, &its->device_list, vgic_its_device_cmp);

	list_for_each_entry(dev, &its->device_list, dev_list) {
		int ret;
		gpa_t eaddr;

		if (!vgic_its_check_id(its, baser,
				       dev->device_id, &eaddr))
			return -EINVAL;

		ret = vgic_its_save_itt(its, dev);
		if (ret)
			return ret;

		ret = vgic_its_save_dte(its, dev, eaddr, dte_esz);
		if (ret)
			return ret;
	}
	return 0;
}
/**
 * handle_l1_dte - callback used for L1 device table entries (2 stage case)
 *
 * @its: its handle
 * @id: index of the entry in the L1 table
 * @addr: kernel VA
 * @opaque: unused
 *
 * L1 table entries are scanned by steps of 1 entry
 * Return < 0 if error, 0 if last dte was found when scanning the L2
 * table, +1 otherwise (meaning next L1 entry must be scanned)
 */
static int handle_l1_dte(struct vgic_its *its, u32 id, void *addr,
			 void *opaque)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	int l2_start_id = id * (SZ_64K / abi->dte_esz);
	u64 entry = *(u64 *)addr;
	int dte_esz = abi->dte_esz;
	gpa_t gpa;
	int ret;

	entry = le64_to_cpu(entry);

	if (!(entry & KVM_ITS_L1E_VALID_MASK))
		return 1;

	gpa = entry & KVM_ITS_L1E_ADDR_MASK;

	ret = scan_its_table(its, gpa, SZ_64K, dte_esz,
			     l2_start_id, vgic_its_restore_dte, NULL);

	return ret;
}
/**
 * vgic_its_restore_device_tables - Restore the device table and all ITT
 * from guest RAM to internal data structs
 */
static int vgic_its_restore_device_tables(struct vgic_its *its)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 baser = its->baser_device_table;
	int l1_esz, ret;
	int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
	gpa_t l1_gpa;

	if (!(baser & GITS_BASER_VALID))
		return 0;

	l1_gpa = BASER_ADDRESS(baser);

	if (baser & GITS_BASER_INDIRECT) {
		l1_esz = GITS_LVL1_ENTRY_SIZE;
		ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0,
				     handle_l1_dte, NULL);
	} else {
		l1_esz = abi->dte_esz;
		ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0,
				     vgic_its_restore_dte, NULL);
	}

	/* scan_its_table returns +1 if all entries are invalid */
	if (ret > 0)
		ret = 0;

	return ret;
}
static int vgic_its_save_cte(struct vgic_its *its,
			     struct its_collection *collection,
			     gpa_t gpa, int esz)
{
	u64 val;

	val = (1ULL << KVM_ITS_CTE_VALID_SHIFT |
	       ((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) |
	       collection->collection_id);
	val = cpu_to_le64(val);
	return kvm_write_guest_lock(its->dev->kvm, gpa, &val, esz);
}
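
/*
 * Per the KVM ITS v0 table ABI, a CTE is laid out as: bit[63] valid,
 * bits[62:16] the target redistributor (here the linear VCPU number),
 * bits[15:0] the collection ID.
 */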
static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
{
	struct its_collection *collection;
	struct kvm *kvm = its->dev->kvm;
	u32 target_addr, coll_id;
	u64 val;
	int ret;

	BUG_ON(esz > sizeof(val));
	ret = kvm_read_guest_lock(kvm, gpa, &val, esz);
	if (ret)
		return ret;
	val = le64_to_cpu(val);
	if (!(val & KVM_ITS_CTE_VALID_MASK))
		return 0;

	target_addr = (u32)(val >> KVM_ITS_CTE_RDBASE_SHIFT);
	coll_id = val & KVM_ITS_CTE_ICID_MASK;

	if (target_addr != COLLECTION_NOT_MAPPED &&
	    target_addr >= atomic_read(&kvm->online_vcpus))
		return -EINVAL;

	collection = find_collection(its, coll_id);
	if (collection)
		return -EEXIST;
	ret = vgic_its_alloc_collection(its, &collection, coll_id);
	if (ret)
		return ret;
	collection->target_addr = target_addr;
	return 1;
}
/**
 * vgic_its_save_collection_table - Save the collection table into
 * guest RAM
 */
static int vgic_its_save_collection_table(struct vgic_its *its)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 baser = its->baser_coll_table;
	gpa_t gpa = BASER_ADDRESS(baser);
	struct its_collection *collection;
	u64 val;
	size_t max_size, filled = 0;
	int ret, cte_esz = abi->cte_esz;

	if (!(baser & GITS_BASER_VALID))
		return 0;

	max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;

	list_for_each_entry(collection, &its->collection_list, coll_list) {
		ret = vgic_its_save_cte(its, collection, gpa, cte_esz);
		if (ret)
			return ret;
		gpa += cte_esz;
		filled += cte_esz;
	}

	if (filled == max_size)
		return 0;

	/*
	 * table is not fully filled, add a last dummy element
	 * with valid bit unset
	 */
	val = 0;
	BUG_ON(cte_esz > sizeof(val));
	ret = kvm_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
	return ret;
}
/**
 * vgic_its_restore_collection_table - reads the collection table
 * in guest memory and restores the ITS internal state. Requires the
 * BASER registers to be restored before.
 */
static int vgic_its_restore_collection_table(struct vgic_its *its)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 baser = its->baser_coll_table;
	int cte_esz = abi->cte_esz;
	size_t max_size, read = 0;
	gpa_t gpa;
	int ret;

	if (!(baser & GITS_BASER_VALID))
		return 0;

	gpa = BASER_ADDRESS(baser);

	max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;

	while (read < max_size) {
		ret = vgic_its_restore_cte(its, gpa, cte_esz);
		if (ret <= 0)
			break;
		gpa += cte_esz;
		read += cte_esz;
	}

	if (ret > 0)
		return 0;

	return ret;
}
/**
 * vgic_its_save_tables_v0 - Save the ITS tables into guest RAM
 * according to v0 ABI
 */
static int vgic_its_save_tables_v0(struct vgic_its *its)
{
	int ret;

	ret = vgic_its_save_device_tables(its);
	if (ret)
		return ret;

	return vgic_its_save_collection_table(its);
}

/**
 * vgic_its_restore_tables_v0 - Restore the ITS tables from guest RAM
 * to internal data structs according to V0 ABI
 */
static int vgic_its_restore_tables_v0(struct vgic_its *its)
{
	int ret;

	ret = vgic_its_restore_collection_table(its);
	if (ret)
		return ret;

	return vgic_its_restore_device_tables(its);
}
static int vgic_its_commit_v0(struct vgic_its *its)
{
	const struct vgic_its_abi *abi;

	abi = vgic_its_get_abi(its);
	its->baser_coll_table &= ~GITS_BASER_ENTRY_SIZE_MASK;
	its->baser_device_table &= ~GITS_BASER_ENTRY_SIZE_MASK;

	its->baser_coll_table |= (GIC_ENCODE_SZ(abi->cte_esz, 5)
					<< GITS_BASER_ENTRY_SIZE_SHIFT);

	its->baser_device_table |= (GIC_ENCODE_SZ(abi->dte_esz, 5)
					<< GITS_BASER_ENTRY_SIZE_SHIFT);
	return 0;
}
static void vgic_its_reset(struct kvm *kvm, struct vgic_its *its)
{
	/* We need to keep the ABI specific field values */
	its->baser_coll_table &= ~GITS_BASER_VALID;
	its->baser_device_table &= ~GITS_BASER_VALID;
	its->cbaser = 0;
	its->creadr = 0;
	its->cwriter = 0;
	its->enabled = 0;
	vgic_its_free_device_list(kvm, its);
	vgic_its_free_collection_list(kvm, its);
}
static int vgic_its_has_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_ITS_ADDR_TYPE:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return 0;
		case KVM_DEV_ARM_ITS_CTRL_RESET:
			return 0;
		case KVM_DEV_ARM_ITS_SAVE_TABLES:
			return 0;
		case KVM_DEV_ARM_ITS_RESTORE_TABLES:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS:
		return vgic_its_has_attr_regs(dev, attr);
	}
	return -ENXIO;
}
static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	int ret = 0;

	if (attr == KVM_DEV_ARM_VGIC_CTRL_INIT) /* Nothing to do */
		return 0;

	mutex_lock(&kvm->lock);
	mutex_lock(&its->its_lock);

	if (!lock_all_vcpus(kvm)) {
		mutex_unlock(&its->its_lock);
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	switch (attr) {
	case KVM_DEV_ARM_ITS_CTRL_RESET:
		vgic_its_reset(kvm, its);
		break;
	case KVM_DEV_ARM_ITS_SAVE_TABLES:
		ret = abi->save_tables(its);
		break;
	case KVM_DEV_ARM_ITS_RESTORE_TABLES:
		ret = abi->restore_tables(its);
		break;
	}

	unlock_all_vcpus(kvm);
	mutex_unlock(&its->its_lock);
	mutex_unlock(&kvm->lock);
	return ret;
}
static int vgic_its_set_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct vgic_its *its = dev->private;
	int ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		unsigned long type = (unsigned long)attr->attr;
		u64 addr;

		if (type != KVM_VGIC_ITS_ADDR_TYPE)
			return -ENODEV;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		ret = vgic_check_ioaddr(dev->kvm, &its->vgic_its_base,
					addr, SZ_64K);
		if (ret)
			return ret;

		return vgic_register_its_iodev(dev->kvm, its, addr);
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		return vgic_its_ctrl(dev->kvm, its, attr->attr);
	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 reg;

		if (get_user(reg, uaddr))
			return -EFAULT;

		return vgic_its_attr_regs_access(dev, attr, &reg, true);
	}
	}
	return -ENXIO;
}
static int vgic_its_get_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		struct vgic_its *its = dev->private;
		u64 addr = its->vgic_its_base;
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		unsigned long type = (unsigned long)attr->attr;

		if (type != KVM_VGIC_ITS_ADDR_TYPE)
			return -ENODEV;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 reg;
		int ret;

		ret = vgic_its_attr_regs_access(dev, attr, &reg, false);
		if (ret)
			return ret;
		return put_user(reg, uaddr);
	}
	default:
		return -ENXIO;
	}

	return 0;
}
static struct kvm_device_ops kvm_arm_vgic_its_ops = {
	.name = "kvm-arm-vgic-its",
	.create = vgic_its_create,
	.destroy = vgic_its_destroy,
	.set_attr = vgic_its_set_attr,
	.get_attr = vgic_its_get_attr,
	.has_attr = vgic_its_has_attr,
};

int kvm_vgic_register_its_device(void)
{
	return kvm_register_device_ops(&kvm_arm_vgic_its_ops,
				       KVM_DEV_TYPE_ARM_VGIC_ITS);
}