/*
 * GICv3 distributor and redistributor emulation
 *
 * GICv3 emulation is currently only supported on a GICv3 host (because
 * we rely on the hardware's CPU interface virtualization support), but
 * supports both hardware with or without the optional GICv2 backwards
 * compatibility features.
 *
 * Limitations of the emulation:
 * (RAZ/WI: read as zero, write ignore; RAO/WI: read as one, write ignore)
 * - We do not support LPIs (yet). TYPER.LPIS is reported as 0 and is RAZ/WI.
 * - We do not support the message based interrupts (MBIs) triggered by
 *   writes to the GICD_{SET,CLR}SPI_* registers. TYPER.MBIS is reported as 0.
 * - We do not support the (optional) backwards compatibility feature.
 *   GICD_CTLR.ARE resets to 1 and is RAO/WI. If the _host_ GIC supports
 *   the compatibility feature, you can use a GICv2 in the guest, though.
 * - We only support a single security state. GICD_CTLR.DS is 1 and is RAO/WI.
 * - Priorities are not emulated (same as the GICv2 emulation). Linux
 *   as a guest is fine with this, because it does not use priorities.
 * - We only support Group1 interrupts. Again, Linux uses only those.
 *
 * Copyright (C) 2014 ARM Ltd.
 * Author: Andre Przywara <andre.przywara@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>

#include <linux/irqchip/arm-gic-v3.h>
#include <kvm/arm_vgic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
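/*
 * Note (editorial, not from the original source): RAO/WI below is used for
 * IGROUPR. With GICD_CTLR.DS forced to 1 and only Group1 interrupts
 * supported, every interrupt reads back as "Group 1" and guest attempts to
 * regroup interrupts are silently ignored.
 */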
static bool handle_mmio_rao_wi(struct kvm_vcpu *vcpu,
			       struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg = 0xffffffff;

	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);

	return false;
}
static bool handle_mmio_ctlr(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg = 0;

	/*
	 * Force ARE and DS to 1, the guest cannot change this.
	 * For the time being we only support Group1 interrupts.
	 */
	if (vcpu->kvm->arch.vgic.enabled)
		reg = GICD_CTLR_ENABLE_SS_G1;
	reg |= GICD_CTLR_ARE_NS | GICD_CTLR_DS;

	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		if (reg & GICD_CTLR_ENABLE_SS_G0)
			kvm_info("guest tried to enable unsupported Group0 interrupts\n");
		vcpu->kvm->arch.vgic.enabled = !!(reg & GICD_CTLR_ENABLE_SS_G1);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}
/*
 * As this implementation does not provide compatibility
 * with GICv2 (ARE==1), we report zero CPUs in bits [5..7].
 * LPIs and MBIs are not supported, so we set the respective bits to 0,
 * and we report at most 2**10=1024 interrupt IDs (to match 1024 SPIs).
 */
#define INTERRUPT_ID_BITS 10
static bool handle_mmio_typer(struct kvm_vcpu *vcpu,
			      struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;

	reg = (min(vcpu->kvm->arch.vgic.nr_irqs, 1024) >> 5) - 1;

	reg |= (INTERRUPT_ID_BITS - 1) << 19;

	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);

	return false;
}
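/*
 * Worked example (illustrative only): for a guest configured with
 * nr_irqs == 256, ITLinesNumber in TYPER[4:0] is (256 >> 5) - 1 = 7 and
 * IDbits in TYPER[23:19] is INTERRUPT_ID_BITS - 1 = 9, so the guest
 * reads GICD_TYPER as 0x00480007.
 */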
static bool handle_mmio_iidr(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;

	reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);

	return false;
}
static bool handle_mmio_set_enable_reg_dist(struct kvm_vcpu *vcpu,
					    struct kvm_exit_mmio *mmio,
					    phys_addr_t offset)
{
	if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
		return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
					      vcpu->vcpu_id,
					      ACCESS_WRITE_SETBIT);

	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}
static bool handle_mmio_clear_enable_reg_dist(struct kvm_vcpu *vcpu,
					      struct kvm_exit_mmio *mmio,
					      phys_addr_t offset)
{
	if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
		return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
					      vcpu->vcpu_id,
					      ACCESS_WRITE_CLEARBIT);

	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}
static bool handle_mmio_set_pending_reg_dist(struct kvm_vcpu *vcpu,
					     struct kvm_exit_mmio *mmio,
					     phys_addr_t offset)
{
	if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
		return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset,
						   vcpu->vcpu_id);

	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}
static bool handle_mmio_clear_pending_reg_dist(struct kvm_vcpu *vcpu,
					       struct kvm_exit_mmio *mmio,
					       phys_addr_t offset)
{
	if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
		return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset,
						     vcpu->vcpu_id);

	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}
static bool handle_mmio_priority_reg_dist(struct kvm_vcpu *vcpu,
					  struct kvm_exit_mmio *mmio,
					  phys_addr_t offset)
{
	u32 *reg;

	if (unlikely(offset < VGIC_NR_PRIVATE_IRQS)) {
		vgic_reg_access(mmio, NULL, offset,
				ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
		return false;
	}

	reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
				   vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	return false;
}
static bool handle_mmio_cfg_reg_dist(struct kvm_vcpu *vcpu,
				     struct kvm_exit_mmio *mmio,
				     phys_addr_t offset)
{
	u32 *reg;

	if (unlikely(offset < VGIC_NR_PRIVATE_IRQS / 4)) {
		vgic_reg_access(mmio, NULL, offset,
				ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
		return false;
	}

	reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
				  vcpu->vcpu_id, offset >> 1);

	return vgic_handle_cfg_reg(reg, mmio, offset);
}
/*
 * We use a compressed version of the MPIDR (all 32 bits in one 32-bit word)
 * when we store the target MPIDR written by the guest.
 */
static u32 compress_mpidr(unsigned long mpidr)
{
	u32 ret;

	ret = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	ret |= MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8;
	ret |= MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16;
	ret |= MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24;

	return ret;
}

static unsigned long uncompress_mpidr(u32 value)
{
	unsigned long mpidr;

	mpidr = ((value >> 0) & 0xFF) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((value >> 8) & 0xFF) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((value >> 16) & 0xFF) << MPIDR_LEVEL_SHIFT(2);
	mpidr |= (u64)((value >> 24) & 0xFF) << MPIDR_LEVEL_SHIFT(3);

	return mpidr;
}
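/*
 * Round-trip example (illustrative only): an arm64 MPIDR with Aff3=0x01,
 * Aff2=0x02, Aff1=0x03, Aff0=0x04 reads as 0x0000000100020304 (Aff3 lives
 * in bits [39:32]); compress_mpidr() packs it into 0x01020304 and
 * uncompress_mpidr() restores the original layout.
 */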
/*
 * Look up the given MPIDR value to get the vcpu_id (if there is one)
 * and store that in the irq_spi_cpu[] array.
 * This limits the number of VCPUs to 255 for now; extending the data
 * type (or storing kvm_vcpu pointers) should lift the limit.
 * Store the original MPIDR value in an extra array to support read-as-written.
 * Unallocated MPIDRs are translated to a special value and caught
 * before any array accesses.
 */
static bool handle_mmio_route_reg(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	struct kvm *kvm = vcpu->kvm;
	struct vgic_dist *dist = &kvm->arch.vgic;
	int spi;
	u32 reg;
	int vcpu_id;
	unsigned long *bmap, mpidr;

	/*
	 * The upper 32 bits of each 64 bit register are zero,
	 * as we don't support Aff3.
	 */
	if ((offset & 4)) {
		vgic_reg_access(mmio, NULL, offset,
				ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
		return false;
	}

	/* This region only covers SPIs, so no handling of private IRQs here. */
	spi = offset / 8;

	/* get the stored MPIDR for this IRQ */
	mpidr = uncompress_mpidr(dist->irq_spi_mpidr[spi]);
	reg = mpidr;

	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);

	if (!mmio->is_write)
		return false;

	/*
	 * Now clear the currently assigned vCPU from the map, making room
	 * for the new one to be written below.
	 */
	vcpu = kvm_mpidr_to_vcpu(kvm, mpidr);
	if (likely(vcpu)) {
		vcpu_id = vcpu->vcpu_id;
		bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]);
		__clear_bit(spi, bmap);
	}

	dist->irq_spi_mpidr[spi] = compress_mpidr(reg);
	vcpu = kvm_mpidr_to_vcpu(kvm, reg & MPIDR_HWID_BITMASK);

	/*
	 * The spec says that non-existent MPIDR values should not be
	 * forwarded to any existing (v)CPU, but should be able to become
	 * pending anyway. We simply keep the irq_spi_target[] array empty, so
	 * the interrupt will never be injected.
	 * irq_spi_cpu[irq] gets a magic value in this case.
	 */
	if (likely(vcpu)) {
		vcpu_id = vcpu->vcpu_id;
		dist->irq_spi_cpu[spi] = vcpu_id;
		bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]);
		__set_bit(spi, bmap);
	} else {
		dist->irq_spi_cpu[spi] = VCPU_NOT_ALLOCATED;
	}

	vgic_update_state(kvm);

	return false;
}
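/*
 * Example (illustrative only): a guest writing 0x0000000000000203 to an
 * SPI's GICD_IROUTER entry requests routing to the VCPU whose MPIDR has
 * Aff1=0x02 and Aff0=0x03. If no such VCPU exists, the value is still
 * remembered (read-as-written) but the SPI is never forwarded.
 */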
/*
 * We should be careful about promising too much when a guest reads
 * this register. Don't claim to be like any hardware implementation,
 * but just report the GIC as version 3 - which is what a Linux guest
 * would check for anyway.
 */
static bool handle_mmio_idregs(struct kvm_vcpu *vcpu,
			       struct kvm_exit_mmio *mmio,
			       phys_addr_t offset)
{
	u32 reg = 0;

	switch (offset + GICD_IDREGS) {
	case GICD_PIDR2:
		reg = 0x3b;
		break;
	}

	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);

	return false;
}
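/*
 * Note (editorial): PIDR2 bits [7:4] hold the ArchRev field, so the value
 * 0x3b above reports architecture revision 3 (GICv3). That field is the
 * only part of the ID register block a Linux guest actually inspects.
 */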
static const struct kvm_mmio_range vgic_v3_dist_ranges[] = {
	{
		.base = GICD_CTLR,
		.len = 0x04,
		.handle_mmio = handle_mmio_ctlr,
	},
	{
		.base = GICD_TYPER,
		.len = 0x04,
		.handle_mmio = handle_mmio_typer,
	},
	{
		.base = GICD_IIDR,
		.len = 0x04,
		.handle_mmio = handle_mmio_iidr,
	},
	{
		/* this register is optional, it is RAZ/WI if not implemented */
		.base = GICD_STATUSR,
		.len = 0x04,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		/* this write only register is WI when TYPER.MBIS=0 */
		.base = GICD_SETSPI_NSR,
		.len = 0x04,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		/* this write only register is WI when TYPER.MBIS=0 */
		.base = GICD_CLRSPI_NSR,
		.len = 0x04,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		/* this is RAZ/WI when DS=1 */
		.base = GICD_SETSPI_SR,
		.len = 0x04,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		/* this is RAZ/WI when DS=1 */
		.base = GICD_CLRSPI_SR,
		.len = 0x04,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		.base = GICD_IGROUPR,
		.len = 0x80,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_rao_wi,
	},
	{
		.base = GICD_ISENABLER,
		.len = 0x80,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_set_enable_reg_dist,
	},
	{
		.base = GICD_ICENABLER,
		.len = 0x80,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_clear_enable_reg_dist,
	},
	{
		.base = GICD_ISPENDR,
		.len = 0x80,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_set_pending_reg_dist,
	},
	{
		.base = GICD_ICPENDR,
		.len = 0x80,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_clear_pending_reg_dist,
	},
	{
		.base = GICD_ISACTIVER,
		.len = 0x80,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		.base = GICD_ICACTIVER,
		.len = 0x80,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		.base = GICD_IPRIORITYR,
		.len = 0x400,
		.bits_per_irq = 8,
		.handle_mmio = handle_mmio_priority_reg_dist,
	},
	{
		/* TARGETSRn is RES0 when ARE=1 */
		.base = GICD_ITARGETSR,
		.len = 0x400,
		.bits_per_irq = 8,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		.base = GICD_ICFGR,
		.len = 0x100,
		.bits_per_irq = 2,
		.handle_mmio = handle_mmio_cfg_reg_dist,
	},
	{
		/* this is RAZ/WI when DS=1 */
		.base = GICD_IGRPMODR,
		.len = 0x80,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		/* this is RAZ/WI when DS=1 */
		.base = GICD_NSACR,
		.len = 0x100,
		.bits_per_irq = 2,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		/* this is RAZ/WI when ARE=1 */
		.base = GICD_SGIR,
		.len = 0x04,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		/* this is RAZ/WI when ARE=1 */
		.base = GICD_CPENDSGIR,
		.len = 0x10,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		/* this is RAZ/WI when ARE=1 */
		.base = GICD_SPENDSGIR,
		.len = 0x10,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		.base = GICD_IROUTER + 0x100,
		.len = 0x1ee0,
		.bits_per_irq = 64,
		.handle_mmio = handle_mmio_route_reg,
	},
	{
		.base = GICD_IDREGS,
		.len = 0x30,
		.handle_mmio = handle_mmio_idregs,
	},
	{},
};
static bool handle_mmio_set_enable_reg_redist(struct kvm_vcpu *vcpu,
					      struct kvm_exit_mmio *mmio,
					      phys_addr_t offset)
{
	struct kvm_vcpu *redist_vcpu = mmio->private;

	return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
				      redist_vcpu->vcpu_id,
				      ACCESS_WRITE_SETBIT);
}
static bool handle_mmio_clear_enable_reg_redist(struct kvm_vcpu *vcpu,
						struct kvm_exit_mmio *mmio,
						phys_addr_t offset)
{
	struct kvm_vcpu *redist_vcpu = mmio->private;

	return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
				      redist_vcpu->vcpu_id,
				      ACCESS_WRITE_CLEARBIT);
}
static bool handle_mmio_set_pending_reg_redist(struct kvm_vcpu *vcpu,
					       struct kvm_exit_mmio *mmio,
					       phys_addr_t offset)
{
	struct kvm_vcpu *redist_vcpu = mmio->private;

	return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset,
					   redist_vcpu->vcpu_id);
}
static bool handle_mmio_clear_pending_reg_redist(struct kvm_vcpu *vcpu,
						 struct kvm_exit_mmio *mmio,
						 phys_addr_t offset)
{
	struct kvm_vcpu *redist_vcpu = mmio->private;

	return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset,
					     redist_vcpu->vcpu_id);
}
static bool handle_mmio_priority_reg_redist(struct kvm_vcpu *vcpu,
					    struct kvm_exit_mmio *mmio,
					    phys_addr_t offset)
{
	struct kvm_vcpu *redist_vcpu = mmio->private;
	u32 *reg;

	reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
				   redist_vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	return false;
}
static bool handle_mmio_cfg_reg_redist(struct kvm_vcpu *vcpu,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	struct kvm_vcpu *redist_vcpu = mmio->private;

	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
				       redist_vcpu->vcpu_id, offset >> 1);

	return vgic_handle_cfg_reg(reg, mmio, offset);
}
static const struct kvm_mmio_range vgic_redist_sgi_ranges[] = {
	{
		.base = GICR_IGROUPR0,
		.len = 0x04,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_rao_wi,
	},
	{
		.base = GICR_ISENABLER0,
		.len = 0x04,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_set_enable_reg_redist,
	},
	{
		.base = GICR_ICENABLER0,
		.len = 0x04,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_clear_enable_reg_redist,
	},
	{
		.base = GICR_ISPENDR0,
		.len = 0x04,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_set_pending_reg_redist,
	},
	{
		.base = GICR_ICPENDR0,
		.len = 0x04,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_clear_pending_reg_redist,
	},
	{
		.base = GICR_ISACTIVER0,
		.len = 0x04,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		.base = GICR_ICACTIVER0,
		.len = 0x04,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		.base = GICR_IPRIORITYR0,
		.len = 0x20,
		.bits_per_irq = 8,
		.handle_mmio = handle_mmio_priority_reg_redist,
	},
	{
		.base = GICR_ICFGR0,
		.len = 0x08,
		.bits_per_irq = 2,
		.handle_mmio = handle_mmio_cfg_reg_redist,
	},
	{
		.base = GICR_IGRPMODR0,
		.len = 0x04,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		.base = GICR_NSACR,
		.len = 0x04,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{},
};
static bool handle_mmio_ctlr_redist(struct kvm_vcpu *vcpu,
				    struct kvm_exit_mmio *mmio,
				    phys_addr_t offset)
{
	/* since we don't support LPIs, this register is zero for now */
	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}
static bool handle_mmio_typer_redist(struct kvm_vcpu *vcpu,
				     struct kvm_exit_mmio *mmio,
				     phys_addr_t offset)
{
	u32 reg;
	u64 mpidr;
	struct kvm_vcpu *redist_vcpu = mmio->private;
	int target_vcpu_id = redist_vcpu->vcpu_id;

	/* the upper 32 bits contain the affinity value */
	if ((offset & ~3) == 4) {
		mpidr = kvm_vcpu_get_mpidr_aff(redist_vcpu);
		reg = compress_mpidr(mpidr);

		vgic_reg_access(mmio, &reg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		return false;
	}

	reg = redist_vcpu->vcpu_id << 8;
	if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1)
		reg |= GICR_TYPER_LAST;
	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);

	return false;
}
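/*
 * Layout reminder (editorial): GICR_TYPER[63:32] holds this redistributor's
 * CPU affinity (here the compressed MPIDR), bits [23:8] hold the processor
 * number (we reuse the vcpu_id), and bit 4 (GICR_TYPER_LAST) marks the
 * last redistributor in the contiguous region.
 */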
static const struct kvm_mmio_range vgic_redist_ranges[] = {
	{
		.base = GICR_CTLR,
		.len = 0x04,
		.handle_mmio = handle_mmio_ctlr_redist,
	},
	{
		.base = GICR_TYPER,
		.len = 0x08,
		.handle_mmio = handle_mmio_typer_redist,
	},
	{
		.base = GICR_IIDR,
		.len = 0x04,
		.handle_mmio = handle_mmio_iidr,
	},
	{
		.base = GICR_WAKER,
		.len = 0x04,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		.base = GICR_IDREGS,
		.len = 0x30,
		.handle_mmio = handle_mmio_idregs,
	},
	{},
};
/*
 * This function splits accesses between the distributor and the two
 * redistributor parts (private/SPI). As each redistributor is accessible
 * from any CPU, we have to determine the affected VCPU by taking the faulting
 * address into account. We then pass this VCPU to the handler function via
 * the private parameter.
 */
#define SGI_BASE_OFFSET SZ_64K
static bool vgic_v3_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
				struct kvm_exit_mmio *mmio)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long dbase = dist->vgic_dist_base;
	unsigned long rdbase = dist->vgic_redist_base;
	int nrcpus = atomic_read(&vcpu->kvm->online_vcpus);
	int vcpu_id;
	const struct kvm_mmio_range *mmio_range;

	if (is_in_range(mmio->phys_addr, mmio->len, dbase, GIC_V3_DIST_SIZE)) {
		return vgic_handle_mmio_range(vcpu, run, mmio,
					      vgic_v3_dist_ranges, dbase);
	}

	if (!is_in_range(mmio->phys_addr, mmio->len, rdbase,
	    GIC_V3_REDIST_SIZE * nrcpus))
		return false;

	vcpu_id = (mmio->phys_addr - rdbase) / GIC_V3_REDIST_SIZE;
	rdbase += (vcpu_id * GIC_V3_REDIST_SIZE);
	mmio->private = kvm_get_vcpu(vcpu->kvm, vcpu_id);

	if (mmio->phys_addr >= rdbase + SGI_BASE_OFFSET) {
		rdbase += SGI_BASE_OFFSET;
		mmio_range = vgic_redist_sgi_ranges;
	} else {
		mmio_range = vgic_redist_ranges;
	}
	return vgic_handle_mmio_range(vcpu, run, mmio, mmio_range, rdbase);
}
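/*
 * Decode example (illustrative only, assuming a 128 KiB redistributor
 * stride, i.e. GIC_V3_REDIST_SIZE == 2 * SZ_64K with one RD_base and one
 * SGI_base frame per VCPU): an access at rdbase + 0x30000 gives
 * vcpu_id = 0x30000 / 0x20000 = 1; the remaining offset 0x10000 is >=
 * SGI_BASE_OFFSET, so it hits the SGI/PPI page of VCPU 1 and is
 * dispatched through vgic_redist_sgi_ranges.
 */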
static bool vgic_v3_queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
	if (vgic_queue_irq(vcpu, 0, irq)) {
		vgic_dist_irq_clear_pending(vcpu, irq);
		vgic_cpu_irq_clear(vcpu, irq);
		return true;
	}

	return false;
}
static int vgic_v3_map_resources(struct kvm *kvm,
				 const struct vgic_params *params)
{
	int ret = 0;
	struct vgic_dist *dist = &kvm->arch.vgic;

	if (!irqchip_in_kernel(kvm))
		return 0;

	mutex_lock(&kvm->lock);

	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(dist->vgic_redist_base)) {
		kvm_err("Need to set vgic distributor addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	/*
	 * For a VGICv3 we require the userland to explicitly initialize
	 * the VGIC before we need to use it.
	 */
	if (!vgic_initialized(kvm)) {
		ret = -EBUSY;
		goto out;
	}

	kvm->arch.vgic.ready = true;
out:
	if (ret)
		kvm_vgic_destroy(kvm);
	mutex_unlock(&kvm->lock);
	return ret;
}
static int vgic_v3_init_model(struct kvm *kvm)
{
	int i;
	u32 mpidr;
	struct vgic_dist *dist = &kvm->arch.vgic;
	int nr_spis = dist->nr_irqs - VGIC_NR_PRIVATE_IRQS;

	dist->irq_spi_mpidr = kcalloc(nr_spis, sizeof(dist->irq_spi_mpidr[0]),
				      GFP_KERNEL);

	if (!dist->irq_spi_mpidr)
		return -ENOMEM;

	/* Initialize the target VCPUs for each IRQ to VCPU 0 */
	mpidr = compress_mpidr(kvm_vcpu_get_mpidr_aff(kvm_get_vcpu(kvm, 0)));
	for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i++) {
		dist->irq_spi_cpu[i - VGIC_NR_PRIVATE_IRQS] = 0;
		dist->irq_spi_mpidr[i - VGIC_NR_PRIVATE_IRQS] = mpidr;
		vgic_bitmap_set_irq_val(dist->irq_spi_target, 0, i, 1);
	}

	return 0;
}
/* GICv3 does not keep track of SGI sources anymore. */
static void vgic_v3_add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
{
}

void vgic_v3_init_emulation(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	dist->vm_ops.handle_mmio = vgic_v3_handle_mmio;
	dist->vm_ops.queue_sgi = vgic_v3_queue_sgi;
	dist->vm_ops.add_sgi_source = vgic_v3_add_sgi_source;
	dist->vm_ops.init_model = vgic_v3_init_model;
	dist->vm_ops.map_resources = vgic_v3_map_resources;

	kvm->arch.max_vcpus = KVM_MAX_VCPUS;
}
/*
 * Compare a given affinity (level 1-3 and a level 0 mask, from the SGI
 * generation register ICC_SGI1R_EL1) with a given VCPU.
 * If the VCPU's MPIDR matches, return the level0 affinity, otherwise
 * return -1.
 */
static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
{
	unsigned long affinity;
	int level0;

	/*
	 * Split the current VCPU's MPIDR into affinity level 0 and the
	 * rest as this is what we have to compare against.
	 */
	affinity = kvm_vcpu_get_mpidr_aff(vcpu);
	level0 = MPIDR_AFFINITY_LEVEL(affinity, 0);
	affinity &= ~MPIDR_LEVEL_MASK;

	/* bail out if the upper three levels don't match */
	if (sgi_aff != affinity)
		return -1;

	/* Is this VCPU's bit set in the mask ? */
	if (!(sgi_cpu_mask & BIT(level0)))
		return -1;

	return level0;
}

#define SGI_AFFINITY_LEVEL(reg, level) \
	((((reg) & ICC_SGI1R_AFFINITY_## level ##_MASK) \
	>> ICC_SGI1R_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))
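/*
 * Expansion example (editorial): SGI_AFFINITY_LEVEL(reg, 1) extracts the
 * Aff1 field from an ICC_SGI1R_EL1 value and shifts it into the Aff1
 * position of an MPIDR (bits [15:8]). The three invocations in
 * vgic_v3_dispatch_sgi() below therefore assemble an MPIDR-formatted
 * value that can be compared directly by match_mpidr().
 */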
/**
 * vgic_v3_dispatch_sgi - handle SGI requests from VCPUs
 * @vcpu: The VCPU requesting a SGI
 * @reg: The value written into the ICC_SGI1R_EL1 register by that VCPU
 *
 * With GICv3 (and ARE=1) CPUs trigger SGIs by writing to a system register.
 * This will trap in sys_regs.c and call this function.
 * This ICC_SGI1R_EL1 register contains the upper three affinity levels of the
 * target processors as well as a bitmask of 16 Aff0 CPUs.
 * If the interrupt routing mode bit is not set, we iterate over all VCPUs to
 * check for matching ones. If this bit is set, we signal all, but not the
 * calling VCPU.
 */
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *c_vcpu;
	struct vgic_dist *dist = &kvm->arch.vgic;
	u16 target_cpus;
	u64 mpidr;
	int sgi, c;
	int vcpu_id = vcpu->vcpu_id;
	bool broadcast;
	int updated = 0;

	sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
	broadcast = reg & BIT(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
	target_cpus = (reg & ICC_SGI1R_TARGET_LIST_MASK) >> ICC_SGI1R_TARGET_LIST_SHIFT;
	mpidr = SGI_AFFINITY_LEVEL(reg, 3);
	mpidr |= SGI_AFFINITY_LEVEL(reg, 2);
	mpidr |= SGI_AFFINITY_LEVEL(reg, 1);

	/*
	 * We take the dist lock here, because we come from the sysregs
	 * code path and not from the MMIO one (which already takes the lock).
	 */
	spin_lock(&dist->lock);

	/*
	 * We iterate over all VCPUs to find the MPIDRs matching the request.
	 * If we have handled one CPU, we clear its bit to detect early
	 * if we are already finished. This avoids iterating through all
	 * VCPUs when most of the time we just signal a single VCPU.
	 */
	kvm_for_each_vcpu(c, c_vcpu, kvm) {
		int level0;

		/* Exit early if we have dealt with all requested CPUs */
		if (!broadcast && target_cpus == 0)
			break;

		/* Don't signal the calling VCPU */
		if (broadcast && c == vcpu_id)
			continue;

		if (!broadcast) {
			level0 = match_mpidr(mpidr, target_cpus, c_vcpu);
			if (level0 == -1)
				continue;

			/* remove this matching VCPU from the mask */
			target_cpus &= ~BIT(level0);
		}

		/* Flag the SGI as pending */
		vgic_dist_irq_set_pending(c_vcpu, sgi);
		updated = 1;
		kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
	}
	if (updated)
		vgic_update_state(vcpu->kvm);
	spin_unlock(&dist->lock);
	if (updated)
		vgic_kick_vcpus(vcpu->kvm);
}
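/*
 * Worked example (illustrative only): a guest write of
 * (2ULL << ICC_SGI1R_SGI_ID_SHIFT) | 0x3 to ICC_SGI1R_EL1 requests SGI 2
 * for the two VCPUs with Aff0 = 0 and Aff0 = 1 (target list bits 0 and 1)
 * under affinity 0.0.0, with the routing mode (broadcast) bit clear.
 */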
static int vgic_v3_create(struct kvm_device *dev, u32 type)
{
	return kvm_vgic_create(dev->kvm, type);
}

static void vgic_v3_destroy(struct kvm_device *dev)
{
	kfree(dev);
}
static int vgic_v3_set_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	int ret;

	ret = vgic_set_common_attr(dev, attr);
	if (ret != -ENXIO)
		return ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		return -ENXIO;
	}

	return -ENXIO;
}

static int vgic_v3_get_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	int ret;

	ret = vgic_get_common_attr(dev, attr);
	if (ret != -ENXIO)
		return ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		return -ENXIO;
	}

	return -ENXIO;
}
static int vgic_v3_has_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_V2_ADDR_TYPE_DIST:
		case KVM_VGIC_V2_ADDR_TYPE_CPU:
			return -ENXIO;
		case KVM_VGIC_V3_ADDR_TYPE_DIST:
		case KVM_VGIC_V3_ADDR_TYPE_REDIST:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		return -ENXIO;
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
		return 0;
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return 0;
		}
	}
	return -ENXIO;
}

struct kvm_device_ops kvm_arm_vgic_v3_ops = {
	.name = "kvm-arm-vgic-v3",
	.create = vgic_v3_create,
	.destroy = vgic_v3_destroy,
	.set_attr = vgic_v3_set_attr,
	.get_attr = vgic_v3_get_attr,
	.has_attr = vgic_v3_has_attr,
};