/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/uaccess.h>

#include <linux/irqchip/arm-gic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
/*
 * How the whole thing works (courtesy of Christoffer Dall):
 *
 * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if
 *   something is pending on the CPU interface.
 * - Interrupts that are pending on the distributor are stored on the
 *   vgic.irq_pending vgic bitmap (this bitmap is updated by both user land
 *   ioctls and guest mmio ops, and other in-kernel peripherals such as the
 *   arch. timers).
 * - Every time the bitmap changes, the irq_pending_on_cpu oracle is
 *   recalculated.
 * - To calculate the oracle, we need info for each cpu from
 *   compute_pending_for_cpu, which considers:
 *   - PPI: dist->irq_pending & dist->irq_enable
 *   - SPI: dist->irq_pending & dist->irq_enable & dist->irq_spi_target
 *   - irq_spi_target is a 'formatted' version of the GICD_ITARGETSRn
 *     registers, stored on each vcpu. We only keep one bit of
 *     information per interrupt, making sure that only one vcpu can
 *     accept the interrupt.
 * - If any of the above state changes, we must recalculate the oracle.
 * - The same is true when injecting an interrupt, except that we only
 *   consider a single interrupt at a time. The irq_spi_cpu array
 *   contains the target CPU for each SPI.
 *
 * The handling of level interrupts adds some extra complexity. We
 * need to track when the interrupt has been EOIed, so we can sample
 * the 'line' again. This is achieved as such:
 *
 * - When a level interrupt is moved onto a vcpu, the corresponding
 *   bit in irq_queued is set. As long as this bit is set, the line
 *   will be ignored for further interrupts. The interrupt is injected
 *   into the vcpu with the GICH_LR_EOI bit set (generate a
 *   maintenance interrupt on EOI).
 * - When the interrupt is EOIed, the maintenance interrupt fires,
 *   and clears the corresponding bit in irq_queued. This allows the
 *   interrupt line to be sampled again.
 * - Note that level-triggered interrupts can also be set to pending from
 *   writes to GICD_ISPENDRn and lowering the external input line does not
 *   cause the interrupt to become inactive in such a situation.
 *   Conversely, writes to GICD_ICPENDRn do not cause the interrupt to become
 *   inactive as long as the external input line is held high.
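 *
 * As a minimal sketch of the resulting lifecycle (assuming a
 * level-triggered line driven by an in-kernel peripheral, and no
 * GICD_ISPENDRn/ICPENDRn writes in between):
 *
 *   kvm_vgic_inject_irq(level=1)  ->  irq_level and irq_pending set
 *   vgic_queue_hwirq()            ->  irq_queued set, LR gets LR_EOI_INT
 *   guest EOIs the interrupt      ->  maintenance irq, irq_queued cleared
 *   kvm_vgic_inject_irq(level=0)  ->  irq_level and irq_pending cleared
 */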
#define VGIC_ADDR_UNDEF		(-1)
#define IS_VGIC_ADDR_UNDEF(_x)	((_x) == VGIC_ADDR_UNDEF)

#define PRODUCT_ID_KVM		0x4b	/* ASCII code K */
#define IMPLEMENTER_ARM		0x43b
#define GICC_ARCH_VERSION_V2	0x2

#define ACCESS_READ_VALUE	(1 << 0)
#define ACCESS_READ_RAZ		(0 << 0)
#define ACCESS_READ_MASK(x)	((x) & (1 << 0))
#define ACCESS_WRITE_IGNORED	(0 << 1)
#define ACCESS_WRITE_SETBIT	(1 << 1)
#define ACCESS_WRITE_CLEARBIT	(2 << 1)
#define ACCESS_WRITE_VALUE	(3 << 1)
#define ACCESS_WRITE_MASK(x)	((x) & (3 << 1))
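
/*
 * A minimal sketch of how these modes combine (mirroring the handlers
 * below): a set-enable register handler passes one read mode and one
 * write mode, e.g.
 *
 *	vgic_reg_access(mmio, reg, offset,
 *			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
 *
 * so that reads return the backing value while writes can only set bits,
 * matching the GICD_ISENABLERn semantics.
 */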
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
static void vgic_update_state(struct kvm *kvm);
static void vgic_kick_vcpus(struct kvm *kvm);
static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi);
static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);

static const struct vgic_ops *vgic_ops;
static const struct vgic_params *vgic;
/*
 * struct vgic_bitmap contains a bitmap made of unsigned longs, but
 * extracts u32s out of them.
 *
 * This does not work on 64-bit BE systems, because the bitmap access
 * will store two consecutive 32-bit words with the higher-addressed
 * register's bits at the lower index and the lower-addressed register's
 * bits at the higher index.
 *
 * Therefore, swizzle the register index when accessing the 32-bit word
 * registers to access the right register's value.
 */
#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 64
#define REG_OFFSET_SWIZZLE	1
#else
#define REG_OFFSET_SWIZZLE	0
#endif
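
/*
 * Worked example, assuming BITS_PER_LONG == 64 on a big-endian host
 * (REG_OFFSET_SWIZZLE == 1): the first two shared 32-bit registers live
 * in the same unsigned long, but the bitmap ops place register 0's bits
 * in the higher-addressed word. The index swizzle (0 ^ 1) == 1 and
 * (1 ^ 1) == 0 swaps each pair of u32 indices so the right word is
 * returned; on little-endian hosts the swizzle is 0 and the indices are
 * used as-is.
 */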
static int vgic_init_bitmap(struct vgic_bitmap *b, int nr_cpus, int nr_irqs)
{
	int nr_longs;

	nr_longs = nr_cpus + BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);

	b->private = kzalloc(sizeof(unsigned long) * nr_longs, GFP_KERNEL);
	if (!b->private)
		return -ENOMEM;

	b->shared = b->private + nr_cpus;

	return 0;
}

static void vgic_free_bitmap(struct vgic_bitmap *b)
{
	kfree(b->private);
	b->private = NULL;
	b->shared = NULL;
}

static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
				int cpuid, u32 offset)
{
	offset >>= 2;
	if (!offset)
		return (u32 *)(x->private + cpuid) + REG_OFFSET_SWIZZLE;
	else
		return (u32 *)(x->shared) + ((offset - 1) ^ REG_OFFSET_SWIZZLE);
}

static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
				   int cpuid, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		return test_bit(irq, x->private + cpuid);

	return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared);
}

static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
				    int irq, int val)
{
	unsigned long *reg;

	if (irq < VGIC_NR_PRIVATE_IRQS) {
		reg = x->private + cpuid;
	} else {
		reg = x->shared;
		irq -= VGIC_NR_PRIVATE_IRQS;
	}

	if (val)
		set_bit(irq, reg);
	else
		clear_bit(irq, reg);
}

static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
{
	return x->private + cpuid;
}

static unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
{
	return x->shared;
}

static int vgic_init_bytemap(struct vgic_bytemap *x, int nr_cpus, int nr_irqs)
{
	int size;

	size  = nr_cpus * VGIC_NR_PRIVATE_IRQS;
	size += nr_irqs - VGIC_NR_PRIVATE_IRQS;

	x->private = kzalloc(size, GFP_KERNEL);
	if (!x->private)
		return -ENOMEM;

	x->shared = x->private + nr_cpus * VGIC_NR_PRIVATE_IRQS / sizeof(u32);

	return 0;
}

static void vgic_free_bytemap(struct vgic_bytemap *b)
{
	kfree(b->private);
	b->private = NULL;
	b->shared = NULL;
}

static u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
{
	u32 *reg;

	if (offset < VGIC_NR_PRIVATE_IRQS) {
		reg = x->private;
		offset += cpuid * VGIC_NR_PRIVATE_IRQS;
	} else {
		reg = x->shared;
		offset -= VGIC_NR_PRIVATE_IRQS;
	}

	return reg + (offset / sizeof(u32));
}

#define VGIC_CFG_LEVEL	0
#define VGIC_CFG_EDGE	1

static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int irq_val;

	irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq);
	return irq_val == VGIC_CFG_EDGE;
}

static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq);
}

static int vgic_irq_is_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq);
}

static void vgic_irq_set_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 1);
}

static void vgic_irq_clear_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_get_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_level, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_set_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 1);
}

static void vgic_dist_irq_clear_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_soft_pend(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_clear_soft_pend(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 1);
}

static void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 0);
}

static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
	else
		set_bit(irq - VGIC_NR_PRIVATE_IRQS,
			vcpu->arch.vgic_cpu.pending_shared);
}

static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
	else
		clear_bit(irq - VGIC_NR_PRIVATE_IRQS,
			  vcpu->arch.vgic_cpu.pending_shared);
}

static bool vgic_can_sample_irq(struct kvm_vcpu *vcpu, int irq)
{
	return vgic_irq_is_edge(vcpu, irq) || !vgic_irq_is_queued(vcpu, irq);
}

static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
{
	return le32_to_cpu(*((u32 *)mmio->data)) & mask;
}

static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
{
	*((u32 *)mmio->data) = cpu_to_le32(value) & mask;
}
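
/*
 * Note on byte order: the mmio data buffer is kept little-endian (the
 * distributor is modelled as a little-endian device), so the two helpers
 * above convert to and from host byte order while the caller masks to
 * the access width, e.g. mask == 0xffff for a 16-bit access.
 */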
/**
 * vgic_reg_access - access vgic register
 * @mmio:   pointer to the data describing the mmio access
 * @reg:    pointer to the virtual backing of vgic distributor data
 * @offset: least significant 2 bits used for word offset
 * @mode:   ACCESS_ mode (see defines above)
 *
 * Helper to make vgic register access easier using one of the access
 * modes defined for vgic register access
 * (read,raz,write-ignored,setbit,clearbit,write)
 */
static void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
			    phys_addr_t offset, int mode)
{
	int word_offset = (offset & 3) * 8;
	u32 mask = (1UL << (mmio->len * 8)) - 1;
	u32 regval;

	/*
	 * Any alignment fault should have been delivered to the guest
	 * directly (ARM ARM B3.12.7 "Prioritization of aborts").
	 */

	if (reg) {
		regval = *reg;
	} else {
		BUG_ON(mode != (ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED));
		regval = 0;
	}

	if (mmio->is_write) {
		u32 data = mmio_data_read(mmio, mask) << word_offset;
		switch (ACCESS_WRITE_MASK(mode)) {
		case ACCESS_WRITE_IGNORED:
			return;

		case ACCESS_WRITE_SETBIT:
			regval |= data;
			break;

		case ACCESS_WRITE_CLEARBIT:
			regval &= ~data;
			break;

		case ACCESS_WRITE_VALUE:
			regval = (regval & ~(mask << word_offset)) | data;
			break;
		}
		*reg = regval;
	} else {
		switch (ACCESS_READ_MASK(mode)) {
		case ACCESS_READ_RAZ:
			regval = 0;
			break;

		case ACCESS_READ_VALUE:
			mmio_data_write(mmio, mask, regval >> word_offset);
		}
	}
}

static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;
	u32 word_offset = offset & 3;

	switch (offset & ~3) {
	case 0:			/* GICD_CTLR */
		reg = vcpu->kvm->arch.vgic.enabled;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
		if (mmio->is_write) {
			vcpu->kvm->arch.vgic.enabled = reg & 1;
			vgic_update_state(vcpu->kvm);
			return true;
		}
		break;

	case 4:			/* GICD_TYPER */
		reg  = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
		reg |= (VGIC_NR_IRQS >> 5) - 1;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;

	case 8:			/* GICD_IIDR */
		reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;
	}

	return false;
}

static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
			       struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}

static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
				       vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
	if (mmio->is_write) {
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset)
{
	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
				       vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
	if (mmio->is_write) {
		if (offset < 4) /* Force SGI enabled */
			*reg |= 0xffff;
		vgic_retire_disabled_irqs(vcpu);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset)
{
	u32 *reg, orig;
	u32 level_mask;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	reg = vgic_bitmap_get_reg(&dist->irq_cfg, vcpu->vcpu_id, offset);
	level_mask = (~(*reg));

	/* Mark both level and edge triggered irqs as pending */
	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu->vcpu_id, offset);
	orig = *reg;
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);

	if (mmio->is_write) {
		/* Set the soft-pending flag only for level-triggered irqs */
		reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
					  vcpu->vcpu_id, offset);
		vgic_reg_access(mmio, reg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
		*reg &= level_mask;

		/* Ignore writes to SGIs */
		if (offset < 2) {
			*reg &= ~0xffff;
			*reg |= orig & 0xffff;
		}

		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
					  struct kvm_exit_mmio *mmio,
					  phys_addr_t offset)
{
	u32 *level_active;
	u32 *reg, orig;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu->vcpu_id, offset);
	orig = *reg;
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
	if (mmio->is_write) {
		/* Re-set level triggered level-active interrupts */
		level_active = vgic_bitmap_get_reg(&dist->irq_level,
						   vcpu->vcpu_id, offset);
		reg = vgic_bitmap_get_reg(&dist->irq_pending,
					  vcpu->vcpu_id, offset);
		*reg |= *level_active;

		/* Ignore writes to SGIs */
		if (offset < 2) {
			*reg &= ~0xffff;
			*reg |= orig & 0xffff;
		}

		/* Clear soft-pending flags */
		reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
					  vcpu->vcpu_id, offset);
		vgic_reg_access(mmio, reg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);

		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
				     struct kvm_exit_mmio *mmio,
				     phys_addr_t offset)
{
	u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
					vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	return false;
}

#define GICD_ITARGETSR_SIZE	32
#define GICD_CPUTARGETS_BITS	8
#define GICD_IRQS_PER_ITARGETSR	(GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
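
/*
 * Each 32-bit GICD_ITARGETSRn register thus covers 4 SPIs, one byte per
 * interrupt, where each byte is a bitmap of target CPUs. As a worked
 * example for vgic_set_target_reg() below, which keeps only the LSB of
 * each byte: a byte of 0x06 (CPUs 1 and 2) collapses to CPU1, and a
 * zero byte defaults to CPU0.
 */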
static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int i;
	u32 val = 0;

	irq -= VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
		val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8);

	return val;
}

static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i, c;
	unsigned long *bmap;
	u32 target;

	irq -= VGIC_NR_PRIVATE_IRQS;

	/*
	 * Pick the LSB in each byte. This ensures we target exactly
	 * one vcpu per IRQ. If the byte is null, assume we target
	 * vcpu 0.
	 */
	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
		int shift = i * GICD_CPUTARGETS_BITS;
		target = ffs((val >> shift) & 0xffU);
		target = target ? (target - 1) : 0;
		dist->irq_spi_cpu[irq + i] = target;
		kvm_for_each_vcpu(c, vcpu, kvm) {
			bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
			if (c == target)
				set_bit(irq + i, bmap);
			else
				clear_bit(irq + i, bmap);
		}
	}
}

static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
				   struct kvm_exit_mmio *mmio,
				   phys_addr_t offset)
{
	u32 reg;

	/* We treat the banked interrupts targets as read-only */
	if (offset < 32) {
		u32 roreg = 1 << vcpu->vcpu_id;
		roreg |= roreg << 8;
		roreg |= roreg << 16;

		vgic_reg_access(mmio, &roreg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		return false;
	}

	reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static u32 vgic_cfg_expand(u16 val)
{
	u32 res = 0;
	int i;

	/*
	 * Turn a 16bit value like abcd...mnop into a 32bit word
	 * a0b0c0d0...m0n0o0p0, which is what the HW cfg register is.
	 */
	for (i = 0; i < 16; i++)
		res |= ((val >> i) & VGIC_CFG_EDGE) << (2 * i + 1);

	return res;
}

static u16 vgic_cfg_compress(u32 val)
{
	u16 res = 0;
	int i;

	/*
	 * Turn a 32bit word a0b0c0d0...m0n0o0p0 into 16bit value like
	 * abcd...mnop which is what we really care about.
	 */
	for (i = 0; i < 16; i++)
		res |= ((val >> (i * 2 + 1)) & VGIC_CFG_EDGE) << i;

	return res;
}
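
/*
 * Worked example: vgic_cfg_expand(0x0001) == 0x00000002 (bit 0 moves to
 * bit 1, so the 2-bit config field for interrupt 0 reads as "edge"),
 * and vgic_cfg_compress(0x00000002) == 0x0001 round-trips it back.
 */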
/*
 * The distributor uses 2 bits per IRQ for the CFG register, but the
 * LSB is always 0. As such, we only keep the upper bit, and use the
 * two above functions to compress/expand the bits
 */
static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 val;
	u32 *reg;

	reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
				  vcpu->vcpu_id, offset >> 1);

	if (offset & 4)
		val = *reg >> 16;
	else
		val = *reg & 0xffff;

	val = vgic_cfg_expand(val);
	vgic_reg_access(mmio, &val, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		if (offset < 8) {
			*reg = ~0U; /* Force PPIs/SGIs to 1 */
			return false;
		}

		val = vgic_cfg_compress(val);
		if (offset & 4) {
			*reg &= 0xffff;
			*reg |= val << 16;
		} else {
			*reg &= 0xffff << 16;
			*reg |= val;
		}
	}

	return false;
}

static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;

	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_dispatch_sgi(vcpu, reg);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

/**
 * vgic_unqueue_irqs - move pending IRQs from LRs to the distributor
 * @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs
 *
 * Move any pending IRQs that have already been assigned to LRs back to the
 * emulated distributor state so that the complete emulated state can be read
 * from the main emulation structures without investigating the LRs.
 *
 * Note that IRQs in the active state in the LRs get their pending state moved
 * to the distributor but the active state stays in the LRs, because we don't
 * track the active state on the distributor side.
 */
static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int vcpu_id = vcpu->vcpu_id;
	int i;

	for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
		struct vgic_lr lr = vgic_get_lr(vcpu, i);

		/*
		 * There are three options for the state bits:
		 *
		 * 01: pending
		 * 10: active
		 * 11: pending and active
		 *
		 * If the LR holds only an active interrupt (not pending) then
		 * just leave it alone.
		 */
		if ((lr.state & LR_STATE_MASK) == LR_STATE_ACTIVE)
			continue;

		/*
		 * Reestablish the pending state on the distributor and the
		 * CPU interface. It may have already been pending, but that
		 * is fine, then we are only setting a few bits that were
		 * already set.
		 */
		vgic_dist_irq_set_pending(vcpu, lr.irq);
		if (lr.irq < VGIC_NR_SGIS)
			*vgic_get_sgi_sources(dist, vcpu_id, lr.irq) |= 1 << lr.source;
		lr.state &= ~LR_STATE_PENDING;
		vgic_set_lr(vcpu, i, lr);

		/*
		 * If there's no state left on the LR (it could still be
		 * active), then the LR does not hold any useful info and can
		 * be marked as free for other use.
		 */
		if (!(lr.state & LR_STATE_MASK)) {
			vgic_retire_lr(i, lr.irq, vcpu);
			vgic_irq_clear_queued(vcpu, lr.irq);
		}

		/* Finally update the VGIC state. */
		vgic_update_state(vcpu->kvm);
	}
}

/* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */
static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int sgi;
	int min_sgi = (offset & ~0x3);
	int max_sgi = min_sgi + 3;
	int vcpu_id = vcpu->vcpu_id;
	u32 reg = 0;

	/* Copy source SGIs from distributor side */
	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
		int shift = 8 * (sgi - min_sgi);
		reg |= ((u32)*vgic_get_sgi_sources(dist, vcpu_id, sgi)) << shift;
	}

	mmio_data_write(mmio, ~0, reg);
	return false;
}

static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset, bool set)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int sgi;
	int min_sgi = (offset & ~0x3);
	int max_sgi = min_sgi + 3;
	int vcpu_id = vcpu->vcpu_id;
	u32 reg;
	bool updated = false;

	reg = mmio_data_read(mmio, ~0);

	/* Set or clear pending SGIs on the distributor */
	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
		u8 mask = reg >> (8 * (sgi - min_sgi));
		u8 *src = vgic_get_sgi_sources(dist, vcpu_id, sgi);

		if (set) {
			if ((*src & mask) != mask)
				updated = true;
			*src |= mask;
		} else {
			if (*src & mask)
				updated = true;
			*src &= ~mask;
		}
	}

	if (updated)
		vgic_update_state(vcpu->kvm);

	return updated;
}

static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio,
				phys_addr_t offset)
{
	if (!mmio->is_write)
		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
	else
		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true);
}

static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	if (!mmio->is_write)
		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
	else
		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
}

/*
 * I would have liked to use the kvm_bus_io_*() API instead, but it
 * cannot cope with banked registers (only the VM pointer is passed
 * around, and we need the vcpu). One of these days, someone please
 * fix it!
 */
struct mmio_range {
	phys_addr_t base;
	unsigned long len;
	int bits_per_irq;
	bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
			    phys_addr_t offset);
};

static const struct mmio_range vgic_dist_ranges[] = {
	{
		.base = GIC_DIST_CTRL,
		.len = 12,
		.bits_per_irq = 0,
		.handle_mmio = handle_mmio_misc,
	},
	{
		.base = GIC_DIST_IGROUP,
		.len = VGIC_MAX_IRQS / 8,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		.base = GIC_DIST_ENABLE_SET,
		.len = VGIC_MAX_IRQS / 8,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_set_enable_reg,
	},
	{
		.base = GIC_DIST_ENABLE_CLEAR,
		.len = VGIC_MAX_IRQS / 8,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_clear_enable_reg,
	},
	{
		.base = GIC_DIST_PENDING_SET,
		.len = VGIC_MAX_IRQS / 8,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_set_pending_reg,
	},
	{
		.base = GIC_DIST_PENDING_CLEAR,
		.len = VGIC_MAX_IRQS / 8,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_clear_pending_reg,
	},
	{
		.base = GIC_DIST_ACTIVE_SET,
		.len = VGIC_MAX_IRQS / 8,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		.base = GIC_DIST_ACTIVE_CLEAR,
		.len = VGIC_MAX_IRQS / 8,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		.base = GIC_DIST_PRI,
		.len = VGIC_MAX_IRQS,
		.bits_per_irq = 8,
		.handle_mmio = handle_mmio_priority_reg,
	},
	{
		.base = GIC_DIST_TARGET,
		.len = VGIC_MAX_IRQS,
		.bits_per_irq = 8,
		.handle_mmio = handle_mmio_target_reg,
	},
	{
		.base = GIC_DIST_CONFIG,
		.len = VGIC_MAX_IRQS / 4,
		.bits_per_irq = 2,
		.handle_mmio = handle_mmio_cfg_reg,
	},
	{
		.base = GIC_DIST_SOFTINT,
		.len = 4,
		.handle_mmio = handle_mmio_sgi_reg,
	},
	{
		.base = GIC_DIST_SGI_PENDING_CLEAR,
		.len = VGIC_NR_SGIS,
		.handle_mmio = handle_mmio_sgi_clear,
	},
	{
		.base = GIC_DIST_SGI_PENDING_SET,
		.len = VGIC_NR_SGIS,
		.handle_mmio = handle_mmio_sgi_set,
	},
	{}
};
static const
struct mmio_range *find_matching_range(const struct mmio_range *ranges,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	const struct mmio_range *r = ranges;

	while (r->len) {
		if (offset >= r->base &&
		    (offset + mmio->len) <= (r->base + r->len))
			return r;
		r++;
	}

	return NULL;
}

static bool vgic_validate_access(const struct vgic_dist *dist,
				 const struct mmio_range *range,
				 unsigned long offset)
{
	int irq;

	if (!range->bits_per_irq)
		return true;	/* Not an irq-based access */

	irq = offset * 8 / range->bits_per_irq;
	if (irq >= dist->nr_irqs)
		return false;

	return true;
}

/**
 * vgic_handle_mmio - handle an in-kernel MMIO access
 * @vcpu:	pointer to the vcpu performing the access
 * @run:	pointer to the kvm_run structure
 * @mmio:	pointer to the data describing the access
 *
 * returns true if the MMIO access has been performed in kernel space,
 * and false if it needs to be emulated in user space.
 */
bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
		      struct kvm_exit_mmio *mmio)
{
	const struct mmio_range *range;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long base = dist->vgic_dist_base;
	bool updated_state;
	unsigned long offset;

	if (!irqchip_in_kernel(vcpu->kvm) ||
	    mmio->phys_addr < base ||
	    (mmio->phys_addr + mmio->len) > (base + KVM_VGIC_V2_DIST_SIZE))
		return false;

	/* We don't support ldrd / strd or ldm / stm to the emulated vgic */
	if (mmio->len > 4) {
		kvm_inject_dabt(vcpu, mmio->phys_addr);
		return true;
	}

	offset = mmio->phys_addr - base;
	range = find_matching_range(vgic_dist_ranges, mmio, offset);
	if (unlikely(!range || !range->handle_mmio)) {
		pr_warn("Unhandled access %d %08llx %d\n",
			mmio->is_write, mmio->phys_addr, mmio->len);
		return false;
	}

	spin_lock(&vcpu->kvm->arch.vgic.lock);
	offset = mmio->phys_addr - range->base - base;
	if (vgic_validate_access(dist, range, offset)) {
		updated_state = range->handle_mmio(vcpu, mmio, offset);
	} else {
		vgic_reg_access(mmio, NULL, offset,
				ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
		updated_state = false;
	}
	spin_unlock(&vcpu->kvm->arch.vgic.lock);
	kvm_prepare_mmio(run, mmio);
	kvm_handle_mmio_return(vcpu, run);

	if (updated_state)
		vgic_kick_vcpus(vcpu->kvm);

	return true;
}

static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi)
{
	return dist->irq_sgi_sources + vcpu_id * VGIC_NR_SGIS + sgi;
}
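
/*
 * irq_sgi_sources is a flat array of VGIC_NR_SGIS bytes per vcpu: the
 * byte for (vcpu, sgi) is a bitmap of the CPUs that have that SGI
 * pending for that vcpu. E.g. with VGIC_NR_SGIS == 16, SGI 3 targeted
 * at vcpu 2 lives at irq_sgi_sources[2 * 16 + 3], and bit 1 set in that
 * byte means vcpu 1 sent it.
 */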
static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
{
	struct kvm *kvm = vcpu->kvm;
	struct vgic_dist *dist = &kvm->arch.vgic;
	int nrcpus = atomic_read(&kvm->online_vcpus);
	u8 target_cpus;
	int sgi, mode, c, vcpu_id;

	vcpu_id = vcpu->vcpu_id;

	sgi = reg & 0xf;
	target_cpus = (reg >> 16) & 0xff;
	mode = (reg >> 24) & 3;

	switch (mode) {
	case 0:
		if (!target_cpus)
			return;
		break;

	case 1:
		target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
		break;

	case 2:
		target_cpus = 1 << vcpu_id;
		break;
	}

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (target_cpus & 1) {
			/* Flag the SGI as pending */
			vgic_dist_irq_set_pending(vcpu, sgi);
			*vgic_get_sgi_sources(dist, c, sgi) |= 1 << vcpu_id;
			kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
		}

		target_cpus >>= 1;
	}
}

static int vgic_nr_shared_irqs(struct vgic_dist *dist)
{
	return dist->nr_irqs - VGIC_NR_PRIVATE_IRQS;
}

static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long *pending, *enabled, *pend_percpu, *pend_shared;
	unsigned long pending_private, pending_shared;
	int nr_shared = vgic_nr_shared_irqs(dist);
	int vcpu_id;

	vcpu_id = vcpu->vcpu_id;
	pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
	pend_shared = vcpu->arch.vgic_cpu.pending_shared;

	pending = vgic_bitmap_get_cpu_map(&dist->irq_pending, vcpu_id);
	enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
	bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);

	pending = vgic_bitmap_get_shared_map(&dist->irq_pending);
	enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
	bitmap_and(pend_shared, pending, enabled, nr_shared);
	bitmap_and(pend_shared, pend_shared,
		   vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
		   nr_shared);

	pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS);
	pending_shared = find_first_bit(pend_shared, nr_shared);
	return (pending_private < VGIC_NR_PRIVATE_IRQS ||
		pending_shared < vgic_nr_shared_irqs(dist));
}
/*
 * Update the interrupt state and determine which CPUs have pending
 * interrupts. Must be called with distributor lock held.
 */
static void vgic_update_state(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int c;

	if (!dist->enabled) {
		set_bit(0, dist->irq_pending_on_cpu);
		return;
	}

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (compute_pending_for_cpu(vcpu)) {
			pr_debug("CPU%d has pending interrupts\n", c);
			set_bit(c, dist->irq_pending_on_cpu);
		}
	}
}

static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
{
	return vgic_ops->get_lr(vcpu, lr);
}

static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
			struct vgic_lr vlr)
{
	vgic_ops->set_lr(vcpu, lr, vlr);
}

static void vgic_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
			       struct vgic_lr vlr)
{
	vgic_ops->sync_lr_elrsr(vcpu, lr, vlr);
}

static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_elrsr(vcpu);
}

static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_eisr(vcpu);
}

static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_interrupt_status(vcpu);
}

static inline void vgic_enable_underflow(struct kvm_vcpu *vcpu)
{
	vgic_ops->enable_underflow(vcpu);
}

static inline void vgic_disable_underflow(struct kvm_vcpu *vcpu)
{
	vgic_ops->disable_underflow(vcpu);
}

static inline void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	vgic_ops->get_vmcr(vcpu, vmcr);
}

static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	vgic_ops->set_vmcr(vcpu, vmcr);
}

static inline void vgic_enable(struct kvm_vcpu *vcpu)
{
	vgic_ops->enable(vcpu);
}

static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);

	vlr.state = 0;
	vgic_set_lr(vcpu, lr_nr, vlr);
	clear_bit(lr_nr, vgic_cpu->lr_used);
	vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
}
/*
 * An interrupt may have been disabled after being made pending on the
 * CPU interface (the classic case is a timer running while we're
 * rebooting the guest - the interrupt would kick as soon as the CPU
 * interface gets enabled, with deadly consequences).
 *
 * The solution is to examine already active LRs, and check the
 * interrupt is still enabled. If not, just retire it.
 */
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int lr;

	for_each_set_bit(lr, vgic_cpu->lr_used, vgic->nr_lr) {
		struct vgic_lr vlr = vgic_get_lr(vcpu, lr);

		if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
			vgic_retire_lr(lr, vlr.irq, vcpu);
			if (vgic_irq_is_queued(vcpu, vlr.irq))
				vgic_irq_clear_queued(vcpu, vlr.irq);
		}
	}
}

/*
 * Queue an interrupt to a CPU virtual interface. Return true on success,
 * or false if it wasn't possible to queue it.
 */
static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_lr vlr;
	int lr;

	/* Sanitize the input... */
	BUG_ON(sgi_source_id & ~7);
	BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS);
	BUG_ON(irq >= VGIC_NR_IRQS);

	kvm_debug("Queue IRQ%d\n", irq);

	lr = vgic_cpu->vgic_irq_lr_map[irq];

	/* Do we have an active interrupt for the same CPUID? */
	if (lr != LR_EMPTY) {
		vlr = vgic_get_lr(vcpu, lr);
		if (vlr.source == sgi_source_id) {
			kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
			BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
			vlr.state |= LR_STATE_PENDING;
			vgic_set_lr(vcpu, lr, vlr);
			return true;
		}
	}

	/* Try to use another LR for this interrupt */
	lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
				 vgic->nr_lr);
	if (lr >= vgic->nr_lr)
		return false;

	kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
	vgic_cpu->vgic_irq_lr_map[irq] = lr;
	set_bit(lr, vgic_cpu->lr_used);

	vlr.irq = irq;
	vlr.source = sgi_source_id;
	vlr.state = LR_STATE_PENDING;
	if (!vgic_irq_is_edge(vcpu, irq))
		vlr.state |= LR_EOI_INT;

	vgic_set_lr(vcpu, lr, vlr);

	return true;
}

static bool vgic_queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long sources;
	int vcpu_id = vcpu->vcpu_id;
	int c;

	sources = *vgic_get_sgi_sources(dist, vcpu_id, irq);

	for_each_set_bit(c, &sources, dist->nr_cpus) {
		if (vgic_queue_irq(vcpu, c, irq))
			clear_bit(c, &sources);
	}

	*vgic_get_sgi_sources(dist, vcpu_id, irq) = sources;

	/*
	 * If the sources bitmap has been cleared it means that we
	 * could queue all the SGIs onto link registers (see the
	 * clear_bit above), and therefore we are done with them in
	 * our emulated gic and can get rid of them.
	 */
	if (!sources) {
		vgic_dist_irq_clear_pending(vcpu, irq);
		vgic_cpu_irq_clear(vcpu, irq);
		return true;
	}

	return false;
}

static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
{
	if (!vgic_can_sample_irq(vcpu, irq))
		return true; /* level interrupt, already queued */

	if (vgic_queue_irq(vcpu, 0, irq)) {
		if (vgic_irq_is_edge(vcpu, irq)) {
			vgic_dist_irq_clear_pending(vcpu, irq);
			vgic_cpu_irq_clear(vcpu, irq);
		} else {
			vgic_irq_set_queued(vcpu, irq);
		}

		return true;
	}

	return false;
}
/*
 * Fill the list registers with pending interrupts before running the
 * guest.
 */
static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int i, vcpu_id;
	int overflow = 0;

	vcpu_id = vcpu->vcpu_id;

	/*
	 * We may not have any pending interrupt, or the interrupts
	 * may have been serviced from another vcpu. In all cases,
	 * move along.
	 */
	if (!kvm_vgic_vcpu_pending_irq(vcpu)) {
		pr_debug("CPU%d has no pending interrupt\n", vcpu_id);
		goto epilog;
	}

	/* SGIs */
	for_each_set_bit(i, vgic_cpu->pending_percpu, VGIC_NR_SGIS) {
		if (!vgic_queue_sgi(vcpu, i))
			overflow = 1;
	}

	/* PPIs */
	for_each_set_bit_from(i, vgic_cpu->pending_percpu, VGIC_NR_PRIVATE_IRQS) {
		if (!vgic_queue_hwirq(vcpu, i))
			overflow = 1;
	}

	/* SPIs */
	for_each_set_bit(i, vgic_cpu->pending_shared, vgic_nr_shared_irqs(dist)) {
		if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
			overflow = 1;
	}

epilog:
	if (overflow) {
		vgic_enable_underflow(vcpu);
	} else {
		vgic_disable_underflow(vcpu);
		/*
		 * We're about to run this VCPU, and we've consumed
		 * everything the distributor had in store for
		 * us. Claim we don't have anything pending. We'll
		 * adjust that if needed while exiting.
		 */
		clear_bit(vcpu_id, dist->irq_pending_on_cpu);
	}
}
static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
{
	u32 status = vgic_get_interrupt_status(vcpu);
	bool level_pending = false;

	kvm_debug("STATUS = %08x\n", status);

	if (status & INT_STATUS_EOI) {
		/*
		 * Some level interrupts have been EOIed. Clear their
		 * active state.
		 */
		u64 eisr = vgic_get_eisr(vcpu);
		unsigned long *eisr_ptr = (unsigned long *)&eisr;
		int lr;

		for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
			struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
			WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));

			vgic_irq_clear_queued(vcpu, vlr.irq);
			WARN_ON(vlr.state & LR_STATE_MASK);
			vlr.state = 0;
			vgic_set_lr(vcpu, lr, vlr);

			/*
			 * If the IRQ was EOIed it was also ACKed and we
			 * therefore assume we can clear the soft pending
			 * state (should it have been set) for this interrupt.
			 *
			 * Note: if the IRQ soft pending state was set after
			 * the IRQ was acked, it actually shouldn't be
			 * cleared, but we have no way of knowing that unless
			 * we start trapping ACKs when the soft-pending state
			 * is set.
			 */
			vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);

			/* Any additional pending interrupt? */
			if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
				vgic_cpu_irq_set(vcpu, vlr.irq);
				level_pending = true;
			} else {
				vgic_dist_irq_clear_pending(vcpu, vlr.irq);
				vgic_cpu_irq_clear(vcpu, vlr.irq);
			}

			/*
			 * Despite being EOIed, the LR may not have
			 * been marked as empty.
			 */
			vgic_sync_lr_elrsr(vcpu, lr, vlr);
		}
	}

	if (status & INT_STATUS_UNDERFLOW)
		vgic_disable_underflow(vcpu);

	return level_pending;
}
/*
 * Sync back the VGIC state after a guest run. The distributor lock is
 * needed so we don't get preempted in the middle of the state processing.
 */
static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	u64 elrsr;
	unsigned long *elrsr_ptr;
	int lr, pending;
	bool level_pending;

	level_pending = vgic_process_maintenance(vcpu);
	elrsr = vgic_get_elrsr(vcpu);
	elrsr_ptr = (unsigned long *)&elrsr;

	/* Clear mappings for empty LRs */
	for_each_set_bit(lr, elrsr_ptr, vgic->nr_lr) {
		struct vgic_lr vlr;

		if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
			continue;

		vlr = vgic_get_lr(vcpu, lr);

		BUG_ON(vlr.irq >= VGIC_NR_IRQS);
		vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY;
	}

	/* Check if we still have something up our sleeve... */
	pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
	if (level_pending || pending < vgic->nr_lr)
		set_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
}

void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	spin_lock(&dist->lock);
	__kvm_vgic_flush_hwstate(vcpu);
	spin_unlock(&dist->lock);
}

void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	spin_lock(&dist->lock);
	__kvm_vgic_sync_hwstate(vcpu);
	spin_unlock(&dist->lock);
}

int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return 0;

	return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
}

static void vgic_kick_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int c;

	/*
	 * We've injected an interrupt, time to find out who deserves
	 * the good news.
	 */
	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (kvm_vgic_vcpu_pending_irq(vcpu))
			kvm_vcpu_kick(vcpu);
	}
}
static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
{
	int edge_triggered = vgic_irq_is_edge(vcpu, irq);

	/*
	 * Only inject an interrupt if:
	 * - edge triggered and we have a rising edge
	 * - level triggered and we change level
	 */
	if (edge_triggered) {
		int state = vgic_dist_irq_is_pending(vcpu, irq);
		return level > state;
	} else {
		int state = vgic_dist_irq_get_level(vcpu, irq);
		return level != state;
	}
}

static bool vgic_update_irq_pending(struct kvm *kvm, int cpuid,
				    unsigned int irq_num, bool level)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int edge_triggered, level_triggered;
	int enabled;
	bool ret = true;

	spin_lock(&dist->lock);

	vcpu = kvm_get_vcpu(kvm, cpuid);
	edge_triggered = vgic_irq_is_edge(vcpu, irq_num);
	level_triggered = !edge_triggered;

	if (!vgic_validate_injection(vcpu, irq_num, level)) {
		ret = false;
		goto out;
	}

	if (irq_num >= VGIC_NR_PRIVATE_IRQS) {
		cpuid = dist->irq_spi_cpu[irq_num - VGIC_NR_PRIVATE_IRQS];
		vcpu = kvm_get_vcpu(kvm, cpuid);
	}

	kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid);

	if (level) {
		if (level_triggered)
			vgic_dist_irq_set_level(vcpu, irq_num);
		vgic_dist_irq_set_pending(vcpu, irq_num);
	} else {
		if (level_triggered) {
			vgic_dist_irq_clear_level(vcpu, irq_num);
			if (!vgic_dist_irq_soft_pend(vcpu, irq_num))
				vgic_dist_irq_clear_pending(vcpu, irq_num);
		} else {
			vgic_dist_irq_clear_pending(vcpu, irq_num);
		}
	}

	enabled = vgic_irq_is_enabled(vcpu, irq_num);

	if (!enabled) {
		ret = false;
		goto out;
	}

	if (!vgic_can_sample_irq(vcpu, irq_num)) {
		/*
		 * Level interrupt in progress, will be picked up
		 * when EOId.
		 */
		ret = false;
		goto out;
	}

	if (level) {
		vgic_cpu_irq_set(vcpu, irq_num);
		set_bit(cpuid, dist->irq_pending_on_cpu);
	}

out:
	spin_unlock(&dist->lock);

	return ret;
}

/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:     The VM structure pointer
 * @cpuid:   The CPU for PPIs
 * @irq_num: The IRQ number that is assigned to the device
 * @level:   Edge-triggered:  true:  to trigger the interrupt
 *                            false: to ignore the call
 *           Level-sensitive  true:  activates an interrupt
 *                            false: deactivates an interrupt
 *
 * The GIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts. You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
			bool level)
{
	if (likely(vgic_initialized(kvm)) &&
	    vgic_update_irq_pending(kvm, cpuid, irq_num, level))
		vgic_kick_vcpus(kvm);

	return 0;
}
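
/*
 * A minimal usage sketch, assuming a hypothetical in-kernel device model
 * wired to SPI 40 as a level-sensitive interrupt: raising the line is
 *
 *	kvm_vgic_inject_irq(kvm, 0, 40, 1);
 *
 * and once the guest has serviced the device, lowering it is
 *
 *	kvm_vgic_inject_irq(kvm, 0, 40, 0);
 *
 * For SPIs the cpuid argument is not used to route the interrupt (the
 * irq_spi_cpu array is), so 0 is a conventional choice.
 */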
static irqreturn_t vgic_maintenance_handler(int irq, void *data)
{
	/*
	 * We cannot rely on the vgic maintenance interrupt to be
	 * delivered synchronously. This means we can only use it to
	 * exit the VM, and we perform the handling of EOIed
	 * interrupts on the exit path (see vgic_process_maintenance).
	 */
	return IRQ_HANDLED;
}

void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	kfree(vgic_cpu->pending_shared);
	kfree(vgic_cpu->vgic_irq_lr_map);
	vgic_cpu->pending_shared = NULL;
	vgic_cpu->vgic_irq_lr_map = NULL;
}

static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
	vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
	vgic_cpu->vgic_irq_lr_map = kzalloc(nr_irqs, GFP_KERNEL);

	if (!vgic_cpu->pending_shared || !vgic_cpu->vgic_irq_lr_map) {
		kvm_vgic_vcpu_destroy(vcpu);
		return -ENOMEM;
	}

	return 0;
}

/**
 * kvm_vgic_vcpu_init - Initialize per-vcpu VGIC state
 * @vcpu: pointer to the vcpu struct
 *
 * Initialize the vgic_cpu struct and vgic_dist struct fields pertaining to
 * this vcpu and enable the VGIC for this VCPU
 */
int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int i;

	if (vcpu->vcpu_id >= dist->nr_cpus)
		return -EBUSY;

	for (i = 0; i < VGIC_NR_IRQS; i++) {
		if (i < VGIC_NR_PPIS)
			vgic_bitmap_set_irq_val(&dist->irq_enabled,
						vcpu->vcpu_id, i, 1);
		if (i < VGIC_NR_PRIVATE_IRQS)
			vgic_bitmap_set_irq_val(&dist->irq_cfg,
						vcpu->vcpu_id, i, VGIC_CFG_EDGE);

		vgic_cpu->vgic_irq_lr_map[i] = LR_EMPTY;
	}

	/*
	 * Store the number of LRs per vcpu, so we don't have to go
	 * all the way to the distributor structure to find out. Only
	 * assembly code should use this one.
	 */
	vgic_cpu->nr_lr = vgic->nr_lr;

	vgic_enable(vcpu);

	return 0;
}
void kvm_vgic_destroy(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vgic_vcpu_destroy(vcpu);

	vgic_free_bitmap(&dist->irq_enabled);
	vgic_free_bitmap(&dist->irq_level);
	vgic_free_bitmap(&dist->irq_pending);
	vgic_free_bitmap(&dist->irq_soft_pend);
	vgic_free_bitmap(&dist->irq_queued);
	vgic_free_bitmap(&dist->irq_cfg);
	vgic_free_bytemap(&dist->irq_priority);
	if (dist->irq_spi_target) {
		for (i = 0; i < dist->nr_cpus; i++)
			vgic_free_bitmap(&dist->irq_spi_target[i]);
	}
	kfree(dist->irq_sgi_sources);
	kfree(dist->irq_spi_cpu);
	kfree(dist->irq_spi_target);
	kfree(dist->irq_pending_on_cpu);
	dist->irq_sgi_sources = NULL;
	dist->irq_spi_cpu = NULL;
	dist->irq_spi_target = NULL;
	dist->irq_pending_on_cpu = NULL;
}

/*
 * Allocate and initialize the various data structures. Must be called
 * with kvm->lock held!
 */
static int vgic_init_maps(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int nr_cpus, nr_irqs;
	int ret, i;

	nr_cpus = dist->nr_cpus = KVM_MAX_VCPUS;
	nr_irqs = dist->nr_irqs = VGIC_NR_IRQS;

	ret  = vgic_init_bitmap(&dist->irq_enabled, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_level, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_pending, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_soft_pend, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_queued, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_cfg, nr_cpus, nr_irqs);
	ret |= vgic_init_bytemap(&dist->irq_priority, nr_cpus, nr_irqs);

	if (ret)
		goto out;

	dist->irq_sgi_sources = kzalloc(nr_cpus * VGIC_NR_SGIS, GFP_KERNEL);
	dist->irq_spi_cpu = kzalloc(nr_irqs - VGIC_NR_PRIVATE_IRQS, GFP_KERNEL);
	dist->irq_spi_target = kzalloc(sizeof(*dist->irq_spi_target) * nr_cpus,
				       GFP_KERNEL);
	dist->irq_pending_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long),
					   GFP_KERNEL);
	if (!dist->irq_sgi_sources ||
	    !dist->irq_spi_cpu ||
	    !dist->irq_spi_target ||
	    !dist->irq_pending_on_cpu) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < nr_cpus; i++)
		ret |= vgic_init_bitmap(&dist->irq_spi_target[i],
					nr_cpus, nr_irqs);

	if (ret)
		goto out;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		ret = vgic_vcpu_init_maps(vcpu, nr_irqs);
		if (ret) {
			kvm_err("VGIC: Failed to allocate vcpu memory\n");
			break;
		}
	}

out:
	if (ret)
		kvm_vgic_destroy(kvm);

	return ret;
}
/**
 * kvm_vgic_init - Initialize global VGIC state before running any VCPUs
 * @kvm: pointer to the kvm struct
 *
 * Map the virtual CPU interface into the VM before running any VCPUs. We
 * can't do this at creation time, because user space must first set the
 * virtual CPU interface address in the guest physical address space. Also
 * initialize the ITARGETSRn regs to 0 on the emulated distributor.
 */
int kvm_vgic_init(struct kvm *kvm)
{
	int ret = 0, i;

	if (!irqchip_in_kernel(kvm))
		return 0;

	mutex_lock(&kvm->lock);

	if (vgic_initialized(kvm))
		goto out;

	if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) {
		kvm_err("Need to set vgic cpu and dist addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
				    vgic->vcpu_base, KVM_VGIC_V2_CPU_SIZE);
	if (ret) {
		kvm_err("Unable to remap VGIC CPU to VCPU\n");
		goto out;
	}

	for (i = VGIC_NR_PRIVATE_IRQS; i < VGIC_NR_IRQS; i += 4)
		vgic_set_target_reg(kvm, 0, i);

	kvm->arch.vgic.ready = true;
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

int kvm_vgic_create(struct kvm *kvm)
{
	int i, vcpu_lock_idx = -1, ret = 0;
	struct kvm_vcpu *vcpu;

	mutex_lock(&kvm->lock);

	if (kvm->arch.vgic.vctrl_base) {
		ret = -EEXIST;
		goto out;
	}

	/*
	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
	 * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
	 * that no other VCPUs are run while we create the vgic.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!mutex_trylock(&vcpu->mutex))
			goto out_unlock;
		vcpu_lock_idx = i;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu->arch.has_run_once) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}

	spin_lock_init(&kvm->arch.vgic.lock);
	kvm->arch.vgic.in_kernel = true;
	kvm->arch.vgic.vctrl_base = vgic->vctrl_base;
	kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
	kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;

	ret = vgic_init_maps(kvm);
	if (ret)
		kvm_err("Unable to allocate maps\n");

out_unlock:
	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
		vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
		mutex_unlock(&vcpu->mutex);
	}

out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int vgic_ioaddr_overlap(struct kvm *kvm)
{
	phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
	phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;

	if (IS_VGIC_ADDR_UNDEF(dist) || IS_VGIC_ADDR_UNDEF(cpu))
		return 0;
	if ((dist <= cpu && dist + KVM_VGIC_V2_DIST_SIZE > cpu) ||
	    (cpu <= dist && cpu + KVM_VGIC_V2_CPU_SIZE > dist))
		return -EBUSY;
	return 0;
}

static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
			      phys_addr_t addr, phys_addr_t size)
{
	int ret;

	if (addr & ~KVM_PHYS_MASK)
		return -E2BIG;

	if (addr & (SZ_4K - 1))
		return -EINVAL;

	if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
		return -EEXIST;
	if (addr + size < addr)
		return -EINVAL;

	*ioaddr = addr;
	ret = vgic_ioaddr_overlap(kvm);
	if (ret)
		*ioaddr = VGIC_ADDR_UNDEF;

	return ret;
}
/**
 * kvm_vgic_addr - set or get vgic VM base addresses
 * @kvm:   pointer to the vm struct
 * @type:  the VGIC addr type, one of KVM_VGIC_V2_ADDR_TYPE_XXX
 * @addr:  pointer to address value
 * @write: if true set the address in the VM address space, if false read the
 *         address
 *
 * Set or get the vgic base addresses for the distributor and the virtual CPU
 * interface in the VM physical address space. These addresses are properties
 * of the emulated core/SoC and therefore user space initially knows this
 * information.
 */
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
	int r = 0;
	struct vgic_dist *vgic = &kvm->arch.vgic;

	mutex_lock(&kvm->lock);
	switch (type) {
	case KVM_VGIC_V2_ADDR_TYPE_DIST:
		if (write) {
			r = vgic_ioaddr_assign(kvm, &vgic->vgic_dist_base,
					       *addr, KVM_VGIC_V2_DIST_SIZE);
		} else {
			*addr = vgic->vgic_dist_base;
		}
		break;
	case KVM_VGIC_V2_ADDR_TYPE_CPU:
		if (write) {
			r = vgic_ioaddr_assign(kvm, &vgic->vgic_cpu_base,
					       *addr, KVM_VGIC_V2_CPU_SIZE);
		} else {
			*addr = vgic->vgic_cpu_base;
		}
		break;
	default:
		r = -ENODEV;
	}

	mutex_unlock(&kvm->lock);
	return r;
}
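
/*
 * From user space this is reached through the KVM device attribute API;
 * a minimal sketch (error handling omitted, hypothetical 4K-aligned
 * guest address) of setting the distributor base would be:
 *
 *	uint64_t addr = 0x8001000;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_ADDR,
 *		.attr  = KVM_VGIC_V2_ADDR_TYPE_DIST,
 *		.addr  = (uint64_t)&addr,
 *	};
 *	ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * where vgic_fd was obtained from KVM_CREATE_DEVICE with
 * KVM_DEV_TYPE_ARM_VGIC_V2.
 */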
static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
				 struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	bool updated = false;
	struct vgic_vmcr vmcr;
	u32 *vmcr_field;
	u32 reg;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (offset & ~0x3) {
	case GIC_CPU_CTRL:
		vmcr_field = &vmcr.ctlr;
		break;
	case GIC_CPU_PRIMASK:
		vmcr_field = &vmcr.pmr;
		break;
	case GIC_CPU_BINPOINT:
		vmcr_field = &vmcr.bpr;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		vmcr_field = &vmcr.abpr;
		break;
	default:
		BUG();
	}

	if (!mmio->is_write) {
		reg = *vmcr_field;
		mmio_data_write(mmio, ~0, reg);
	} else {
		reg = mmio_data_read(mmio, ~0);
		if (reg != *vmcr_field) {
			*vmcr_field = reg;
			vgic_set_vmcr(vcpu, &vmcr);
			updated = true;
		}
	}
	return updated;
}

static bool handle_mmio_abpr(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT);
}

static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	u32 reg;

	if (mmio->is_write)
		return false;

	/* GICC_IIDR */
	reg = (PRODUCT_ID_KVM << 20) |
	      (GICC_ARCH_VERSION_V2 << 16) |
	      (IMPLEMENTER_ARM << 0);
	mmio_data_write(mmio, ~0, reg);
	return false;
}
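
/*
 * With the constants above, GICC_IIDR reads back as
 * (0x4b << 20) | (0x2 << 16) | 0x43b == 0x04b2043b: a KVM product id,
 * architecture version 2, implemented by ARM (JEP106 code 0x43b).
 */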
/*
 * CPU Interface Register accesses - these are not accessed by the VM, but by
 * user space for saving and restoring VGIC state.
 */
static const struct mmio_range vgic_cpu_ranges[] = {
	{
		.base = GIC_CPU_CTRL,
		.len = 12,
		.handle_mmio = handle_cpu_mmio_misc,
	},
	{
		.base = GIC_CPU_ALIAS_BINPOINT,
		.len = 4,
		.handle_mmio = handle_mmio_abpr,
	},
	{
		.base = GIC_CPU_ACTIVEPRIO,
		.len = 16,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		.base = GIC_CPU_IDENT,
		.len = 4,
		.handle_mmio = handle_cpu_mmio_ident,
	},
	{}
};

static int vgic_attr_regs_access(struct kvm_device *dev,
				 struct kvm_device_attr *attr,
				 u32 *reg, bool is_write)
{
	const struct mmio_range *r = NULL, *ranges;
	phys_addr_t offset;
	int ret, cpuid, c;
	struct kvm_vcpu *vcpu, *tmp_vcpu;
	struct vgic_dist *vgic;
	struct kvm_exit_mmio mmio;

	offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
		KVM_DEV_ARM_VGIC_CPUID_SHIFT;

	mutex_lock(&dev->kvm->lock);

	if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
		ret = -EINVAL;
		goto out;
	}

	vcpu = kvm_get_vcpu(dev->kvm, cpuid);
	vgic = &dev->kvm->arch.vgic;

	mmio.len = 4;
	mmio.is_write = is_write;
	if (is_write)
		mmio_data_write(&mmio, ~0, *reg);
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		mmio.phys_addr = vgic->vgic_dist_base + offset;
		ranges = vgic_dist_ranges;
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		mmio.phys_addr = vgic->vgic_cpu_base + offset;
		ranges = vgic_cpu_ranges;
		break;
	default:
		BUG();
	}
	r = find_matching_range(ranges, &mmio, offset);

	if (unlikely(!r || !r->handle_mmio)) {
		ret = -ENXIO;
		goto out;
	}

	spin_lock(&vgic->lock);

	/*
	 * Ensure that no other VCPU is running by checking the vcpu->cpu
	 * field. If no other VCPUs are running we can safely access the VGIC
	 * state, because even if another VCPU is run after this point, that
	 * VCPU will not touch the vgic state, because it will block on
	 * getting the vgic->lock in kvm_vgic_sync_hwstate().
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
		if (unlikely(tmp_vcpu->cpu != -1)) {
			ret = -EBUSY;
			goto out_vgic_unlock;
		}
	}

	/*
	 * Move all pending IRQs from the LRs on all VCPUs so the pending
	 * state can be properly represented in the register state accessible
	 * through userspace.
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm)
		vgic_unqueue_irqs(tmp_vcpu);

	offset -= r->base;
	r->handle_mmio(vcpu, &mmio, offset);

	if (!is_write)
		*reg = mmio_data_read(&mmio, ~0);

	ret = 0;
out_vgic_unlock:
	spin_unlock(&vgic->lock);
out:
	mutex_unlock(&dev->kvm->lock);
	return ret;
}
static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		r = kvm_vgic_addr(dev->kvm, type, &addr, true);
		return (r == -ENODEV) ? -ENXIO : r;
	}

	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg;

		if (get_user(reg, uaddr))
			return -EFAULT;

		return vgic_attr_regs_access(dev, attr, &reg, true);
	}

	}

	return -ENXIO;
}

static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = -ENXIO;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		r = kvm_vgic_addr(dev->kvm, type, &addr, false);
		if (r)
			return (r == -ENODEV) ? -ENXIO : r;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}

	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg = 0;

		r = vgic_attr_regs_access(dev, attr, &reg, false);
		if (r)
			return r;
		r = put_user(reg, uaddr);
		break;
	}

	}

	return r;
}

static int vgic_has_attr_regs(const struct mmio_range *ranges,
			      phys_addr_t offset)
{
	struct kvm_exit_mmio dev_attr_mmio;

	dev_attr_mmio.len = 4;
	if (find_matching_range(ranges, &dev_attr_mmio, offset))
		return 0;
	else
		return -ENXIO;
}

static int vgic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	phys_addr_t offset;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_V2_ADDR_TYPE_DIST:
		case KVM_VGIC_V2_ADDR_TYPE_CPU:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
		return vgic_has_attr_regs(vgic_dist_ranges, offset);
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
		return vgic_has_attr_regs(vgic_cpu_ranges, offset);
	}
	return -ENXIO;
}

static void vgic_destroy(struct kvm_device *dev)
{
	kfree(dev);
}

static int vgic_create(struct kvm_device *dev, u32 type)
{
	return kvm_vgic_create(dev->kvm);
}

static struct kvm_device_ops kvm_arm_vgic_v2_ops = {
	.name = "kvm-arm-vgic",
	.create = vgic_create,
	.destroy = vgic_destroy,
	.set_attr = vgic_set_attr,
	.get_attr = vgic_get_attr,
	.has_attr = vgic_has_attr,
};
static void vgic_init_maintenance_interrupt(void *info)
{
	enable_percpu_irq(vgic->maint_irq, 0);
}

static int vgic_cpu_notify(struct notifier_block *self,
			   unsigned long action, void *cpu)
{
	switch (action) {
	case CPU_STARTING:
	case CPU_STARTING_FROZEN:
		vgic_init_maintenance_interrupt(NULL);
		break;
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		disable_percpu_irq(vgic->maint_irq);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block vgic_cpu_nb = {
	.notifier_call = vgic_cpu_notify,
};

static const struct of_device_id vgic_ids[] = {
	{ .compatible = "arm,cortex-a15-gic", .data = vgic_v2_probe, },
	{ .compatible = "arm,gic-v3", .data = vgic_v3_probe, },
	{},
};

int kvm_vgic_hyp_init(void)
{
	const struct of_device_id *matched_id;
	int (*vgic_probe)(struct device_node *, const struct vgic_ops **,
			  const struct vgic_params **);
	struct device_node *vgic_node;
	int ret;

	vgic_node = of_find_matching_node_and_match(NULL,
						    vgic_ids, &matched_id);
	if (!vgic_node) {
		kvm_err("error: no compatible GIC node found\n");
		return -ENODEV;
	}

	vgic_probe = matched_id->data;
	ret = vgic_probe(vgic_node, &vgic_ops, &vgic);
	if (ret)
		return ret;

	ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
				 "vgic", kvm_get_running_vcpus());
	if (ret) {
		kvm_err("Cannot register interrupt %d\n", vgic->maint_irq);
		return ret;
	}

	ret = __register_cpu_notifier(&vgic_cpu_nb);
	if (ret) {
		kvm_err("Cannot register vgic CPU notifier\n");
		goto out_free_irq;
	}

	/* Callback into arch code for setup */
	vgic_arch_setup(vgic);

	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);

	return kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
				       KVM_DEV_TYPE_ARM_VGIC_V2);

out_free_irq:
	free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
	return ret;
}