/*
 * Contains GICv2 specific emulation code, was in vgic.c before.
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/uaccess.h>

#include <linux/irqchip/arm-gic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"

#define GICC_ARCH_VERSION_V2            0x2

static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi)
{
        return dist->irq_sgi_sources + vcpu_id * VGIC_NR_SGIS + sgi;
}

static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
                             struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        u32 reg;
        u32 word_offset = offset & 3;

        switch (offset & ~3) {
        case 0:                 /* GICD_CTLR */
                reg = vcpu->kvm->arch.vgic.enabled;
                vgic_reg_access(mmio, &reg, word_offset,
                                ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
                if (mmio->is_write) {
                        vcpu->kvm->arch.vgic.enabled = reg & 1;
                        vgic_update_state(vcpu->kvm);
                        return true;
                }
                break;

        case 4:                 /* GICD_TYPER */
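                /*
                 * CPUNumber in bits [7:5] encodes the number of vCPUs - 1,
                 * ITLinesNumber in bits [4:0] the number of 32-interrupt
                 * blocks - 1.
                 */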
                reg  = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
                reg |= (vcpu->kvm->arch.vgic.nr_irqs >> 5) - 1;
                vgic_reg_access(mmio, &reg, word_offset,
                                ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
                break;

        case 8:                 /* GICD_IIDR */
                reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
                vgic_reg_access(mmio, &reg, word_offset,
                                ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
                break;
        }

        return false;
}

static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
                                       struct kvm_exit_mmio *mmio,
                                       phys_addr_t offset)
{
        return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
                                      vcpu->vcpu_id, ACCESS_WRITE_SETBIT);
}

static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
                                         struct kvm_exit_mmio *mmio,
                                         phys_addr_t offset)
{
        return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
                                      vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT);
}

static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
                                        struct kvm_exit_mmio *mmio,
                                        phys_addr_t offset)
{
        return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset,
                                           vcpu->vcpu_id);
}

static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
                                          struct kvm_exit_mmio *mmio,
                                          phys_addr_t offset)
{
        return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset,
                                             vcpu->vcpu_id);
}

static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
                                     struct kvm_exit_mmio *mmio,
                                     phys_addr_t offset)
{
        u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
                                        vcpu->vcpu_id, offset);
        vgic_reg_access(mmio, reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
        return false;
}

#define GICD_ITARGETSR_SIZE     32
#define GICD_CPUTARGETS_BITS    8
#define GICD_IRQS_PER_ITARGETSR (GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        int i;
        u32 val = 0;

        irq -= VGIC_NR_PRIVATE_IRQS;

        for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
                val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8);

        return val;
}

static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct kvm_vcpu *vcpu;
        int i, c;
        unsigned long *bmap;
        u32 target;

        irq -= VGIC_NR_PRIVATE_IRQS;

        /*
         * Pick the LSB in each byte. This ensures we target exactly
         * one vcpu per IRQ. If the byte is null, assume we target
         * CPU0.
         */
        for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
                int shift = i * GICD_CPUTARGETS_BITS;

                target = ffs((val >> shift) & 0xffU);
                target = target ? (target - 1) : 0;
                dist->irq_spi_cpu[irq + i] = target;
                kvm_for_each_vcpu(c, vcpu, kvm) {
                        bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
                        if (c == target)
                                set_bit(irq + i, bmap);
                        else
                                clear_bit(irq + i, bmap);
                }
        }
}

static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
                                   struct kvm_exit_mmio *mmio,
                                   phys_addr_t offset)
{
        u32 reg;

        /* We treat the banked interrupts targets as read-only */
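        /* The first 32 bytes (ITARGETSR0..7) map the 32 private interrupts */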
        if (offset < 32) {
                u32 roreg;

                roreg = 1 << vcpu->vcpu_id;
                roreg |= roreg << 8;
                roreg |= roreg << 16;

                vgic_reg_access(mmio, &roreg, offset,
                                ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
                return false;
        }

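        /* One byte per interrupt: the word-aligned offset is also the first IRQ number */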
        reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
        if (mmio->is_write) {
                vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
                vgic_update_state(vcpu->kvm);
                return true;
        }

        return false;
}

static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
                                struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        u32 *reg;

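        /* GICD_ICFGR has two bits per interrupt, the irq_cfg bitmap only one, hence offset >> 1 */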
        reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
                                  vcpu->vcpu_id, offset >> 1);

        return vgic_handle_cfg_reg(reg, mmio, offset);
}

static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
                                struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        u32 reg;

        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
        if (mmio->is_write) {
                vgic_dispatch_sgi(vcpu, reg);
                vgic_update_state(vcpu->kvm);
                return true;
        }

        return false;
}

/* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */
static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
                                        struct kvm_exit_mmio *mmio,
                                        phys_addr_t offset)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        int sgi;
        int min_sgi = (offset & ~0x3);
        int max_sgi = min_sgi + 3;
        int vcpu_id = vcpu->vcpu_id;
        u32 reg = 0;

        /* Copy source SGIs from distributor side */
        for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
                u8 sources = *vgic_get_sgi_sources(dist, vcpu_id, sgi);

                reg |= ((u32)sources) << (8 * (sgi - min_sgi));
        }

        mmio_data_write(mmio, ~0, reg);
        return false;
}

static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
                                         struct kvm_exit_mmio *mmio,
                                         phys_addr_t offset, bool set)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        int sgi;
        int min_sgi = (offset & ~0x3);
        int max_sgi = min_sgi + 3;
        int vcpu_id = vcpu->vcpu_id;
        u32 reg;
        bool updated = false;

        reg = mmio_data_read(mmio, ~0);

        /* Set or clear the pending SGI sources on the distributor side */
        for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
                u8 mask = reg >> (8 * (sgi - min_sgi));
                u8 *src = vgic_get_sgi_sources(dist, vcpu_id, sgi);

                if (set) {
                        if ((*src & mask) != mask)
                                updated = true;
                        *src |= mask;
                } else {
                        if (*src & mask)
                                updated = true;
                        *src &= ~mask;
                }
        }

        if (updated)
                vgic_update_state(vcpu->kvm);

        return updated;
}

static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
                                struct kvm_exit_mmio *mmio,
                                phys_addr_t offset)
{
        if (!mmio->is_write)
                return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
        else
                return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true);
}

static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
                                  struct kvm_exit_mmio *mmio,
                                  phys_addr_t offset)
{
        if (!mmio->is_write)
                return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
        else
                return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
}

static const struct kvm_mmio_range vgic_dist_ranges[] = {
        {
                .base           = GIC_DIST_CTRL,
                .len            = 12,
                .bits_per_irq   = 0,
                .handle_mmio    = handle_mmio_misc,
        },
        {
                .base           = GIC_DIST_IGROUP,
                .len            = VGIC_MAX_IRQS / 8,
                .bits_per_irq   = 1,
                .handle_mmio    = handle_mmio_raz_wi,
        },
        {
                .base           = GIC_DIST_ENABLE_SET,
                .len            = VGIC_MAX_IRQS / 8,
                .bits_per_irq   = 1,
                .handle_mmio    = handle_mmio_set_enable_reg,
        },
        {
                .base           = GIC_DIST_ENABLE_CLEAR,
                .len            = VGIC_MAX_IRQS / 8,
                .bits_per_irq   = 1,
                .handle_mmio    = handle_mmio_clear_enable_reg,
        },
        {
                .base           = GIC_DIST_PENDING_SET,
                .len            = VGIC_MAX_IRQS / 8,
                .bits_per_irq   = 1,
                .handle_mmio    = handle_mmio_set_pending_reg,
        },
        {
                .base           = GIC_DIST_PENDING_CLEAR,
                .len            = VGIC_MAX_IRQS / 8,
                .bits_per_irq   = 1,
                .handle_mmio    = handle_mmio_clear_pending_reg,
        },
        {
                .base           = GIC_DIST_ACTIVE_SET,
                .len            = VGIC_MAX_IRQS / 8,
                .bits_per_irq   = 1,
                .handle_mmio    = handle_mmio_raz_wi,
        },
        {
                .base           = GIC_DIST_ACTIVE_CLEAR,
                .len            = VGIC_MAX_IRQS / 8,
                .bits_per_irq   = 1,
                .handle_mmio    = handle_mmio_raz_wi,
        },
        {
                .base           = GIC_DIST_PRI,
                .len            = VGIC_MAX_IRQS,
                .bits_per_irq   = 8,
                .handle_mmio    = handle_mmio_priority_reg,
        },
        {
                .base           = GIC_DIST_TARGET,
                .len            = VGIC_MAX_IRQS,
                .bits_per_irq   = 8,
                .handle_mmio    = handle_mmio_target_reg,
        },
        {
                .base           = GIC_DIST_CONFIG,
                .len            = VGIC_MAX_IRQS / 4,
                .bits_per_irq   = 2,
                .handle_mmio    = handle_mmio_cfg_reg,
        },
        {
                .base           = GIC_DIST_SOFTINT,
                .len            = 4,
                .handle_mmio    = handle_mmio_sgi_reg,
        },
        {
                .base           = GIC_DIST_SGI_PENDING_CLEAR,
                .len            = VGIC_NR_SGIS,
                .handle_mmio    = handle_mmio_sgi_clear,
        },
        {
                .base           = GIC_DIST_SGI_PENDING_SET,
                .len            = VGIC_NR_SGIS,
                .handle_mmio    = handle_mmio_sgi_set,
        },
        {}
};

static bool vgic_v2_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
                                struct kvm_exit_mmio *mmio)
{
        unsigned long base = vcpu->kvm->arch.vgic.vgic_dist_base;

        if (!is_in_range(mmio->phys_addr, mmio->len, base,
                         KVM_VGIC_V2_DIST_SIZE))
                return false;

        /* GICv2 does not support accesses wider than 32 bits */
        if (mmio->len > 4) {
                kvm_inject_dabt(vcpu, mmio->phys_addr);
                return true;
        }

        return vgic_handle_mmio_range(vcpu, run, mmio, vgic_dist_ranges, base);
}

static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
{
        struct kvm *kvm = vcpu->kvm;
        struct vgic_dist *dist = &kvm->arch.vgic;
        int nrcpus = atomic_read(&kvm->online_vcpus);
        u8 target_cpus;
        int sgi, mode, c, vcpu_id;

        vcpu_id = vcpu->vcpu_id;

        sgi = reg & 0xf;
        target_cpus = (reg >> 16) & 0xff;
        mode = (reg >> 24) & 3;

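        /*
         * GICD_SGIR TargetListFilter: 0 forwards the SGI to the CPUs
         * listed in target_cpus, 1 to all CPUs but the requesting one,
         * 2 only to the requesting CPU.
         */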
        switch (mode) {
        case 0:
                if (!target_cpus)
                        return;
                break;

        case 1:
                target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
                break;

        case 2:
                target_cpus = 1 << vcpu_id;
                break;
        }

        kvm_for_each_vcpu(c, vcpu, kvm) {
                if (target_cpus & 1) {
                        /* Flag the SGI as pending */
                        vgic_dist_irq_set_pending(vcpu, sgi);
                        *vgic_get_sgi_sources(dist, c, sgi) |= 1 << vcpu_id;
                        kvm_debug("SGI%d from CPU%d to CPU%d\n",
                                  sgi, vcpu_id, c);
                }

                target_cpus >>= 1;
        }
}

static bool vgic_v2_queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        unsigned long sources;
        int vcpu_id = vcpu->vcpu_id;
        int c;

        sources = *vgic_get_sgi_sources(dist, vcpu_id, irq);

        for_each_set_bit(c, &sources, dist->nr_cpus) {
                if (vgic_queue_irq(vcpu, c, irq))
                        clear_bit(c, &sources);
        }

        *vgic_get_sgi_sources(dist, vcpu_id, irq) = sources;

        /*
         * If the sources bitmap has been cleared it means that we
         * could queue all the SGIs onto link registers (see the
         * clear_bit above), and therefore we are done with them in
         * our emulated gic and can get rid of them.
         */
        if (!sources) {
                vgic_dist_irq_clear_pending(vcpu, irq);
                vgic_cpu_irq_clear(vcpu, irq);
                return true;
        }

        return false;
}

/**
 * vgic_v2_map_resources - Configure global VGIC state before running any VCPUs
 * @kvm: pointer to the kvm struct
 * @params: the vgic parameters, including the physical base address of the
 *          GIC virtual CPU interface
 *
 * Map the virtual CPU interface into the VM before running any VCPUs.  We
 * can't do this at creation time, because user space must first set the
 * virtual CPU interface address in the guest physical address space.
 */
static int vgic_v2_map_resources(struct kvm *kvm,
                                 const struct vgic_params *params)
{
        int ret = 0;

        if (!irqchip_in_kernel(kvm))
                return 0;

        mutex_lock(&kvm->lock);

        if (vgic_ready(kvm))
                goto out;

        if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
            IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) {
                kvm_err("Need to set vgic cpu and dist addresses first\n");
                ret = -ENXIO;
                goto out;
        }

        /*
         * Initialize the vgic if this hasn't already been done on demand by
         * accessing the vgic state from userspace.
         */
        ret = vgic_init(kvm);
        if (ret) {
                kvm_err("Unable to allocate maps\n");
                goto out;
        }

        ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
                                    params->vcpu_base, KVM_VGIC_V2_CPU_SIZE,
                                    true);
        if (ret) {
                kvm_err("Unable to remap VGIC CPU to VCPU\n");
                goto out;
        }

        kvm->arch.vgic.ready = true;
out:
        if (ret)
                kvm_vgic_destroy(kvm);
        mutex_unlock(&kvm->lock);
        return ret;
}

static void vgic_v2_add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

        *vgic_get_sgi_sources(dist, vcpu->vcpu_id, irq) |= 1 << source;
}

static int vgic_v2_init_model(struct kvm *kvm)
{
        int i;

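        /* Reset the SPI targets; each call covers one ITARGETSR (four interrupts) */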
        for (i = VGIC_NR_PRIVATE_IRQS; i < kvm->arch.vgic.nr_irqs; i += 4)
                vgic_set_target_reg(kvm, 0, i);

        return 0;
}

void vgic_v2_init_emulation(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;

        dist->vm_ops.handle_mmio = vgic_v2_handle_mmio;
        dist->vm_ops.queue_sgi = vgic_v2_queue_sgi;
        dist->vm_ops.add_sgi_source = vgic_v2_add_sgi_source;
        dist->vm_ops.init_model = vgic_v2_init_model;
        dist->vm_ops.map_resources = vgic_v2_map_resources;

        kvm->arch.max_vcpus = VGIC_V2_MAX_CPUS;
}

static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
                                 struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        bool updated = false;
        struct vgic_vmcr vmcr;
        u32 *vmcr_field;
        u32 reg;

        vgic_get_vmcr(vcpu, &vmcr);

        switch (offset & ~0x3) {
        case GIC_CPU_CTRL:
                vmcr_field = &vmcr.ctlr;
                break;
        case GIC_CPU_PRIMASK:
                vmcr_field = &vmcr.pmr;
                break;
        case GIC_CPU_BINPOINT:
                vmcr_field = &vmcr.bpr;
                break;
        case GIC_CPU_ALIAS_BINPOINT:
                vmcr_field = &vmcr.abpr;
                break;
        default:
                BUG();
        }

        if (!mmio->is_write) {
                reg = *vmcr_field;
                mmio_data_write(mmio, ~0, reg);
        } else {
                reg = mmio_data_read(mmio, ~0);
                if (reg != *vmcr_field) {
                        *vmcr_field = reg;
                        vgic_set_vmcr(vcpu, &vmcr);
                        updated = true;
                }
        }
        return updated;
}

static bool handle_mmio_abpr(struct kvm_vcpu *vcpu,
                             struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT);
}

static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
                                  struct kvm_exit_mmio *mmio,
                                  phys_addr_t offset)
{
        u32 reg;

        if (mmio->is_write)
                return false;

        /* GICC_IIDR */
        reg = (PRODUCT_ID_KVM << 20) |
              (GICC_ARCH_VERSION_V2 << 16) |
              (IMPLEMENTER_ARM << 0);
        mmio_data_write(mmio, ~0, reg);
        return false;
}

/*
 * CPU Interface Register accesses - these are not accessed by the VM, but by
 * user space for saving and restoring VGIC state.
 */
static const struct kvm_mmio_range vgic_cpu_ranges[] = {
        {
                .base           = GIC_CPU_CTRL,
                .len            = 12,
                .handle_mmio    = handle_cpu_mmio_misc,
        },
        {
                .base           = GIC_CPU_ALIAS_BINPOINT,
                .len            = 4,
                .handle_mmio    = handle_mmio_abpr,
        },
        {
                .base           = GIC_CPU_ACTIVEPRIO,
                .len            = 16,
                .handle_mmio    = handle_mmio_raz_wi,
        },
        {
                .base           = GIC_CPU_IDENT,
                .len            = 4,
                .handle_mmio    = handle_cpu_mmio_ident,
        },
        {}
};

static int vgic_attr_regs_access(struct kvm_device *dev,
                                 struct kvm_device_attr *attr,
                                 u32 *reg, bool is_write)
{
        const struct kvm_mmio_range *r = NULL, *ranges;
        phys_addr_t offset;
        int ret, cpuid, c;
        struct kvm_vcpu *vcpu, *tmp_vcpu;
        struct vgic_dist *vgic;
        struct kvm_exit_mmio mmio;

        offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
        cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
                KVM_DEV_ARM_VGIC_CPUID_SHIFT;

        mutex_lock(&dev->kvm->lock);

        ret = vgic_init(dev->kvm);
        if (ret)
                goto out;

        if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
                ret = -EINVAL;
                goto out;
        }

        vcpu = kvm_get_vcpu(dev->kvm, cpuid);
        vgic = &dev->kvm->arch.vgic;

        mmio.len = 4;
        mmio.is_write = is_write;
        if (is_write)
                mmio_data_write(&mmio, ~0, *reg);
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
                mmio.phys_addr = vgic->vgic_dist_base + offset;
                ranges = vgic_dist_ranges;
                break;
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                mmio.phys_addr = vgic->vgic_cpu_base + offset;
                ranges = vgic_cpu_ranges;
                break;
        default:
                BUG();
        }
        r = vgic_find_range(ranges, &mmio, offset);

        if (unlikely(!r || !r->handle_mmio)) {
                ret = -ENXIO;
                goto out;
        }

        spin_lock(&vgic->lock);

        /*
         * Ensure that no other VCPU is running by checking the vcpu->cpu
         * field.  If no other VCPUs are running we can safely access the VGIC
         * state, because even if another VCPU is run after this point, that
         * VCPU will not touch the vgic state, because it will block on
         * getting the vgic->lock in kvm_vgic_sync_hwstate().
         */
        kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
                if (unlikely(tmp_vcpu->cpu != -1)) {
                        ret = -EBUSY;
                        goto out_vgic_unlock;
                }
        }

        /*
         * Move all pending IRQs from the LRs on all VCPUs so the pending
         * state can be properly represented in the register state accessible
         * through this API.
         */
        kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm)
                vgic_unqueue_irqs(tmp_vcpu);

        offset -= r->base;
        r->handle_mmio(vcpu, &mmio, offset);

        if (!is_write)
                *reg = mmio_data_read(&mmio, ~0);

        ret = 0;
out_vgic_unlock:
        spin_unlock(&vgic->lock);
out:
        mutex_unlock(&dev->kvm->lock);
        return ret;
}

static int vgic_v2_create(struct kvm_device *dev, u32 type)
{
        return kvm_vgic_create(dev->kvm, type);
}

static void vgic_v2_destroy(struct kvm_device *dev)
{
        kfree(dev);
}

static int vgic_v2_set_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        int ret;

        ret = vgic_set_common_attr(dev, attr);
        if (ret != -ENXIO)
                return ret;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
                u32 reg;

                if (get_user(reg, uaddr))
                        return -EFAULT;

                return vgic_attr_regs_access(dev, attr, &reg, true);
        }

        }

        return -ENXIO;
}

static int vgic_v2_get_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        int ret;

        ret = vgic_get_common_attr(dev, attr);
        if (ret != -ENXIO)
                return ret;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
                u32 reg = 0;

                ret = vgic_attr_regs_access(dev, attr, &reg, false);
                if (ret)
                        return ret;
                return put_user(reg, uaddr);
        }

        }

        return -ENXIO;
}

static int vgic_v2_has_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        phys_addr_t offset;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR:
                switch (attr->attr) {
                case KVM_VGIC_V2_ADDR_TYPE_DIST:
                case KVM_VGIC_V2_ADDR_TYPE_CPU:
                        return 0;
                }
                break;
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
                offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
                return vgic_has_attr_regs(vgic_dist_ranges, offset);
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
                return vgic_has_attr_regs(vgic_cpu_ranges, offset);
        case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
                return 0;
        case KVM_DEV_ARM_VGIC_GRP_CTRL:
                switch (attr->attr) {
                case KVM_DEV_ARM_VGIC_CTRL_INIT:
                        return 0;
                }
        }
        return -ENXIO;
}

struct kvm_device_ops kvm_arm_vgic_v2_ops = {
        .name = "kvm-arm-vgic-v2",
        .create = vgic_v2_create,
        .destroy = vgic_v2_destroy,
        .set_attr = vgic_v2_set_attr,
        .get_attr = vgic_v2_get_attr,
        .has_attr = vgic_v2_has_attr,
};