virt/kvm/arm/arch_timer.c
/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/cpu.h>
#include <linux/of_irq.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>

#include <clocksource/arm_arch_timer.h>
#include <asm/arch_timer.h>

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>

static struct timecounter *timecounter;
static struct workqueue_struct *wqueue;
static unsigned int host_vtimer_irq;

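/*
 * Read the host physical counter through the arch timer's timecounter.
 * The guest's view of the virtual counter is derived from this value
 * minus the per-VM offset (cntvoff).
 */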
static cycle_t kvm_phys_timer_read(void)
{
        return timecounter->cc->read(timecounter->cc);
}

static bool timer_is_armed(struct arch_timer_cpu *timer)
{
        return timer->armed;
}

/* timer_arm: as in "arm the timer", not as in ARM the company */
static void timer_arm(struct arch_timer_cpu *timer, u64 ns)
{
        timer->armed = true;
        hrtimer_start(&timer->timer, ktime_add_ns(ktime_get(), ns),
                      HRTIMER_MODE_ABS);
}

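/*
 * Cancel the soft timer and flush any expiry work that may already be
 * queued, so no stale wakeup fires after this point.
 */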
static void timer_disarm(struct arch_timer_cpu *timer)
{
        if (timer_is_armed(timer)) {
                hrtimer_cancel(&timer->timer);
                cancel_work_sync(&timer->expired);
                timer->armed = false;
        }
}

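/*
 * Mark the physical interrupt as active and inject the mapped virtual
 * interrupt into the vgic so the guest sees its timer fire.
 */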
static void kvm_timer_inject_irq(struct kvm_vcpu *vcpu)
{
        int ret;
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        kvm_vgic_set_phys_irq_active(timer->map, true);
        ret = kvm_vgic_inject_mapped_irq(vcpu->kvm, vcpu->vcpu_id,
                                         timer->map,
                                         timer->irq->level);
        WARN_ON(ret);
}

static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
        struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;

        /*
         * We disable the timer in the world switch and let it be
         * handled by kvm_timer_sync_hwstate(). Getting a timer
         * interrupt at this point is a sure sign of some major
         * breakage.
         */
        pr_warn("Unexpected interrupt %d on vcpu %p\n", irq, vcpu);
        return IRQ_HANDLED;
}

/*
 * Work function for handling the backup timer that we schedule when a vcpu is
 * no longer running, but had a timer programmed to fire in the future.
 */
static void kvm_timer_inject_irq_work(struct work_struct *work)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
        vcpu->arch.timer_cpu.armed = false;

        /*
         * If the vcpu is blocked we want to wake it up so that it will see
         * the timer has expired when entering the guest.
         */
        kvm_vcpu_kick(vcpu);
}

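/*
 * hrtimer callback for the soft timer; hands the expiry off to the
 * workqueue, which clears 'armed' and kicks the vcpu.
 */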
static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
{
        struct arch_timer_cpu *timer;
        timer = container_of(hrt, struct arch_timer_cpu, timer);
        queue_work(wqueue, &timer->expired);
        return HRTIMER_NORESTART;
}

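/*
 * Returns true when the guest's virtual timer condition is met: the
 * timer is enabled, not masked, not already active at the vgic, and
 * its compare value has been reached by the guest's view of the
 * counter (physical counter minus cntvoff).
 */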
bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        cycle_t cval, now;

        if ((timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) ||
            !(timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE) ||
            kvm_vgic_get_phys_irq_active(timer->map))
                return false;

        cval = timer->cntv_cval;
        now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;

        return cval <= now;
}

/**
 * kvm_timer_flush_hwstate - prepare to move the virt timer to the cpu
 * @vcpu: The vcpu pointer
 *
 * Disarm any pending soft timers, since the world-switch code will write the
 * virtual timer state back to the physical CPU.
 */
void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        /*
         * We're about to run this vcpu again, so there is no need to
         * keep the background timer running, as we're about to
         * populate the CPU timer again.
         */
        timer_disarm(timer);

        /*
         * If the timer expired while we were not scheduled, now is the time
         * to inject it.
         */
        if (kvm_timer_should_fire(vcpu))
                kvm_timer_inject_irq(vcpu);
}

/**
 * kvm_timer_sync_hwstate - sync timer state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the virtual timer was armed and either schedule a corresponding
 * soft timer or inject directly if already expired.
 */
void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        cycle_t cval, now;
        u64 ns;

        BUG_ON(timer_is_armed(timer));

        if (kvm_timer_should_fire(vcpu)) {
                /*
                 * Timer has already expired while we were not
                 * looking. Inject the interrupt and carry on.
                 */
                kvm_timer_inject_irq(vcpu);
                return;
        }

        cval = timer->cntv_cval;
        now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;

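        /*
         * Convert the remaining cycles into an hrtimer deadline in
         * nanoseconds. Illustrative numbers (not from this code): with
         * a 100 MHz counter, cval - now == 1000 cycles converts to
         * 10000 ns.
         */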
        ns = cyclecounter_cyc2ns(timecounter->cc, cval - now, timecounter->mask,
                                 &timecounter->frac);
        timer_arm(timer, ns);
}

int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
                         const struct kvm_irq_level *irq)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        struct irq_phys_map *map;

        /*
         * The vcpu timer irq number cannot be determined in
         * kvm_timer_vcpu_init() because it is called long before
         * kvm_vcpu_set_target(). To handle this, we determine the vcpu
         * timer irq number when the vcpu is reset.
         */
        timer->irq = irq;

        /*
         * Tell the VGIC that the virtual interrupt is tied to a
         * physical interrupt. We do that once per VCPU.
         */
        map = kvm_vgic_map_phys_irq(vcpu, irq->irq, host_vtimer_irq);
        if (WARN_ON(IS_ERR(map)))
                return PTR_ERR(map);

        timer->map = map;
        return 0;
}

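/*
 * Per-vcpu init: set up the expiry work item and the soft timer that
 * backs the guest timer while the vcpu is scheduled out.
 */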
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        INIT_WORK(&timer->expired, kvm_timer_inject_irq_work);
        hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        timer->timer.function = kvm_timer_expire;
}

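/* Enable the per-cpu virtual timer interrupt on the calling CPU. */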
static void kvm_timer_init_interrupt(void *info)
{
        enable_percpu_irq(host_vtimer_irq, 0);
}

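/*
 * Set a guest timer register. KVM_REG_ARM_TIMER_CNT is given as a guest
 * counter value but stored as an offset from the physical counter.
 * Illustrative example: writing CNT = 0 while the physical counter
 * reads 5000 sets cntvoff = 5000, so a later read at physical 5100
 * returns 100 to the guest.
 */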
int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        switch (regid) {
        case KVM_REG_ARM_TIMER_CTL:
                timer->cntv_ctl = value;
                break;
        case KVM_REG_ARM_TIMER_CNT:
                vcpu->kvm->arch.timer.cntvoff = kvm_phys_timer_read() - value;
                break;
        case KVM_REG_ARM_TIMER_CVAL:
                timer->cntv_cval = value;
                break;
        default:
                return -1;
        }
        return 0;
}

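/*
 * Read back a guest timer register; CNT is reconstructed from the
 * physical counter and the per-VM offset.
 */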
u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        switch (regid) {
        case KVM_REG_ARM_TIMER_CTL:
                return timer->cntv_ctl;
        case KVM_REG_ARM_TIMER_CNT:
                return kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
        case KVM_REG_ARM_TIMER_CVAL:
                return timer->cntv_cval;
        }
        return (u64)-1;
}

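/*
 * CPU hotplug notifier: enable the per-cpu timer interrupt when a CPU
 * comes online and disable it again when the CPU goes down.
 */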
static int kvm_timer_cpu_notify(struct notifier_block *self,
                                unsigned long action, void *cpu)
{
        switch (action) {
        case CPU_STARTING:
        case CPU_STARTING_FROZEN:
                kvm_timer_init_interrupt(NULL);
                break;
        case CPU_DYING:
        case CPU_DYING_FROZEN:
                disable_percpu_irq(host_vtimer_irq);
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block kvm_timer_cpu_nb = {
        .notifier_call = kvm_timer_cpu_notify,
};

static const struct of_device_id arch_timer_of_match[] = {
        { .compatible   = "arm,armv7-timer",    },
        { .compatible   = "arm,armv8-timer",    },
        {},
};

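/*
 * Global init: grab the host's arch timer timecounter, look up the
 * virtual timer PPI in the device tree (interrupt index 2 in the arch
 * timer node), request it as a per-cpu interrupt, and set up the CPU
 * notifier and the injection workqueue.
 */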
int kvm_timer_hyp_init(void)
{
        struct device_node *np;
        unsigned int ppi;
        int err;

        timecounter = arch_timer_get_timecounter();
        if (!timecounter)
                return -ENODEV;

        np = of_find_matching_node(NULL, arch_timer_of_match);
        if (!np) {
                kvm_err("kvm_arch_timer: can't find DT node\n");
                return -ENODEV;
        }

        ppi = irq_of_parse_and_map(np, 2);
        if (!ppi) {
                kvm_err("kvm_arch_timer: no virtual timer interrupt\n");
                err = -EINVAL;
                goto out;
        }

        err = request_percpu_irq(ppi, kvm_arch_timer_handler,
                                 "kvm guest timer", kvm_get_running_vcpus());
        if (err) {
                kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n",
                        ppi, err);
                goto out;
        }

        host_vtimer_irq = ppi;

        err = __register_cpu_notifier(&kvm_timer_cpu_nb);
        if (err) {
                kvm_err("Cannot register timer CPU notifier\n");
                goto out_free;
        }

        wqueue = create_singlethread_workqueue("kvm_arch_timer");
        if (!wqueue) {
                err = -ENOMEM;
                goto out_free;
        }

        kvm_info("%s IRQ%d\n", np->name, ppi);
        on_each_cpu(kvm_timer_init_interrupt, NULL, 1);

        goto out;
out_free:
        free_percpu_irq(ppi, kvm_get_running_vcpus());
out:
        of_node_put(np);
        return err;
}

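/*
 * Per-vcpu teardown: cancel any pending soft timer and undo the
 * virtual/physical interrupt mapping at the vgic.
 */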
void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        timer_disarm(timer);
        if (timer->map)
                kvm_vgic_unmap_phys_irq(vcpu, timer->map);
}

void kvm_timer_enable(struct kvm *kvm)
{
        if (kvm->arch.timer.enabled)
                return;

        /*
         * There is a potential race here between VCPUs starting for the
         * first time, which may end up enabling the timer multiple times.
         * That doesn't hurt though, because we're just setting a variable
         * to the same value it already had. The important thing is that
         * all VCPUs have the enabled variable set before entering the
         * guest, if the arch timers are enabled.
         */
        if (timecounter && wqueue)
                kvm->arch.timer.enabled = 1;
}

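/*
 * Per-VM init: snapshot the physical counter into cntvoff so the
 * guest's virtual counter starts ticking from zero.
 */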
void kvm_timer_init(struct kvm *kvm)
{
        kvm->arch.timer.cntvoff = kvm_phys_timer_read();
}