/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/init.h>
#include <linux/kvm_para.h>
#include <linux/slab.h>
#include <linux/of.h>

#include <asm/reg.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/disassemble.h>
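
/*
 * The host maps a "magic page" of shared state (struct kvm_vcpu_arch_shared)
 * at the very top of the guest effective address space, i.e. at -4096.
 * magic_var(x) yields the guest-visible address of field x in that page,
 * e.g. magic_var(msr) == -4096 + offsetof(struct kvm_vcpu_arch_shared, msr),
 * so privileged register accesses can be patched into plain loads and stores.
 */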
#define KVM_MAGIC_PAGE          (-4096L)
#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)

#define KVM_INST_LWZ            0x80000000
#define KVM_INST_STW            0x90000000
#define KVM_INST_LD             0xe8000000
#define KVM_INST_STD            0xf8000000
#define KVM_INST_NOP            0x60000000
#define KVM_INST_B              0x48000000
#define KVM_INST_B_MASK         0x03ffffff
#define KVM_INST_B_MAX          0x01ffffff

#define KVM_MASK_RT             0x03e00000
#define KVM_MASK_RB             0x0000f800
#define KVM_INST_MFMSR          0x7c0000a6
#define KVM_INST_MFSPR_SPRG0    0x7c1042a6
#define KVM_INST_MFSPR_SPRG1    0x7c1142a6
#define KVM_INST_MFSPR_SPRG2    0x7c1242a6
#define KVM_INST_MFSPR_SPRG3    0x7c1342a6
#define KVM_INST_MFSPR_SRR0     0x7c1a02a6
#define KVM_INST_MFSPR_SRR1     0x7c1b02a6
#define KVM_INST_MFSPR_DAR      0x7c1302a6
#define KVM_INST_MFSPR_DSISR    0x7c1202a6

#define KVM_INST_MTSPR_SPRG0    0x7c1043a6
#define KVM_INST_MTSPR_SPRG1    0x7c1143a6
#define KVM_INST_MTSPR_SPRG2    0x7c1243a6
#define KVM_INST_MTSPR_SPRG3    0x7c1343a6
#define KVM_INST_MTSPR_SRR0     0x7c1a03a6
#define KVM_INST_MTSPR_SRR1     0x7c1b03a6
#define KVM_INST_MTSPR_DAR      0x7c1303a6
#define KVM_INST_MTSPR_DSISR    0x7c1203a6

#define KVM_INST_TLBSYNC        0x7c00046c
#define KVM_INST_MTMSRD_L0      0x7c000164
#define KVM_INST_MTMSRD_L1      0x7c010164
#define KVM_INST_MTMSR          0x7c000124

#define KVM_INST_WRTEEI_0       0x7c000146
#define KVM_INST_WRTEEI_1       0x7c008146

#define KVM_INST_MTSRIN         0x7c0001e4
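
/*
 * kvm_patching_worked flips to false as soon as any single patch fails; the
 * result is only reported at the end of kvm_use_magic_page(). kvm_tmp is a
 * 1 MiB scratch area that receives the generated emulation chunks; whatever
 * remains unused is handed back to the page allocator by kvm_free_tmp().
 */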
static bool kvm_patching_worked = true;
static char kvm_tmp[1024 * 1024];
static int kvm_tmp_index;

static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
{
        *inst = new_inst;
        flush_icache_range((ulong)inst, (ulong)inst + 4);
}

static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
        kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
        kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
{
        kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
}

static void kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
        kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
#else
        kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
{
        kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
}

static void kvm_patch_ins_nop(u32 *inst)
{
        kvm_patch_ins(inst, KVM_INST_NOP);
}

static void kvm_patch_ins_b(u32 *inst, int addr)
{
#ifdef CONFIG_RELOCATABLE
        /* On relocatable kernels interrupt handlers and our code
           can be in different regions, so we don't patch them */

        extern u32 __end_interrupts;
        if ((ulong)inst < (ulong)&__end_interrupts)
                return;
#endif

        kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK));
}

static u32 *kvm_alloc(int len)
{
        u32 *p;

        if ((kvm_tmp_index + len) > ARRAY_SIZE(kvm_tmp)) {
                printk(KERN_ERR "KVM: No more space (%d + %d)\n",
                                kvm_tmp_index, len);
                kvm_patching_worked = false;
                return NULL;
        }

        p = (void*)&kvm_tmp[kvm_tmp_index];
        kvm_tmp_index += len;

        return p;
}
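
/*
 * The kvm_emulate_* symbols below are code templates provided by an
 * accompanying assembly file. Each template comes with its length (in
 * instructions) and the offsets of the words that must be fixed up per call
 * site: the register operand(s), the branch back to the instruction
 * following the patched one and, where needed, a copy of the original
 * instruction.
 */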
extern u32 kvm_emulate_mtmsrd_branch_offs;
extern u32 kvm_emulate_mtmsrd_reg_offs;
extern u32 kvm_emulate_mtmsrd_len;
extern u32 kvm_emulate_mtmsrd[];

static void kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
{
        u32 *p;
        int distance_start;
        int distance_end;
        ulong next_inst;

        p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
        if (!p)
                return;

        /* Find out where we are and put everything there */
        distance_start = (ulong)p - (ulong)inst;
        next_inst = ((ulong)inst + 4);
        distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs];

        /* Make sure we only write valid b instructions */
        if (distance_start > KVM_INST_B_MAX) {
                kvm_patching_worked = false;
                return;
        }

        /* Modify the chunk to fit the invocation */
        memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
        p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK;
        p[kvm_emulate_mtmsrd_reg_offs] |= rt;
        flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4);

        /* Patch the invocation */
        kvm_patch_ins_b(inst, distance_start);
}
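
/*
 * Unlike mtmsrd with L=1, mtmsr (and mtmsrd with L=0) rewrites the whole MSR.
 * This template therefore also stores a copy of the patched-over instruction
 * into the chunk (orig_ins), presumably so the generated code can still fall
 * back to executing the real, trapping instruction when the write cannot be
 * satisfied from the magic page alone.
 */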
extern u32 kvm_emulate_mtmsr_branch_offs;
extern u32 kvm_emulate_mtmsr_reg1_offs;
extern u32 kvm_emulate_mtmsr_reg2_offs;
extern u32 kvm_emulate_mtmsr_reg3_offs;
extern u32 kvm_emulate_mtmsr_orig_ins_offs;
extern u32 kvm_emulate_mtmsr_len;
extern u32 kvm_emulate_mtmsr[];

static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
{
        u32 *p;
        int distance_start;
        int distance_end;
        ulong next_inst;

        p = kvm_alloc(kvm_emulate_mtmsr_len * 4);
        if (!p)
                return;

        /* Find out where we are and put everything there */
        distance_start = (ulong)p - (ulong)inst;
        next_inst = ((ulong)inst + 4);
        distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs];

        /* Make sure we only write valid b instructions */
        if (distance_start > KVM_INST_B_MAX) {
                kvm_patching_worked = false;
                return;
        }

        /* Modify the chunk to fit the invocation */
        memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
        p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;
        p[kvm_emulate_mtmsr_reg1_offs] |= rt;
        p[kvm_emulate_mtmsr_reg2_offs] |= rt;
        p[kvm_emulate_mtmsr_reg3_offs] |= rt;
        p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
        flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);

        /* Patch the invocation */
        kvm_patch_ins_b(inst, distance_start);
}

#ifdef CONFIG_BOOKE

extern u32 kvm_emulate_wrteei_branch_offs;
extern u32 kvm_emulate_wrteei_ee_offs;
extern u32 kvm_emulate_wrteei_len;
extern u32 kvm_emulate_wrteei[];

static void kvm_patch_ins_wrteei(u32 *inst)
{
        u32 *p;
        int distance_start;
        int distance_end;
        ulong next_inst;

        p = kvm_alloc(kvm_emulate_wrteei_len * 4);
        if (!p)
                return;

        /* Find out where we are and put everything there */
        distance_start = (ulong)p - (ulong)inst;
        next_inst = ((ulong)inst + 4);
        distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_branch_offs];

        /* Make sure we only write valid b instructions */
        if (distance_start > KVM_INST_B_MAX) {
                kvm_patching_worked = false;
                return;
        }

        /* Modify the chunk to fit the invocation */
        memcpy(p, kvm_emulate_wrteei, kvm_emulate_wrteei_len * 4);
        p[kvm_emulate_wrteei_branch_offs] |= distance_end & KVM_INST_B_MASK;
        p[kvm_emulate_wrteei_ee_offs] |= (*inst & MSR_EE);
        flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_len * 4);

        /* Patch the invocation */
        kvm_patch_ins_b(inst, distance_start);
}

#endif
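
/*
 * 32-bit Book3S guests also manage segment registers. When the host
 * advertises KVM_MAGIC_FEAT_SR, mtsrin gets the same template treatment
 * as the MSR writes above.
 */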
#ifdef CONFIG_PPC_BOOK3S_32

extern u32 kvm_emulate_mtsrin_branch_offs;
extern u32 kvm_emulate_mtsrin_reg1_offs;
extern u32 kvm_emulate_mtsrin_reg2_offs;
extern u32 kvm_emulate_mtsrin_orig_ins_offs;
extern u32 kvm_emulate_mtsrin_len;
extern u32 kvm_emulate_mtsrin[];

static void kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
{
        u32 *p;
        int distance_start;
        int distance_end;
        ulong next_inst;

        p = kvm_alloc(kvm_emulate_mtsrin_len * 4);
        if (!p)
                return;

        /* Find out where we are and put everything there */
        distance_start = (ulong)p - (ulong)inst;
        next_inst = ((ulong)inst + 4);
        distance_end = next_inst - (ulong)&p[kvm_emulate_mtsrin_branch_offs];

        /* Make sure we only write valid b instructions */
        if (distance_start > KVM_INST_B_MAX) {
                kvm_patching_worked = false;
                return;
        }

        /* Modify the chunk to fit the invocation */
        memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4);
        p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK;
        p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10);
        p[kvm_emulate_mtsrin_reg2_offs] |= rt;
        p[kvm_emulate_mtsrin_orig_ins_offs] = *inst;
        flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4);

        /* Patch the invocation */
        kvm_patch_ins_b(inst, distance_start);
}

#endif
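
/*
 * Runs on every CPU: asks the host via hypercall to map the magic page at
 * -4096 for this vcpu and hands back the feature bits the host returns in
 * out[0].
 */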
static void kvm_map_magic_page(void *data)
{
        u32 *features = data;

        ulong in[8];
        ulong out[8];

        in[0] = KVM_MAGIC_PAGE;
        in[1] = KVM_MAGIC_PAGE;

        kvm_hypercall(in, out, HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE);

        *features = out[0];
}
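
/*
 * Inspect one instruction word of kernel text. mfmsr/mfspr and mtspr of the
 * registers mirrored in the magic page become plain loads and stores,
 * tlbsync becomes a nop, and mtmsr/mtmsrd/wrteei/mtsrin are redirected to
 * the generated emulation chunks above.
 */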
static void kvm_check_ins(u32 *inst, u32 features)
{
        u32 _inst = *inst;
        u32 inst_no_rt = _inst & ~KVM_MASK_RT;
        u32 inst_rt = _inst & KVM_MASK_RT;

        switch (inst_no_rt) {
        /* Loads */
        case KVM_INST_MFMSR:
                kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
                break;
        case KVM_INST_MFSPR_SPRG0:
                kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
                break;
        case KVM_INST_MFSPR_SPRG1:
                kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
                break;
        case KVM_INST_MFSPR_SPRG2:
                kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
                break;
        case KVM_INST_MFSPR_SPRG3:
                kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
                break;
        case KVM_INST_MFSPR_SRR0:
                kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
                break;
        case KVM_INST_MFSPR_SRR1:
                kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
                break;
        case KVM_INST_MFSPR_DAR:
                kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
                break;
        case KVM_INST_MFSPR_DSISR:
                kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
                break;

        /* Stores */
        case KVM_INST_MTSPR_SPRG0:
                kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
                break;
        case KVM_INST_MTSPR_SPRG1:
                kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
                break;
        case KVM_INST_MTSPR_SPRG2:
                kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
                break;
        case KVM_INST_MTSPR_SPRG3:
                kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
                break;
        case KVM_INST_MTSPR_SRR0:
                kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
                break;
        case KVM_INST_MTSPR_SRR1:
                kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
                break;
        case KVM_INST_MTSPR_DAR:
                kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
                break;
        case KVM_INST_MTSPR_DSISR:
                kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
                break;

        /* Nops */
        case KVM_INST_TLBSYNC:
                kvm_patch_ins_nop(inst);
                break;

        /* Rewrites */
        case KVM_INST_MTMSRD_L1:
                /* We use r30 and r31 during the hook */
                if (get_rt(inst_rt) < 30)
                        kvm_patch_ins_mtmsrd(inst, inst_rt);
                break;
        case KVM_INST_MTMSR:
        case KVM_INST_MTMSRD_L0:
                /* We use r30 and r31 during the hook */
                if (get_rt(inst_rt) < 30)
                        kvm_patch_ins_mtmsr(inst, inst_rt);
                break;
        }

        switch (inst_no_rt & ~KVM_MASK_RB) {
#ifdef CONFIG_PPC_BOOK3S_32
        case KVM_INST_MTSRIN:
                if (features & KVM_MAGIC_FEAT_SR) {
                        u32 inst_rb = _inst & KVM_MASK_RB;
                        kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
                }
                break;
#endif
        }

        switch (_inst) {
#ifdef CONFIG_BOOKE
        case KVM_INST_WRTEEI_0:
        case KVM_INST_WRTEEI_1:
                kvm_patch_ins_wrteei(inst);
                break;
#endif
        }
}

static void kvm_use_magic_page(void)
{
        u32 *p;
        u32 *start, *end;
        u32 tmp;
        u32 features;

        /* Tell the host to map the magic page to -4096 on all CPUs */
        on_each_cpu(kvm_map_magic_page, &features, 1);

        /* Quick self-test to see if the mapping works */
        if (__get_user(tmp, (u32*)KVM_MAGIC_PAGE)) {
                kvm_patching_worked = false;
                return;
        }

        /* Now loop through all code and find instructions */
        start = (void*)_stext;
        end = (void*)_etext;

        for (p = start; p < end; p++)
                kvm_check_ins(p, features);

        printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
                         kvm_patching_worked ? "worked" : "failed");
}
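
/*
 * Generic hypercall wrapper: arguments go in r3-r10, the hypercall number in
 * r11; the branch target kvm_hypercall_start is patched by kvm_para_setup()
 * with the instruction sequence the hypervisor advertises. Results come back
 * in r4-r11 and the return code in r3.
 */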
unsigned long kvm_hypercall(unsigned long *in,
                            unsigned long *out,
                            unsigned long nr)
{
        unsigned long register r0 asm("r0");
        unsigned long register r3 asm("r3") = in[0];
        unsigned long register r4 asm("r4") = in[1];
        unsigned long register r5 asm("r5") = in[2];
        unsigned long register r6 asm("r6") = in[3];
        unsigned long register r7 asm("r7") = in[4];
        unsigned long register r8 asm("r8") = in[5];
        unsigned long register r9 asm("r9") = in[6];
        unsigned long register r10 asm("r10") = in[7];
        unsigned long register r11 asm("r11") = nr;
        unsigned long register r12 asm("r12");

        asm volatile("bl        kvm_hypercall_start"
                     : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6),
                       "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11),
                       "=r"(r12)
                     : "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8),
                       "r"(r9), "r"(r10), "r"(r11)
                     : "memory", "cc", "xer", "ctr", "lr");

        out[0] = r4;
        out[1] = r5;
        out[2] = r6;
        out[3] = r7;
        out[4] = r8;
        out[5] = r9;
        out[6] = r10;
        out[7] = r11;

        return r3;
}
EXPORT_SYMBOL_GPL(kvm_hypercall);
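
/*
 * Fetch the "hcall-instructions" property from the /hypervisor device tree
 * node and patch those instructions (at most four) over the stub at
 * kvm_hypercall_start.
 */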
static int kvm_para_setup(void)
{
        extern u32 kvm_hypercall_start;
        struct device_node *hyper_node;
        u32 *insts;
        int len, i;

        hyper_node = of_find_node_by_path("/hypervisor");
        if (!hyper_node)
                return -1;

        insts = (u32*)of_get_property(hyper_node, "hcall-instructions", &len);
        if (len % 4)
                return -1;
        if (len > (4 * 4))
                return -1;

        for (i = 0; i < (len / 4); i++)
                kvm_patch_ins(&(&kvm_hypercall_start)[i], insts[i]);

        return 0;
}
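
/*
 * Give the unused tail of kvm_tmp back to the page allocator once patching
 * is done (or was never attempted).
 */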
static __init void kvm_free_tmp(void)
{
        unsigned long start, end;

        start = (ulong)&kvm_tmp[kvm_tmp_index + (PAGE_SIZE - 1)] & PAGE_MASK;
        end = (ulong)&kvm_tmp[ARRAY_SIZE(kvm_tmp)] & PAGE_MASK;

        /* Free the tmp space we don't need */
        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
                init_page_count(virt_to_page(start));
                free_page(start);
        }
}

static int __init kvm_guest_init(void)
{
        if (!kvm_para_available())
                goto free_tmp;

        if (kvm_para_setup())
                goto free_tmp;

        if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
                kvm_use_magic_page();

free_tmp:
        kvm_free_tmp();

        return 0;
}

postcore_initcall(kvm_guest_init);