#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/kprobes.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/vsyscall.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/fixmap.h>

#define MAX_PATCH_LEN (255-1)

#ifdef CONFIG_HOTPLUG_CPU
static int smp_alt_once;

static int __init bootonly(char *str)
{
	smp_alt_once = 1;
	return 1;
}
__setup("smp-alt-boot", bootonly);
#else
#define smp_alt_once 1
#endif

static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#ifdef CONFIG_PARAVIRT
static int __initdata_or_module noreplace_paravirt = 0;

static int __init setup_noreplace_paravirt(char *str)
{
	noreplace_paravirt = 1;
	return 1;
}
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif

#define DPRINTK(fmt, args...) if (debug_alternative) \
	printk(KERN_DEBUG fmt, args)

#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   get them easily into strings. */
asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nintelnops: "
	GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
	GENERIC_NOP7 GENERIC_NOP8
    "\t.previous");
extern const unsigned char intelnops[];
static const unsigned char *const __initconst_or_module
intel_nops[ASM_NOP_MAX+1] = {
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef K8_NOP1
asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nk8nops: "
	K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
	K8_NOP7 K8_NOP8
    "\t.previous");
extern const unsigned char k8nops[];
static const unsigned char *const __initconst_or_module
k8_nops[ASM_NOP_MAX+1] = {
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nk7nops: "
	K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
	K7_NOP7 K7_NOP8
    "\t.previous");
extern const unsigned char k7nops[];
static const unsigned char *const __initconst_or_module
k7_nops[ASM_NOP_MAX+1] = {
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef P6_NOP1
asm("\t" __stringify(__INITRODATA_OR_MODULE) "\np6nops: "
	P6_NOP1 P6_NOP2 P6_NOP3 P6_NOP4 P6_NOP5 P6_NOP6
	P6_NOP7 P6_NOP8
    "\t.previous");
extern const unsigned char p6nops[];
static const unsigned char *const __initconst_or_module
p6_nops[ASM_NOP_MAX+1] = {
	NULL,
	p6nops,
	p6nops + 1,
	p6nops + 1 + 2,
	p6nops + 1 + 2 + 3,
	p6nops + 1 + 2 + 3 + 4,
	p6nops + 1 + 2 + 3 + 4 + 5,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef CONFIG_X86_64

extern char __vsyscall_0;
static const unsigned char *const *__init_or_module find_nop_table(void)
{
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_has(X86_FEATURE_NOPL))
		return p6_nops;
	else
		return k8_nops;
}

#else /* CONFIG_X86_64 */

static const unsigned char *const *__init_or_module find_nop_table(void)
{
	if (boot_cpu_has(X86_FEATURE_K8))
		return k8_nops;
	else if (boot_cpu_has(X86_FEATURE_K7))
		return k7_nops;
	else if (boot_cpu_has(X86_FEATURE_NOPL))
		return p6_nops;
	else
		return intel_nops;
}

#endif /* CONFIG_X86_64 */

/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	const unsigned char *const *noptable = find_nop_table();

	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, noptable[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}

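/*
 * Worked example (an illustrative sketch, not from the original file):
 * with ASM_NOP_MAX == 8 on x86, padding a 12-byte hole emits one
 * 8-byte nop followed by one 4-byte nop rather than twelve 0x90s:
 *
 *	char buf[12];
 *	add_nops(buf, 12);
 *
 * Fewer, longer nops decode as fewer instructions, which is the whole
 * point of the per-vendor nop tables above.
 */
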
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern u8 *__smp_locks[], *__smp_locks_end[];
static void *text_poke_early(void *addr, const void *opcode, size_t len);

/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self modifying code. This implies that asymmetric systems where
   APs have fewer capabilities than the boot processor are not handled.
   Tough. Make sure you disable such features by hand. */
void __init_or_module apply_alternatives(struct alt_instr *start,
					 struct alt_instr *end)
{
	struct alt_instr *a;
	char insnbuf[MAX_PATCH_LEN];

	DPRINTK("%s: alt table %p -> %p\n", __func__, start, end);
	for (a = start; a < end; a++) {
		u8 *instr = a->instr;
		BUG_ON(a->replacementlen > a->instrlen);
		BUG_ON(a->instrlen > sizeof(insnbuf));
		if (!boot_cpu_has(a->cpuid))
			continue;
#ifdef CONFIG_X86_64
		/* vsyscall code is not mapped yet. resolve it manually. */
		if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
			instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
			DPRINTK("%s: vsyscall fixup: %p => %p\n",
				__func__, a->instr, instr);
		}
#endif
		memcpy(insnbuf, a->replacement, a->replacementlen);
		add_nops(insnbuf + a->replacementlen,
			 a->instrlen - a->replacementlen);
		text_poke_early(instr, insnbuf, a->instrlen);
	}
}

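/*
 * Illustrative example (a sketch; the exact macro arguments are an
 * assumption): callers emit an alternatives site with the
 * alternative() macro from <asm/alternative.h>, which records the
 * original instruction, its replacement and the required CPUID
 * feature bit in the .altinstructions section consumed above, e.g.:
 *
 *	// use MFENCE for the memory barrier when SSE2 is available
 *	alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2);
 *
 * The replacement must not be longer than the original; any remainder
 * is padded with nops by add_nops().
 */
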
#ifdef CONFIG_SMP

static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	u8 **ptr;

	mutex_lock(&text_mutex);
	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text)
			continue;
		if (*ptr > text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		text_poke(*ptr, ((unsigned char []){0xf0}), 1);
	}
	mutex_unlock(&text_mutex);
}

static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	u8 **ptr;

	if (noreplace_smp)
		return;

	mutex_lock(&text_mutex);
	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text)
			continue;
		if (*ptr > text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		text_poke(*ptr, ((unsigned char []){0x3E}), 1);
	}
	mutex_unlock(&text_mutex);
}

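/*
 * Background sketch (hedged; the emitting macro's exact form is an
 * assumption based on <asm/alternative.h>): LOCK_PREFIX records the
 * address of every lock prefix it emits in the .smp_locks section,
 * roughly:
 *
 *	.section .smp_locks, "a"
 *	_ASM_ALIGN
 *	_ASM_PTR 661f		# address of the prefix below
 *	.previous
 *	661:	lock; ...
 *
 * The u8 ** arrays walked above point at those recorded bytes, which
 * is why flipping a single byte (0xf0 <-> 0x3e) per site is enough.
 */
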
struct smp_alt_module {
	/* owning module (NULL for the core kernel) and its name */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	u8		**locks;
	u8		**locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_MUTEX(smp_alt);
static int smp_mode = 1;	/* protected by smp_alt */

void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text,  void *text_end)
{
	struct smp_alt_module *smp;

	if (noreplace_smp)
		return;

	if (smp_alt_once) {
		if (boot_cpu_has(X86_FEATURE_UP))
			alternatives_smp_unlock(locks, locks_end,
						text, text_end);
		return;
	}

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		return; /* we'll run the (safe but slow) SMP code then ... */

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
		__func__, smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	mutex_lock(&smp_alt);
	list_add_tail(&smp->next, &smp_alt_modules);
	if (boot_cpu_has(X86_FEATURE_UP))
		alternatives_smp_unlock(smp->locks, smp->locks_end,
					smp->text, smp->text_end);
	mutex_unlock(&smp_alt);
}

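/*
 * Usage sketch (a hedged reconstruction of the caller's shape, cf.
 * module_finalize() in arch/x86/kernel/module.c): the module loader
 * registers a module's .smp_locks and .text sections after
 * relocation, e.g.:
 *
 *	void *lseg = (void *)locks->sh_addr;
 *	void *tseg = (void *)text->sh_addr;
 *	alternatives_smp_module_add(me, me->name,
 *				    lseg, lseg + locks->sh_size,
 *				    tseg, tseg + text->sh_size);
 *
 * and undoes it with alternatives_smp_module_del(me) on unload.
 */
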
void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	if (smp_alt_once || noreplace_smp)
		return;

	mutex_lock(&smp_alt);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		mutex_unlock(&smp_alt);
		DPRINTK("%s: %s\n", __func__, item->name);
		kfree(item);
		return;
	}
	mutex_unlock(&smp_alt);
}

void alternatives_smp_switch(int smp)
{
	struct smp_alt_module *mod;

#ifdef CONFIG_LOCKDEP
	/*
	 * Older binutils section handling bug prevented
	 * alternatives-replacement from working reliably.
	 *
	 * If this still occurs then you should see a hang
	 * or crash shortly after this line:
	 */
	printk("lockdep: fixing up alternatives.\n");
#endif

	if (noreplace_smp || smp_alt_once)
		return;
	BUG_ON(!smp && (num_online_cpus() > 1));

	mutex_lock(&smp_alt);

	/*
	 * Avoid unnecessary switches because it forces JIT based VMs to
	 * throw away all cached translations, which can be quite costly.
	 */
	if (smp == smp_mode) {
		/* nothing */
	} else if (smp) {
		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
	} else {
		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
		set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_unlock(mod->locks, mod->locks_end,
						mod->text, mod->text_end);
	}
	smp_mode = smp;
	mutex_unlock(&smp_alt);
}

/* Return 1 if the address range is reserved for smp-alternatives */
int alternatives_text_reserved(void *start, void *end)
{
	struct smp_alt_module *mod;
	u8 **ptr;
	u8 *text_start = start;
	u8 *text_end = end;

	list_for_each_entry(mod, &smp_alt_modules, next) {
		if (mod->text > text_end || mod->text_end < text_start)
			continue;
		for (ptr = mod->locks; ptr < mod->locks_end; ptr++)
			if (text_start <= *ptr && text_end >= *ptr)
				return 1;
	}

	return 0;
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insnbuf[MAX_PATCH_LEN];

	if (noreplace_paravirt)
		return;

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insnbuf, p->instr, p->len);
		used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
					 (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insnbuf + used, p->len - used);
		text_poke_early(p->instr, insnbuf, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */

void __init alternative_instructions(void)
{
	/* The patching is not fully atomic, so try to avoid local
	   interruptions that might execute the code being patched.
	   Other CPUs are not running. */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during code
	 * patching.
	 */

	apply_alternatives(__alt_instructions, __alt_instructions_end);

	/* switch to patch-once-at-boottime-only mode and free the
	 * tables in case we know the number of CPUs will never ever
	 * change */
#ifdef CONFIG_HOTPLUG_CPU
	if (num_possible_cpus() < 2)
		smp_alt_once = 1;
#endif

#ifdef CONFIG_SMP
	if (smp_alt_once) {
		if (1 == num_possible_cpus()) {
			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
			set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
			set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);

			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
						_text, _etext);
		}
	} else {
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);

		/* Only switch to UP mode if we don't immediately boot others */
		if (num_present_cpus() == 1 || setup_max_cpus <= 1)
			alternatives_smp_switch(0);
	}
#endif

	apply_paravirt(__parainstructions, __parainstructions_end);

	if (smp_alt_once)
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);

	restart_nmi();
}

/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI
 * or MCE handlers seeing an inconsistent instruction while you patch.
 */
static void *__init_or_module text_poke_early(void *addr, const void *opcode,
					      size_t len)
{
	unsigned long flags;

	local_irq_save(flags);
	memcpy(addr, opcode, len);
	sync_core();
	local_irq_restore(flags);
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	return addr;
}

/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Note: Must be called under text_mutex.
 */
void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
{
	unsigned long flags;
	char *vaddr;
	struct page *pages[2];
	int i;

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	BUG_ON(!pages[0]);
	local_irq_save(flags);
	set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
	if (pages[1])
		set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
	vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
	clear_fixmap(FIX_TEXT_POKE0);
	if (pages[1])
		clear_fixmap(FIX_TEXT_POKE1);
	local_flush_tlb();
	sync_core();
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	for (i = 0; i < len; i++)
		BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
	local_irq_restore(flags);
	return addr;
}
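
/*
 * Usage sketch (hedged; mirrors how kprobes arms a breakpoint): a
 * one-byte poke is always atomic, so writing the int3 opcode over the
 * first byte of an instruction is safe under text_mutex:
 *
 *	mutex_lock(&text_mutex);
 *	text_poke(addr, ((unsigned char []){0xcc}), 1);
 *	mutex_unlock(&text_mutex);
 *
 * Larger pokes must respect the atomicity and alignment constraints
 * described in the comment above.
 */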