/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */

#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/tick.h>
#include <linux/threads.h>
#include <linux/tracehook.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>
#include "as-layout.h"
#include "kern_util.h"
#include "os.h"
#include "skas.h"

/*
 * This is a per-cpu array.  A processor only modifies its entry and it only
 * cares about its entry, so it's OK if another processor is modifying its
 * entry.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };

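/*
 * A minimal sketch of the lock-free, single-writer pattern this allows,
 * using a hypothetical helper (not part of this file); each CPU writes
 * only its own slot, so no lock is taken:
 *
 *	static void note_pid_on_this_cpu(int pid)
 *	{
 *		cpu_tasks[current_thread_info()->cpu].pid = pid;
 *	}
 *
 * set_current() and cpu_idle() below update their slots the same way.
 */
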
static inline int external_pid(void)
{
	/* FIXME: Need to look up userspace_pid by cpu */
	return userspace_pid[0];
}

int pid_to_processor_id(int pid)
{
	int i;

	for (i = 0; i < ncpus; i++) {
		if (cpu_tasks[i].pid == pid)
			return i;
	}
	return -1;
}

void free_stack(unsigned long stack, int order)
{
	free_pages(stack, order);
}

unsigned long alloc_stack(int order, int atomic)
{
	unsigned long page;
	gfp_t flags = GFP_KERNEL;

	if (atomic)
		flags = GFP_ATOMIC;
	page = __get_free_pages(flags, order);

	return page;
}

int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	int pid;

	current->thread.request.u.thread.proc = fn;
	current->thread.request.u.thread.arg = arg;
	pid = do_fork(CLONE_VM | CLONE_UNTRACED | flags, 0,
		      &current->thread.regs, 0, NULL, NULL);
	return pid;
}
EXPORT_SYMBOL(kernel_thread);

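/*
 * Usage sketch with a hypothetical caller; my_worker is an assumed
 * example function, not part of this file. The child starts life in
 * new_thread_handler() below, which retrieves proc/arg from
 * current->thread.request:
 *
 *	static int my_worker(void *arg)
 *	{
 *		return 0;
 *	}
 *
 *	pid = kernel_thread(my_worker, NULL, 0);
 */
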
static inline void set_current(struct task_struct *task)
{
	cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
		{ external_pid(), task });
}

extern void arch_switch_to(struct task_struct *to);

void *__switch_to(struct task_struct *from, struct task_struct *to)
{
	to->thread.prev_sched = from;
	set_current(to);

	do {
		current->thread.saved_task = NULL;

		switch_threads(&from->thread.switch_buf,
			       &to->thread.switch_buf);

		arch_switch_to(current);

		if (current->thread.saved_task)
			show_regs(&(current->thread.regs));
		to = current->thread.saved_task;
		from = current;
	} while (current->thread.saved_task);

	return current->thread.prev_sched;
}

void interrupt_end(void)
{
	if (need_resched())
		schedule();
	if (test_thread_flag(TIF_SIGPENDING))
		do_signal();
	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
		tracehook_notify_resume(&current->thread.regs);
}

void exit_thread(void)
{
}

int get_current_pid(void)
{
	return task_pid_nr(current);
}

/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
	int (*fn)(void *), n;
	void *arg;

	if (current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;

	/*
	 * The return value is 1 if the kernel thread execs a process,
	 * 0 if it just exits
	 */
	n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
	if (n == 1) {
		/* Handle any immediate reschedules or signals */
		interrupt_end();
		userspace(&current->thread.regs.regs);
	}
	else
		do_exit(0);
}

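/*
 * For illustration, a sketch of how that jmp_buf gets prepared, modeled
 * on new_thread() in os-Linux/process.c; JB_IP/JB_SP are the per-subarch
 * sysdep slot names, and the exact layout shown is an assumption here:
 *
 *	void new_thread(void *stack, jmp_buf *buf, void (*handler)(void))
 *	{
 *		(*buf)[0].JB_IP = (unsigned long) handler;
 *		(*buf)[0].JB_SP = (unsigned long) stack + THREAD_SIZE -
 *				  sizeof(void *);
 *	}
 *
 * A later UML_LONGJMP() on that buffer in switch_threads() then
 * "returns" straight into handler() on the new kernel stack.
 */
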
/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
	force_flush_all();

	schedule_tail(current->thread.prev_sched);

	/*
	 * XXX: if interrupt_end() calls schedule, this call to
	 * arch_switch_to isn't needed; we could drop it to improve
	 * performance. -bb
	 */
	arch_switch_to(current);

	current->thread.prev_sched = NULL;

	/* Handle any immediate reschedules or signals */
	interrupt_end();

	userspace(&current->thread.regs.regs);
}

int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long stack_top, struct task_struct * p,
		struct pt_regs *regs)
{
	void (*handler)(void);
	int ret = 0;

	p->thread = (struct thread_struct) INIT_THREAD;

	if (current->thread.forking) {
		memcpy(&p->thread.regs.regs, &regs->regs,
		       sizeof(p->thread.regs.regs));
		PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
		if (sp != 0)
			REGS_SP(p->thread.regs.regs.gp) = sp;

		handler = fork_handler;

		arch_copy_thread(&current->thread.arch, &p->thread.arch);
	} else {
		get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
		p->thread.request.u.thread = current->thread.request.u.thread;
		handler = new_thread_handler;
	}

	new_thread(task_stack_page(p), &p->thread.switch_buf, handler);

	if (current->thread.forking) {
		clear_flushed_tls(p);

		/*
		 * Set a new TLS for the child thread?
		 */
		if (clone_flags & CLONE_SETTLS)
			ret = arch_copy_tls(p);
	}

	return ret;
}

void initial_thread_cb(void (*proc)(void *), void *arg)
{
	int save_kmalloc_ok = kmalloc_ok;

	kmalloc_ok = 0;
	initial_thread_cb_skas(proc, arg);
	kmalloc_ok = save_kmalloc_ok;
}

void default_idle(void)
{
	unsigned long long nsecs;

	while (1) {
		/* endless idle loop with no priority at all */

		/*
		 * although we are an idle CPU, we do not want to
		 * get into the scheduler unnecessarily.
		 */
		if (need_resched())
			schedule();

		tick_nohz_idle_enter();
		rcu_idle_enter();
		nsecs = disable_timer();
		idle_sleep(nsecs);
		rcu_idle_exit();
		tick_nohz_idle_exit();
	}
}

void cpu_idle(void)
{
	cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
	default_idle();
}

int __cant_sleep(void) {
	return in_atomic() || irqs_disabled() || in_interrupt();
	/* Is in_interrupt() really needed? */
}

int user_context(unsigned long sp)
{
	unsigned long stack;

	stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
	return stack != (unsigned long) current_thread_info();
}

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

void do_uml_exitcalls(void)
{
	exitcall_t *call;

	call = &__uml_exitcall_end;
	while (--call >= &__uml_exitcall_begin)
		(*call)();
}

char *uml_strdup(const char *string)
{
	return kstrdup(string, GFP_KERNEL);
}
EXPORT_SYMBOL(uml_strdup);

int copy_to_user_proc(void __user *to, void *from, int size)
{
	return copy_to_user(to, from, size);
}

int copy_from_user_proc(void *to, void __user *from, int size)
{
	return copy_from_user(to, from, size);
}

int clear_user_proc(void __user *buf, int size)
{
	return clear_user(buf, size);
}

int strlen_user_proc(char __user *str)
{
	return strlen_user(str);
}

int smp_sigio_handler(void)
{
#ifdef CONFIG_SMP
	int cpu = current_thread_info()->cpu;
	IPI_handler(cpu);
	if (cpu != 0)
		return 1;
#endif
	return 0;
}

int cpu(void)
{
	return current_thread_info()->cpu;
}

static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;

void set_using_sysemu(int value)
{
	if (value > sysemu_supported)
		return;
	atomic_set(&using_sysemu, value);
}

int get_using_sysemu(void)
{
	return atomic_read(&using_sysemu);
}

static int sysemu_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", get_using_sysemu());
	return 0;
}

static int sysemu_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, sysemu_proc_show, NULL);
}

static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *pos)
{
	char tmp[2];

	if (copy_from_user(tmp, buf, 1))
		return -EFAULT;

	if (tmp[0] >= '0' && tmp[0] <= '2')
		set_using_sysemu(tmp[0] - '0');
	/* We use the first char, but pretend to write everything */
	return count;
}

static const struct file_operations sysemu_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= sysemu_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= sysemu_proc_write,
};

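/*
 * Usage sketch from userspace inside the UML instance (assumes the
 * usual /proc mount; includes and error handling omitted). Reading
 * reports the current sysemu level, writing a digit 0-2 selects it:
 *
 *	int fd = open("/proc/sysemu", O_WRONLY);
 *
 *	write(fd, "2", 1);	// only the first char is examined
 *	close(fd);
 */
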
int __init make_proc_sysemu(void)
{
	struct proc_dir_entry *ent;
	if (!sysemu_supported)
		return 0;

	ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_fops);

	if (ent == NULL) {
		printk(KERN_WARNING "Failed to register /proc/sysemu\n");
		return 0;
	}

	return 0;
}

late_initcall(make_proc_sysemu);

int singlestepping(void * t)
{
	struct task_struct *task = t ? t : current;

	if (!(task->ptrace & PT_DTRACE))
		return 0;

	if (task->thread.singlestep_syscall)
		return 1;

	return 2;
}

/*
 * Only x86 and x86_64 have an arch_align_stack().
 * All other arches have "#define arch_align_stack(x) (x)"
 * in their asm/system.h
 * As this is included in UML from asm-um/system-generic.h,
 * we can use it to behave as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
#endif

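/*
 * Worked example: with sp == 0x7fff1000 and a random offset of 300,
 * the subtraction gives 0x7fff0ed4, and the final mask rounds that
 * down to 0x7fff0ed0, preserving 16-byte stack alignment.
 */
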
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack_page, sp, ip;
	bool seen_sched = 0;

	if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING))
		return 0;

	stack_page = (unsigned long) task_stack_page(p);
	/* Bail if the process has no kernel stack for some reason */
	if (stack_page == 0)
		return 0;

	sp = p->thread.switch_buf->JB_SP;
	/*
	 * Bail if the stack pointer is below the bottom of the kernel
	 * stack for some reason
	 */
	if (sp < stack_page)
		return 0;

	while (sp < stack_page + THREAD_SIZE) {
		ip = *((unsigned long *) sp);
		if (in_sched_functions(ip))
			/* Ignore everything until we're above the scheduler */
			seen_sched = 1;
		else if (kernel_text_address(ip) && seen_sched)
			return ip;

		sp += sizeof(unsigned long);
	}

	return 0;
}

int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
{
	int cpu = current_thread_info()->cpu;

	return save_fp_registers(userspace_pid[cpu], (unsigned long *) fpu);
}