/*
 * arch/ubicom32/kernel/ubicom32_context_switch.S
 *	Implements context switch and return functions.
 *
 * (C) Copyright 2009, Ubicom, Inc.
 *
 * This file is part of the Ubicom32 Linux Kernel Port.
 *
 * The Ubicom32 Linux Kernel Port is free software: you can redistribute
 * it and/or modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, either version 2 of the
 * License, or (at your option) any later version.
 *
 * The Ubicom32 Linux Kernel Port is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with the Ubicom32 Linux Kernel Port. If not,
 * see <http://www.gnu.org/licenses/>.
 *
 * Ubicom32 implementation derived from (with many thanks):
 */
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/ubicom32-common.h>
#include <asm/ip5000.h>
#include <asm/range-protect.h>
/*
 * begin_restore_context()
 *	Restore most of the context from sp (struct pt_reg *)
 *
 * This *can* be called without the global atomic lock. (because sp is
 * not restored!) Only d15 and a3 are allowed to be used after this
 * before calling complete_restore_context
 */
.macro begin_restore_context
	move.4	d10, PT_D10(sp)
	move.4	d11, PT_D11(sp)
	move.4	d12, PT_D12(sp)
	move.4	d13, PT_D13(sp)
	move.4	d14, PT_D14(sp)
	;; d15 and a3 are deliberately left for complete_restore_context,
	;; so callers may still use them between the two macros.
	;; move.4 d15, PT_D15(sp)
	;; move.4 a3, PT_A3(sp)
	move.4	acc0_hi, PT_ACC0HI(sp)
	move.4	acc0_lo, PT_ACC0LO(sp)
	move.4	mac_rc16, PT_MAC_RC16(sp)
	move.4	acc1_hi, PT_ACC1HI(sp)
	move.4	acc1_lo, PT_ACC1LO(sp)
	move.4	source3, PT_SOURCE3(sp)
	move.4	int_mask0, PT_INT_MASK0(sp)
	move.4	int_mask1, PT_INT_MASK1(sp)
	;; NOTE(review): the rest of the macro body (d0-d9 and address-register
	;; restores) and the closing .endm are not visible in this chunk --
	;; confirm against the full file.
/*
 * complete_restore_context()
 *	Completely restore the context from sp (struct pt_reg *)
 *
 * Note: Recovered PC and CSR are saved on the stack and are to be
 * popped off before returning.
 */
.macro complete_restore_context
	;; NOTE(review): a3 must already point at the pt_regs save area when
	;; the PT_*(a3) loads below run; the instruction establishing a3 is
	;; not visible in this chunk -- confirm against the full file.
	move.4	d15, PT_D15(sp)
	move.4	sp, PT_SP(a3)		; Recover Stack pointer from save area
	move.4	-4(sp)++, PT_PC(a3)	; Recover saved PC and save to stack
	move.4	-4(sp)++, PT_CSR(a3)	; Recover saved csr and save to stack
/*
 * old restore_context macro
 *	Legacy one-shot restore; finishes the restore via
 *	complete_restore_context.
 */
.macro restore_context
	;; NOTE(review): the begin_restore_context invocation and the closing
	;; .endm are not visible in this chunk -- confirm against the full file.
	complete_restore_context
/*
 * ldsr_thread_enable_interrupts()
 *	An assembly version of the enable interrupts function.
 *
 * The stack is fair game but all registers MUST be preserved.
 */
.macro ldsr_thread_enable_interrupts
	move.4	-4(sp)++, d3		; Push d3
	move.4	-4(sp)++, a3		; Push a3

	/*
	 * Read the ROSR and obtain ~(1 << tid)
	 */
	lsr.4	d3, rosr, #0x2		; Move the thread portion of ROSR into d3
	lsl.4	d3, #1, d3		; perform a (1 << tid)
	not.4	d3, d3			; Negate the value of d3 == ~(1 << threadid)

	/*
	 * Get the value of the ldsr_soft_irq_mask
	 */
	moveai	a3, #%hi(ldsr_soft_irq_mask)
	move.4	a3, %lo(ldsr_soft_irq_mask)(a3)

	/*
	 * Now re-enable interrupts for this thread: and-ing with
	 * ~(1 << tid) clears this thread's bit in scratchpad1.
	 */
	and.4	scratchpad1, scratchpad1, d3

	/*
	 * Restore the registers.
	 * NOTE(review): the matching pops of a3/d3 and the closing .endm are
	 * not visible in this chunk -- confirm against the full file.
	 */
/*
 * ret_from_interrupt_to_kernel()
 *	RFI function that is where do_IRQ() returns to if the thread was
 *	in kernel space when the interrupt occurred.
 */
	.section .text.ret_from_interrupt_to_kernel, "ax", @progbits
	.global ret_from_interrupt_to_kernel
ret_from_interrupt_to_kernel:
	begin_restore_context			; Restore the thread context
	atomic_lock_acquire			; Enter critical section
	complete_restore_context		; Restore the thread context
	atomic_lock_release			; Leave critical section
	ldsr_thread_enable_interrupts		; enable the threads interrupts
	move.4	csr, (sp)4++			; Restore csr from the stack
	;; NOTE(review): the final pc pop / return sequence is not visible in
	;; this chunk -- confirm against the full file.
/*
 * ret_from_interrupt_to_user()
 *	RFI function that is where do_IRQ() returns to if the thread was
 *	in user space when the interrupt occurred.
 *
 * TODO: Do we really need the critical section handling in this code?
 */
	.section .text.ret_from_interrupt_to_user, "ax", @progbits
	.global ret_from_interrupt_to_user
ret_from_interrupt_to_user:
	ldsr_thread_enable_interrupts		; enable the threads interrupts

	/*
	 * Set a1 to the thread info pointer, no need to save it as we are
	 * restoring userspace and will never return
	 */
	movei	d0, #(~(ASM_THREAD_SIZE-1))
	;; NOTE(review): the instruction masking sp into a1 with d0 is not
	;; visible in this chunk -- confirm against the full file.

	/*
	 * Test if the scheduler needs to be called.
	 */
	btst	TI_FLAGS(a1), #ASM_TIF_NEED_RESCHED
	;; NOTE(review): the conditional branch that should follow this btst
	;; is not visible in this chunk.
	call	a5, schedule			; Call the scheduler. I will come back here.

	/*
	 * See if we have pending signals and call do_signal
	 */
	btst	TI_FLAGS(a1), #ASM_TIF_SIGPENDING	; Any signals needed?
	;; NOTE(review): the conditional branch that should follow this btst
	;; is not visible in this chunk.

	/*
	 * Now call do_signal()
	 */
	move.4	d0, #0				; oldset pointer is NULL
	move.4	d1, sp				; d1 is the regs pointer
	call	a5, do_signal			; Call do_signal()

	/*
	 * Back from do_signal(), re-enter critical section.
	 */
	begin_restore_context			; Restore the thread context
	atomic_lock_acquire			; Enter critical section
	call	a3, __complete_and_return_to_userspace	; jump to unprotected section
/*
 * restore_all_registers()
 *
 * restore_all_registers will be the alternate exit route for
 * preempted processes that have called a signal handler
 * and are returning back to user space.
 */
	.section .text.restore_all_registers, "ax", @progbits
	.global restore_all_registers
restore_all_registers:
	begin_restore_context			; Restore the thread context
	atomic_lock_acquire			; Enter critical section
	call	a3, __complete_and_return_to_userspace	; finish in unprotected section
/*
 * __complete_and_return_to_userspace()
 *	Restores the second half of the context and returns to user space.
 *
 * You must hold the atomic lock when you call this function.
 */
	.section .kernel_unprotected, "ax", @progbits
__complete_and_return_to_userspace:
	disable_kernel_ranges_for_current d15	; disable kernel ranges
	complete_restore_context		; restore previous context
	atomic_lock_release			; Leave critical section
	move.4	csr, (sp)4++			; Restore csr from the stack
	;; NOTE(review): the final pc pop / return-to-user sequence is not
	;; visible in this chunk -- confirm against the full file.
/*
 * ret_from_fork()
 *	Called on the child's return from fork system call.
 */
	.section .text.ret_from_fork, "ax", @progbits
	.global ret_from_fork
ret_from_fork:				; NOTE(review): label restored; it was lost in extraction
	;; d0 contains the arg for schedule_tail
	;; the others we don't care about as they are in PT_REGS (sp)
	call	a5, schedule_tail

	atomic_lock_acquire			; Enter critical section

	;; NOTE(review): a3 must point at the pt_regs save area before the
	;; loads below; the instruction establishing a3 is not visible in this
	;; chunk -- confirm against the full file.
	move.4	d0, PT_D0(a3)			; Restore D0
	move.4	d1, PT_D1(a3)			; Restore D1
	move.4	d2, PT_D2(a3)			; Restore D2
	move.4	d3, PT_D3(a3)			; Restore D3
	move.4	d10, PT_D10(a3)			; Restore D10
	move.4	d11, PT_D11(a3)			; Restore D11
	move.4	d12, PT_D12(a3)			; Restore D12
	move.4	d13, PT_D13(a3)			; Restore D13
	move.4	a1, PT_A1(a3)			; Restore A1
	move.4	a2, PT_A2(a3)			; Restore A2
	move.4	a5, PT_A5(a3)			; Restore A5
	move.4	a6, PT_A6(a3)			; Restore A6
	;; I think atomic_lock_acquire could be moved here..
	move.4	sp, PT_SP(a3)			; Restore sp
	move.4	a4, PT_PC(a3)			; Restore pc in register a4
	move.4	PT_FRAME_TYPE(a3), #0		; Clear frame_type to indicate it is invalid.

#ifdef CONFIG_PROTECT_KERNEL
	/* When kernel protection is on, finish from an unprotected section. */
	call	a3, __ret_from_fork_bottom_half
	.section .kernel_unprotected, "ax", @progbits
__ret_from_fork_bottom_half:
	disable_kernel_ranges_for_current d15
#endif					; NOTE(review): #endif restored; it was lost in extraction
	atomic_lock_release			; Leave critical section
	calli	a4, 0(a4)			; Return.
/*
 * void *__switch_to(struct task_struct *prev,
 *		     struct thread_struct *prev_switch,
 *		     struct thread_struct *next_switch)
 */
	.section .text.__switch_to, "ax", @progbits
	.global __switch_to
__switch_to:				; NOTE(review): global/label restored; lost in extraction
					; (required by the .size directive below)
	/*
	 * Set up register a3 to point to save area.
	 */
	movea	a3, d1			; a3 now holds prev_switch
	;; NOTE(review): the register-save sequence into prev_switch is not
	;; visible in this chunk -- confirm against the full file.

	/*
	 * Set up register a3 to point to restore area.
	 */
	movea	a3, d2			; a3 now holds next_switch
	;; NOTE(review): the register-restore sequence from next_switch is not
	;; visible in this chunk -- confirm against the full file.

	/*
	 * Load the sw_ksp with the proper thread_info pointer.
	 */
	movei	d15, #(~(ASM_THREAD_SIZE-1))
	and.4	a3, sp, d15		; a3 now has the thread info pointer
	moveai	a4, #%hi(sw_ksp)
	lea.1	a4, %lo(sw_ksp)(a4)	; a4 now has the base address of sw_ksp array
	lsr.4	d15, ROSR, #2		; Thread number - bit's 6 through 31 are zeroes anyway.
	move.4	(a4, d15), a3		; Load the thread info pointer into the hw_ksp array..

	/*
	 * We are done with context switch. Time to return..
	 * NOTE(review): the actual return instruction is not visible in this
	 * chunk -- confirm against the full file.
	 */
	.size __switch_to, . - __switch_to
/*
 * ubicom32_emulate_insn()
 *	Emulates the instruction.
 *
 * unsigned int ubicom32_emulate_insn(int source1, int source2, int source3,
 *				      int *save_acc, int *save_csr);
 */
	.section .text.ubicom32_emulate_insn, "ax", @progbits
	.global ubicom32_emulate_insn
ubicom32_emulate_insn:
	movea	a3, d3			; a3 holds save_acc pointer
	movea	a4, d4			; a4 holds save_csr pointer

	;; Load the saved accumulator/MAC state before emulation.
	;; NOTE(review): the restore of acc0_lo from (a3) is not visible in
	;; this chunk -- confirm against the full file.
	move.4	acc0_hi, 4(a3)
	move.4	acc1_lo, 8(a3)
	move.4	acc1_hi, 12(a3)
	move.4	mac_rc16, 16(a3)

	;; NOTE(review): the emulation sequence itself (csr setup and
	;; execution of the instruction) is not visible in this chunk.
	move.4	(a4), CSR		; Save csr

	;; Write the (possibly updated) accumulator/MAC state back out.
	move.4	4(a3), acc0_hi
	move.4	8(a3), acc1_lo
	move.4	12(a3), acc1_hi
	move.4	16(a3), mac_rc16
	;; NOTE(review): the return instruction is not visible in this chunk.
	.size ubicom32_emulate_insn, . - ubicom32_emulate_insn