/*
 * Kernel execution entry point code.
 *
 * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
 *	Initial PowerPC version.
 * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 *	Low-level exception handlers, MMU support, and rewrite.
 * Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
 *	PowerPC 8xx modifications.
 * Copyright (c) 1998-1999 TiVo, Inc.
 *	PowerPC 403GCX modifications.
 * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
 *	PowerPC 403GCX/405GP modifications.
 * Copyright 2000 MontaVista Software Inc.
 *	PPC405 and PowerPC 403GCX/405GP modifications.
 *	Author: MontaVista Software, Inc.
 *		frank_rowand@mvista.com or source@mvista.com
 *		debbie_chu@mvista.com
 * Copyright 2002-2004 MontaVista Software, Inc.
 *	PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
 * Copyright 2004 Freescale Semiconductor, Inc
 *	PowerPC e500 modifications, Kumar Gala <galak@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/init.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include "head_booke.h"
/* As with the other PowerPC ports, it is expected that when code
 * execution begins here, the following registers contain valid, yet
 * optional, information:
 *
 *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
 *   r4 - Starting address of the init RAM disk
 *   r5 - Ending address of the init RAM disk
 *   r6 - Start of kernel command line string (e.g. "mem=128")
 *   r7 - End of kernel command line string
 */
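/* For illustration only (a reader's note, not from the original source):
 * if the bootloader passed the command line "mem=128", r6 would point at
 * the 'm' and r7 would delimit the end of that string.
 */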
	/*
	 * Reserve a word at a fixed location to store the address
	 * of abatron_pteptrs
	 */
/*
 * Save parameters we are passed
 */
	li	r25,0			/* phys kernel start (low) */
	li	r24,0			/* CPU number */
	li	r23,0			/* phys kernel start (high) */
/* We try not to make any assumptions about how the boot loader
 * set up or used the TLBs. We invalidate all mappings from the
 * boot loader and load a single entry in TLB1[0] to map the
 * first 64M of kernel memory. Any boot info passed from the
 * bootloader needs to live in this first 64M.
 *
 * Requirement on bootloader:
 *  - The page we're executing in needs to reside in TLB1 and
 *    have IPROT=1. If not, an invalidate broadcast could
 *    evict the entry we're currently executing in.
 *
 *  r3 = Index of TLB1 entry we're executing in
 *  r4 = Current MSR[IS]
 *  r5 = Index of TLB1 temp mapping
 *
 * Later in mapin_ram we will correctly map lowmem, and resize TLB1[0]
 */
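/* A note on the MAS0 encoding used throughout this file (a reader's
 * sketch, inferred from the lis/rlwimi pairs below rather than taken
 * from the original source): "lis rN,0x1000" sets MAS0[TLBSEL]=1, and
 * "rlwimi rN,rE,16,4,15" inserts the entry index into the ESEL field,
 * i.e. MAS0 = 0x10000000 | (entry << 16). For example, entry 3 of
 * TLB1 would be selected with MAS0 = 0x10030000.
 */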
/* 1. Find the index of the entry we're executing in */
	bl	invstr			/* Find our address */
invstr:	mflr	r6			/* Make it accessible */
	rlwinm	r4,r7,27,31,31		/* extract MSR[IS] */
	tlbsx	0,r6			/* search MSR[IS], SPID=PID0 */
	andis.	r7,r7,MAS1_VALID@h
	rlwinm	r7,r7,21,28,31		/* extract MMUCFG[NPIDS] */
	bne	match_TLB		/* skip if NPIDS != 3 */
	tlbsx	0,r6			/* search MSR[IS], SPID=PID1 */
	andis.	r7,r7,MAS1_VALID@h
	tlbsx	0,r6			/* Fall through, we had to match */
	rlwinm	r3,r7,16,20,31		/* Extract MAS0(Entry) */
	mfspr	r7,SPRN_MAS1		/* Ensure IPROT is set */
	oris	r7,r7,MAS1_IPROT@h
/* 2. Invalidate all entries except the entry we're executing in */
	mfspr	r9,SPRN_TLB1CFG
	li	r6,0			/* Set Entry counter to 0 */
1:	lis	r7,0x1000		/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r6,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r6) */
	rlwinm	r7,r7,0,2,31		/* Clear MAS1 Valid and IPROT */
	beq	skpinv			/* Don't update the current execution TLB */
skpinv:	addi	r6,r6,1			/* Increment */
	cmpw	r6,r9			/* Are we done? */
	bne	1b			/* If not, repeat */
/* Invalidate TLB0 */

/* Invalidate TLB1 */
/* 3. Setup a temp mapping and jump to it */
	andi.	r5, r3, 0x1		/* Find an entry that's unused and non-zero */
	lis	r7,0x1000		/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r3,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r3) */

	/* grab and fixup the RPN */
	mfspr	r6,SPRN_MAS1		/* extract MAS1[SIZE] */
	rlwinm	r6,r6,25,27,31
	slw	r6,r8,r6		/* convert to mask */
	bl	1f			/* Find our address */
#ifdef CONFIG_PHYS_64BIT
	ori	r8,r25,(MAS3_SX|MAS3_SW|MAS3_SR)
	/* Just modify the entry ID and EPN for the temp mapping */
	lis	r7,0x1000		/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r5,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r5) */
	xori	r6,r4,1			/* Setup TMP mapping in the other Address space */
	oris	r6,r6,(MAS1_VALID|MAS1_IPROT)@h
	ori	r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_4K))@l
	li	r7,0			/* temp EPN = 0 */
	slwi	r6,r6,5			/* setup new context with other address space */
	bl	1f			/* Find our address */
/* 4. Clear out PIDs & Search info */
	rlwinm	r7,r7,21,28,31		/* extract MMUCFG[NPIDS] */
	bne	2f			/* skip if NPIDS != 3 */
/* 5. Invalidate mapping we started in */
	lis	r7,0x1000		/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r3,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r3) */
	rlwinm	r6,r6,0,2,0		/* clear IPROT */

/* Invalidate TLB1 */
/* The mapping only needs to be cache-coherent on SMP */
#define M_IF_SMP	MAS2_M
/* 6. Setup KERNELBASE mapping in TLB1[0] */
	lis	r6,0x1000		/* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */
	lis	r6,(MAS1_VALID|MAS1_IPROT)@h
	ori	r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_64M))@l
	lis	r6,MAS2_VAL(PAGE_OFFSET, BOOK3E_PAGESZ_64M, M_IF_SMP)@h
	ori	r6,r6,MAS2_VAL(PAGE_OFFSET, BOOK3E_PAGESZ_64M, M_IF_SMP)@l
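/* Illustrative only (reader's note, not from the original source): with
 * the usual 32-bit PAGE_OFFSET of 0xc0000000, MAS2_VAL above packs the
 * EPN and the WIMGE flags into one word, so on SMP the value programmed
 * is roughly 0xc0000000 | MAS2_M, i.e. a cacheable, coherent 64M mapping.
 */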
/* 7. Jump to KERNELBASE mapping */
	lis	r6,(KERNELBASE & ~0xfff)@h
	ori	r6,r6,(KERNELBASE & ~0xfff)@l
	ori	r7,r7,MSR_KERNEL@l
	bl	1f			/* Find our address */
	rfi				/* start execution out of TLB1[0] entry */
/* 8. Clear out the temp mapping */
2:	lis	r7,0x1000		/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r5,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r5) */
	rlwinm	r8,r8,0,2,0		/* clear IPROT */

/* Invalidate TLB1 */
	/* Establish the interrupt vector offsets */
	SET_IVOR(0,  CriticalInput);
	SET_IVOR(1,  MachineCheck);
	SET_IVOR(2,  DataStorage);
	SET_IVOR(3,  InstructionStorage);
	SET_IVOR(4,  ExternalInput);
	SET_IVOR(5,  Alignment);
	SET_IVOR(6,  Program);
	SET_IVOR(7,  FloatingPointUnavailable);
	SET_IVOR(8,  SystemCall);
	SET_IVOR(9,  AuxillaryProcessorUnavailable);
	SET_IVOR(10, Decrementer);
	SET_IVOR(11, FixedIntervalTimer);
	SET_IVOR(12, WatchdogTimer);
	SET_IVOR(13, DataTLBError);
	SET_IVOR(14, InstructionTLBError);
	SET_IVOR(15, DebugCrit);

	/* Establish the interrupt vector base */
	lis	r4,interrupt_base@h	/* IVPR only uses the high 16-bits */
	/* Setup the defaults for TLB entries */
	li	r2,(MAS4_TSIZED(BOOK3E_PAGESZ_4K))@l
	oris	r2,r2,MAS4_TLBSELD(1)@h
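	/* Reader's note (an illustration, not from the original source):
	 * MAS4 holds the defaults the MMU loads into MAS0/MAS1/MAS2 when a
	 * TLB miss exception is taken, so TSIZED=4K makes missed pages
	 * default to 4K and TLBSELD picks the default victim TLB array.
	 */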
	oris	r2,r2,HID0_DOZE@h
#if !defined(CONFIG_BDI_SWITCH)
	/*
	 * The Abatron BDI JTAG debugger does not tolerate others
	 * mucking with the debug registers.
	 */
	/* clear any residual debug events */
	/* Check to see if we're the second processor, and jump
	 * to the secondary_start code if so.
	 */
	bne	__secondary_start
/*
 * This is where the main kernel code starts.
 */
	ori	r2,r2,init_task@l

	/* ptr to current thread */
	addi	r4,r2,THREAD		/* init task's THREAD */
	mtspr	SPRN_SPRG_THREAD,r4

	lis	r1,init_thread_union@h
	ori	r1,r1,init_thread_union@l
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
#ifdef CONFIG_RELOCATABLE
	lis	r3,kernstart_addr@ha
	la	r3,kernstart_addr@l(r3)
#ifdef CONFIG_PHYS_64BIT
/*
 * Decide what sort of machine this is and initialize the MMU.
 */
	/* Setup PTE pointers for the Abatron bdiGDB */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	ori	r4, r4, KERNELBASE@l
	stw	r5, 0(r4)		/* Save abatron_pteptrs at a fixed location */
	lis	r4,start_kernel@h
	ori	r4,r4,start_kernel@l
	ori	r3,r3,MSR_KERNEL@l
	rfi				/* change context and jump to start_kernel */
/* Macros to hide the PTE size differences
 *
 * FIND_PTE -- walks the page tables given EA & pgdir pointer
 *	r11 -- PGDIR pointer
 *	label 2: is the bailout case
 *
 * if we find the pte (fall through):
 *	r11 is low pte word
 *	r12 is pointer to the pte
 *
 * (A C-style sketch of this walk follows the macro below.)
 */
#ifdef CONFIG_PTE_64BIT
#define FIND_PTE	\
	rlwinm	r12, r10, 13, 19, 29;	/* Compute pgdir/pmd offset */	\
	lwzx	r11, r12, r11;		/* Get pgd/pmd entry */		\
	rlwinm.	r12, r11, 0, 0, 20;	/* Extract pt base address */	\
	beq	2f;			/* Bail if no table */		\
	rlwimi	r12, r10, 23, 20, 28;	/* Compute pte address */	\
	lwz	r11, 4(r12);		/* Get pte entry */
#else
#define FIND_PTE	\
	rlwimi	r11, r10, 12, 20, 29;	/* Create L1 (pgdir/pmd) address */	\
	lwz	r11, 0(r11);		/* Get L1 entry */			\
	rlwinm.	r12, r11, 0, 0, 19;	/* Extract L2 (pte) base address */	\
	beq	2f;			/* Bail if no table */			\
	rlwimi	r12, r10, 22, 20, 29;	/* Compute PTE address */		\
	lwz	r11, 0(r12);		/* Get Linux PTE */
#endif
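/*
 * A C-style sketch of the 32-bit FIND_PTE walk above (a reader's
 * illustration; the index widths are inferred from the rlwinm/rlwimi
 * masks, not taken from the original source):
 *
 *	pgd = pgdir[ea >> 22];			// top 10 EA bits index the pgdir
 *	if ((pgd & ~0xfff) == 0)
 *		goto bail;			// label 2: no page table present
 *	pte_p = (u32 *)(pgd & ~0xfff) + ((ea >> 12) & 0x3ff);
 *	pte = *pte_p;				// r12 = pte_p, r11 = pte
 */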
/*
 * Interrupt vector entry code
 *
 * The Book E MMUs are always on so we don't need to handle
 * interrupts in real mode as with previous PPC processors. In
 * this case we handle interrupts in the kernel virtual address
 * space.
 *
 * Interrupt vectors are dynamically placed relative to the
 * interrupt prefix as determined by the address of interrupt_base.
 * The interrupt vector offsets are programmed using the labels
 * for each interrupt vector entry.
 *
 * Interrupt vectors must be aligned on a 16 byte boundary.
 * We align on a 32 byte cache line boundary for good measure.
 */
	/* Critical Input Interrupt */
	CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)

	/* Machine Check Interrupt */
#ifdef CONFIG_E200
	/* no RFMCI, MCSRRs on E200 */
	CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
#else
	MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
#endif
	/* Data Storage Interrupt */
	START_EXCEPTION(DataStorage)
	NORMAL_EXCEPTION_PROLOG
	mfspr	r5,SPRN_ESR		/* Grab the ESR, save it, pass arg3 */
	mfspr	r4,SPRN_DEAR		/* Grab the DEAR, save it, pass arg2 */
	andis.	r10,r5,(ESR_ILK|ESR_DLK)@h
	EXC_XFER_EE_LITE(0x0300, handle_page_fault)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE_LITE(0x0300, CacheLockingException)
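	/* Reader's note (an illustration of the dispatch above): the
	 * andis. against ESR[ILK]/ESR[DLK] separates cache-locking faults,
	 * which are routed to CacheLockingException, from ordinary data
	 * storage faults, which go to handle_page_fault.
	 */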
	/* Instruction Storage Interrupt */
	INSTRUCTION_STORAGE_EXCEPTION

	/* External Input Interrupt */
	EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)

	/* Alignment Interrupt */

	/* Program Interrupt */

	/* Floating Point Unavailable Interrupt */
#ifdef CONFIG_PPC_FPU
	FP_UNAVAILABLE_EXCEPTION
#else
#ifdef CONFIG_E200
	/* E200 treats 'normal' floating point instructions as FP Unavail exception */
	EXCEPTION(0x0800, FloatingPointUnavailable, program_check_exception, EXC_XFER_EE)
#else
	EXCEPTION(0x0800, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
#endif
#endif
	/* System Call Interrupt */
	START_EXCEPTION(SystemCall)
	NORMAL_EXCEPTION_PROLOG
	EXC_XFER_EE_LITE(0x0c00, DoSyscall)

	/* Auxiliary Processor Unavailable Interrupt */
	EXCEPTION(0x2900, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)

	/* Decrementer Interrupt */
	DECREMENTER_EXCEPTION

	/* Fixed Interval Timer Interrupt */
	/* TODO: Add FIT support */
	EXCEPTION(0x3100, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)
	/* Watchdog Timer Interrupt */
#ifdef CONFIG_BOOKE_WDT
	CRITICAL_EXCEPTION(0x3200, WatchdogTimer, WatchdogException)
#else
	CRITICAL_EXCEPTION(0x3200, WatchdogTimer, unknown_exception)
#endif
	/* Data TLB Error Interrupt */
	START_EXCEPTION(DataTLBError)
	mtspr	SPRN_SPRG_WSCRATCH0, r10	/* Save some working registers */
	mtspr	SPRN_SPRG_WSCRATCH1, r11
	mtspr	SPRN_SPRG_WSCRATCH2, r12
	mtspr	SPRN_SPRG_WSCRATCH3, r13
	mtspr	SPRN_SPRG_WSCRATCH4, r11
	mfspr	r10, SPRN_DEAR		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l
	mfspr	r12,SPRN_MAS1		/* Set TID to 0 */
	rlwinm	r12,r12,0,16,1

	/* Get the PGD for the current thread */
	mfspr	r11,SPRN_SPRG_THREAD
	/* Mask of required permission bits. Note that while we
	 * do copy ESR:ST to _PAGE_RW position as trying to write
	 * to an RO page is pretty common, we don't do it with
	 * _PAGE_DIRTY. We could do it, but it's a fairly rare
	 * event so I'd rather take the overhead when it happens
	 * rather than adding an instruction here. We should measure
	 * whether the whole thing is worth it in the first place,
	 * as we could avoid loading SPRN_ESR completely in the
	 * first place.
	 *
	 * TODO: Is it worth doing that mfspr & rlwimi in the first
	 * place or can we save a couple of instructions here ?
	 */
#ifdef CONFIG_PTE_64BIT
	oris	r13,r13,_PAGE_ACCESSED@h
#else
	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED
#endif
	rlwimi	r13,r12,11,29,29	/* Copy ESR:ST into the _PAGE_RW position */
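	/* Worked example of the rlwimi above (reader's note, assuming the
	 * usual 32-bit PTE layout where ESR_ST = 0x00800000 and
	 * _PAGE_RW = 0x004): rotating r12 left by 11 moves the ST bit from
	 * 0x00800000 down to 0x00000004, and the 29,29 mask inserts exactly
	 * that one bit into r13, so a faulting store also demands write
	 * permission in the PTE check below.
	 */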
	andc.	r13,r13,r11		/* Check permission */

#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_SMP
	subf	r10,r11,r12		/* create false data dep */
	lwzx	r13,r11,r10		/* Get upper pte bits */
#else
	lwz	r13,0(r12)		/* Get upper pte bits */
#endif
#endif

	bne	2f			/* Bail if permission/valid mismatch */
	/* Jump to common tlb load */

	/* The bailout. Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	/* Instruction TLB Error Interrupt */
	/*
	 * Nearly the same as above, except we get our
	 * information from different registers and bail out
	 * to a different point.
	 */
	START_EXCEPTION(InstructionTLBError)
	mtspr	SPRN_SPRG_WSCRATCH0, r10	/* Save some working registers */
	mtspr	SPRN_SPRG_WSCRATCH1, r11
	mtspr	SPRN_SPRG_WSCRATCH2, r12
	mtspr	SPRN_SPRG_WSCRATCH3, r13
	mtspr	SPRN_SPRG_WSCRATCH4, r11
	mfspr	r10, SPRN_SRR0		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l
	mfspr	r12,SPRN_MAS1		/* Set TID to 0 */
	rlwinm	r12,r12,0,16,1
	/* Make up the required permissions for kernel code */
#ifdef CONFIG_PTE_64BIT
	li	r13,_PAGE_PRESENT | _PAGE_BAP_SX
	oris	r13,r13,_PAGE_ACCESSED@h
#else
	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
#endif

	/* Get the PGD for the current thread */
	mfspr	r11,SPRN_SPRG_THREAD

	/* Make up the required permissions for user code */
#ifdef CONFIG_PTE_64BIT
	li	r13,_PAGE_PRESENT | _PAGE_BAP_UX
	oris	r13,r13,_PAGE_ACCESSED@h
#else
	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
#endif
	andc.	r13,r13,r11		/* Check permission */

#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_SMP
	subf	r10,r11,r12		/* create false data dep */
	lwzx	r13,r11,r10		/* Get upper pte bits */
#else
	lwz	r13,0(r12)		/* Get upper pte bits */
#endif
#endif

	bne	2f			/* Bail if permission mismatch */

	/* Jump to common TLB load point */

	/* The bailout. Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	/* SPE Unavailable */
#ifdef CONFIG_SPE
	START_EXCEPTION(SPEUnavailable)
	NORMAL_EXCEPTION_PROLOG
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE_LITE(0x2010, KernelSPE)
#else
	EXCEPTION(0x2020, SPEUnavailable, unknown_exception, EXC_XFER_EE)
#endif /* CONFIG_SPE */
	/* SPE Floating Point Data */
#ifdef CONFIG_SPE
	EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE)

	/* SPE Floating Point Round */
	EXCEPTION(0x2050, SPEFloatingPointRound, SPEFloatingPointRoundException, EXC_XFER_EE)
#else
	EXCEPTION(0x2040, SPEFloatingPointData, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2050, SPEFloatingPointRound, unknown_exception, EXC_XFER_EE)
#endif /* CONFIG_SPE */
	/* Performance Monitor */
	EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD)

	EXCEPTION(0x2070, Doorbell, doorbell_exception, EXC_XFER_STD)

	CRITICAL_EXCEPTION(0x2080, CriticalDoorbell, unknown_exception)

	/* Debug Interrupt */
	DEBUG_DEBUG_EXCEPTION
/*
 * Both the instruction and data TLB miss get to this
 * point to load the TLB.
 *	r10 - available to use
 *	r11 - TLB (info from Linux PTE)
 *	r12 - available to use
 *	r13 - upper bits of PTE (if PTE_64BIT) or available to use
 *	CR5 - results of addr >= PAGE_OFFSET
 *	MAS0, MAS1 - loaded with proper value when we get here
 *	MAS2, MAS3 - will need additional info from Linux PTE
 *	Upon exit, we reload everything and RFI.
 */

	/*
	 * We set execute, because we don't have the granularity to
	 * properly set this at the page level (Linux problem).
	 * Many of these bits are software only. For the bits we don't
	 * set here, we assume (as we properly should) that they already
	 * have the appropriate value.
	 */
#ifdef CONFIG_PTE_64BIT
	rlwimi	r12, r11, 32-19, 27, 31	/* extract WIMGE from pte */
#else
	rlwimi	r12, r11, 26, 27, 31	/* extract WIMGE from pte */
#endif
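	/* Reader's note on the rlwimi above (an illustration; the bit
	 * positions are inferred from the 27,31 mask rather than taken from
	 * the original source): the five WIMGE attribute bits of the Linux
	 * PTE are rotated into bits 27-31 of r12, which is exactly where
	 * MAS2 keeps W, I, M, G and E, so MAS2 ends up as EPN | WIMGE.
	 */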
#ifdef CONFIG_PTE_64BIT
	rlwinm	r12, r11, 32-2, 26, 31	/* Move in perm bits */
	andi.	r10, r11, _PAGE_DIRTY
	li	r10, MAS3_SW | MAS3_UW
1:	rlwimi	r12, r13, 20, 0, 11	/* grab RPN[32:43] */
	rlwimi	r12, r11, 20, 12, 19	/* grab RPN[44:51] */
BEGIN_MMU_FTR_SECTION
	srwi	r10, r13, 12		/* grab RPN[12:31] */
END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
	li	r10, (_PAGE_EXEC | _PAGE_PRESENT)
	rlwimi	r10, r11, 31, 29, 29	/* extract _PAGE_DIRTY into SW */
	andi.	r10, r11, _PAGE_USER	/* Test for _PAGE_USER */
	rlwimi	r11, r12, 0, 20, 31	/* Extract RPN from PTE and merge with perms */
	/* Round robin TLB1 entries assignment */

	/* Extract TLB1CFG(NENTRY) */
	mfspr	r11, SPRN_TLB1CFG
	andi.	r11, r11, 0xfff

	/* Extract MAS0(NV) */
	andi.	r13, r12, 0xfff

	/* check if we need to wrap */

	/* wrap back to first free tlbcam entry */
	lis	r13, tlbcam_index@ha
	lwz	r13, tlbcam_index@l(r13)
	rlwimi	r12, r13, 0, 20, 31
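/*
 * What the round-robin above roughly amounts to, as a C-style sketch
 * (a reader's illustration; the variable names are made up, not from
 * the original source):
 *
 *	nentry = TLB1CFG & 0xfff;	// number of TLB1 entries
 *	nv     = (MAS0 & 0xfff) + 1;	// advance the next-victim hint
 *	if (nv >= nentry)		// ran past the last entry?
 *		nv = tlbcam_index;	// wrap to first non-pinned slot
 *	MAS0 = (MAS0 & ~0xfff) | nv;	// install the new victim index
 */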
#endif /* CONFIG_E200 */

	/* Done...restore registers and get out of here. */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	rfi				/* Force context change */
/* Note that the SPE support is closely modeled after the AltiVec
 * support. Changes to one are likely to be applicable to the
 * other!
 */

/*
 * Disable SPE for the task which had SPE previously,
 * and save its SPE registers in its thread_struct.
 * Enables SPE for use in the kernel on return.
 * On SMP we know the SPE units are free, since we give it up on
 * every switch.
 */
	mtmsr	r5			/* enable use of SPE now */
	/*
	 * For SMP, we don't do lazy SPE switching because it just gets too
	 * horrendously complex, especially when a task switches from one CPU
	 * to another. Instead we call giveup_spe in switch_to.
	 */
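	/*
	 * Reader's sketch of the UP lazy-switch path that follows
	 * (illustrative C, not from the original source; the helper name
	 * save_spe_state is made up):
	 *
	 *	if (last_task_used_spe) {
	 *		save_spe_state(&last_task_used_spe->thread);	// EVRs + ACC
	 *		last_task_used_spe->thread.regs->msr &= ~MSR_SPE;
	 *	}
	 *	regs->msr |= MSR_SPE;		// enable SPE for current on return
	 *	last_task_used_spe = current;
	 */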
	lis	r3,last_task_used_spe@ha
	lwz	r4,last_task_used_spe@l(r3)
	addi	r4,r4,THREAD		/* want THREAD of last_task_used_spe */
	SAVE_32EVRS(0,r10,r4)
	evxor	evr10, evr10, evr10	/* clear out evr10 */
	evmwumiaa evr10, evr10, evr10	/* evr10 <- ACC = 0 * 0 + ACC */
	evstddx	evr10, r4, r5		/* save off accumulator */
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	andc	r4,r4,r10		/* disable SPE for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
#endif /* !CONFIG_SMP */
	/* enable use of SPE after return */
	mfspr	r5,SPRN_SPRG_THREAD	/* current task's THREAD (phys) */
	stw	r4,THREAD_USED_SPE(r5)
	REST_32EVRS(0,r10,r5)
	stw	r4,last_task_used_spe@l(r3)
#endif /* !CONFIG_SMP */
	/* restore registers and return */
2:	REST_4GPRS(3, r11)
/*
 * SPE unavailable trap from kernel - print a message, but let
 * the task use SPE in the kernel until it returns to user mode.
 */
	stw	r3,_MSR(r1)		/* enable use of SPE after return */
	mr	r4,r2			/* current */
87:	.string	"SPE used in kernel  (task=%p, pc=%x)\n"
#endif /* CONFIG_SPE */
/* Adjust or setup IVORs for e200 */
_GLOBAL(__setup_e200_ivors)
	li	r3,SPEUnavailable@l
	li	r3,SPEFloatingPointData@l
	li	r3,SPEFloatingPointRound@l

/* Adjust or setup IVORs for e500v1/v2 */
_GLOBAL(__setup_e500_ivors)
	li	r3,SPEUnavailable@l
	li	r3,SPEFloatingPointData@l
	li	r3,SPEFloatingPointRound@l
	li	r3,PerformanceMonitor@l

/* Adjust or setup IVORs for e500mc */
_GLOBAL(__setup_e500mc_ivors)
	li	r3,PerformanceMonitor@l
	li	r3,CriticalDoorbell@l
/*
 * extern void giveup_altivec(struct task_struct *prev)
 *
 * The e500 core does not have an AltiVec unit.
 */
_GLOBAL(giveup_altivec)
/*
 * extern void giveup_spe(struct task_struct *prev)
 */
	mtmsr	r5			/* enable use of SPE now */
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	SAVE_32EVRS(0, r4, r3)
	evxor	evr6, evr6, evr6	/* clear out evr6 */
	evmwumiaa evr6, evr6, evr6	/* evr6 <- ACC = 0 * 0 + ACC */
	evstddx	evr6, r4, r3		/* save off accumulator */
	mfspr	r6,SPRN_SPEFSCR
	stw	r6,THREAD_SPEFSCR(r3)	/* save spefscr register value */
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	andc	r4,r4,r3		/* disable SPE for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r4,last_task_used_spe@ha
	stw	r5,last_task_used_spe@l(r4)
#endif /* !CONFIG_SMP */
#endif /* CONFIG_SPE */
/*
 * extern void giveup_fpu(struct task_struct *prev)
 *
 * Not all FSL Book-E cores have an FPU
 */
#ifndef CONFIG_PPC_FPU
/*
 * extern void abort(void)
 *
 * At present, this routine just applies a system reset.
 */
	mtspr	SPRN_DBCR0,r13		/* disable all debug events */
	ori	r13,r13,MSR_DE@l	/* Enable Debug Events */
	mfspr	r13,SPRN_DBCR0
	lis	r13,(DBCR0_IDM|DBCR0_RST_CHIP)@h
	mtspr	SPRN_DBCR0,r13
_GLOBAL(set_context)

#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is the second parameter.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
#endif
	isync				/* Force context change */
_GLOBAL(flush_dcache_L1)
	mfspr	r3,SPRN_L1CFG0

	rlwinm	r5,r3,9,3		/* Extract cache block size */
	twlgti	r5,1			/* Only 32 and 64 byte cache blocks
					 * are currently defined.
					 */
	subfic	r6,r5,2			/* r6 = log2(1KiB / cache block size) -
					 * log2(number of ways)
					 */
	slw	r5,r4,r5		/* r5 = cache block size */

	rlwinm	r7,r3,0,0xff		/* Extract number of KiB in the cache */
	mulli	r7,r7,13		/* An 8-way cache will require 13
					 * loads per set.
					 */
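	/* Worked example of the sizing math above (a reader's note, assuming
	 * a 32 KiB, 8-way L1 with 64-byte blocks): the block-size code is 1,
	 * so r6 = 2 - 1 = 1 and r5 = 32 << 1 = 64; r7 = 32 * 13 = 416, which
	 * after scaling by r6 gives 416 << 1 = 832 loads, i.e. 13 loads for
	 * each of the 64 sets.
	 */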
	/* save off HID0 and set DCFA */
	ori	r9,r8,HID0_DCFA@l

1:	lwz	r3,0(r4)		/* Load... */

1:	dcbf	0,r4			/* ...and flush. */
	/* When we get here, r24 needs to hold the CPU # */
	.globl	__secondary_start
__secondary_start:
	lis	r3,__secondary_hold_acknowledge@h
	ori	r3,r3,__secondary_hold_acknowledge@l
	mr	r4,r24			/* Why? */
	lis	r3,tlbcam_index@ha
	lwz	r3,tlbcam_index@l(r3)
	li	r26,0			/* r26 safe? */
	/* Load each CAM entry */

	/* get current_thread_info and current */
	lis	r1,secondary_ti@ha
	lwz	r1,secondary_ti@l(r1)
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD

	/* ptr to current thread */
	addi	r4,r2,THREAD		/* address of our thread_struct */
	mtspr	SPRN_SPRG_THREAD,r4

	/* Setup the defaults for TLB entries */
	li	r4,(MAS4_TSIZED(BOOK3E_PAGESZ_4K))@l

	/* Jump to start_secondary */
	ori	r4,r4,MSR_KERNEL@l
	lis	r3,start_secondary@h
	ori	r3,r3,start_secondary@l
	.globl	__secondary_hold_acknowledge
__secondary_hold_acknowledge:
/*
 * We put a few things here that have to be page-aligned. This stuff
 * goes at the beginning of the data segment, which is page-aligned.
 */
	.globl	empty_zero_page
	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE

/*
 * Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */