1 /*
2  * This file contains the 64-bit "server" PowerPC variant
3  * of the low level exception handling including exception
4  * vectors, exception return, part of the slb and stab
5  * handling and other fixed offset specific things.
6  *
7  * This file is meant to be #included from head_64.S due to
8  * position dependent assembly.
9  *
10  * Most of this originates from head_64.S and thus has the same
11  * copyright history.
12  *
13  */
14
15 #include <asm/hw_irq.h>
16 #include <asm/exception-64s.h>
17 #include <asm/ptrace.h>
18
19 /*
20  * We lay out physical memory as follows:
21  * 0x0000 - 0x00ff : Secondary processor spin code
22  * 0x0100 - 0x2fff : pSeries Interrupt prologs
23  * 0x3000 - 0x5fff : interrupt support / common interrupt prologs
24  * 0x6000 - 0x6fff : Initial (CPU0) segment table
25  * 0x7000 - 0x7fff : FWNMI data area
26  * 0x8000 -        : Early init and support code
27  */
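/*
 * For cross-reference, the matching labels in this file are: 0x100 is
 * __start_interrupts below, 0x7000 is fwnmi_data_area, and the early
 * init area begins at the ". = 0x8000" near the end of the file.
 */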
28         /* Syscall routine is used twice, in reloc-off and reloc-on paths */
29 #define SYSCALL_PSERIES_1                                       \
30 BEGIN_FTR_SECTION                                               \
31         cmpdi   r0,0x1ebe ;                                     \
32         beq-    1f ;                                            \
33 END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)                          \
34         mr      r9,r13 ;                                        \
35         GET_PACA(r13) ;                                         \
36         mfspr   r11,SPRN_SRR0 ;                                 \
37 0:
38
39 #define SYSCALL_PSERIES_2_RFID                                  \
40         mfspr   r12,SPRN_SRR1 ;                                 \
41         ld      r10,PACAKBASE(r13) ;                            \
42         LOAD_HANDLER(r10, system_call_entry) ;                  \
43         mtspr   SPRN_SRR0,r10 ;                                 \
44         ld      r10,PACAKMSR(r13) ;                             \
45         mtspr   SPRN_SRR1,r10 ;                                 \
46         rfid ;                                                  \
47         b       . ;     /* prevent speculative execution */
48
49 #define SYSCALL_PSERIES_3                                       \
50         /* Fast LE/BE switch system call */                     \
51 1:      mfspr   r12,SPRN_SRR1 ;                                 \
52         xori    r12,r12,MSR_LE ;                                \
53         mtspr   SPRN_SRR1,r12 ;                                 \
54         rfid ;          /* return to userspace */               \
55         b       . ;                                             \
56 2:      mfspr   r12,SPRN_SRR1 ;                                 \
57         andi.   r12,r12,MSR_PR ;                                \
58         bne     0b ;                                            \
59         mtspr   SPRN_SRR0,r3 ;                                  \
60         mtspr   SPRN_SRR1,r4 ;                                  \
61         mtspr   SPRN_SDR1,r5 ;                                  \
62         rfid ;                                                  \
63         b       . ;     /* prevent speculative execution */
64
65 #if defined(CONFIG_RELOCATABLE)
66         /*
67          * We can't branch directly; in the direct case we use LR
68          * and system_call_entry restores LR.  (We thus need to move
69          * LR to r10 in the RFID case too.)
70          */
71 #define SYSCALL_PSERIES_2_DIRECT                                \
72         mflr    r10 ;                                           \
73         ld      r12,PACAKBASE(r13) ;                            \
74         LOAD_HANDLER(r12, system_call_entry_direct) ;           \
75         mtlr    r12 ;                                           \
76         mfspr   r12,SPRN_SRR1 ;                                 \
77         /* Re-use of r13... No spare regs to do this */ \
78         li      r13,MSR_RI ;                                    \
79         mtmsrd  r13,1 ;                                         \
80         GET_PACA(r13) ; /* get r13 back */                      \
81         blr ;
82 #else
83         /* We can branch directly */
84 #define SYSCALL_PSERIES_2_DIRECT                                \
85         mfspr   r12,SPRN_SRR1 ;                                 \
86         li      r10,MSR_RI ;                                    \
87         mtmsrd  r10,1 ;                 /* Set RI (EE=0) */     \
88         b       system_call_entry_direct ;
89 #endif
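        /*
         * Rough sketch of how the three pieces are stitched together at
         * the 0xc00 system call vector below (and, per the comment at
         * the top, again on the relocation-on path):
         *
         *      SYSCALL_PSERIES_1               save r13 in r9, SRR0 in r11, test for 0x1ebe
         *      SYSCALL_PSERIES_2_RFID          rfid to system_call_entry
         *      SYSCALL_PSERIES_3               fast LE/BE switch tail
         */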
90
91 /*
92  * This is the start of the interrupt handlers for pSeries
93  * This code runs with relocation off.
94  * Code from here to __end_interrupts gets copied down to real
95  * address 0x100 when we are running a relocatable kernel.
96  * Therefore any relative branches in this section must only
97  * branch to labels in this section.
98  */
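/*
 * Relatedly, the CONFIG_RELOCATABLE cases below (e.g. the SLB miss
 * vectors) avoid a direct branch to .slb_miss_realmode, since its
 * distance depends on where the kernel lands; they load an absolute
 * handler address from PACAKBASE via LOAD_HANDLER and jump through
 * the count register instead.
 */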
99         . = 0x100
100         .globl __start_interrupts
101 __start_interrupts:
102
103         .globl system_reset_pSeries;
104 system_reset_pSeries:
105         HMT_MEDIUM;
106         SET_SCRATCH0(r13)
107 #ifdef CONFIG_PPC_P7_NAP
108 BEGIN_FTR_SECTION
109         /* Running native on arch 2.06 or later, check if we are
110          * waking up from nap. We only handle no state loss and
111          * supervisor state loss. We do -not- handle hypervisor
112          * state loss at this time.
113          */
114         mfspr   r13,SPRN_SRR1
115         rlwinm. r13,r13,47-31,30,31
116         beq     9f
117
118         /* waking up from powersave (nap) state */
119         cmpwi   cr1,r13,2
120         /* Total loss of HV state is fatal; we could try to use the
121          * PIR to locate a PACA, then use an emergency stack etc...
122          * but for now, let's just stay stuck here
123          */
124         bgt     cr1,.
125         GET_PACA(r13)
126
127 #ifdef CONFIG_KVM_BOOK3S_64_HV
128         li      r0,KVM_HWTHREAD_IN_KERNEL
129         stb     r0,HSTATE_HWTHREAD_STATE(r13)
130         /* Order setting hwthread_state vs. testing hwthread_req */
131         sync
132         lbz     r0,HSTATE_HWTHREAD_REQ(r13)
133         cmpwi   r0,0
134         beq     1f
135         b       kvm_start_guest
136 1:
137 #endif
138
139         beq     cr1,2f
140         b       .power7_wakeup_noloss
141 2:      b       .power7_wakeup_loss
142 9:
143 END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
144 #endif /* CONFIG_PPC_P7_NAP */
145         EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
146                                  NOTEST, 0x100)
147
148         . = 0x200
149 machine_check_pSeries_1:
150         /* This is moved out of line as it can be patched by FW, but
151          * some code path might still want to branch into the original
152          * vector
153          */
154         b       machine_check_pSeries
155
156         . = 0x300
157         .globl data_access_pSeries
158 data_access_pSeries:
159         HMT_MEDIUM
160         SET_SCRATCH0(r13)
161 BEGIN_FTR_SECTION
162         b       data_access_check_stab
163 data_access_not_stab:
164 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
165         EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
166                                  KVMTEST, 0x300)
167
168         . = 0x380
169         .globl data_access_slb_pSeries
170 data_access_slb_pSeries:
171         HMT_MEDIUM
172         SET_SCRATCH0(r13)
173         EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
174         std     r3,PACA_EXSLB+EX_R3(r13)
175         mfspr   r3,SPRN_DAR
176 #ifdef __DISABLED__
177         /* Keep that around for when we re-implement dynamic VSIDs */
178         cmpdi   r3,0
179         bge     slb_miss_user_pseries
180 #endif /* __DISABLED__ */
181         mfspr   r12,SPRN_SRR1
182 #ifndef CONFIG_RELOCATABLE
183         b       .slb_miss_realmode
184 #else
185         /*
186          * We can't just use a direct branch to .slb_miss_realmode
187          * because the distance from here to there depends on where
188          * the kernel ends up being put.
189          */
190         mfctr   r11
191         ld      r10,PACAKBASE(r13)
192         LOAD_HANDLER(r10, .slb_miss_realmode)
193         mtctr   r10
194         bctr
195 #endif
196
197         STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access)
198
199         . = 0x480
200         .globl instruction_access_slb_pSeries
201 instruction_access_slb_pSeries:
202         HMT_MEDIUM
203         SET_SCRATCH0(r13)
204         EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
205         std     r3,PACA_EXSLB+EX_R3(r13)
206         mfspr   r3,SPRN_SRR0            /* SRR0 is faulting address */
207 #ifdef __DISABLED__
208         /* Keep that around for when we re-implement dynamic VSIDs */
209         cmpdi   r3,0
210         bge     slb_miss_user_pseries
211 #endif /* __DISABLED__ */
212         mfspr   r12,SPRN_SRR1
213 #ifndef CONFIG_RELOCATABLE
214         b       .slb_miss_realmode
215 #else
216         mfctr   r11
217         ld      r10,PACAKBASE(r13)
218         LOAD_HANDLER(r10, .slb_miss_realmode)
219         mtctr   r10
220         bctr
221 #endif
222
223         /* We open code these as we can't have a ". = x" (even with
224          * x = ".") within a feature section
225          */
226         . = 0x500;
227         .globl hardware_interrupt_pSeries;
228         .globl hardware_interrupt_hv;
229 hardware_interrupt_pSeries:
230 hardware_interrupt_hv:
231         BEGIN_FTR_SECTION
232                 _MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
233                                             EXC_HV, SOFTEN_TEST_HV)
234                 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
235         FTR_SECTION_ELSE
236                 _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
237                                             EXC_STD, SOFTEN_TEST_HV_201)
238                 KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
239         ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
240
241         STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
242         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)
243
244         STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
245         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)
246
247         STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
248         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
249
250         MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
251         STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)
252
253         STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a)
254         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)
255
256         STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
257         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)
258
259         . = 0xc00
260         .globl  system_call_pSeries
261 system_call_pSeries:
262         HMT_MEDIUM
263 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
264         SET_SCRATCH0(r13)
265         GET_PACA(r13)
266         std     r9,PACA_EXGEN+EX_R9(r13)
267         std     r10,PACA_EXGEN+EX_R10(r13)
268         mfcr    r9
269         KVMTEST(0xc00)
270         GET_SCRATCH0(r13)
271 #endif
272         SYSCALL_PSERIES_1
273         SYSCALL_PSERIES_2_RFID
274         SYSCALL_PSERIES_3
275         KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
276
277         STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
278         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)
279
280         /* At 0xe??? we have a bunch of hypervisor exceptions; we branch
281          * out of line to handle them
282          */
283         . = 0xe00
284 hv_exception_trampoline:
285         b       h_data_storage_hv
286         . = 0xe20
287         b       h_instr_storage_hv
288         . = 0xe40
289         b       emulation_assist_hv
290         . = 0xe50
291         b       hmi_exception_hv
292         . = 0xe60
293         b       hmi_exception_hv
294
295         /* We need to deal with the Altivec unavailable exception
296          * here, which is at 0xf20 and thus in the middle of the
297          * prolog code of the PerformanceMonitor one. A little
298          * trickery is therefore necessary.
299          */
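        /* The "trickery": each vector here is just a single branch to an
         * out-of-line prolog (see the "moved from 0xf00" block further
         * down), so the full 0xf00 prolog cannot overlap the 0xf20 and
         * 0xf40 vectors.
         */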
300 performance_monitor_pSeries_1:
301         . = 0xf00
302         b       performance_monitor_pSeries
303
304 altivec_unavailable_pSeries_1:
305         . = 0xf20
306         b       altivec_unavailable_pSeries
307
308 vsx_unavailable_pSeries_1:
309         . = 0xf40
310         b       vsx_unavailable_pSeries
311
312 #ifdef CONFIG_CBE_RAS
313         STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
314         KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
315 #endif /* CONFIG_CBE_RAS */
316
317         STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
318         KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
319
320         . = 0x1500
321         .global denorm_exception_hv
322 denorm_exception_hv:
323         HMT_MEDIUM
324         mtspr   SPRN_SPRG_HSCRATCH0,r13
325         mfspr   r13,SPRN_SPRG_HPACA
326         std     r9,PACA_EXGEN+EX_R9(r13)
327         std     r10,PACA_EXGEN+EX_R10(r13)
328         std     r11,PACA_EXGEN+EX_R11(r13)
329         std     r12,PACA_EXGEN+EX_R12(r13)
330         mfspr   r9,SPRN_SPRG_HSCRATCH0
331         std     r9,PACA_EXGEN+EX_R13(r13)
332         mfcr    r9
333
334 #ifdef CONFIG_PPC_DENORMALISATION
335         mfspr   r10,SPRN_HSRR1
336         mfspr   r11,SPRN_HSRR0          /* save HSRR0 */
337         andis.  r10,r10,(HSRR1_DENORM)@h /* denorm? */
338         addi    r11,r11,-4              /* HSRR0 is next instruction */
339         bne+    denorm_assist
340 #endif
341
342         EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
343         KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1500)
344
345 #ifdef CONFIG_CBE_RAS
346         STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
347         KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
348 #endif /* CONFIG_CBE_RAS */
349
350         STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
351         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)
352
353 #ifdef CONFIG_CBE_RAS
354         STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
355         KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
356 #else
357         . = 0x1800
358 #endif /* CONFIG_CBE_RAS */
359
360
361 /*** Out of line interrupt support ***/
362
363         .align  7
364         /* moved from 0x200 */
365 machine_check_pSeries:
366         .globl machine_check_fwnmi
367 machine_check_fwnmi:
368         HMT_MEDIUM
369         SET_SCRATCH0(r13)               /* save r13 */
370         EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common,
371                                  EXC_STD, KVMTEST, 0x200)
372         KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
373
374         /* moved from 0x300 */
375 data_access_check_stab:
376         GET_PACA(r13)
377         std     r9,PACA_EXSLB+EX_R9(r13)
378         std     r10,PACA_EXSLB+EX_R10(r13)
379         mfspr   r10,SPRN_DAR
380         mfspr   r9,SPRN_DSISR
381         srdi    r10,r10,60
382         rlwimi  r10,r9,16,0x20
383 #ifdef CONFIG_KVM_BOOK3S_PR
384         lbz     r9,HSTATE_IN_GUEST(r13)
385         rlwimi  r10,r9,8,0x300
386 #endif
387         mfcr    r9
388         cmpwi   r10,0x2c
389         beq     do_stab_bolted_pSeries
390         mtcrf   0x80,r9
391         ld      r9,PACA_EXSLB+EX_R9(r13)
392         ld      r10,PACA_EXSLB+EX_R10(r13)
393         b       data_access_not_stab
394 do_stab_bolted_pSeries:
395         std     r11,PACA_EXSLB+EX_R11(r13)
396         std     r12,PACA_EXSLB+EX_R12(r13)
397         GET_SCRATCH0(r10)
398         std     r10,PACA_EXSLB+EX_R13(r13)
399         EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
400
401         KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
402         KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
403         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
404         KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
405         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
406         KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)
407
408 #ifdef CONFIG_PPC_DENORMALISATION
409 denorm_assist:
410 BEGIN_FTR_SECTION
411 /*
412  * To denormalise we need to move a copy of the register to itself.
413  * For POWER6 do that here for all FP regs.
414  */
415         mfmsr   r10
416         ori     r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
417         xori    r10,r10,(MSR_FE0|MSR_FE1)
418         mtmsrd  r10
419         sync
420         fmr     0,0
421         fmr     1,1
422         fmr     2,2
423         fmr     3,3
424         fmr     4,4
425         fmr     5,5
426         fmr     6,6
427         fmr     7,7
428         fmr     8,8
429         fmr     9,9
430         fmr     10,10
431         fmr     11,11
432         fmr     12,12
433         fmr     13,13
434         fmr     14,14
435         fmr     15,15
436         fmr     16,16
437         fmr     17,17
438         fmr     18,18
439         fmr     19,19
440         fmr     20,20
441         fmr     21,21
442         fmr     22,22
443         fmr     23,23
444         fmr     24,24
445         fmr     25,25
446         fmr     26,26
447         fmr     27,27
448         fmr     28,28
449         fmr     29,29
450         fmr     30,30
451         fmr     31,31
452 FTR_SECTION_ELSE
453 /*
454  * To denormalise we need to move a copy of the register to itself.
455  * For POWER7 do that here for the first 32 VSX registers only.
456  */
457         mfmsr   r10
458         oris    r10,r10,MSR_VSX@h
459         mtmsrd  r10
460         sync
461         XVCPSGNDP(0,0,0)
462         XVCPSGNDP(1,1,1)
463         XVCPSGNDP(2,2,2)
464         XVCPSGNDP(3,3,3)
465         XVCPSGNDP(4,4,4)
466         XVCPSGNDP(5,5,5)
467         XVCPSGNDP(6,6,6)
468         XVCPSGNDP(7,7,7)
469         XVCPSGNDP(8,8,8)
470         XVCPSGNDP(9,9,9)
471         XVCPSGNDP(10,10,10)
472         XVCPSGNDP(11,11,11)
473         XVCPSGNDP(12,12,12)
474         XVCPSGNDP(13,13,13)
475         XVCPSGNDP(14,14,14)
476         XVCPSGNDP(15,15,15)
477         XVCPSGNDP(16,16,16)
478         XVCPSGNDP(17,17,17)
479         XVCPSGNDP(18,18,18)
480         XVCPSGNDP(19,19,19)
481         XVCPSGNDP(20,20,20)
482         XVCPSGNDP(21,21,21)
483         XVCPSGNDP(22,22,22)
484         XVCPSGNDP(23,23,23)
485         XVCPSGNDP(24,24,24)
486         XVCPSGNDP(25,25,25)
487         XVCPSGNDP(26,26,26)
488         XVCPSGNDP(27,27,27)
489         XVCPSGNDP(28,28,28)
490         XVCPSGNDP(29,29,29)
491         XVCPSGNDP(30,30,30)
492         XVCPSGNDP(31,31,31)
493 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
494         mtspr   SPRN_HSRR0,r11
495         mtcrf   0x80,r9
496         ld      r9,PACA_EXGEN+EX_R9(r13)
497         ld      r10,PACA_EXGEN+EX_R10(r13)
498         ld      r11,PACA_EXGEN+EX_R11(r13)
499         ld      r12,PACA_EXGEN+EX_R12(r13)
500         ld      r13,PACA_EXGEN+EX_R13(r13)
501         HRFID
502         b       .
503 #endif
504
505         .align  7
506         /* moved from 0xe00 */
507         STD_EXCEPTION_HV(., 0xe02, h_data_storage)
508         KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
509         STD_EXCEPTION_HV(., 0xe22, h_instr_storage)
510         KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
511         STD_EXCEPTION_HV(., 0xe42, emulation_assist)
512         KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
513         STD_EXCEPTION_HV(., 0xe62, hmi_exception) /* need to flush cache ? */
514         KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)
515
516         /* moved from 0xf00 */
517         STD_EXCEPTION_PSERIES(., 0xf00, performance_monitor)
518         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
519         STD_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable)
520         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
521         STD_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable)
522         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
523
524 /*
525  * An interrupt came in while soft-disabled. We set paca->irq_happened,
526  * then, if it was a decrementer interrupt, we bump the dec to max
527  * and return, else we hard disable and return. This is called with
528  * r10 containing the value to OR to the paca field.
529  */
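/*
 * The prolog soft-mask tests are presumably what branch here, with r10
 * holding one of the PACA_IRQ_* flags (the PACA_IRQ_DEC test below
 * relies on that).  In the non-decrementer case, the rldicl/rotldi
 * pair rotates MSR_EE (bit 48) up to the MSB, masks it off, then
 * completes the 64-bit rotation, so we return with EE still clear.
 */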
530 #define MASKED_INTERRUPT(_H)                            \
531 masked_##_H##interrupt:                                 \
532         std     r11,PACA_EXGEN+EX_R11(r13);             \
533         lbz     r11,PACAIRQHAPPENED(r13);               \
534         or      r11,r11,r10;                            \
535         stb     r11,PACAIRQHAPPENED(r13);               \
536         andi.   r10,r10,PACA_IRQ_DEC;                   \
537         beq     1f;                                     \
538         lis     r10,0x7fff;                             \
539         ori     r10,r10,0xffff;                         \
540         mtspr   SPRN_DEC,r10;                           \
541         b       2f;                                     \
542 1:      mfspr   r10,SPRN_##_H##SRR1;                    \
543         rldicl  r10,r10,48,1; /* clear MSR_EE */        \
544         rotldi  r10,r10,16;                             \
545         mtspr   SPRN_##_H##SRR1,r10;                    \
546 2:      mtcrf   0x80,r9;                                \
547         ld      r9,PACA_EXGEN+EX_R9(r13);               \
548         ld      r10,PACA_EXGEN+EX_R10(r13);             \
549         ld      r11,PACA_EXGEN+EX_R11(r13);             \
550         GET_SCRATCH0(r13);                              \
551         ##_H##rfid;                                     \
552         b       .
553         
554         MASKED_INTERRUPT()
555         MASKED_INTERRUPT(H)
556
557 /*
558  * Called from arch_local_irq_enable when an interrupt needs
559  * to be resent. r3 contains 0x500 or 0x900 to indicate which
560  * kind of interrupt. MSR:EE is already off. We generate a
561  * stackframe like if a real interrupt had happened.
562  *
563  * Note: While MSR:EE is off, we need to make sure that _MSR
564  * in the generated frame has EE set to 1 or the exception
565  * handler will not properly re-enable them.
566  */
567 _GLOBAL(__replay_interrupt)
568         /* We are going to jump to the exception common code which
569          * will retrieve various register values from the PACA which
570          * we don't give a damn about, so we don't bother storing them.
571          */
572         mfmsr   r12
573         mflr    r11
574         mfcr    r9
575         ori     r12,r12,MSR_EE
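        /*
         * Of the two values r3 may hold, only 0x900 has the 0x800 bit
         * set, so this single test selects the decrementer path.
         */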
576         andi.   r3,r3,0x0800
577         bne     decrementer_common
578         b       hardware_interrupt_common
579
580 #ifdef CONFIG_PPC_PSERIES
581 /*
582  * Vectors for the FWNMI option.  Share common code.
583  */
584         .globl system_reset_fwnmi
585         .align  7
586 system_reset_fwnmi:
587         HMT_MEDIUM
588         SET_SCRATCH0(r13)               /* save r13 */
589         EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
590                                  NOTEST, 0x100)
591
592 #endif /* CONFIG_PPC_PSERIES */
593
594 #ifdef __DISABLED__
595 /*
596  * This is used when the SLB miss handler has to go virtual,
597  * which doesn't happen at the moment but will once we re-implement
598  * dynamic VSIDs for shared page tables
599  */
600 slb_miss_user_pseries:
601         std     r10,PACA_EXGEN+EX_R10(r13)
602         std     r11,PACA_EXGEN+EX_R11(r13)
603         std     r12,PACA_EXGEN+EX_R12(r13)
604         GET_SCRATCH0(r10)
605         ld      r11,PACA_EXSLB+EX_R9(r13)
606         ld      r12,PACA_EXSLB+EX_R3(r13)
607         std     r10,PACA_EXGEN+EX_R13(r13)
608         std     r11,PACA_EXGEN+EX_R9(r13)
609         std     r12,PACA_EXGEN+EX_R3(r13)
610         clrrdi  r12,r13,32
611         mfmsr   r10
612         mfspr   r11,SRR0                        /* save SRR0 */
613         ori     r12,r12,slb_miss_user_common@l  /* virt addr of handler */
614         ori     r10,r10,MSR_IR|MSR_DR|MSR_RI
615         mtspr   SRR0,r12
616         mfspr   r12,SRR1                        /* and SRR1 */
617         mtspr   SRR1,r10
618         rfid
619         b       .                               /* prevent spec. execution */
620 #endif /* __DISABLED__ */
621
622         .align  7
623         .globl  __end_interrupts
624 __end_interrupts:
625
626 /*
627  * Code from here down to __end_handlers is invoked from the
628  * exception prologs above.  Because the prologs assemble the
629  * addresses of these handlers using the LOAD_HANDLER macro,
630  * which uses an ori instruction, these handlers must be in
631  * the first 64k of the kernel image.
632  */
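/*
 * The pattern in question, roughly (LOAD_HANDLER itself is defined in
 * asm/exception-64s.h; see e.g. SYSCALL_PSERIES_2_RFID above):
 *
 *      ld      r10,PACAKBASE(r13)      kernel base from the PACA
 *      LOAD_HANDLER(r10, label)        ori in the low 16 bits of label
 *      mtspr   SPRN_SRR0,r10
 *      ...
 *      rfid
 *
 * Since ori supplies only 16 bits, 'label' must sit within 64k of the
 * kernel base.
 */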
633
634 /*** Common interrupt handlers ***/
635
636         STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
637
638         /*
639          * Machine check is different because we use a different
640          * save area: PACA_EXMC instead of PACA_EXGEN.
641          */
642         .align  7
643         .globl machine_check_common
644 machine_check_common:
645         EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
646         FINISH_NAP
647         DISABLE_INTS
648         bl      .save_nvgprs
649         addi    r3,r1,STACK_FRAME_OVERHEAD
650         bl      .machine_check_exception
651         b       .ret_from_except
652
653         STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
654         STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
655         STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt)
656         STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
657         STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
658         STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
659         STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
660         STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception)
661         STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
662         STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception)
663         STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
664         STD_EXCEPTION_COMMON(0x1502, denorm, .unknown_exception)
665 #ifdef CONFIG_ALTIVEC
666         STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
667 #else
668         STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
669 #endif
670 #ifdef CONFIG_CBE_RAS
671         STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
672         STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
673         STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
674 #endif /* CONFIG_CBE_RAS */
675
676         .align  7
677 system_call_entry:
678         b       system_call_common
679
680 ppc64_runlatch_on_trampoline:
681         b       .__ppc64_runlatch_on
682
683 /*
684  * Here we have detected that the kernel stack pointer is bad.
685  * r9 contains the saved CR, r13 points to the paca,
686  * r10 contains the (bad) kernel stack pointer,
687  * r11 and r12 contain the saved SRR0 and SRR1.
688  * We switch to using an emergency stack, save the registers there,
689  * and call kernel_bad_stack(), which panics.
690  */
691 bad_stack:
692         ld      r1,PACAEMERGSP(r13)
693         subi    r1,r1,64+INT_FRAME_SIZE
694         std     r9,_CCR(r1)
695         std     r10,GPR1(r1)
696         std     r11,_NIP(r1)
697         std     r12,_MSR(r1)
698         mfspr   r11,SPRN_DAR
699         mfspr   r12,SPRN_DSISR
700         std     r11,_DAR(r1)
701         std     r12,_DSISR(r1)
702         mflr    r10
703         mfctr   r11
704         mfxer   r12
705         std     r10,_LINK(r1)
706         std     r11,_CTR(r1)
707         std     r12,_XER(r1)
708         SAVE_GPR(0,r1)
709         SAVE_GPR(2,r1)
710         ld      r10,EX_R3(r3)
711         std     r10,GPR3(r1)
712         SAVE_GPR(4,r1)
713         SAVE_4GPRS(5,r1)
714         ld      r9,EX_R9(r3)
715         ld      r10,EX_R10(r3)
716         SAVE_2GPRS(9,r1)
717         ld      r9,EX_R11(r3)
718         ld      r10,EX_R12(r3)
719         ld      r11,EX_R13(r3)
720         std     r9,GPR11(r1)
721         std     r10,GPR12(r1)
722         std     r11,GPR13(r1)
723 BEGIN_FTR_SECTION
724         ld      r10,EX_CFAR(r3)
725         std     r10,ORIG_GPR3(r1)
726 END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
727         SAVE_8GPRS(14,r1)
728         SAVE_10GPRS(22,r1)
729         lhz     r12,PACA_TRAP_SAVE(r13)
730         std     r12,_TRAP(r1)
731         addi    r11,r1,INT_FRAME_SIZE
732         std     r11,0(r1)
733         li      r12,0
734         std     r12,0(r11)
735         ld      r2,PACATOC(r13)
736         ld      r11,exception_marker@toc(r2)
737         std     r12,RESULT(r1)
738         std     r11,STACK_FRAME_OVERHEAD-16(r1)
739 1:      addi    r3,r1,STACK_FRAME_OVERHEAD
740         bl      .kernel_bad_stack
741         b       1b
742
743 /*
744  * Here r13 points to the paca, r9 contains the saved CR,
745  * SRR0 and SRR1 are saved in r11 and r12,
746  * r9 - r13 are saved in paca->exgen.
747  */
748         .align  7
749         .globl data_access_common
750 data_access_common:
751         mfspr   r10,SPRN_DAR
752         std     r10,PACA_EXGEN+EX_DAR(r13)
753         mfspr   r10,SPRN_DSISR
754         stw     r10,PACA_EXGEN+EX_DSISR(r13)
755         EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
756         DISABLE_INTS
757         ld      r12,_MSR(r1)
758         ld      r3,PACA_EXGEN+EX_DAR(r13)
759         lwz     r4,PACA_EXGEN+EX_DSISR(r13)
760         li      r5,0x300
761         b       .do_hash_page           /* Try to handle as hpte fault */
762
763         .align  7
764         .globl  h_data_storage_common
765 h_data_storage_common:
766         mfspr   r10,SPRN_HDAR
767         std     r10,PACA_EXGEN+EX_DAR(r13)
768         mfspr   r10,SPRN_HDSISR
769         stw     r10,PACA_EXGEN+EX_DSISR(r13)
770         EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
771         bl      .save_nvgprs
772         DISABLE_INTS
773         addi    r3,r1,STACK_FRAME_OVERHEAD
774         bl      .unknown_exception
775         b       .ret_from_except
776
777         .align  7
778         .globl instruction_access_common
779 instruction_access_common:
780         EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
781         DISABLE_INTS
782         ld      r12,_MSR(r1)
783         ld      r3,_NIP(r1)
784         andis.  r4,r12,0x5820
785         li      r5,0x400
786         b       .do_hash_page           /* Try to handle as hpte fault */
787
788         STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception)
789
790 /*
791  * This is the common SLB miss handler used when going to virtual
792  * mode for SLB misses; it is currently unused
793  */
794 #ifdef __DISABLED__
795         .align  7
796         .globl  slb_miss_user_common
797 slb_miss_user_common:
798         mflr    r10
799         std     r3,PACA_EXGEN+EX_DAR(r13)
800         stw     r9,PACA_EXGEN+EX_CCR(r13)
801         std     r10,PACA_EXGEN+EX_LR(r13)
802         std     r11,PACA_EXGEN+EX_SRR0(r13)
803         bl      .slb_allocate_user
804
805         ld      r10,PACA_EXGEN+EX_LR(r13)
806         ld      r3,PACA_EXGEN+EX_R3(r13)
807         lwz     r9,PACA_EXGEN+EX_CCR(r13)
808         ld      r11,PACA_EXGEN+EX_SRR0(r13)
809         mtlr    r10
810         beq-    slb_miss_fault
811
812         andi.   r10,r12,MSR_RI          /* check for unrecoverable exception */
813         beq-    unrecov_user_slb
814         mfmsr   r10
815
816 .machine push
817 .machine "power4"
818         mtcrf   0x80,r9
819 .machine pop
820
821         clrrdi  r10,r10,2               /* clear RI before setting SRR0/1 */
822         mtmsrd  r10,1
823
824         mtspr   SRR0,r11
825         mtspr   SRR1,r12
826
827         ld      r9,PACA_EXGEN+EX_R9(r13)
828         ld      r10,PACA_EXGEN+EX_R10(r13)
829         ld      r11,PACA_EXGEN+EX_R11(r13)
830         ld      r12,PACA_EXGEN+EX_R12(r13)
831         ld      r13,PACA_EXGEN+EX_R13(r13)
832         rfid
833         b       .
834
835 slb_miss_fault:
836         EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
837         ld      r4,PACA_EXGEN+EX_DAR(r13)
838         li      r5,0
839         std     r4,_DAR(r1)
840         std     r5,_DSISR(r1)
841         b       handle_page_fault
842
843 unrecov_user_slb:
844         EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
845         DISABLE_INTS
846         bl      .save_nvgprs
847 1:      addi    r3,r1,STACK_FRAME_OVERHEAD
848         bl      .unrecoverable_exception
849         b       1b
850
851 #endif /* __DISABLED__ */
852
853
854 /*
855  * r13 points to the PACA, r9 contains the saved CR,
856  * r12 contains the saved SRR1, SRR0 is still ready for return,
857  * r3 has the faulting address
858  * r9 - r13 are saved in paca->exslb.
859  * r3 is saved in paca->slb_r3
860  * We assume we aren't going to take any exceptions during this procedure.
861  */
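/*
 * Note: with CONFIG_RELOCATABLE the 0x380/0x480 prologs arrive here
 * via mtctr/bctr and leave the original CTR value in r11, which is
 * why the code below restores CTR from r11 in that configuration.
 */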
862 _GLOBAL(slb_miss_realmode)
863         mflr    r10
864 #ifdef CONFIG_RELOCATABLE
865         mtctr   r11
866 #endif
867
868         stw     r9,PACA_EXSLB+EX_CCR(r13)       /* save CR in exc. frame */
869         std     r10,PACA_EXSLB+EX_LR(r13)       /* save LR */
870
871         bl      .slb_allocate_realmode
872
873         /* All done -- return from exception. */
874
875         ld      r10,PACA_EXSLB+EX_LR(r13)
876         ld      r3,PACA_EXSLB+EX_R3(r13)
877         lwz     r9,PACA_EXSLB+EX_CCR(r13)       /* get saved CR */
878
879         mtlr    r10
880
881         andi.   r10,r12,MSR_RI  /* check for unrecoverable exception */
882         beq-    2f
883
884 .machine        push
885 .machine        "power4"
886         mtcrf   0x80,r9
887         mtcrf   0x01,r9         /* slb_allocate uses cr0 and cr7 */
888 .machine        pop
889
890         ld      r9,PACA_EXSLB+EX_R9(r13)
891         ld      r10,PACA_EXSLB+EX_R10(r13)
892         ld      r11,PACA_EXSLB+EX_R11(r13)
893         ld      r12,PACA_EXSLB+EX_R12(r13)
894         ld      r13,PACA_EXSLB+EX_R13(r13)
895         rfid
896         b       .       /* prevent speculative execution */
897
898 2:      mfspr   r11,SPRN_SRR0
899         ld      r10,PACAKBASE(r13)
900         LOAD_HANDLER(r10,unrecov_slb)
901         mtspr   SPRN_SRR0,r10
902         ld      r10,PACAKMSR(r13)
903         mtspr   SPRN_SRR1,r10
904         rfid
905         b       .
906
907 unrecov_slb:
908         EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
909         DISABLE_INTS
910         bl      .save_nvgprs
911 1:      addi    r3,r1,STACK_FRAME_OVERHEAD
912         bl      .unrecoverable_exception
913         b       1b
914
915
916 #ifdef CONFIG_PPC_970_NAP
917 power4_fixup_nap:
918         andc    r9,r9,r10
919         std     r9,TI_LOCAL_FLAGS(r11)
920         ld      r10,_LINK(r1)           /* make idle task do the */
921         std     r10,_NIP(r1)            /* equivalent of a blr */
922         blr
923 #endif
924
925         .align  7
926         .globl alignment_common
927 alignment_common:
928         mfspr   r10,SPRN_DAR
929         std     r10,PACA_EXGEN+EX_DAR(r13)
930         mfspr   r10,SPRN_DSISR
931         stw     r10,PACA_EXGEN+EX_DSISR(r13)
932         EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
933         ld      r3,PACA_EXGEN+EX_DAR(r13)
934         lwz     r4,PACA_EXGEN+EX_DSISR(r13)
935         std     r3,_DAR(r1)
936         std     r4,_DSISR(r1)
937         bl      .save_nvgprs
938         DISABLE_INTS
939         addi    r3,r1,STACK_FRAME_OVERHEAD
940         bl      .alignment_exception
941         b       .ret_from_except
942
943         .align  7
944         .globl program_check_common
945 program_check_common:
946         EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
947         bl      .save_nvgprs
948         DISABLE_INTS
949         addi    r3,r1,STACK_FRAME_OVERHEAD
950         bl      .program_check_exception
951         b       .ret_from_except
952
953         .align  7
954         .globl fp_unavailable_common
955 fp_unavailable_common:
956         EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
957         bne     1f                      /* if from user, just load it up */
958         bl      .save_nvgprs
959         DISABLE_INTS
960         addi    r3,r1,STACK_FRAME_OVERHEAD
961         bl      .kernel_fp_unavailable_exception
962         BUG_OPCODE
963 1:      bl      .load_up_fpu
964         b       fast_exception_return
965
966         .align  7
967         .globl altivec_unavailable_common
968 altivec_unavailable_common:
969         EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
970 #ifdef CONFIG_ALTIVEC
971 BEGIN_FTR_SECTION
972         beq     1f
973         bl      .load_up_altivec
974         b       fast_exception_return
975 1:
976 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
977 #endif
978         bl      .save_nvgprs
979         DISABLE_INTS
980         addi    r3,r1,STACK_FRAME_OVERHEAD
981         bl      .altivec_unavailable_exception
982         b       .ret_from_except
983
984         .align  7
985         .globl vsx_unavailable_common
986 vsx_unavailable_common:
987         EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
988 #ifdef CONFIG_VSX
989 BEGIN_FTR_SECTION
990         beq     1f
991         b       .load_up_vsx
992 1:
993 END_FTR_SECTION_IFSET(CPU_FTR_VSX)
994 #endif
995         bl      .save_nvgprs
996         DISABLE_INTS
997         addi    r3,r1,STACK_FRAME_OVERHEAD
998         bl      .vsx_unavailable_exception
999         b       .ret_from_except
1000
1001         .align  7
1002         .globl  __end_handlers
1003 __end_handlers:
1004
1005 /*
1006  * Hash table stuff
1007  */
1008         .align  7
1009 _STATIC(do_hash_page)
1010         std     r3,_DAR(r1)
1011         std     r4,_DSISR(r1)
1012
1013         andis.  r0,r4,0xa410            /* weird error? */
1014         bne-    handle_page_fault       /* if not, try to insert a HPTE */
1015         andis.  r0,r4,DSISR_DABRMATCH@h
1016         bne-    handle_dabr_fault
1017
1018 BEGIN_FTR_SECTION
1019         andis.  r0,r4,0x0020            /* Is it a segment table fault? */
1020         bne-    do_ste_alloc            /* If so handle it */
1021 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
1022
1023         CURRENT_THREAD_INFO(r11, r1)
1024         lwz     r0,TI_PREEMPT(r11)      /* If we're in an "NMI" */
1025         andis.  r0,r0,NMI_MASK@h        /* (i.e. an irq when soft-disabled) */
1026         bne     77f                     /* then don't call hash_page now */
1027         /*
1028          * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
1029          * accessing a userspace segment (even from the kernel). We assume
1030          * kernel addresses always have the high bit set.
1031          */
1032         rlwinm  r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
1033         rotldi  r0,r3,15                /* Move high bit into MSR_PR posn */
1034         orc     r0,r12,r0               /* MSR_PR | ~high_bit */
1035         rlwimi  r4,r0,32-13,30,30       /* becomes _PAGE_USER access bit */
1036         ori     r4,r4,1                 /* add _PAGE_PRESENT */
1037         rlwimi  r4,r5,22+2,31-2,31-2    /* Set _PAGE_EXEC if trap is 0x400 */
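        /*
         * Net effect, per the per-line comments above: r4 = _PAGE_PRESENT,
         * plus _PAGE_RW for a store, _PAGE_USER if MSR_PR is set or the
         * address is a user address, and _PAGE_EXEC if the trap is 0x400.
         */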
1038
1039         /*
1040          * r3 contains the faulting address
1041          * r4 contains the required access permissions
1042          * r5 contains the trap number
1043          *
1044          * at return r3 = 0 for success, 1 for page fault, negative for error
1045          */
1046         bl      .hash_page              /* build HPTE if possible */
1047         cmpdi   r3,0                    /* see if hash_page succeeded */
1048
1049         /* Success */
1050         beq     fast_exc_return_irq     /* Return from exception on success */
1051
1052         /* Error */
1053         blt-    13f
1054
1055 /* Here we have a page fault that hash_page can't handle. */
1056 handle_page_fault:
1057 11:     ld      r4,_DAR(r1)
1058         ld      r5,_DSISR(r1)
1059         addi    r3,r1,STACK_FRAME_OVERHEAD
1060         bl      .do_page_fault
1061         cmpdi   r3,0
1062         beq+    12f
1063         bl      .save_nvgprs
1064         mr      r5,r3
1065         addi    r3,r1,STACK_FRAME_OVERHEAD
1066         lwz     r4,_DAR(r1)
1067         bl      .bad_page_fault
1068         b       .ret_from_except
1069
1070 /* We have a data breakpoint exception - handle it */
1071 handle_dabr_fault:
1072         bl      .save_nvgprs
1073         ld      r4,_DAR(r1)
1074         ld      r5,_DSISR(r1)
1075         addi    r3,r1,STACK_FRAME_OVERHEAD
1076         bl      .do_dabr
1077 12:     b       .ret_from_except_lite
1078
1079
1080 /* We have a page fault that hash_page could handle but HV refused
1081  * the PTE insertion
1082  */
1083 13:     bl      .save_nvgprs
1084         mr      r5,r3
1085         addi    r3,r1,STACK_FRAME_OVERHEAD
1086         ld      r4,_DAR(r1)
1087         bl      .low_hash_fault
1088         b       .ret_from_except
1089
1090 /*
1091  * We come here as a result of a DSI at a point where we don't want
1092  * to call hash_page, such as when we are accessing memory (possibly
1093  * user memory) inside a PMU interrupt that occurred while interrupts
1094  * were soft-disabled.  We want to invoke the exception handler for
1095  * the access, or panic if there isn't a handler.
1096  */
1097 77:     bl      .save_nvgprs
1098         mr      r4,r3
1099         addi    r3,r1,STACK_FRAME_OVERHEAD
1100         li      r5,SIGSEGV
1101         bl      .bad_page_fault
1102         b       .ret_from_except
1103
1104         /* here we have a segment miss */
1105 do_ste_alloc:
1106         bl      .ste_allocate           /* try to insert stab entry */
1107         cmpdi   r3,0
1108         bne-    handle_page_fault
1109         b       fast_exception_return
1110
1111 /*
1112  * r13 points to the PACA, r9 contains the saved CR,
1113  * r11 and r12 contain the saved SRR0 and SRR1.
1114  * r9 - r13 are saved in paca->exslb.
1115  * We assume we aren't going to take any exceptions during this procedure.
1116  * We assume (DAR >> 60) == 0xc.
1117  */
1118         .align  7
1119 _GLOBAL(do_stab_bolted)
1120         stw     r9,PACA_EXSLB+EX_CCR(r13)       /* save CR in exc. frame */
1121         std     r11,PACA_EXSLB+EX_SRR0(r13)     /* save SRR0 in exc. frame */
1122
1123         /* Hash to the primary group */
1124         ld      r10,PACASTABVIRT(r13)
1125         mfspr   r11,SPRN_DAR
1126         srdi    r11,r11,28
1127         rldimi  r10,r11,7,52    /* r10 = first ste of the group */
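        /* i.e. the low 5 bits of the ESID pick one of 32 groups of
         * eight 16-byte STEs, filling the 4k initial_stab.
         */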
1128
1129         /* Calculate VSID */
1130         /* This is a kernel address, so protovsid = ESID | 1 << 37 */
1131         li      r9,0x1
1132         rldimi  r11,r9,(CONTEXT_BITS + USER_ESID_BITS),0
1133         ASM_VSID_SCRAMBLE(r11, r9, 256M)
1134         rldic   r9,r11,12,16    /* r9 = vsid << 12 */
1135
1136         /* Search the primary group for a free entry */
1137 1:      ld      r11,0(r10)      /* Test valid bit of the current ste    */
1138         andi.   r11,r11,0x80
1139         beq     2f
1140         addi    r10,r10,16
1141         andi.   r11,r10,0x70
1142         bne     1b
1143
1144         /* Stick to searching only the primary group for now.           */
1145         /* At least for now, we use a very simple random castout scheme */
1146         /* Use the TB as a random number ;  OR in 1 to avoid entry 0    */
1147         mftb    r11
1148         rldic   r11,r11,4,57    /* r11 = (r11 << 4) & 0x70 */
1149         ori     r11,r11,0x10
1150
1151         /* r10 currently points to an ste one past the group of interest */
1152         /* make it point to the randomly selected entry                 */
1153         subi    r10,r10,128
1154         or      r10,r10,r11     /* r10 is the entry to invalidate       */
1155
1156         isync                   /* mark the entry invalid               */
1157         ld      r11,0(r10)
1158         rldicl  r11,r11,56,1    /* clear the valid bit */
1159         rotldi  r11,r11,8
1160         std     r11,0(r10)
1161         sync
1162
1163         clrrdi  r11,r11,28      /* Get the esid part of the ste         */
1164         slbie   r11
1165
1166 2:      std     r9,8(r10)       /* Store the vsid part of the ste       */
1167         eieio
1168
1169         mfspr   r11,SPRN_DAR            /* Get the new esid                     */
1170         clrrdi  r11,r11,28      /* Permits a full 32b of ESID           */
1171         ori     r11,r11,0x90    /* Turn on valid and kp                 */
1172         std     r11,0(r10)      /* Put new entry back into the stab     */
1173
1174         sync
1175
1176         /* All done -- return from exception. */
1177         lwz     r9,PACA_EXSLB+EX_CCR(r13)       /* get saved CR */
1178         ld      r11,PACA_EXSLB+EX_SRR0(r13)     /* get saved SRR0 */
1179
1180         andi.   r10,r12,MSR_RI
1181         beq-    unrecov_slb
1182
1183         mtcrf   0x80,r9                 /* restore CR */
1184
1185         mfmsr   r10
1186         clrrdi  r10,r10,2
1187         mtmsrd  r10,1
1188
1189         mtspr   SPRN_SRR0,r11
1190         mtspr   SPRN_SRR1,r12
1191         ld      r9,PACA_EXSLB+EX_R9(r13)
1192         ld      r10,PACA_EXSLB+EX_R10(r13)
1193         ld      r11,PACA_EXSLB+EX_R11(r13)
1194         ld      r12,PACA_EXSLB+EX_R12(r13)
1195         ld      r13,PACA_EXSLB+EX_R13(r13)
1196         rfid
1197         b       .       /* prevent speculative execution */
1198
1199 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
1200 /*
1201  * Data area reserved for FWNMI option.
1202  * This address (0x7000) is fixed by the RPA.
1203  */
1204         . = 0x7000
1205         .globl fwnmi_data_area
1206 fwnmi_data_area:
1207
1208         /* pseries and powernv need to keep the whole page from
1209          * 0x7000 to 0x8000 free for use by the firmware
1210          */
1211         . = 0x8000
1212 #endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
1213
1214 /* Space for CPU0's segment table */
1215         .balign 4096
1216         .globl initial_stab
1217 initial_stab:
1218         .space  4096
1219
1220 #ifdef CONFIG_PPC_POWERNV
1221 _GLOBAL(opal_mc_secondary_handler)
1222         HMT_MEDIUM
1223         SET_SCRATCH0(r13)
1224         GET_PACA(r13)
1225         clrldi  r3,r3,2
1226         tovirt(r3,r3)
1227         std     r3,PACA_OPAL_MC_EVT(r13)
1228         ld      r13,OPAL_MC_SRR0(r3)
1229         mtspr   SPRN_SRR0,r13
1230         ld      r13,OPAL_MC_SRR1(r3)
1231         mtspr   SPRN_SRR1,r13
1232         ld      r3,OPAL_MC_GPR3(r3)
1233         GET_SCRATCH0(r13)
1234         b       machine_check_pSeries
1235 #endif /* CONFIG_PPC_POWERNV */