powerpc: Rename and flesh out the facility unavailable exception handler
[firefly-linux-kernel-4.4.55.git] arch/powerpc/kernel/exceptions-64s.S
1 /*
2  * This file contains the 64-bit "server" PowerPC variant
3  * of the low level exception handling including exception
4  * vectors, exception return, part of the slb and stab
5  * handling and other fixed offset specific things.
6  *
7  * This file is meant to be #included from head_64.S due to
8  * position dependent assembly.
9  *
10  * Most of this originates from head_64.S and thus has the same
11  * copyright history.
12  *
13  */
14
15 #include <asm/hw_irq.h>
16 #include <asm/exception-64s.h>
17 #include <asm/ptrace.h>
18
19 /*
20  * We lay out physical memory as follows:
21  * 0x0000 - 0x00ff : Secondary processor spin code
22  * 0x0100 - 0x17ff : pSeries Interrupt prologs
23  * 0x1800 - 0x4000 : interrupt support common interrupt prologs
24  * 0x4000 - 0x5fff : pSeries interrupts with IR=1,DR=1
25  * 0x6000 - 0x6fff : more interrupt support including for IR=1,DR=1
26  * 0x7000 - 0x7fff : FWNMI data area
27  * 0x8000 - 0x8fff : Initial (CPU0) segment table
28  * 0x9000 -        : Early init and support code
29  */
30         /* Syscall routine is used twice, in reloc-off and reloc-on paths */
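        /*
         * For orientation (this just restates how the pieces below are used
         * later in this file): the real-mode 0xc00 vector stitches together
         * SYSCALL_PSERIES_1 + SYSCALL_PSERIES_2_RFID + SYSCALL_PSERIES_3,
         * while the relocation-on 0x4c00 vector uses SYSCALL_PSERIES_1 +
         * SYSCALL_PSERIES_2_DIRECT + SYSCALL_PSERIES_3.
         */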
31 #define SYSCALL_PSERIES_1                                       \
32 BEGIN_FTR_SECTION                                               \
33         cmpdi   r0,0x1ebe ;                                     \
34         beq-    1f ;                                            \
35 END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)                          \
36         mr      r9,r13 ;                                        \
37         GET_PACA(r13) ;                                         \
38         mfspr   r11,SPRN_SRR0 ;                                 \
39 0:
40
41 #define SYSCALL_PSERIES_2_RFID                                  \
42         mfspr   r12,SPRN_SRR1 ;                                 \
43         ld      r10,PACAKBASE(r13) ;                            \
44         LOAD_HANDLER(r10, system_call_entry) ;                  \
45         mtspr   SPRN_SRR0,r10 ;                                 \
46         ld      r10,PACAKMSR(r13) ;                             \
47         mtspr   SPRN_SRR1,r10 ;                                 \
48         rfid ;                                                  \
49         b       . ;     /* prevent speculative execution */
50
51 #define SYSCALL_PSERIES_3                                       \
52         /* Fast LE/BE switch system call */                     \
53 1:      mfspr   r12,SPRN_SRR1 ;                                 \
54         xori    r12,r12,MSR_LE ;                                \
55         mtspr   SPRN_SRR1,r12 ;                                 \
56         rfid ;          /* return to userspace */               \
57         b       . ;                                             \
58 2:      mfspr   r12,SPRN_SRR1 ;                                 \
59         andi.   r12,r12,MSR_PR ;                                \
60         bne     0b ;                                            \
61         mtspr   SPRN_SRR0,r3 ;                                  \
62         mtspr   SPRN_SRR1,r4 ;                                  \
63         mtspr   SPRN_SDR1,r5 ;                                  \
64         rfid ;                                                  \
65         b       . ;     /* prevent speculative execution */
66
67 #if defined(CONFIG_RELOCATABLE)
68         /*
69          * We can't branch directly; in the direct case we use LR
70          * and system_call_entry restores LR.  (We thus need to move
71          * LR to r10 in the RFID case too.)
72          */
73 #define SYSCALL_PSERIES_2_DIRECT                                \
74         mflr    r10 ;                                           \
75         ld      r12,PACAKBASE(r13) ;                            \
76         LOAD_HANDLER(r12, system_call_entry_direct) ;           \
77         mtctr   r12 ;                                           \
78         mfspr   r12,SPRN_SRR1 ;                                 \
79         /* Re-use of r13... No spare regs to do this */ \
80         li      r13,MSR_RI ;                                    \
81         mtmsrd  r13,1 ;                                         \
82         GET_PACA(r13) ; /* get r13 back */                      \
83         bctr ;
84 #else
85         /* We can branch directly */
86 #define SYSCALL_PSERIES_2_DIRECT                                \
87         mfspr   r12,SPRN_SRR1 ;                                 \
88         li      r10,MSR_RI ;                                    \
89         mtmsrd  r10,1 ;                 /* Set RI (EE=0) */     \
90         b       system_call_entry_direct ;
91 #endif
92
93 /*
94  * This is the start of the interrupt handlers for pSeries
95  * This code runs with relocation off.
96  * Code from here to __end_interrupts gets copied down to real
97  * address 0x100 when we are running a relocatable kernel.
98  * Therefore any relative branches in this section must only
99  * branch to labels in this section.
100  */
101         . = 0x100
102         .globl __start_interrupts
103 __start_interrupts:
104
105         .globl system_reset_pSeries;
106 system_reset_pSeries:
107         HMT_MEDIUM_PPR_DISCARD
108         SET_SCRATCH0(r13)
109 #ifdef CONFIG_PPC_P7_NAP
110 BEGIN_FTR_SECTION
111         /* Running native on arch 2.06 or later, check if we are
112          * waking up from nap. We only handle no state loss and
113          * supervisor state loss. We do -not- handle hypervisor
114          * state loss at this time.
115          */
116         mfspr   r13,SPRN_SRR1
117         rlwinm. r13,r13,47-31,30,31
118         beq     9f
119
120         /* waking up from powersave (nap) state */
121         cmpwi   cr1,r13,2
122         /* Total loss of HV state is fatal, we could try to use the
123          * PIR to locate a PACA, then use an emergency stack etc...
124          * but for now, let's just stay stuck here
125          */
126         bgt     cr1,.
127         GET_PACA(r13)
128
129 #ifdef CONFIG_KVM_BOOK3S_64_HV
130         li      r0,KVM_HWTHREAD_IN_KERNEL
131         stb     r0,HSTATE_HWTHREAD_STATE(r13)
132         /* Order setting hwthread_state vs. testing hwthread_req */
133         sync
134         lbz     r0,HSTATE_HWTHREAD_REQ(r13)
135         cmpwi   r0,0
136         beq     1f
137         b       kvm_start_guest
138 1:
139 #endif
140
141         beq     cr1,2f
142         b       .power7_wakeup_noloss
143 2:      b       .power7_wakeup_loss
144 9:
145 END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
146 #endif /* CONFIG_PPC_P7_NAP */
147         EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
148                                  NOTEST, 0x100)
149
150         . = 0x200
151 machine_check_pSeries_1:
152         /* This is moved out of line as it can be patched by FW, but
153          * some code path might still want to branch into the original
154          * vector
155          */
156         HMT_MEDIUM_PPR_DISCARD
157         SET_SCRATCH0(r13)               /* save r13 */
158         EXCEPTION_PROLOG_0(PACA_EXMC)
159         b       machine_check_pSeries_0
160
161         . = 0x300
162         .globl data_access_pSeries
163 data_access_pSeries:
164         HMT_MEDIUM_PPR_DISCARD
165         SET_SCRATCH0(r13)
166 BEGIN_FTR_SECTION
167         b       data_access_check_stab
168 data_access_not_stab:
169 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
170         EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
171                                  KVMTEST, 0x300)
172
173         . = 0x380
174         .globl data_access_slb_pSeries
175 data_access_slb_pSeries:
176         HMT_MEDIUM_PPR_DISCARD
177         SET_SCRATCH0(r13)
178         EXCEPTION_PROLOG_0(PACA_EXSLB)
179         EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
180         std     r3,PACA_EXSLB+EX_R3(r13)
181         mfspr   r3,SPRN_DAR
182 #ifdef __DISABLED__
183         /* Keep that around for when we re-implement dynamic VSIDs */
184         cmpdi   r3,0
185         bge     slb_miss_user_pseries
186 #endif /* __DISABLED__ */
187         mfspr   r12,SPRN_SRR1
188 #ifndef CONFIG_RELOCATABLE
189         b       .slb_miss_realmode
190 #else
191         /*
192          * We can't just use a direct branch to .slb_miss_realmode
193          * because the distance from here to there depends on where
194          * the kernel ends up being put.
195          */
196         mfctr   r11
197         ld      r10,PACAKBASE(r13)
198         LOAD_HANDLER(r10, .slb_miss_realmode)
199         mtctr   r10
200         bctr
201 #endif
202
203         STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access)
204
205         . = 0x480
206         .globl instruction_access_slb_pSeries
207 instruction_access_slb_pSeries:
208         HMT_MEDIUM_PPR_DISCARD
209         SET_SCRATCH0(r13)
210         EXCEPTION_PROLOG_0(PACA_EXSLB)
211         EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
212         std     r3,PACA_EXSLB+EX_R3(r13)
213         mfspr   r3,SPRN_SRR0            /* SRR0 is faulting address */
214 #ifdef __DISABLED__
215         /* Keep that around for when we re-implement dynamic VSIDs */
216         cmpdi   r3,0
217         bge     slb_miss_user_pseries
218 #endif /* __DISABLED__ */
219         mfspr   r12,SPRN_SRR1
220 #ifndef CONFIG_RELOCATABLE
221         b       .slb_miss_realmode
222 #else
223         mfctr   r11
224         ld      r10,PACAKBASE(r13)
225         LOAD_HANDLER(r10, .slb_miss_realmode)
226         mtctr   r10
227         bctr
228 #endif
229
230         /* We open code these as we can't have a ". = x" (even with
231          * x = ".") within a feature section
232          */
233         . = 0x500;
234         .globl hardware_interrupt_pSeries;
235         .globl hardware_interrupt_hv;
236 hardware_interrupt_pSeries:
237 hardware_interrupt_hv:
238         HMT_MEDIUM_PPR_DISCARD
239         BEGIN_FTR_SECTION
240                 _MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
241                                             EXC_HV, SOFTEN_TEST_HV)
242                 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
243         FTR_SECTION_ELSE
244                 _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
245                                             EXC_STD, SOFTEN_TEST_HV_201)
246                 KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
247         ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
248
249         STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
250         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)
251
252         STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
253         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)
254
255         STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
256         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
257
258         . = 0x900
259         .globl decrementer_pSeries
260 decrementer_pSeries:
261         _MASKABLE_EXCEPTION_PSERIES(0x900, decrementer, EXC_STD, SOFTEN_TEST_PR)
262
263         STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)
264
265         MASKABLE_EXCEPTION_PSERIES(0xa00, 0xa00, doorbell_super)
266         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)
267
268         STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
269         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)
270
271         . = 0xc00
272         .globl  system_call_pSeries
273 system_call_pSeries:
274         HMT_MEDIUM
275 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
276         SET_SCRATCH0(r13)
277         GET_PACA(r13)
278         std     r9,PACA_EXGEN+EX_R9(r13)
279         std     r10,PACA_EXGEN+EX_R10(r13)
280         mfcr    r9
281         KVMTEST(0xc00)
282         GET_SCRATCH0(r13)
283 #endif
284         SYSCALL_PSERIES_1
285         SYSCALL_PSERIES_2_RFID
286         SYSCALL_PSERIES_3
287         KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
288
289         STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
290         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)
291
292         /* At 0xe??? we have a bunch of hypervisor exceptions; we branch
293          * out of line to handle them
294          */
295         . = 0xe00
296 hv_exception_trampoline:
297         SET_SCRATCH0(r13)
298         EXCEPTION_PROLOG_0(PACA_EXGEN)
299         b       h_data_storage_hv
300
301         . = 0xe20
302         SET_SCRATCH0(r13)
303         EXCEPTION_PROLOG_0(PACA_EXGEN)
304         b       h_instr_storage_hv
305
306         . = 0xe40
307         SET_SCRATCH0(r13)
308         EXCEPTION_PROLOG_0(PACA_EXGEN)
309         b       emulation_assist_hv
310
311         . = 0xe60
312         SET_SCRATCH0(r13)
313         EXCEPTION_PROLOG_0(PACA_EXGEN)
314         b       hmi_exception_hv
315
316         . = 0xe80
317         SET_SCRATCH0(r13)
318         EXCEPTION_PROLOG_0(PACA_EXGEN)
319         b       h_doorbell_hv
320
321         /* We need to deal with the Altivec unavailable exception
322          * here, at 0xf20, which lands in the middle of the prolog
323          * code of the PerformanceMonitor one, so a little
324          * trickery is necessary
325          */
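        /* Each 0xf?? slot below therefore holds only the minimal prolog
         * (SET_SCRATCH0 + EXCEPTION_PROLOG_0) plus a branch to the
         * out-of-line body further down ("moved from 0xf00").
         */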
326 performance_monitor_pSeries_1:
327         . = 0xf00
328         SET_SCRATCH0(r13)
329         EXCEPTION_PROLOG_0(PACA_EXGEN)
330         b       performance_monitor_pSeries
331
332 altivec_unavailable_pSeries_1:
333         . = 0xf20
334         SET_SCRATCH0(r13)
335         EXCEPTION_PROLOG_0(PACA_EXGEN)
336         b       altivec_unavailable_pSeries
337
338 vsx_unavailable_pSeries_1:
339         . = 0xf40
340         SET_SCRATCH0(r13)
341         EXCEPTION_PROLOG_0(PACA_EXGEN)
342         b       vsx_unavailable_pSeries
343
344 facility_unavailable_trampoline:
345         . = 0xf60
346         SET_SCRATCH0(r13)
347         EXCEPTION_PROLOG_0(PACA_EXGEN)
348         b       facility_unavailable_pSeries
349
350 #ifdef CONFIG_CBE_RAS
351         STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
352         KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
353 #endif /* CONFIG_CBE_RAS */
354
355         STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
356         KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
357
358         . = 0x1500
359         .global denorm_exception_hv
360 denorm_exception_hv:
361         HMT_MEDIUM_PPR_DISCARD
362         mtspr   SPRN_SPRG_HSCRATCH0,r13
363         EXCEPTION_PROLOG_0(PACA_EXGEN)
364         std     r11,PACA_EXGEN+EX_R11(r13)
365         std     r12,PACA_EXGEN+EX_R12(r13)
366         mfspr   r9,SPRN_SPRG_HSCRATCH0
367         std     r9,PACA_EXGEN+EX_R13(r13)
368         mfcr    r9
369
370 #ifdef CONFIG_PPC_DENORMALISATION
371         mfspr   r10,SPRN_HSRR1
372         mfspr   r11,SPRN_HSRR0          /* save HSRR0 */
373         andis.  r10,r10,(HSRR1_DENORM)@h /* denorm? */
374         addi    r11,r11,-4              /* HSRR0 is next instruction */
375         bne+    denorm_assist
376 #endif
377
378         EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
379         KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1500)
380
381 #ifdef CONFIG_CBE_RAS
382         STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
383         KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
384 #endif /* CONFIG_CBE_RAS */
385
386         STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
387         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)
388
389 #ifdef CONFIG_CBE_RAS
390         STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
391         KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
392 #else
393         . = 0x1800
394 #endif /* CONFIG_CBE_RAS */
395
396
397 /*** Out of line interrupts support ***/
398
399         .align  7
400         /* moved from 0x200 */
401 machine_check_pSeries:
402         .globl machine_check_fwnmi
403 machine_check_fwnmi:
404         HMT_MEDIUM_PPR_DISCARD
405         SET_SCRATCH0(r13)               /* save r13 */
406         EXCEPTION_PROLOG_0(PACA_EXMC)
407 machine_check_pSeries_0:
408         EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST, 0x200)
409         EXCEPTION_PROLOG_PSERIES_1(machine_check_common, EXC_STD)
410         KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
411
412         /* moved from 0x300 */
413 data_access_check_stab:
414         GET_PACA(r13)
415         std     r9,PACA_EXSLB+EX_R9(r13)
416         std     r10,PACA_EXSLB+EX_R10(r13)
417         mfspr   r10,SPRN_DAR
418         mfspr   r9,SPRN_DSISR
419         srdi    r10,r10,60
420         rlwimi  r10,r9,16,0x20
421 #ifdef CONFIG_KVM_BOOK3S_PR
422         lbz     r9,HSTATE_IN_GUEST(r13)
423         rlwimi  r10,r9,8,0x300
424 #endif
425         mfcr    r9
426         cmpwi   r10,0x2c
427         beq     do_stab_bolted_pSeries
428         mtcrf   0x80,r9
429         ld      r9,PACA_EXSLB+EX_R9(r13)
430         ld      r10,PACA_EXSLB+EX_R10(r13)
431         b       data_access_not_stab
432 do_stab_bolted_pSeries:
433         std     r11,PACA_EXSLB+EX_R11(r13)
434         std     r12,PACA_EXSLB+EX_R12(r13)
435         GET_SCRATCH0(r10)
436         std     r10,PACA_EXSLB+EX_R13(r13)
437         EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
438
439         KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
440         KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
441         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
442         KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
443         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
444         KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)
445
446 #ifdef CONFIG_PPC_DENORMALISATION
447 denorm_assist:
448 BEGIN_FTR_SECTION
449 /*
450  * To denormalise we need to move a copy of the register to itself.
451  * For POWER6 do that here for all FP regs.
452  */
453         mfmsr   r10
454         ori     r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
455         xori    r10,r10,(MSR_FE0|MSR_FE1)
456         mtmsrd  r10
457         sync
458
459 #define FMR2(n)  fmr (n), (n) ; fmr n+1, n+1
460 #define FMR4(n)  FMR2(n) ; FMR2(n+2)
461 #define FMR8(n)  FMR4(n) ; FMR4(n+4)
462 #define FMR16(n) FMR8(n) ; FMR8(n+8)
463 #define FMR32(n) FMR16(n) ; FMR16(n+16)
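/*
 * Illustration only: after cpp expansion and the assembler evaluating the
 * n+1 expressions, FMR2(0) is effectively "fmr 0,0 ; fmr 1,1", so FMR32(0)
 * below moves every FP register onto itself, which is what triggers the
 * denormalisation fixup described above.
 */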
464         FMR32(0)
465
466 FTR_SECTION_ELSE
467 /*
468  * To denormalise we need to move a copy of the register to itself.
469  * For POWER7 do that here for the first 32 VSX registers only.
470  */
471         mfmsr   r10
472         oris    r10,r10,MSR_VSX@h
473         mtmsrd  r10
474         sync
475
476 #define XVCPSGNDP2(n) XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1)
477 #define XVCPSGNDP4(n) XVCPSGNDP2(n) ; XVCPSGNDP2(n+2)
478 #define XVCPSGNDP8(n) XVCPSGNDP4(n) ; XVCPSGNDP4(n+4)
479 #define XVCPSGNDP16(n) XVCPSGNDP8(n) ; XVCPSGNDP8(n+8)
480 #define XVCPSGNDP32(n) XVCPSGNDP16(n) ; XVCPSGNDP16(n+16)
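/*
 * Illustration only: XVCPSGNDP(n,n,n) copies the sign of VSR n onto the
 * magnitude of VSR n, i.e. a register move onto itself, so XVCPSGNDP32(0)
 * below covers VSRs 0-31 in the same spirit as the FMR ladder above.
 */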
481         XVCPSGNDP32(0)
482
483 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
484
485 BEGIN_FTR_SECTION
486         b       denorm_done
487 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
488 /*
489  * To denormalise we need to move a copy of the register to itself.
490  * For POWER8 we need to do that for all 64 VSX registers
491  */
492         XVCPSGNDP32(32)
493 denorm_done:
494         mtspr   SPRN_HSRR0,r11
495         mtcrf   0x80,r9
496         ld      r9,PACA_EXGEN+EX_R9(r13)
497         RESTORE_PPR_PACA(PACA_EXGEN, r10)
498         ld      r10,PACA_EXGEN+EX_R10(r13)
499         ld      r11,PACA_EXGEN+EX_R11(r13)
500         ld      r12,PACA_EXGEN+EX_R12(r13)
501         ld      r13,PACA_EXGEN+EX_R13(r13)
502         HRFID
503         b       .
504 #endif
505
506         .align  7
507         /* moved from 0xe00 */
508         STD_EXCEPTION_HV_OOL(0xe02, h_data_storage)
509         KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
510         STD_EXCEPTION_HV_OOL(0xe22, h_instr_storage)
511         KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
512         STD_EXCEPTION_HV_OOL(0xe42, emulation_assist)
513         KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
514         STD_EXCEPTION_HV_OOL(0xe62, hmi_exception) /* need to flush cache ? */
515         KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)
516         MASKABLE_EXCEPTION_HV_OOL(0xe82, h_doorbell)
517         KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe82)
518
519         /* moved from 0xf00 */
520         STD_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
521         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
522         STD_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
523         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
524         STD_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
525         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
526         STD_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
527         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf60)
528
529 /*
530  * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
531  * - If it was a decrementer interrupt, we bump the dec to max and return.
532  * - If it was a doorbell we return immediately since doorbells are edge
533  *   triggered and won't automatically refire.
534  * - else we hard disable and return.
535  * This is called with r10 containing the value to OR to the paca field.
536  */
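/*
 * Rough C-like sketch of the macro below (illustration only):
 *	paca->irq_happened |= r10;
 *	if (r10 == PACA_IRQ_DEC)
 *		DEC = 0x7fffffff;		// push the decrementer far out
 *	else if (r10 != PACA_IRQ_DBELL)
 *		(H)SRR1 &= ~MSR_EE;		// hard disable on return
 *	restore r9-r11 and r13, then rfid/hrfid back to the interrupted code.
 */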
537 #define MASKED_INTERRUPT(_H)                            \
538 masked_##_H##interrupt:                                 \
539         std     r11,PACA_EXGEN+EX_R11(r13);             \
540         lbz     r11,PACAIRQHAPPENED(r13);               \
541         or      r11,r11,r10;                            \
542         stb     r11,PACAIRQHAPPENED(r13);               \
543         cmpwi   r10,PACA_IRQ_DEC;                       \
544         bne     1f;                                     \
545         lis     r10,0x7fff;                             \
546         ori     r10,r10,0xffff;                         \
547         mtspr   SPRN_DEC,r10;                           \
548         b       2f;                                     \
549 1:      cmpwi   r10,PACA_IRQ_DBELL;                     \
550         beq     2f;                                     \
551         mfspr   r10,SPRN_##_H##SRR1;                    \
552         rldicl  r10,r10,48,1; /* clear MSR_EE */        \
553         rotldi  r10,r10,16;                             \
554         mtspr   SPRN_##_H##SRR1,r10;                    \
555 2:      mtcrf   0x80,r9;                                \
556         ld      r9,PACA_EXGEN+EX_R9(r13);               \
557         ld      r10,PACA_EXGEN+EX_R10(r13);             \
558         ld      r11,PACA_EXGEN+EX_R11(r13);             \
559         GET_SCRATCH0(r13);                              \
560         ##_H##rfid;                                     \
561         b       .
562         
563         MASKED_INTERRUPT()
564         MASKED_INTERRUPT(H)
565
566 /*
567  * Called from arch_local_irq_enable when an interrupt needs
568  * to be resent. r3 contains 0x500, 0x900, 0xa00 or 0xe80 to indicate
569  * which kind of interrupt. MSR:EE is already off. We generate a
570  * stack frame as if a real interrupt had happened.
571  *
572  * Note: While MSR:EE is off, we need to make sure that _MSR
573  * in the generated frame has EE set to 1 or the exception
574  * handler will not properly re-enable interrupts.
575  */
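/*
 * Illustrative call path (assumption, based on the irq.c flow of this era):
 * arch_local_irq_restore() calls __check_irq_replay(), which returns one of
 * the vector numbers listed above (or 0), and that value is then passed
 * here in r3 via __replay_interrupt().
 */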
576 _GLOBAL(__replay_interrupt)
577         /* We are going to jump to the exception common code which
578          * will retrieve various register values from the PACA which
579          * we don't give a damn about, so we don't bother storing them.
580          */
581         mfmsr   r12
582         mflr    r11
583         mfcr    r9
584         ori     r12,r12,MSR_EE
585         cmpwi   r3,0x900
586         beq     decrementer_common
587         cmpwi   r3,0x500
588         beq     hardware_interrupt_common
589 BEGIN_FTR_SECTION
590         cmpwi   r3,0xe80
591         beq     h_doorbell_common
592 FTR_SECTION_ELSE
593         cmpwi   r3,0xa00
594         beq     doorbell_super_common
595 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
596         blr
597
598 #ifdef CONFIG_PPC_PSERIES
599 /*
600  * Vectors for the FWNMI option.  Share common code.
601  */
602         .globl system_reset_fwnmi
603         .align  7
604 system_reset_fwnmi:
605         HMT_MEDIUM_PPR_DISCARD
606         SET_SCRATCH0(r13)               /* save r13 */
607         EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
608                                  NOTEST, 0x100)
609
610 #endif /* CONFIG_PPC_PSERIES */
611
612 #ifdef __DISABLED__
613 /*
614  * This is used when the SLB miss handler has to go virtual,
615  * which currently never happens, but will once we re-implement
616  * dynamic VSIDs for shared page tables
617  */
618 slb_miss_user_pseries:
619         std     r10,PACA_EXGEN+EX_R10(r13)
620         std     r11,PACA_EXGEN+EX_R11(r13)
621         std     r12,PACA_EXGEN+EX_R12(r13)
622         GET_SCRATCH0(r10)
623         ld      r11,PACA_EXSLB+EX_R9(r13)
624         ld      r12,PACA_EXSLB+EX_R3(r13)
625         std     r10,PACA_EXGEN+EX_R13(r13)
626         std     r11,PACA_EXGEN+EX_R9(r13)
627         std     r12,PACA_EXGEN+EX_R3(r13)
628         clrrdi  r12,r13,32
629         mfmsr   r10
630         mfspr   r11,SRR0                        /* save SRR0 */
631         ori     r12,r12,slb_miss_user_common@l  /* virt addr of handler */
632         ori     r10,r10,MSR_IR|MSR_DR|MSR_RI
633         mtspr   SRR0,r12
634         mfspr   r12,SRR1                        /* and SRR1 */
635         mtspr   SRR1,r10
636         rfid
637         b       .                               /* prevent spec. execution */
638 #endif /* __DISABLED__ */
639
640 /*
641  * Code from here down to __end_handlers is invoked from the
642  * exception prologs above.  Because the prologs assemble the
643  * addresses of these handlers using the LOAD_HANDLER macro,
644  * which uses an ori instruction, these handlers must be in
645  * the first 64k of the kernel image.
646  */
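/*
 * For reference only (sketch; see asm/exception-64s.h for the authoritative
 * definition): LOAD_HANDLER(reg, label) roughly expands to
 *	ori	reg,reg,(label)-_stext
 * with reg pre-loaded from PACAKBASE, so the 16-bit ori immediate is what
 * imposes the 64k limit mentioned above.
 */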
647
648 /*** Common interrupt handlers ***/
649
650         STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
651
652         /*
653          * Machine check is different because we use a different
654          * save area: PACA_EXMC instead of PACA_EXGEN.
655          */
656         .align  7
657         .globl machine_check_common
658 machine_check_common:
659
660         mfspr   r10,SPRN_DAR
661         std     r10,PACA_EXGEN+EX_DAR(r13)
662         mfspr   r10,SPRN_DSISR
663         stw     r10,PACA_EXGEN+EX_DSISR(r13)
664         EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
665         FINISH_NAP
666         DISABLE_INTS
667         ld      r3,PACA_EXGEN+EX_DAR(r13)
668         lwz     r4,PACA_EXGEN+EX_DSISR(r13)
669         std     r3,_DAR(r1)
670         std     r4,_DSISR(r1)
671         bl      .save_nvgprs
672         addi    r3,r1,STACK_FRAME_OVERHEAD
673         bl      .machine_check_exception
674         b       .ret_from_except
675
676         STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
677         STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
678         STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt)
679 #ifdef CONFIG_PPC_DOORBELL
680         STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .doorbell_exception)
681 #else
682         STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .unknown_exception)
683 #endif
684         STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
685         STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
686         STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
687         STD_EXCEPTION_COMMON(0xe40, emulation_assist, .emulation_assist_interrupt)
688         STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
689 #ifdef CONFIG_PPC_DOORBELL
690         STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .doorbell_exception)
691 #else
692         STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .unknown_exception)
693 #endif
694         STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception)
695         STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
696         STD_EXCEPTION_COMMON(0x1502, denorm, .unknown_exception)
697 #ifdef CONFIG_ALTIVEC
698         STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
699 #else
700         STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
701 #endif
702 #ifdef CONFIG_CBE_RAS
703         STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
704         STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
705         STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
706 #endif /* CONFIG_CBE_RAS */
707
708         /*
709          * Relocation-on interrupts: A subset of the interrupts can be delivered
710          * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
711          * it.  Addresses are the same as the original interrupt addresses, but
712          * offset by 0xc000000000004000.
713          * It's impossible to receive interrupts below 0x300 via this mechanism.
714  * KVM: None of these traps are from the guest; anything that escalated
715          * to HV=1 from HV=0 is delivered via real mode handlers.
716          */
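        /*
         * Worked example of the offset above: the 0x900 decrementer vector
         * is delivered relocation-on at 0xc000000000004900, hence the
         * 0x4xxx placements below.
         */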
717
718         /*
719          * This uses the standard macro, since the original 0x300 vector
720          * only has extra guff for STAB-based processors -- which never
721          * come here.
722          */
723         STD_RELON_EXCEPTION_PSERIES(0x4300, 0x300, data_access)
724         . = 0x4380
725         .globl data_access_slb_relon_pSeries
726 data_access_slb_relon_pSeries:
727         SET_SCRATCH0(r13)
728         EXCEPTION_PROLOG_0(PACA_EXSLB)
729         EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380)
730         std     r3,PACA_EXSLB+EX_R3(r13)
731         mfspr   r3,SPRN_DAR
732         mfspr   r12,SPRN_SRR1
733 #ifndef CONFIG_RELOCATABLE
734         b       .slb_miss_realmode
735 #else
736         /*
737          * We can't just use a direct branch to .slb_miss_realmode
738          * because the distance from here to there depends on where
739          * the kernel ends up being put.
740          */
741         mfctr   r11
742         ld      r10,PACAKBASE(r13)
743         LOAD_HANDLER(r10, .slb_miss_realmode)
744         mtctr   r10
745         bctr
746 #endif
747
748         STD_RELON_EXCEPTION_PSERIES(0x4400, 0x400, instruction_access)
749         . = 0x4480
750         .globl instruction_access_slb_relon_pSeries
751 instruction_access_slb_relon_pSeries:
752         SET_SCRATCH0(r13)
753         EXCEPTION_PROLOG_0(PACA_EXSLB)
754         EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x480)
755         std     r3,PACA_EXSLB+EX_R3(r13)
756         mfspr   r3,SPRN_SRR0            /* SRR0 is faulting address */
757         mfspr   r12,SPRN_SRR1
758 #ifndef CONFIG_RELOCATABLE
759         b       .slb_miss_realmode
760 #else
761         mfctr   r11
762         ld      r10,PACAKBASE(r13)
763         LOAD_HANDLER(r10, .slb_miss_realmode)
764         mtctr   r10
765         bctr
766 #endif
767
768         . = 0x4500
769         .globl hardware_interrupt_relon_pSeries;
770         .globl hardware_interrupt_relon_hv;
771 hardware_interrupt_relon_pSeries:
772 hardware_interrupt_relon_hv:
773         BEGIN_FTR_SECTION
774                 _MASKABLE_RELON_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV, SOFTEN_TEST_HV)
775         FTR_SECTION_ELSE
776                 _MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD, SOFTEN_TEST_PR)
777         ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
778         STD_RELON_EXCEPTION_PSERIES(0x4600, 0x600, alignment)
779         STD_RELON_EXCEPTION_PSERIES(0x4700, 0x700, program_check)
780         STD_RELON_EXCEPTION_PSERIES(0x4800, 0x800, fp_unavailable)
781         MASKABLE_RELON_EXCEPTION_PSERIES(0x4900, 0x900, decrementer)
782         STD_RELON_EXCEPTION_HV(0x4980, 0x982, hdecrementer)
783         MASKABLE_RELON_EXCEPTION_PSERIES(0x4a00, 0xa00, doorbell_super)
784         STD_RELON_EXCEPTION_PSERIES(0x4b00, 0xb00, trap_0b)
785
786         . = 0x4c00
787         .globl system_call_relon_pSeries
788 system_call_relon_pSeries:
789         HMT_MEDIUM
790         SYSCALL_PSERIES_1
791         SYSCALL_PSERIES_2_DIRECT
792         SYSCALL_PSERIES_3
793
794         STD_RELON_EXCEPTION_PSERIES(0x4d00, 0xd00, single_step)
795
796         . = 0x4e00
797         b       .       /* Can't happen, see v2.07 Book III-S section 6.5 */
798
799         . = 0x4e20
800         b       .       /* Can't happen, see v2.07 Book III-S section 6.5 */
801
802         . = 0x4e40
803         SET_SCRATCH0(r13)
804         EXCEPTION_PROLOG_0(PACA_EXGEN)
805         b       emulation_assist_relon_hv
806
807         . = 0x4e60
808         b       .       /* Can't happen, see v2.07 Book III-S section 6.5 */
809
810         . = 0x4e80
811         SET_SCRATCH0(r13)
812         EXCEPTION_PROLOG_0(PACA_EXGEN)
813         b       h_doorbell_relon_hv
814
815 performance_monitor_relon_pSeries_1:
816         . = 0x4f00
817         SET_SCRATCH0(r13)
818         EXCEPTION_PROLOG_0(PACA_EXGEN)
819         b       performance_monitor_relon_pSeries
820
821 altivec_unavailable_relon_pSeries_1:
822         . = 0x4f20
823         SET_SCRATCH0(r13)
824         EXCEPTION_PROLOG_0(PACA_EXGEN)
825         b       altivec_unavailable_relon_pSeries
826
827 vsx_unavailable_relon_pSeries_1:
828         . = 0x4f40
829         SET_SCRATCH0(r13)
830         EXCEPTION_PROLOG_0(PACA_EXGEN)
831         b       vsx_unavailable_relon_pSeries
832
833 facility_unavailable_relon_trampoline:
834         . = 0x4f60
835         SET_SCRATCH0(r13)
836         EXCEPTION_PROLOG_0(PACA_EXGEN)
837         b       facility_unavailable_relon_pSeries
838
839         STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
840 #ifdef CONFIG_PPC_DENORMALISATION
841         . = 0x5500
842         b       denorm_exception_hv
843 #endif
844         STD_RELON_EXCEPTION_PSERIES(0x5700, 0x1700, altivec_assist)
845
846         /* Other future vectors */
847         .align  7
848         .globl  __end_interrupts
849 __end_interrupts:
850
851         .align  7
852 system_call_entry_direct:
853 #if defined(CONFIG_RELOCATABLE)
854         /* The first level prologue may have used LR to get here, saving
855          * the original LR in r10.  To save hacking/ifdeffing common code, restore it here.
856          */
857         mtlr    r10
858 #endif
859 system_call_entry:
860         b       system_call_common
861
862 ppc64_runlatch_on_trampoline:
863         b       .__ppc64_runlatch_on
864
865 /*
866  * Here we have detected that the kernel stack pointer is bad.
867  * R9 contains the saved CR, r13 points to the paca,
868  * r10 contains the (bad) kernel stack pointer,
869  * r11 and r12 contain the saved SRR0 and SRR1.
870  * We switch to using an emergency stack, save the registers there,
871  * and call kernel_bad_stack(), which panics.
872  */
873 bad_stack:
874         ld      r1,PACAEMERGSP(r13)
875         subi    r1,r1,64+INT_FRAME_SIZE
876         std     r9,_CCR(r1)
877         std     r10,GPR1(r1)
878         std     r11,_NIP(r1)
879         std     r12,_MSR(r1)
880         mfspr   r11,SPRN_DAR
881         mfspr   r12,SPRN_DSISR
882         std     r11,_DAR(r1)
883         std     r12,_DSISR(r1)
884         mflr    r10
885         mfctr   r11
886         mfxer   r12
887         std     r10,_LINK(r1)
888         std     r11,_CTR(r1)
889         std     r12,_XER(r1)
890         SAVE_GPR(0,r1)
891         SAVE_GPR(2,r1)
892         ld      r10,EX_R3(r3)
893         std     r10,GPR3(r1)
894         SAVE_GPR(4,r1)
895         SAVE_4GPRS(5,r1)
896         ld      r9,EX_R9(r3)
897         ld      r10,EX_R10(r3)
898         SAVE_2GPRS(9,r1)
899         ld      r9,EX_R11(r3)
900         ld      r10,EX_R12(r3)
901         ld      r11,EX_R13(r3)
902         std     r9,GPR11(r1)
903         std     r10,GPR12(r1)
904         std     r11,GPR13(r1)
905 BEGIN_FTR_SECTION
906         ld      r10,EX_CFAR(r3)
907         std     r10,ORIG_GPR3(r1)
908 END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
909         SAVE_8GPRS(14,r1)
910         SAVE_10GPRS(22,r1)
911         lhz     r12,PACA_TRAP_SAVE(r13)
912         std     r12,_TRAP(r1)
913         addi    r11,r1,INT_FRAME_SIZE
914         std     r11,0(r1)
915         li      r12,0
916         std     r12,0(r11)
917         ld      r2,PACATOC(r13)
918         ld      r11,exception_marker@toc(r2)
919         std     r12,RESULT(r1)
920         std     r11,STACK_FRAME_OVERHEAD-16(r1)
921 1:      addi    r3,r1,STACK_FRAME_OVERHEAD
922         bl      .kernel_bad_stack
923         b       1b
924
925 /*
926  * Here r13 points to the paca, r9 contains the saved CR,
927  * SRR0 and SRR1 are saved in r11 and r12,
928  * r9 - r13 are saved in paca->exgen.
929  */
930         .align  7
931         .globl data_access_common
932 data_access_common:
933         mfspr   r10,SPRN_DAR
934         std     r10,PACA_EXGEN+EX_DAR(r13)
935         mfspr   r10,SPRN_DSISR
936         stw     r10,PACA_EXGEN+EX_DSISR(r13)
937         EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
938         DISABLE_INTS
939         ld      r12,_MSR(r1)
940         ld      r3,PACA_EXGEN+EX_DAR(r13)
941         lwz     r4,PACA_EXGEN+EX_DSISR(r13)
942         li      r5,0x300
943         b       .do_hash_page           /* Try to handle as hpte fault */
944
945         .align  7
946         .globl  h_data_storage_common
947 h_data_storage_common:
948         mfspr   r10,SPRN_HDAR
949         std     r10,PACA_EXGEN+EX_DAR(r13)
950         mfspr   r10,SPRN_HDSISR
951         stw     r10,PACA_EXGEN+EX_DSISR(r13)
952         EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
953         bl      .save_nvgprs
954         DISABLE_INTS
955         addi    r3,r1,STACK_FRAME_OVERHEAD
956         bl      .unknown_exception
957         b       .ret_from_except
958
959         .align  7
960         .globl instruction_access_common
961 instruction_access_common:
962         EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
963         DISABLE_INTS
964         ld      r12,_MSR(r1)
965         ld      r3,_NIP(r1)
966         andis.  r4,r12,0x5820
967         li      r5,0x400
968         b       .do_hash_page           /* Try to handle as hpte fault */
969
970         STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception)
971
972 /*
973  * Here is the common SLB miss user handler that is used when going to
974  * virtual mode for SLB misses; it is currently not used
975  */
976 #ifdef __DISABLED__
977         .align  7
978         .globl  slb_miss_user_common
979 slb_miss_user_common:
980         mflr    r10
981         std     r3,PACA_EXGEN+EX_DAR(r13)
982         stw     r9,PACA_EXGEN+EX_CCR(r13)
983         std     r10,PACA_EXGEN+EX_LR(r13)
984         std     r11,PACA_EXGEN+EX_SRR0(r13)
985         bl      .slb_allocate_user
986
987         ld      r10,PACA_EXGEN+EX_LR(r13)
988         ld      r3,PACA_EXGEN+EX_R3(r13)
989         lwz     r9,PACA_EXGEN+EX_CCR(r13)
990         ld      r11,PACA_EXGEN+EX_SRR0(r13)
991         mtlr    r10
992         beq-    slb_miss_fault
993
994         andi.   r10,r12,MSR_RI          /* check for unrecoverable exception */
995         beq-    unrecov_user_slb
996         mfmsr   r10
997
998 .machine push
999 .machine "power4"
1000         mtcrf   0x80,r9
1001 .machine pop
1002
1003         clrrdi  r10,r10,2               /* clear RI before setting SRR0/1 */
1004         mtmsrd  r10,1
1005
1006         mtspr   SRR0,r11
1007         mtspr   SRR1,r12
1008
1009         ld      r9,PACA_EXGEN+EX_R9(r13)
1010         ld      r10,PACA_EXGEN+EX_R10(r13)
1011         ld      r11,PACA_EXGEN+EX_R11(r13)
1012         ld      r12,PACA_EXGEN+EX_R12(r13)
1013         ld      r13,PACA_EXGEN+EX_R13(r13)
1014         rfid
1015         b       .
1016
1017 slb_miss_fault:
1018         EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
1019         ld      r4,PACA_EXGEN+EX_DAR(r13)
1020         li      r5,0
1021         std     r4,_DAR(r1)
1022         std     r5,_DSISR(r1)
1023         b       handle_page_fault
1024
1025 unrecov_user_slb:
1026         EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
1027         DISABLE_INTS
1028         bl      .save_nvgprs
1029 1:      addi    r3,r1,STACK_FRAME_OVERHEAD
1030         bl      .unrecoverable_exception
1031         b       1b
1032
1033 #endif /* __DISABLED__ */
1034
1035
1036         .align  7
1037         .globl alignment_common
1038 alignment_common:
1039         mfspr   r10,SPRN_DAR
1040         std     r10,PACA_EXGEN+EX_DAR(r13)
1041         mfspr   r10,SPRN_DSISR
1042         stw     r10,PACA_EXGEN+EX_DSISR(r13)
1043         EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
1044         ld      r3,PACA_EXGEN+EX_DAR(r13)
1045         lwz     r4,PACA_EXGEN+EX_DSISR(r13)
1046         std     r3,_DAR(r1)
1047         std     r4,_DSISR(r1)
1048         bl      .save_nvgprs
1049         DISABLE_INTS
1050         addi    r3,r1,STACK_FRAME_OVERHEAD
1051         bl      .alignment_exception
1052         b       .ret_from_except
1053
1054         .align  7
1055         .globl program_check_common
1056 program_check_common:
1057         EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
1058         bl      .save_nvgprs
1059         DISABLE_INTS
1060         addi    r3,r1,STACK_FRAME_OVERHEAD
1061         bl      .program_check_exception
1062         b       .ret_from_except
1063
1064         .align  7
1065         .globl fp_unavailable_common
1066 fp_unavailable_common:
1067         EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
1068         bne     1f                      /* if from user, just load it up */
1069         bl      .save_nvgprs
1070         DISABLE_INTS
1071         addi    r3,r1,STACK_FRAME_OVERHEAD
1072         bl      .kernel_fp_unavailable_exception
1073         BUG_OPCODE
1074 1:
1075 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1076 BEGIN_FTR_SECTION
1077         /* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
1078          * transaction), go do TM stuff
1079          */
1080         rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
1081         bne-    2f
1082 END_FTR_SECTION_IFSET(CPU_FTR_TM)
1083 #endif
1084         bl      .load_up_fpu
1085         b       fast_exception_return
1086 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1087 2:      /* User process was in a transaction */
1088         bl      .save_nvgprs
1089         DISABLE_INTS
1090         addi    r3,r1,STACK_FRAME_OVERHEAD
1091         bl      .fp_unavailable_tm
1092         b       .ret_from_except
1093 #endif
1094         .align  7
1095         .globl altivec_unavailable_common
1096 altivec_unavailable_common:
1097         EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
1098 #ifdef CONFIG_ALTIVEC
1099 BEGIN_FTR_SECTION
1100         beq     1f
1101 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1102   BEGIN_FTR_SECTION_NESTED(69)
1103         /* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
1104          * transaction), go do TM stuff
1105          */
1106         rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
1107         bne-    2f
1108   END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
1109 #endif
1110         bl      .load_up_altivec
1111         b       fast_exception_return
1112 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1113 2:      /* User process was in a transaction */
1114         bl      .save_nvgprs
1115         DISABLE_INTS
1116         addi    r3,r1,STACK_FRAME_OVERHEAD
1117         bl      .altivec_unavailable_tm
1118         b       .ret_from_except
1119 #endif
1120 1:
1121 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
1122 #endif
1123         bl      .save_nvgprs
1124         DISABLE_INTS
1125         addi    r3,r1,STACK_FRAME_OVERHEAD
1126         bl      .altivec_unavailable_exception
1127         b       .ret_from_except
1128
1129         .align  7
1130         .globl vsx_unavailable_common
1131 vsx_unavailable_common:
1132         EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
1133 #ifdef CONFIG_VSX
1134 BEGIN_FTR_SECTION
1135         beq     1f
1136 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1137   BEGIN_FTR_SECTION_NESTED(69)
1138         /* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
1139          * transaction), go do TM stuff
1140          */
1141         rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
1142         bne-    2f
1143   END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
1144 #endif
1145         b       .load_up_vsx
1146 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1147 2:      /* User process was in a transaction */
1148         bl      .save_nvgprs
1149         DISABLE_INTS
1150         addi    r3,r1,STACK_FRAME_OVERHEAD
1151         bl      .vsx_unavailable_tm
1152         b       .ret_from_except
1153 #endif
1154 1:
1155 END_FTR_SECTION_IFSET(CPU_FTR_VSX)
1156 #endif
1157         bl      .save_nvgprs
1158         DISABLE_INTS
1159         addi    r3,r1,STACK_FRAME_OVERHEAD
1160         bl      .vsx_unavailable_exception
1161         b       .ret_from_except
1162
1163         STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception)
1164
1165         .align  7
1166         .globl  __end_handlers
1167 __end_handlers:
1168
1169         /* Equivalents to the above handlers for relocation-on interrupt vectors */
1170         STD_RELON_EXCEPTION_HV_OOL(0xe40, emulation_assist)
1171         MASKABLE_RELON_EXCEPTION_HV_OOL(0xe80, h_doorbell)
1172
1173         STD_RELON_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
1174         STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
1175         STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
1176         STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
1177
1178 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
1179 /*
1180  * Data area reserved for FWNMI option.
1181  * This address (0x7000) is fixed by the RPA.
1182  */
1183         . = 0x7000
1184         .globl fwnmi_data_area
1185 fwnmi_data_area:
1186
1187         /* pseries and powernv need to keep the whole page from
1188          * 0x7000 to 0x8000 free for use by the firmware
1189          */
1190         . = 0x8000
1191 #endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
1192
1193 /* Space for CPU0's segment table */
1194         .balign 4096
1195         .globl initial_stab
1196 initial_stab:
1197         .space  4096
1198
1199 #ifdef CONFIG_PPC_POWERNV
1200 _GLOBAL(opal_mc_secondary_handler)
1201         HMT_MEDIUM_PPR_DISCARD
1202         SET_SCRATCH0(r13)
1203         GET_PACA(r13)
1204         clrldi  r3,r3,2
1205         tovirt(r3,r3)
1206         std     r3,PACA_OPAL_MC_EVT(r13)
1207         ld      r13,OPAL_MC_SRR0(r3)
1208         mtspr   SPRN_SRR0,r13
1209         ld      r13,OPAL_MC_SRR1(r3)
1210         mtspr   SPRN_SRR1,r13
1211         ld      r3,OPAL_MC_GPR3(r3)
1212         GET_SCRATCH0(r13)
1213         b       machine_check_pSeries
1214 #endif /* CONFIG_PPC_POWERNV */
1215
1216
1217 /*
1218  * r13 points to the PACA, r9 contains the saved CR,
1219  * r12 contains the saved SRR1, SRR0 is still ready for return
1220  * r3 has the faulting address
1221  * r9 - r13 are saved in paca->exslb.
1222  * r3 is saved in paca->slb_r3
1223  * We assume we aren't going to take any exceptions during this procedure.
1224  */
1225 _GLOBAL(slb_miss_realmode)
1226         mflr    r10
1227 #ifdef CONFIG_RELOCATABLE
1228         mtctr   r11
1229 #endif
1230
1231         stw     r9,PACA_EXSLB+EX_CCR(r13)       /* save CR in exc. frame */
1232         std     r10,PACA_EXSLB+EX_LR(r13)       /* save LR */
1233
1234         bl      .slb_allocate_realmode
1235
1236         /* All done -- return from exception. */
1237
1238         ld      r10,PACA_EXSLB+EX_LR(r13)
1239         ld      r3,PACA_EXSLB+EX_R3(r13)
1240         lwz     r9,PACA_EXSLB+EX_CCR(r13)       /* get saved CR */
1241
1242         mtlr    r10
1243
1244         andi.   r10,r12,MSR_RI  /* check for unrecoverable exception */
1245         beq-    2f
1246
1247 .machine        push
1248 .machine        "power4"
1249         mtcrf   0x80,r9
1250         mtcrf   0x01,r9         /* slb_allocate uses cr0 and cr7 */
1251 .machine        pop
1252
1253         RESTORE_PPR_PACA(PACA_EXSLB, r9)
1254         ld      r9,PACA_EXSLB+EX_R9(r13)
1255         ld      r10,PACA_EXSLB+EX_R10(r13)
1256         ld      r11,PACA_EXSLB+EX_R11(r13)
1257         ld      r12,PACA_EXSLB+EX_R12(r13)
1258         ld      r13,PACA_EXSLB+EX_R13(r13)
1259         rfid
1260         b       .       /* prevent speculative execution */
1261
1262 2:      mfspr   r11,SPRN_SRR0
1263         ld      r10,PACAKBASE(r13)
1264         LOAD_HANDLER(r10,unrecov_slb)
1265         mtspr   SPRN_SRR0,r10
1266         ld      r10,PACAKMSR(r13)
1267         mtspr   SPRN_SRR1,r10
1268         rfid
1269         b       .
1270
1271 unrecov_slb:
1272         EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
1273         DISABLE_INTS
1274         bl      .save_nvgprs
1275 1:      addi    r3,r1,STACK_FRAME_OVERHEAD
1276         bl      .unrecoverable_exception
1277         b       1b
1278
1279
1280 #ifdef CONFIG_PPC_970_NAP
1281 power4_fixup_nap:
1282         andc    r9,r9,r10
1283         std     r9,TI_LOCAL_FLAGS(r11)
1284         ld      r10,_LINK(r1)           /* make idle task do the */
1285         std     r10,_NIP(r1)            /* equivalent of a blr */
1286         blr
1287 #endif
1288
1289 /*
1290  * Hash table stuff
1291  */
1292         .align  7
1293 _STATIC(do_hash_page)
1294         std     r3,_DAR(r1)
1295         std     r4,_DSISR(r1)
1296
1297         andis.  r0,r4,0xa410            /* weird error? */
1298         bne-    handle_page_fault       /* if so, go to page fault; else try a HPTE insert */
1299         andis.  r0,r4,DSISR_DABRMATCH@h
1300         bne-    handle_dabr_fault
1301
1302 BEGIN_FTR_SECTION
1303         andis.  r0,r4,0x0020            /* Is it a segment table fault? */
1304         bne-    do_ste_alloc            /* If so handle it */
1305 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
1306
1307         CURRENT_THREAD_INFO(r11, r1)
1308         lwz     r0,TI_PREEMPT(r11)      /* If we're in an "NMI" */
1309         andis.  r0,r0,NMI_MASK@h        /* (i.e. an irq when soft-disabled) */
1310         bne     77f                     /* then don't call hash_page now */
1311         /*
1312          * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
1313          * accessing a userspace segment (even from the kernel). We assume
1314          * kernel addresses always have the high bit set.
1315          */
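        /*
         * C-like sketch of the flag construction below (illustration only;
         * the _PAGE_* names are this era's hash-pte flags):
         *	access  = (dsisr store bit) ? _PAGE_RW : 0;
         *	if ((msr & MSR_PR) || !(ea >> 63))
         *		access |= _PAGE_USER;
         *	access |= _PAGE_PRESENT;
         *	if (trap == 0x400)
         *		access |= _PAGE_EXEC;
         */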
1316         rlwinm  r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
1317         rotldi  r0,r3,15                /* Move high bit into MSR_PR posn */
1318         orc     r0,r12,r0               /* MSR_PR | ~high_bit */
1319         rlwimi  r4,r0,32-13,30,30       /* becomes _PAGE_USER access bit */
1320         ori     r4,r4,1                 /* add _PAGE_PRESENT */
1321         rlwimi  r4,r5,22+2,31-2,31-2    /* Set _PAGE_EXEC if trap is 0x400 */
1322
1323         /*
1324          * r3 contains the faulting address
1325          * r4 contains the required access permissions
1326          * r5 contains the trap number
1327          *
1328          * at return r3 = 0 for success, 1 for page fault, negative for error
1329          */
1330         bl      .hash_page              /* build HPTE if possible */
1331         cmpdi   r3,0                    /* see if hash_page succeeded */
1332
1333         /* Success */
1334         beq     fast_exc_return_irq     /* Return from exception on success */
1335
1336         /* Error */
1337         blt-    13f
1338
1339 /* Here we have a page fault that hash_page can't handle. */
1340 handle_page_fault:
1341 11:     ld      r4,_DAR(r1)
1342         ld      r5,_DSISR(r1)
1343         addi    r3,r1,STACK_FRAME_OVERHEAD
1344         bl      .do_page_fault
1345         cmpdi   r3,0
1346         beq+    12f
1347         bl      .save_nvgprs
1348         mr      r5,r3
1349         addi    r3,r1,STACK_FRAME_OVERHEAD
1350         lwz     r4,_DAR(r1)
1351         bl      .bad_page_fault
1352         b       .ret_from_except
1353
1354 /* We have a data breakpoint exception - handle it */
1355 handle_dabr_fault:
1356         bl      .save_nvgprs
1357         ld      r4,_DAR(r1)
1358         ld      r5,_DSISR(r1)
1359         addi    r3,r1,STACK_FRAME_OVERHEAD
1360         bl      .do_break
1361 12:     b       .ret_from_except_lite
1362
1363
1364 /* We have a page fault that hash_page could handle but HV refused
1365  * the PTE insertion
1366  */
1367 13:     bl      .save_nvgprs
1368         mr      r5,r3
1369         addi    r3,r1,STACK_FRAME_OVERHEAD
1370         ld      r4,_DAR(r1)
1371         bl      .low_hash_fault
1372         b       .ret_from_except
1373
1374 /*
1375  * We come here as a result of a DSI at a point where we don't want
1376  * to call hash_page, such as when we are accessing memory (possibly
1377  * user memory) inside a PMU interrupt that occurred while interrupts
1378  * were soft-disabled.  We want to invoke the exception handler for
1379  * the access, or panic if there isn't a handler.
1380  */
1381 77:     bl      .save_nvgprs
1382         mr      r4,r3
1383         addi    r3,r1,STACK_FRAME_OVERHEAD
1384         li      r5,SIGSEGV
1385         bl      .bad_page_fault
1386         b       .ret_from_except
1387
1388         /* here we have a segment miss */
1389 do_ste_alloc:
1390         bl      .ste_allocate           /* try to insert stab entry */
1391         cmpdi   r3,0
1392         bne-    handle_page_fault
1393         b       fast_exception_return
1394
1395 /*
1396  * r13 points to the PACA, r9 contains the saved CR,
1397  * r11 and r12 contain the saved SRR0 and SRR1.
1398  * r9 - r13 are saved in paca->exslb.
1399  * We assume we aren't going to take any exceptions during this procedure.
1400  * We assume (DAR >> 60) == 0xc.
1401  */
1402         .align  7
1403 _GLOBAL(do_stab_bolted)
1404         stw     r9,PACA_EXSLB+EX_CCR(r13)       /* save CR in exc. frame */
1405         std     r11,PACA_EXSLB+EX_SRR0(r13)     /* save SRR0 in exc. frame */
1406         mfspr   r11,SPRN_DAR                    /* ea */
1407
1408         /*
1409          * check for bad kernel/user address
1410          * (ea & ~REGION_MASK) >= PGTABLE_RANGE
1411          */
1412         rldicr. r9,r11,4,(63 - 46 - 4)
1413         li      r9,0    /* VSID = 0 for bad address */
1414         bne-    0f
1415
1416         /*
1417          * Calculate VSID:
1418          * This is the kernel vsid, we take the top for context from
1419          * the range. context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
1420          * Here we know that (ea >> 60) == 0xc
1421          */
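        /*
         * Sketch of the computation below (illustration only):
         *	context    = MAX_USER_CONTEXT + 1	(since (ea >> 60) == 0xc)
         *	proto_vsid = (context << ESID_BITS) | (ea >> SID_SHIFT)
         *	r9         = ASM_VSID_SCRAMBLE(proto_vsid) << 12
         */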
1422         lis     r9,(MAX_USER_CONTEXT + 1)@ha
1423         addi    r9,r9,(MAX_USER_CONTEXT + 1)@l
1424
1425         srdi    r10,r11,SID_SHIFT
1426         rldimi  r10,r9,ESID_BITS,0 /* proto vsid */
1427         ASM_VSID_SCRAMBLE(r10, r9, 256M)
1428         rldic   r9,r10,12,16    /* r9 = vsid << 12 */
1429
1430 0:
1431         /* Hash to the primary group */
1432         ld      r10,PACASTABVIRT(r13)
1433         srdi    r11,r11,SID_SHIFT
1434         rldimi  r10,r11,7,52    /* r10 = first ste of the group */
1435
1436         /* Search the primary group for a free entry */
1437 1:      ld      r11,0(r10)      /* Test valid bit of the current ste    */
1438         andi.   r11,r11,0x80
1439         beq     2f
1440         addi    r10,r10,16
1441         andi.   r11,r10,0x70
1442         bne     1b
1443
1444         /* Stick to only searching the primary group for now.           */
1445         /* At least for now, we use a very simple random castout scheme */
1446         /* Use the TB as a random number; OR in 1 to avoid entry 0      */
1447         mftb    r11
1448         rldic   r11,r11,4,57    /* r11 = (r11 << 4) & 0x70 */
1449         ori     r11,r11,0x10
1450
1451         /* r10 currently points to an ste one past the group of interest */
1452         /* make it point to the randomly selected entry                 */
1453         subi    r10,r10,128
1454         or      r10,r10,r11     /* r10 is the entry to invalidate       */
1455
1456         isync                   /* mark the entry invalid               */
1457         ld      r11,0(r10)
1458         rldicl  r11,r11,56,1    /* clear the valid bit */
1459         rotldi  r11,r11,8
1460         std     r11,0(r10)
1461         sync
1462
1463         clrrdi  r11,r11,28      /* Get the esid part of the ste         */
1464         slbie   r11
1465
1466 2:      std     r9,8(r10)       /* Store the vsid part of the ste       */
1467         eieio
1468
1469         mfspr   r11,SPRN_DAR            /* Get the new esid                     */
1470         clrrdi  r11,r11,28      /* Permits a full 32b of ESID           */
1471         ori     r11,r11,0x90    /* Turn on valid and kp                 */
1472         std     r11,0(r10)      /* Put new entry back into the stab     */
1473
1474         sync
1475
1476         /* All done -- return from exception. */
1477         lwz     r9,PACA_EXSLB+EX_CCR(r13)       /* get saved CR */
1478         ld      r11,PACA_EXSLB+EX_SRR0(r13)     /* get saved SRR0 */
1479
1480         andi.   r10,r12,MSR_RI
1481         beq-    unrecov_slb
1482
1483         mtcrf   0x80,r9                 /* restore CR */
1484
1485         mfmsr   r10
1486         clrrdi  r10,r10,2
1487         mtmsrd  r10,1
1488
1489         mtspr   SPRN_SRR0,r11
1490         mtspr   SPRN_SRR1,r12
1491         ld      r9,PACA_EXSLB+EX_R9(r13)
1492         ld      r10,PACA_EXSLB+EX_R10(r13)
1493         ld      r11,PACA_EXSLB+EX_R11(r13)
1494         ld      r12,PACA_EXSLB+EX_R12(r13)
1495         ld      r13,PACA_EXSLB+EX_R13(r13)
1496         rfid
1497         b       .       /* prevent speculative execution */