[S390] entry[64].S improvements
author Martin Schwidefsky <schwidefsky@de.ibm.com>
Tue, 27 Dec 2011 10:27:15 +0000 (11:27 +0100)
committer Martin Schwidefsky <schwidefsky@de.ibm.com>
Tue, 27 Dec 2011 10:27:12 +0000 (11:27 +0100)
Another round of cleanup for entry[64].S, in particular the program check
handler looks more reasonable now. The code size for the 31 bit kernel
has been reduced by 616 bytes and by 528 bytes for the 64 bit version.
Even better, the code is a bit faster as well.
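
As background for the lowcore.h and asm-offsets.c hunks below: the single
save_area[16] at 0x0200 is split into dedicated sync, async and restart save
areas. A minimal user-space sketch (illustrative only, not part of the patch;
the struct and its names are made up) that mirrors how the new offsets come
out for the 31 bit layout:

    /* Sketch only: models just the start of the 31 bit lowcore to show
     * where the split save areas land; the rest of _lowcore is omitted. */
    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    struct lowcore_31_sketch {
            uint8_t  arch_area[0x0200];     /* architected prefix area, not modeled */
            uint32_t save_area_sync[8];     /* expected at 0x0200 */
            uint32_t save_area_async[8];    /* expected at 0x0220 */
            uint32_t save_area_restart[1];  /* expected at 0x0240 */
    };

    int main(void)
    {
            /* Mirrors the __LC_SAVE_AREA_* defines added to asm-offsets.c. */
            printf("save_area_sync    0x%03zx\n",
                   offsetof(struct lowcore_31_sketch, save_area_sync));
            printf("save_area_async   0x%03zx\n",
                   offsetof(struct lowcore_31_sketch, save_area_async));
            printf("save_area_restart 0x%03zx\n",
                   offsetof(struct lowcore_31_sketch, save_area_restart));
            return 0;
    }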

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
arch/s390/include/asm/lowcore.h
arch/s390/kernel/asm-offsets.c
arch/s390/kernel/base.S
arch/s390/kernel/entry.S
arch/s390/kernel/entry64.S
arch/s390/kernel/head.S
arch/s390/kernel/reipl64.S
arch/s390/kernel/smp.c

index 3b97964e0e96fb9a7a8fc4951982a6af256b61d7..707f2306725b9a91a79b40d3627fc7bd18e1bcb9 100644 (file)
@@ -97,47 +97,52 @@ struct _lowcore {
        __u32   gpregs_save_area[16];           /* 0x0180 */
        __u32   cregs_save_area[16];            /* 0x01c0 */
 
+       /* Save areas. */
+       __u32   save_area_sync[8];              /* 0x0200 */
+       __u32   save_area_async[8];             /* 0x0220 */
+       __u32   save_area_restart[1];           /* 0x0240 */
+       __u8    pad_0x0244[0x0248-0x0244];      /* 0x0244 */
+
        /* Return psws. */
-       __u32   save_area[16];                  /* 0x0200 */
-       psw_t   return_psw;                     /* 0x0240 */
-       psw_t   return_mcck_psw;                /* 0x0248 */
+       psw_t   return_psw;                     /* 0x0248 */
+       psw_t   return_mcck_psw;                /* 0x0250 */
 
        /* CPU time accounting values */
-       __u64   sync_enter_timer;               /* 0x0250 */
-       __u64   async_enter_timer;              /* 0x0258 */
-       __u64   mcck_enter_timer;               /* 0x0260 */
-       __u64   exit_timer;                     /* 0x0268 */
-       __u64   user_timer;                     /* 0x0270 */
-       __u64   system_timer;                   /* 0x0278 */
-       __u64   steal_timer;                    /* 0x0280 */
-       __u64   last_update_timer;              /* 0x0288 */
-       __u64   last_update_clock;              /* 0x0290 */
+       __u64   sync_enter_timer;               /* 0x0258 */
+       __u64   async_enter_timer;              /* 0x0260 */
+       __u64   mcck_enter_timer;               /* 0x0268 */
+       __u64   exit_timer;                     /* 0x0270 */
+       __u64   user_timer;                     /* 0x0278 */
+       __u64   system_timer;                   /* 0x0280 */
+       __u64   steal_timer;                    /* 0x0288 */
+       __u64   last_update_timer;              /* 0x0290 */
+       __u64   last_update_clock;              /* 0x0298 */
 
        /* Current process. */
-       __u32   current_task;                   /* 0x0298 */
-       __u32   thread_info;                    /* 0x029c */
-       __u32   kernel_stack;                   /* 0x02a0 */
+       __u32   current_task;                   /* 0x02a0 */
+       __u32   thread_info;                    /* 0x02a4 */
+       __u32   kernel_stack;                   /* 0x02a8 */
 
        /* Interrupt and panic stack. */
-       __u32   async_stack;                    /* 0x02a4 */
-       __u32   panic_stack;                    /* 0x02a8 */
+       __u32   async_stack;                    /* 0x02ac */
+       __u32   panic_stack;                    /* 0x02b0 */
 
        /* Address space pointer. */
-       __u32   kernel_asce;                    /* 0x02ac */
-       __u32   user_asce;                      /* 0x02b0 */
-       __u32   current_pid;                    /* 0x02b4 */
+       __u32   kernel_asce;                    /* 0x02b4 */
+       __u32   user_asce;                      /* 0x02b8 */
+       __u32   current_pid;                    /* 0x02bc */
 
        /* SMP info area */
-       __u32   cpu_nr;                         /* 0x02b8 */
-       __u32   softirq_pending;                /* 0x02bc */
-       __u32   percpu_offset;                  /* 0x02c0 */
-       __u32   ext_call_fast;                  /* 0x02c4 */
-       __u64   int_clock;                      /* 0x02c8 */
-       __u64   mcck_clock;                     /* 0x02d0 */
-       __u64   clock_comparator;               /* 0x02d8 */
-       __u32   machine_flags;                  /* 0x02e0 */
-       __u32   ftrace_func;                    /* 0x02e4 */
-       __u8    pad_0x02e8[0x0300-0x02e8];      /* 0x02e8 */
+       __u32   cpu_nr;                         /* 0x02c0 */
+       __u32   softirq_pending;                /* 0x02c4 */
+       __u32   percpu_offset;                  /* 0x02c8 */
+       __u32   ext_call_fast;                  /* 0x02cc */
+       __u64   int_clock;                      /* 0x02d0 */
+       __u64   mcck_clock;                     /* 0x02d8 */
+       __u64   clock_comparator;               /* 0x02e0 */
+       __u32   machine_flags;                  /* 0x02e8 */
+       __u32   ftrace_func;                    /* 0x02ec */
+       __u8    pad_0x02f0[0x0300-0x02f0];      /* 0x02f0 */
 
        /* Interrupt response block */
        __u8    irb[64];                        /* 0x0300 */
@@ -229,57 +234,62 @@ struct _lowcore {
        psw_t   mcck_new_psw;                   /* 0x01e0 */
        psw_t   io_new_psw;                     /* 0x01f0 */
 
-       /* Entry/exit save area & return psws. */
-       __u64   save_area[16];                  /* 0x0200 */
-       psw_t   return_psw;                     /* 0x0280 */
-       psw_t   return_mcck_psw;                /* 0x0290 */
+       /* Save areas. */
+       __u64   save_area_sync[8];              /* 0x0200 */
+       __u64   save_area_async[8];             /* 0x0240 */
+       __u64   save_area_restart[1];           /* 0x0280 */
+       __u8    pad_0x0288[0x0290-0x0288];      /* 0x0288 */
+
+       /* Return psws. */
+       psw_t   return_psw;                     /* 0x0290 */
+       psw_t   return_mcck_psw;                /* 0x02a0 */
 
        /* CPU accounting and timing values. */
-       __u64   sync_enter_timer;               /* 0x02a0 */
-       __u64   async_enter_timer;              /* 0x02a8 */
-       __u64   mcck_enter_timer;               /* 0x02b0 */
-       __u64   exit_timer;                     /* 0x02b8 */
-       __u64   user_timer;                     /* 0x02c0 */
-       __u64   system_timer;                   /* 0x02c8 */
-       __u64   steal_timer;                    /* 0x02d0 */
-       __u64   last_update_timer;              /* 0x02d8 */
-       __u64   last_update_clock;              /* 0x02e0 */
+       __u64   sync_enter_timer;               /* 0x02b0 */
+       __u64   async_enter_timer;              /* 0x02b8 */
+       __u64   mcck_enter_timer;               /* 0x02c0 */
+       __u64   exit_timer;                     /* 0x02c8 */
+       __u64   user_timer;                     /* 0x02d0 */
+       __u64   system_timer;                   /* 0x02d8 */
+       __u64   steal_timer;                    /* 0x02e0 */
+       __u64   last_update_timer;              /* 0x02e8 */
+       __u64   last_update_clock;              /* 0x02f0 */
 
        /* Current process. */
-       __u64   current_task;                   /* 0x02e8 */
-       __u64   thread_info;                    /* 0x02f0 */
-       __u64   kernel_stack;                   /* 0x02f8 */
+       __u64   current_task;                   /* 0x02f8 */
+       __u64   thread_info;                    /* 0x0300 */
+       __u64   kernel_stack;                   /* 0x0308 */
 
        /* Interrupt and panic stack. */
-       __u64   async_stack;                    /* 0x0300 */
-       __u64   panic_stack;                    /* 0x0308 */
+       __u64   async_stack;                    /* 0x0310 */
+       __u64   panic_stack;                    /* 0x0318 */
 
        /* Address space pointer. */
-       __u64   kernel_asce;                    /* 0x0310 */
-       __u64   user_asce;                      /* 0x0318 */
-       __u64   current_pid;                    /* 0x0320 */
+       __u64   kernel_asce;                    /* 0x0320 */
+       __u64   user_asce;                      /* 0x0328 */
+       __u64   current_pid;                    /* 0x0330 */
 
        /* SMP info area */
-       __u32   cpu_nr;                         /* 0x0328 */
-       __u32   softirq_pending;                /* 0x032c */
-       __u64   percpu_offset;                  /* 0x0330 */
-       __u64   ext_call_fast;                  /* 0x0338 */
-       __u64   int_clock;                      /* 0x0340 */
-       __u64   mcck_clock;                     /* 0x0348 */
-       __u64   clock_comparator;               /* 0x0350 */
-       __u64   vdso_per_cpu_data;              /* 0x0358 */
-       __u64   machine_flags;                  /* 0x0360 */
-       __u64   ftrace_func;                    /* 0x0368 */
-       __u64   gmap;                           /* 0x0370 */
-       __u8    pad_0x0378[0x0380-0x0378];      /* 0x0378 */
+       __u32   cpu_nr;                         /* 0x0338 */
+       __u32   softirq_pending;                /* 0x033c */
+       __u64   percpu_offset;                  /* 0x0340 */
+       __u64   ext_call_fast;                  /* 0x0348 */
+       __u64   int_clock;                      /* 0x0350 */
+       __u64   mcck_clock;                     /* 0x0358 */
+       __u64   clock_comparator;               /* 0x0360 */
+       __u64   vdso_per_cpu_data;              /* 0x0368 */
+       __u64   machine_flags;                  /* 0x0370 */
+       __u64   ftrace_func;                    /* 0x0378 */
+       __u64   gmap;                           /* 0x0380 */
+       __u8    pad_0x0388[0x0400-0x0388];      /* 0x0388 */
 
        /* Interrupt response block. */
-       __u8    irb[64];                        /* 0x0380 */
+       __u8    irb[64];                        /* 0x0400 */
 
        /* Per cpu primary space access list */
-       __u32   paste[16];                      /* 0x03c0 */
+       __u32   paste[16];                      /* 0x0440 */
 
-       __u8    pad_0x0400[0x0e00-0x0400];      /* 0x0400 */
+       __u8    pad_0x0480[0x0e00-0x0480];      /* 0x0480 */
 
        /*
         * 0xe00 contains the address of the IPL Parameter Information
index 0717363033ebae72d21d695d1e006163b5b645a3..c1a56ba5f8486c3393af91d88dde564d74f52c68 100644 (file)
@@ -108,7 +108,9 @@ int main(void)
        DEFINE(__LC_PGM_NEW_PSW, offsetof(struct _lowcore, program_new_psw));
        DEFINE(__LC_MCK_NEW_PSW, offsetof(struct _lowcore, mcck_new_psw));
        DEFINE(__LC_IO_NEW_PSW, offsetof(struct _lowcore, io_new_psw));
-       DEFINE(__LC_SAVE_AREA, offsetof(struct _lowcore, save_area));
+       DEFINE(__LC_SAVE_AREA_SYNC, offsetof(struct _lowcore, save_area_sync));
+       DEFINE(__LC_SAVE_AREA_ASYNC, offsetof(struct _lowcore, save_area_async));
+       DEFINE(__LC_SAVE_AREA_RESTART, offsetof(struct _lowcore, save_area_restart));
        DEFINE(__LC_RETURN_PSW, offsetof(struct _lowcore, return_psw));
        DEFINE(__LC_RETURN_MCCK_PSW, offsetof(struct _lowcore, return_mcck_psw));
        DEFINE(__LC_SYNC_ENTER_TIMER, offsetof(struct _lowcore, sync_enter_timer));
index f8828d38fa6ec62d3ccd82f46c5228a21e519955..3aa4d00aaf50ec0af3d4581facf40c1cffeef171 100644 (file)
@@ -33,7 +33,7 @@ s390_base_mcck_handler_fn:
        .previous
 
 ENTRY(s390_base_ext_handler)
-       stmg    %r0,%r15,__LC_SAVE_AREA
+       stmg    %r0,%r15,__LC_SAVE_AREA_ASYNC
        basr    %r13,0
 0:     aghi    %r15,-STACK_FRAME_OVERHEAD
        larl    %r1,s390_base_ext_handler_fn
@@ -41,7 +41,7 @@ ENTRY(s390_base_ext_handler)
        ltgr    %r1,%r1
        jz      1f
        basr    %r14,%r1
-1:     lmg     %r0,%r15,__LC_SAVE_AREA
+1:     lmg     %r0,%r15,__LC_SAVE_AREA_ASYNC
        ni      __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit
        lpswe   __LC_EXT_OLD_PSW
 
@@ -53,7 +53,7 @@ s390_base_ext_handler_fn:
        .previous
 
 ENTRY(s390_base_pgm_handler)
-       stmg    %r0,%r15,__LC_SAVE_AREA
+       stmg    %r0,%r15,__LC_SAVE_AREA_SYNC
        basr    %r13,0
 0:     aghi    %r15,-STACK_FRAME_OVERHEAD
        larl    %r1,s390_base_pgm_handler_fn
@@ -61,7 +61,7 @@ ENTRY(s390_base_pgm_handler)
        ltgr    %r1,%r1
        jz      1f
        basr    %r14,%r1
-       lmg     %r0,%r15,__LC_SAVE_AREA
+       lmg     %r0,%r15,__LC_SAVE_AREA_SYNC
        lpswe   __LC_PGM_OLD_PSW
 1:     lpswe   disabled_wait_psw-0b(%r13)
 
@@ -142,7 +142,7 @@ s390_base_mcck_handler_fn:
        .previous
 
 ENTRY(s390_base_ext_handler)
-       stm     %r0,%r15,__LC_SAVE_AREA
+       stm     %r0,%r15,__LC_SAVE_AREA_ASYNC
        basr    %r13,0
 0:     ahi     %r15,-STACK_FRAME_OVERHEAD
        l       %r1,2f-0b(%r13)
@@ -150,7 +150,7 @@ ENTRY(s390_base_ext_handler)
        ltr     %r1,%r1
        jz      1f
        basr    %r14,%r1
-1:     lm      %r0,%r15,__LC_SAVE_AREA
+1:     lm      %r0,%r15,__LC_SAVE_AREA_ASYNC
        ni      __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit
        lpsw    __LC_EXT_OLD_PSW
 
@@ -164,7 +164,7 @@ s390_base_ext_handler_fn:
        .previous
 
 ENTRY(s390_base_pgm_handler)
-       stm     %r0,%r15,__LC_SAVE_AREA
+       stm     %r0,%r15,__LC_SAVE_AREA_SYNC
        basr    %r13,0
 0:     ahi     %r15,-STACK_FRAME_OVERHEAD
        l       %r1,2f-0b(%r13)
@@ -172,7 +172,7 @@ ENTRY(s390_base_pgm_handler)
        ltr     %r1,%r1
        jz      1f
        basr    %r14,%r1
-       lm      %r0,%r15,__LC_SAVE_AREA
+       lm      %r0,%r15,__LC_SAVE_AREA_SYNC
        lpsw    __LC_PGM_OLD_PSW
 
 1:     lpsw    disabled_wait_psw-0b(%r13)
index b13157057e027127be5c82f37bc0531f5b1204de..c2773cff89c3041e42c1cf08607e286e1e08b48b 100644 (file)
 #include <asm/unistd.h>
 #include <asm/page.h>
 
-/*
- * Stack layout for the system_call stack entry.
- * The first few entries are identical to the user_regs_struct.
- */
-SP_PTREGS    = STACK_FRAME_OVERHEAD
-SP_ARGS      = STACK_FRAME_OVERHEAD + __PT_ARGS
-SP_PSW      =  STACK_FRAME_OVERHEAD + __PT_PSW
-SP_R0       =  STACK_FRAME_OVERHEAD + __PT_GPRS
-SP_R1       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 4
-SP_R2       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 8
-SP_R3       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 12
-SP_R4       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 16
-SP_R5       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 20
-SP_R6       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 24
-SP_R7       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 28
-SP_R8       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 32
-SP_R9       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 36
-SP_R10      =  STACK_FRAME_OVERHEAD + __PT_GPRS + 40
-SP_R11      =  STACK_FRAME_OVERHEAD + __PT_GPRS + 44
-SP_R12      =  STACK_FRAME_OVERHEAD + __PT_GPRS + 48
-SP_R13      =  STACK_FRAME_OVERHEAD + __PT_GPRS + 52
-SP_R14      =  STACK_FRAME_OVERHEAD + __PT_GPRS + 56
-SP_R15      =  STACK_FRAME_OVERHEAD + __PT_GPRS + 60
-SP_ORIG_R2   = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
-SP_SVC_CODE  = STACK_FRAME_OVERHEAD + __PT_SVC_CODE
-SP_SIZE      = STACK_FRAME_OVERHEAD + __PT_SIZE
+__PT_R0      = __PT_GPRS
+__PT_R1      = __PT_GPRS + 4
+__PT_R2      = __PT_GPRS + 8
+__PT_R3      = __PT_GPRS + 12
+__PT_R4      = __PT_GPRS + 16
+__PT_R5      = __PT_GPRS + 20
+__PT_R6      = __PT_GPRS + 24
+__PT_R7      = __PT_GPRS + 28
+__PT_R8      = __PT_GPRS + 32
+__PT_R9      = __PT_GPRS + 36
+__PT_R10     = __PT_GPRS + 40
+__PT_R11     = __PT_GPRS + 44
+__PT_R12     = __PT_GPRS + 48
+__PT_R13     = __PT_GPRS + 52
+__PT_R14     = __PT_GPRS + 56
+__PT_R15     = __PT_GPRS + 60
 
 _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
                 _TIF_MCCK_PENDING | _TIF_PER_TRAP )
@@ -58,133 +48,91 @@ STACK_SIZE  = 1 << STACK_SHIFT
 
 #define BASED(name) name-system_call(%r13)
 
-#ifdef CONFIG_TRACE_IRQFLAGS
        .macro  TRACE_IRQS_ON
+#ifdef CONFIG_TRACE_IRQFLAGS
        basr    %r2,%r0
-       l       %r1,BASED(.Ltrace_irq_on_caller)
-       basr    %r14,%r1
+       l       %r1,BASED(.Lhardirqs_on)
+       basr    %r14,%r1                # call trace_hardirqs_on_caller
+#endif
        .endm
 
        .macro  TRACE_IRQS_OFF
+#ifdef CONFIG_TRACE_IRQFLAGS
        basr    %r2,%r0
-       l       %r1,BASED(.Ltrace_irq_off_caller)
-       basr    %r14,%r1
-       .endm
-#else
-#define TRACE_IRQS_ON
-#define TRACE_IRQS_OFF
+       l       %r1,BASED(.Lhardirqs_off)
+       basr    %r14,%r1                # call trace_hardirqs_off_caller
 #endif
+       .endm
 
-#ifdef CONFIG_LOCKDEP
        .macro  LOCKDEP_SYS_EXIT
-       tm      SP_PSW+1(%r15),0x01     # returning to user ?
-       jz      0f
+#ifdef CONFIG_LOCKDEP
+       tm      __PT_PSW+1(%r11),0x01   # returning to user ?
+       jz      .+10
        l       %r1,BASED(.Llockdep_sys_exit)
-       basr    %r14,%r1
-0:
-       .endm
-#else
-#define LOCKDEP_SYS_EXIT
+       basr    %r14,%r1                # call lockdep_sys_exit
 #endif
-
-/*
- * Register usage in interrupt handlers:
- *    R9  - pointer to current task structure
- *    R13 - pointer to literal pool
- *    R14 - return register for function calls
- *    R15 - kernel stack pointer
- */
-
-       .macro  UPDATE_VTIME lc_from,lc_to,lc_sum
-       lm      %r10,%r11,\lc_from
-       sl      %r10,\lc_to
-       sl      %r11,\lc_to+4
-       bc      3,BASED(0f)
-       sl      %r10,BASED(.Lc_1)
-0:     al      %r10,\lc_sum
-       al      %r11,\lc_sum+4
-       bc      12,BASED(1f)
-       al      %r10,BASED(.Lc_1)
-1:     stm     %r10,%r11,\lc_sum
-       .endm
-
-       .macro  SAVE_ALL_SVC psworg,savearea
-       stm     %r12,%r15,\savearea
-       l       %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13
-       l       %r15,__LC_KERNEL_STACK  # problem state -> load ksp
-       s       %r15,BASED(.Lc_spsize)  # make room for registers & psw
-       .endm
-
-       .macro  SAVE_ALL_BASE savearea
-       stm     %r12,%r15,\savearea
-       l       %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13
        .endm
 
-       .macro  SAVE_ALL_PGM psworg,savearea
-       tm      \psworg+1,0x01          # test problem state bit
+       .macro  CHECK_STACK stacksize,savearea
 #ifdef CONFIG_CHECK_STACK
-       bnz     BASED(1f)
-       tml     %r15,STACK_SIZE - CONFIG_STACK_GUARD
-       bnz     BASED(2f)
-       la      %r12,\psworg
-       b       BASED(stack_overflow)
-#else
-       bz      BASED(2f)
+       tml     %r15,\stacksize - CONFIG_STACK_GUARD
+       la      %r14,\savearea
+       jz      stack_overflow
 #endif
-1:     l       %r15,__LC_KERNEL_STACK  # problem state -> load ksp
-2:     s       %r15,BASED(.Lc_spsize)  # make room for registers & psw
        .endm
 
-       .macro  SAVE_ALL_ASYNC psworg,savearea
-       stm     %r12,%r15,\savearea
-       l       %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13
-       la      %r12,\psworg
-       tm      \psworg+1,0x01          # test problem state bit
-       bnz     BASED(1f)               # from user -> load async stack
-       clc     \psworg+4(4),BASED(.Lcritical_end)
-       bhe     BASED(0f)
-       clc     \psworg+4(4),BASED(.Lcritical_start)
-       bl      BASED(0f)
-       l       %r14,BASED(.Lcleanup_critical)
-       basr    %r14,%r14
-       tm      1(%r12),0x01            # retest problem state after cleanup
-       bnz     BASED(1f)
-0:     l       %r14,__LC_ASYNC_STACK   # are we already on the async stack ?
+       .macro  SWITCH_ASYNC savearea,stack,shift
+       tmh     %r8,0x0001              # interrupting from user ?
+       jnz     1f
+       lr      %r14,%r9
+       sl      %r14,BASED(.Lcritical_start)
+       cl      %r14,BASED(.Lcritical_length)
+       jhe     0f
+       la      %r11,\savearea          # inside critical section, do cleanup
+       bras    %r14,cleanup_critical
+       tmh     %r8,0x0001              # retest problem state after cleanup
+       jnz     1f
+0:     l       %r14,\stack             # are we already on the target stack?
        slr     %r14,%r15
-       sra     %r14,STACK_SHIFT
-#ifdef CONFIG_CHECK_STACK
-       bnz     BASED(1f)
-       tml     %r15,STACK_SIZE - CONFIG_STACK_GUARD
-       bnz     BASED(2f)
-       b       BASED(stack_overflow)
-#else
-       bz      BASED(2f)
-#endif
-1:     l       %r15,__LC_ASYNC_STACK
-2:     s       %r15,BASED(.Lc_spsize)  # make room for registers & psw
+       sra     %r14,\shift
+       jnz     1f
+       CHECK_STACK 1<<\shift,\savearea
+       j       2f
+1:     l       %r15,\stack             # load target stack
+2:     ahi     %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+       la      %r11,STACK_FRAME_OVERHEAD(%r15)
        .endm
 
-       .macro  CREATE_STACK_FRAME savearea
-       xc      __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
-       st      %r2,SP_ORIG_R2(%r15)    # store original content of gpr 2
-       mvc     SP_R12(16,%r15),\savearea # move %r12-%r15 to stack
-       stm     %r0,%r11,SP_R0(%r15)    # store gprs %r0-%r11 to kernel stack
+       .macro  ADD64 high,low,timer
+       al      \high,\timer
+       al      \low,\timer+4
+       brc     12,.+8
+       ahi     \high,1
        .endm
 
-       .macro  RESTORE_ALL psworg,sync
-       mvc     \psworg(8),SP_PSW(%r15) # move user PSW to lowcore
-       .if !\sync
-       ni      \psworg+1,0xfd          # clear wait state bit
-       .endif
-       lm      %r0,%r15,SP_R0(%r15)    # load gprs 0-15 of user
-       stpt    __LC_EXIT_TIMER
-       lpsw    \psworg                 # back to caller
+       .macro  SUB64 high,low,timer
+       sl      \high,\timer
+       sl      \low,\timer+4
+       brc     3,.+8
+       ahi     \high,-1
+       .endm
+
+       .macro  UPDATE_VTIME high,low,enter_timer
+       lm      \high,\low,__LC_EXIT_TIMER
+       SUB64   \high,\low,\enter_timer
+       ADD64   \high,\low,__LC_USER_TIMER
+       stm     \high,\low,__LC_USER_TIMER
+       lm      \high,\low,__LC_LAST_UPDATE_TIMER
+       SUB64   \high,\low,__LC_EXIT_TIMER
+       ADD64   \high,\low,__LC_SYSTEM_TIMER
+       stm     \high,\low,__LC_SYSTEM_TIMER
+       mvc     __LC_LAST_UPDATE_TIMER(8),\enter_timer
        .endm
 
        .macro REENABLE_IRQS
-       mvc     __SF_EMPTY(1,%r15),SP_PSW(%r15)
-       ni      __SF_EMPTY(%r15),0xbf
-       ssm     __SF_EMPTY(%r15)
+       st      %r8,__LC_RETURN_PSW
+       ni      __LC_RETURN_PSW,0xbf
+       ssm     __LC_RETURN_PSW
        .endm
 
        .section .kprobes.text, "ax"
@@ -197,14 +145,13 @@ STACK_SIZE  = 1 << STACK_SHIFT
  *  gpr2 = prev
  */
 ENTRY(__switch_to)
-       basr    %r1,0
-0:     l       %r4,__THREAD_info(%r2)          # get thread_info of prev
+       l       %r4,__THREAD_info(%r2)          # get thread_info of prev
        l       %r5,__THREAD_info(%r3)          # get thread_info of next
        tm      __TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending?
-       bz      1f-0b(%r1)
+       jz      0f
        ni      __TI_flags+3(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
        oi      __TI_flags+3(%r5),_TIF_MCCK_PENDING     # set it in next
-1:     stm     %r6,%r15,__SF_GPRS(%r15)        # store gprs of prev task
+0:     stm     %r6,%r15,__SF_GPRS(%r15)        # store gprs of prev task
        st      %r15,__THREAD_ksp(%r2)          # store kernel stack of prev
        l       %r15,__THREAD_ksp(%r3)          # load kernel stack of next
        lctl    %c4,%c4,__TASK_pid(%r3)         # load pid to control reg. 4
@@ -224,48 +171,55 @@ __critical_start:
 
 ENTRY(system_call)
        stpt    __LC_SYNC_ENTER_TIMER
-sysc_saveall:
-       SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
-       CREATE_STACK_FRAME __LC_SAVE_AREA
-       l       %r12,__LC_THREAD_INFO   # load pointer to thread_info struct
-       mvc     SP_PSW(8,%r15),__LC_SVC_OLD_PSW
-       mvc     SP_SVC_CODE(4,%r15),__LC_SVC_ILC
-       oi      __TI_flags+3(%r12),_TIF_SYSCALL
+sysc_stm:
+       stm     %r8,%r15,__LC_SAVE_AREA_SYNC
+       l       %r12,__LC_THREAD_INFO
+       l       %r13,__LC_SVC_NEW_PSW+4
+sysc_per:
+       l       %r15,__LC_KERNEL_STACK
+       ahi     %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+       la      %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
 sysc_vtime:
-       UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
-sysc_stime:
-       UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-sysc_update:
-       mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+       UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
+       stm     %r0,%r7,__PT_R0(%r11)
+       mvc     __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
+       mvc     __PT_PSW(8,%r11),__LC_SVC_OLD_PSW
+       mvc     __PT_SVC_CODE(4,%r11),__LC_SVC_ILC
 sysc_do_svc:
-       xr      %r7,%r7
-       icm     %r7,3,SP_SVC_CODE+2(%r15)# load svc number and test for svc 0
-       bnz     BASED(sysc_nr_ok)       # svc number > 0
+       oi      __TI_flags+3(%r12),_TIF_SYSCALL
+       lh      %r8,__PT_SVC_CODE+2(%r11)
+       sla     %r8,2                           # shift and test for svc0
+       jnz     sysc_nr_ok
        # svc 0: system call number in %r1
        cl      %r1,BASED(.Lnr_syscalls)
-       bnl     BASED(sysc_nr_ok)
-       sth     %r1,SP_SVC_CODE+2(%r15)
-       lr      %r7,%r1           # copy svc number to %r7
+       jnl     sysc_nr_ok
+       sth     %r1,__PT_SVC_CODE+2(%r11)
+       lr      %r8,%r1
+       sla     %r8,2
 sysc_nr_ok:
-       sll     %r7,2             # svc number *4
-       l       %r10,BASED(.Lsysc_table)
+       l       %r10,BASED(.Lsys_call_table)    # 31 bit system call table
+       xc      __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
+       st      %r2,__PT_ORIG_GPR2(%r11)
+       st      %r7,STACK_FRAME_OVERHEAD(%r15)
+       l       %r9,0(%r8,%r10)                 # get system call addr.
        tm      __TI_flags+2(%r12),_TIF_TRACE >> 8
-       mvc     SP_ARGS(4,%r15),SP_R7(%r15)
-       l       %r8,0(%r7,%r10)   # get system call addr.
-       bnz     BASED(sysc_tracesys)
-       basr    %r14,%r8          # call sys_xxxx
-       st      %r2,SP_R2(%r15)   # store return value (change R2 on stack)
+       jnz     sysc_tracesys
+       basr    %r14,%r9                        # call sys_xxxx
+       st      %r2,__PT_R2(%r11)               # store return value
 
 sysc_return:
        LOCKDEP_SYS_EXIT
 sysc_tif:
-       tm      SP_PSW+1(%r15),0x01     # returning to user ?
-       bno     BASED(sysc_restore)
+       tm      __PT_PSW+1(%r11),0x01           # returning to user ?
+       jno     sysc_restore
        tm      __TI_flags+3(%r12),_TIF_WORK_SVC
-       bnz     BASED(sysc_work)  # there is work to do (signals etc.)
+       jnz     sysc_work                       # check for work
        ni      __TI_flags+3(%r12),255-_TIF_SYSCALL
 sysc_restore:
-       RESTORE_ALL __LC_RETURN_PSW,1
+       mvc     __LC_RETURN_PSW(8),__PT_PSW(%r11)
+       stpt    __LC_EXIT_TIMER
+       lm      %r0,%r15,__PT_R0(%r11)
+       lpsw    __LC_RETURN_PSW
 sysc_done:
 
 #
@@ -273,16 +227,16 @@ sysc_done:
 #
 sysc_work:
        tm      __TI_flags+3(%r12),_TIF_MCCK_PENDING
-       bo      BASED(sysc_mcck_pending)
+       jo      sysc_mcck_pending
        tm      __TI_flags+3(%r12),_TIF_NEED_RESCHED
-       bo      BASED(sysc_reschedule)
+       jo      sysc_reschedule
        tm      __TI_flags+3(%r12),_TIF_SIGPENDING
-       bo      BASED(sysc_sigpending)
+       jo      sysc_sigpending
        tm      __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
-       bo      BASED(sysc_notify_resume)
+       jo      sysc_notify_resume
        tm      __TI_flags+3(%r12),_TIF_PER_TRAP
-       bo      BASED(sysc_singlestep)
-       b       BASED(sysc_return)      # beware of critical section cleanup
+       jo      sysc_singlestep
+       j       sysc_return             # beware of critical section cleanup
 
 #
 # _TIF_NEED_RESCHED is set, call schedule
@@ -290,13 +244,13 @@ sysc_work:
 sysc_reschedule:
        l       %r1,BASED(.Lschedule)
        la      %r14,BASED(sysc_return)
-       br      %r1                     # call scheduler
+       br      %r1                     # call schedule
 
 #
 # _TIF_MCCK_PENDING is set, call handler
 #
 sysc_mcck_pending:
-       l       %r1,BASED(.Ls390_handle_mcck)
+       l       %r1,BASED(.Lhandle_mcck)
        la      %r14,BASED(sysc_return)
        br      %r1                     # TIF bit will be cleared by handler
 
@@ -305,23 +259,24 @@ sysc_mcck_pending:
 #
 sysc_sigpending:
        ni      __TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
-       la      %r2,SP_PTREGS(%r15)     # load pt_regs
+       lr      %r2,%r11                # pass pointer to pt_regs
        l       %r1,BASED(.Ldo_signal)
        basr    %r14,%r1                # call do_signal
        tm      __TI_flags+3(%r12),_TIF_SYSCALL
-       bno     BASED(sysc_return)
-       lm      %r2,%r6,SP_R2(%r15)     # load svc arguments
-       xr      %r7,%r7                 # svc 0 returns -ENOSYS
-       clc     SP_SVC_CODE+2(2,%r15),BASED(.Lnr_syscalls+2)
-       bnl     BASED(sysc_nr_ok)       # invalid svc number -> do svc 0
-       icm     %r7,3,SP_SVC_CODE+2(%r15)# load new svc number
-       b       BASED(sysc_nr_ok)       # restart svc
+       jno     sysc_return
+       lm      %r2,%r7,__PT_R2(%r11)   # load svc arguments
+       xr      %r8,%r8                 # svc 0 returns -ENOSYS
+       clc     __PT_SVC_CODE+2(2,%r11),BASED(.Lnr_syscalls+2)
+       jnl     sysc_nr_ok              # invalid svc number -> do svc 0
+       lh      %r8,__PT_SVC_CODE+2(%r11)       # load new svc number
+       sla     %r8,2
+       j       sysc_nr_ok              # restart svc
 
 #
 # _TIF_NOTIFY_RESUME is set, call do_notify_resume
 #
 sysc_notify_resume:
-       la      %r2,SP_PTREGS(%r15)     # load pt_regs
+       lr      %r2,%r11                # pass pointer to pt_regs
        l       %r1,BASED(.Ldo_notify_resume)
        la      %r14,BASED(sysc_return)
        br      %r1                     # call do_notify_resume
@@ -331,56 +286,57 @@ sysc_notify_resume:
 #
 sysc_singlestep:
        ni      __TI_flags+3(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP)
-       la      %r2,SP_PTREGS(%r15)     # address of register-save area
-       l       %r1,BASED(.Lhandle_per) # load adr. of per handler
-       la      %r14,BASED(sysc_return) # load adr. of system return
-       br      %r1                     # branch to do_per_trap
+       lr      %r2,%r11                # pass pointer to pt_regs
+       l       %r1,BASED(.Ldo_per_trap)
+       la      %r14,BASED(sysc_return)
+       br      %r1                     # call do_per_trap
 
 #
 # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
 # and after the system call
 #
 sysc_tracesys:
-       l       %r1,BASED(.Ltrace_entry)
-       la      %r2,SP_PTREGS(%r15)     # load pt_regs
+       l       %r1,BASED(.Ltrace_enter)
+       lr      %r2,%r11                # pass pointer to pt_regs
        la      %r3,0
        xr      %r0,%r0
-       icm     %r0,3,SP_SVC_CODE(%r15)
-       st      %r0,SP_R2(%r15)
-       basr    %r14,%r1
+       icm     %r0,3,__PT_SVC_CODE+2(%r11)
+       st      %r0,__PT_R2(%r11)
+       basr    %r14,%r1                # call do_syscall_trace_enter
        cl      %r2,BASED(.Lnr_syscalls)
-       bnl     BASED(sysc_tracenogo)
-       lr      %r7,%r2
-       sll     %r7,2                   # svc number *4
-       l       %r8,0(%r7,%r10)
+       jnl     sysc_tracenogo
+       lr      %r8,%r2
+       sll     %r8,2
+       l       %r9,0(%r8,%r10)
 sysc_tracego:
-       lm      %r3,%r6,SP_R3(%r15)
-       mvc     SP_ARGS(4,%r15),SP_R7(%r15)
-       l       %r2,SP_ORIG_R2(%r15)
-       basr    %r14,%r8                # call sys_xxx
-       st      %r2,SP_R2(%r15)         # store return value
+       lm      %r3,%r7,__PT_R3(%r11)
+       st      %r7,STACK_FRAME_OVERHEAD(%r15)
+       l       %r2,__PT_ORIG_GPR2(%r11)
+       basr    %r14,%r9                # call sys_xxx
+       st      %r2,__PT_R2(%r11)       # store return value
 sysc_tracenogo:
        tm      __TI_flags+2(%r12),_TIF_TRACE >> 8
-       bz      BASED(sysc_return)
+       jz      sysc_return
        l       %r1,BASED(.Ltrace_exit)
-       la      %r2,SP_PTREGS(%r15)     # load pt_regs
+       lr      %r2,%r11                # pass pointer to pt_regs
        la      %r14,BASED(sysc_return)
-       br      %r1
+       br      %r1                     # call do_syscall_trace_exit
 
 #
 # a new process exits the kernel with ret_from_fork
 #
 ENTRY(ret_from_fork)
+       la      %r11,STACK_FRAME_OVERHEAD(%r15)
+       l       %r12,__LC_THREAD_INFO
        l       %r13,__LC_SVC_NEW_PSW+4
-       l       %r12,__LC_THREAD_INFO   # load pointer to thread_info struct
-       tm      SP_PSW+1(%r15),0x01     # forking a kernel thread ?
-       bo      BASED(0f)
-       st      %r15,SP_R15(%r15)       # store stack pointer for new kthread
-0:     l       %r1,BASED(.Lschedtail)
-       basr    %r14,%r1
+       tm      __PT_PSW+1(%r11),0x01   # forking a kernel thread ?
+       jo      0f
+       st      %r15,__PT_R15(%r11)     # store stack pointer for new kthread
+0:     l       %r1,BASED(.Lschedule_tail)
+       basr    %r14,%r1                # call schedule_tail
        TRACE_IRQS_ON
-       stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
-       b       BASED(sysc_tracenogo)
+       ssm     __LC_SVC_NEW_PSW        # reenable interrupts
+       j       sysc_tracenogo
 
 #
 # kernel_execve function needs to deal with pt_regs that is not
@@ -390,153 +346,98 @@ ENTRY(kernel_execve)
        stm     %r12,%r15,48(%r15)
        lr      %r14,%r15
        l       %r13,__LC_SVC_NEW_PSW+4
-       s       %r15,BASED(.Lc_spsize)
+       ahi     %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
        st      %r14,__SF_BACKCHAIN(%r15)
-       la      %r12,SP_PTREGS(%r15)
+       la      %r12,STACK_FRAME_OVERHEAD(%r15)
        xc      0(__PT_SIZE,%r12),0(%r12)
        l       %r1,BASED(.Ldo_execve)
        lr      %r5,%r12
-       basr    %r14,%r1
+       basr    %r14,%r1                # call do_execve
        ltr     %r2,%r2
-       be      BASED(0f)
-       a       %r15,BASED(.Lc_spsize)
+       je      0f
+       ahi     %r15,(STACK_FRAME_OVERHEAD + __PT_SIZE)
        lm      %r12,%r15,48(%r15)
        br      %r14
        # execve succeeded.
-0:     stnsm   __SF_EMPTY(%r15),0xfc   # disable interrupts
+0:     ssm     __LC_PGM_NEW_PSW        # disable I/O and ext. interrupts
        l       %r15,__LC_KERNEL_STACK  # load ksp
-       s       %r15,BASED(.Lc_spsize)  # make room for registers & psw
-       mvc     SP_PTREGS(__PT_SIZE,%r15),0(%r12)       # copy pt_regs
+       ahi     %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+       la      %r11,STACK_FRAME_OVERHEAD(%r15)
+       mvc     0(__PT_SIZE,%r11),0(%r12)       # copy pt_regs
        l       %r12,__LC_THREAD_INFO
        xc      __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
-       stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
+       ssm     __LC_SVC_NEW_PSW        # reenable interrupts
        l       %r1,BASED(.Lexecve_tail)
-       basr    %r14,%r1
-       b       BASED(sysc_return)
+       basr    %r14,%r1                # call execve_tail
+       j       sysc_return
 
 /*
  * Program check handler routine
  */
 
 ENTRY(pgm_check_handler)
-/*
- * First we need to check for a special case:
- * Single stepping an instruction that disables the PER event mask will
- * cause a PER event AFTER the mask has been set. Example: SVC or LPSW.
- * For a single stepped SVC the program check handler gets control after
- * the SVC new PSW has been loaded. But we want to execute the SVC first and
- * then handle the PER event. Therefore we update the SVC old PSW to point
- * to the pgm_check_handler and branch to the SVC handler after we checked
- * if we have to load the kernel stack register.
- * For every other possible cause for PER event without the PER mask set
- * we just ignore the PER event (FIXME: is there anything we have to do
- * for LPSW?).
- */
        stpt    __LC_SYNC_ENTER_TIMER
-       SAVE_ALL_BASE __LC_SAVE_AREA
-       tm      __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
-       bnz     BASED(pgm_per)          # got per exception -> special case
-       SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
-       CREATE_STACK_FRAME __LC_SAVE_AREA
-       mvc     SP_PSW(8,%r15),__LC_PGM_OLD_PSW
-       l       %r12,__LC_THREAD_INFO   # load pointer to thread_info struct
-       tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
-       bz      BASED(pgm_no_vtime)
-       UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
-       UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-       mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
-pgm_no_vtime:
-       l       %r3,__LC_PGM_ILC        # load program interruption code
-       l       %r4,__LC_TRANS_EXC_CODE
-       REENABLE_IRQS
-       la      %r8,0x7f
-       nr      %r8,%r3
-       sll     %r8,2
-       l       %r1,BASED(.Ljump_table)
-       l       %r1,0(%r8,%r1)          # load address of handler routine
-       la      %r2,SP_PTREGS(%r15)     # address of register-save area
-       basr    %r14,%r1                # branch to interrupt-handler
-pgm_exit:
-       b       BASED(sysc_return)
-
-#
-# handle per exception
-#
-pgm_per:
-       tm      __LC_PGM_OLD_PSW,0x40   # test if per event recording is on
-       bnz     BASED(pgm_per_std)      # ok, normal per event from user space
-# ok its one of the special cases, now we need to find out which one
-       clc     __LC_PGM_OLD_PSW(8),__LC_SVC_NEW_PSW
-       be      BASED(pgm_svcper)
-# no interesting special case, ignore PER event
-       lm      %r12,%r15,__LC_SAVE_AREA
-       lpsw    0x28
-
-#
-# Normal per exception
-#
-pgm_per_std:
-       SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
-       CREATE_STACK_FRAME __LC_SAVE_AREA
-       mvc     SP_PSW(8,%r15),__LC_PGM_OLD_PSW
-       l       %r12,__LC_THREAD_INFO   # load pointer to thread_info struct
-       tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
-       bz      BASED(pgm_no_vtime2)
-       UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
-       UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-       mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
-pgm_no_vtime2:
+       stm     %r8,%r15,__LC_SAVE_AREA_SYNC
+       l       %r12,__LC_THREAD_INFO
+       l       %r13,__LC_SVC_NEW_PSW+4
+       lm      %r8,%r9,__LC_PGM_OLD_PSW
+       tmh     %r8,0x0001              # test problem state bit
+       jnz     1f                      # -> fault in user space
+       tmh     %r8,0x4000              # PER bit set in old PSW ?
+       jnz     0f                      # -> enabled, can't be a double fault
+       tm      __LC_PGM_ILC+3,0x80     # check for per exception
+       jnz     pgm_svcper              # -> single stepped svc
+0:     CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
+       j       2f
+1:     UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
+       l       %r15,__LC_KERNEL_STACK
+2:     ahi     %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+       la      %r11,STACK_FRAME_OVERHEAD(%r15)
+       stm     %r0,%r7,__PT_R0(%r11)
+       mvc     __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
+       stm     %r8,%r9,__PT_PSW(%r11)
+       tm      __LC_PGM_ILC+3,0x80     # check for per exception
+       jz      0f
        l       %r1,__TI_task(%r12)
-       tm      SP_PSW+1(%r15),0x01     # kernel per event ?
-       bz      BASED(kernel_per)
-       mvc     __THREAD_per_cause(2,%r1),__LC_PER_CAUSE
+       tmh     %r8,0x0001              # kernel per event ?
+       jz      pgm_kprobe
+       oi      __TI_flags+3(%r12),_TIF_PER_TRAP
        mvc     __THREAD_per_address(4,%r1),__LC_PER_ADDRESS
+       mvc     __THREAD_per_cause(2,%r1),__LC_PER_CAUSE
        mvc     __THREAD_per_paid(1,%r1),__LC_PER_PAID
-       oi      __TI_flags+3(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
-       l       %r3,__LC_PGM_ILC        # load program interruption code
+0:     l       %r3,__LC_PGM_ILC        # load program interruption code
        l       %r4,__LC_TRANS_EXC_CODE
        REENABLE_IRQS
-       la      %r8,0x7f
-       nr      %r8,%r3                 # clear per-event-bit and ilc
-       be      BASED(pgm_exit2)        # only per or per+check ?
-       sll     %r8,2
+       xc      __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
        l       %r1,BASED(.Ljump_table)
-       l       %r1,0(%r8,%r1)          # load address of handler routine
-       la      %r2,SP_PTREGS(%r15)     # address of register-save area
+       la      %r10,0x7f
+       nr      %r10,%r3
+       je      sysc_return
+       sll     %r10,2
+       l       %r1,0(%r10,%r1)         # load address of handler routine
+       lr      %r2,%r11                # pass pointer to pt_regs
        basr    %r14,%r1                # branch to interrupt-handler
-pgm_exit2:
-       b       BASED(sysc_return)
+       j       sysc_return
 
 #
-# it was a single stepped SVC that is causing all the trouble
+# PER event in supervisor state, must be kprobes
 #
-pgm_svcper:
-       SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA
-       CREATE_STACK_FRAME __LC_SAVE_AREA
-       l       %r12,__LC_THREAD_INFO   # load pointer to thread_info struct
-       mvc     SP_PSW(8,%r15),__LC_SVC_OLD_PSW
-       mvc     SP_SVC_CODE(4,%r15),__LC_SVC_ILC
-       oi      __TI_flags+3(%r12),(_TIF_SYSCALL | _TIF_PER_TRAP)
-       UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
-       UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-       mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
-       l       %r8,__TI_task(%r12)
-       mvc     __THREAD_per_cause(2,%r8),__LC_PER_CAUSE
-       mvc     __THREAD_per_address(4,%r8),__LC_PER_ADDRESS
-       mvc     __THREAD_per_paid(1,%r8),__LC_PER_PAID
-       stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
-       lm      %r2,%r6,SP_R2(%r15)     # load svc arguments
-       b       BASED(sysc_do_svc)
+pgm_kprobe:
+       REENABLE_IRQS
+       xc      __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
+       l       %r1,BASED(.Ldo_per_trap)
+       lr      %r2,%r11                # pass pointer to pt_regs
+       basr    %r14,%r1                # call do_per_trap
+       j       sysc_return
 
 #
-# per was called from kernel, must be kprobes
+# single stepped system call
 #
-kernel_per:
-       REENABLE_IRQS
-       la      %r2,SP_PTREGS(%r15)     # address of register-save area
-       l       %r1,BASED(.Lhandle_per) # load adr. of per handler
-       basr    %r14,%r1                # branch to do_single_step
-       b       BASED(pgm_exit)
+pgm_svcper:
+       oi      __TI_flags+3(%r12),_TIF_PER_TRAP
+       mvc     __LC_RETURN_PSW(4),__LC_SVC_NEW_PSW
+       mvc     __LC_RETURN_PSW+4(4),BASED(.Lsysc_per)
+       lpsw    __LC_RETURN_PSW         # branch to sysc_per and enable irqs
 
 /*
  * IO interrupt handler routine
@@ -545,28 +446,35 @@ kernel_per:
 ENTRY(io_int_handler)
        stck    __LC_INT_CLOCK
        stpt    __LC_ASYNC_ENTER_TIMER
-       SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
-       CREATE_STACK_FRAME __LC_SAVE_AREA+16
-       mvc     SP_PSW(8,%r15),0(%r12)  # move user PSW to stack
-       l       %r12,__LC_THREAD_INFO   # load pointer to thread_info struct
-       tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
-       bz      BASED(io_no_vtime)
-       UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
-       UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-       mvc     __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
-io_no_vtime:
+       stm     %r8,%r15,__LC_SAVE_AREA_ASYNC
+       l       %r12,__LC_THREAD_INFO
+       l       %r13,__LC_SVC_NEW_PSW+4
+       lm      %r8,%r9,__LC_IO_OLD_PSW
+       tmh     %r8,0x0001              # interrupting from user ?
+       jz      io_skip
+       UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
+io_skip:
+       SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
+       stm     %r0,%r7,__PT_R0(%r11)
+       mvc     __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
+       stm     %r8,%r9,__PT_PSW(%r11)
        TRACE_IRQS_OFF
-       l       %r1,BASED(.Ldo_IRQ)     # load address of do_IRQ
-       la      %r2,SP_PTREGS(%r15)     # address of register-save area
-       basr    %r14,%r1                # branch to standard irq handler
+       xc      __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
+       l       %r1,BASED(.Ldo_IRQ)
+       lr      %r2,%r11                # pass pointer to pt_regs
+       basr    %r14,%r1                # call do_IRQ
 io_return:
        LOCKDEP_SYS_EXIT
        TRACE_IRQS_ON
 io_tif:
        tm      __TI_flags+3(%r12),_TIF_WORK_INT
-       bnz     BASED(io_work)          # there is work to do (signals etc.)
+       jnz     io_work                 # there is work to do (signals etc.)
 io_restore:
-       RESTORE_ALL __LC_RETURN_PSW,0
+       mvc     __LC_RETURN_PSW(8),__PT_PSW(%r11)
+       ni      __LC_RETURN_PSW+1,0xfd  # clean wait state bit
+       stpt    __LC_EXIT_TIMER
+       lm      %r0,%r15,__PT_R0(%r11)
+       lpsw    __LC_RETURN_PSW
 io_done:
 
 #
@@ -577,28 +485,29 @@ io_done:
 # Before any work can be done, a switch to the kernel stack is required.
 #
 io_work:
-       tm      SP_PSW+1(%r15),0x01     # returning to user ?
-       bo      BASED(io_work_user)     # yes -> do resched & signal
+       tm      __PT_PSW+1(%r11),0x01   # returning to user ?
+       jo      io_work_user            # yes -> do resched & signal
 #ifdef CONFIG_PREEMPT
        # check for preemptive scheduling
        icm     %r0,15,__TI_precount(%r12)
-       bnz     BASED(io_restore)       # preemption disabled
+       jnz     io_restore              # preemption disabled
        tm      __TI_flags+3(%r12),_TIF_NEED_RESCHED
-       bno     BASED(io_restore)
+       jno     io_restore
        # switch to kernel stack
-       l       %r1,SP_R15(%r15)
-       s       %r1,BASED(.Lc_spsize)
-       mvc     SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
-       xc      __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
+       l       %r1,__PT_R15(%r11)
+       ahi     %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+       mvc     STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
+       xc      __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
+       la      %r11,STACK_FRAME_OVERHEAD(%r1)
        lr      %r15,%r1
        # TRACE_IRQS_ON already done at io_return, call
        # TRACE_IRQS_OFF to keep things symmetrical
        TRACE_IRQS_OFF
-       l       %r1,BASED(.Lpreempt_schedule_irq)
+       l       %r1,BASED(.Lpreempt_irq)
        basr    %r14,%r1                # call preempt_schedule_irq
-       b       BASED(io_return)
+       j       io_return
 #else
-       b       BASED(io_restore)
+       j       io_restore
 #endif
 
 #
@@ -606,9 +515,10 @@ io_work:
 #
 io_work_user:
        l       %r1,__LC_KERNEL_STACK
-       s       %r1,BASED(.Lc_spsize)
-       mvc     SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
-       xc      __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
+       ahi     %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+       mvc     STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
+       xc      __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
+       la      %r11,STACK_FRAME_OVERHEAD(%r1)
        lr      %r15,%r1
 
 #
@@ -618,24 +528,24 @@ io_work_user:
 #
 io_work_tif:
        tm      __TI_flags+3(%r12),_TIF_MCCK_PENDING
-       bo      BASED(io_mcck_pending)
+       jo      io_mcck_pending
        tm      __TI_flags+3(%r12),_TIF_NEED_RESCHED
-       bo      BASED(io_reschedule)
+       jo      io_reschedule
        tm      __TI_flags+3(%r12),_TIF_SIGPENDING
-       bo      BASED(io_sigpending)
+       jo      io_sigpending
        tm      __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
-       bo      BASED(io_notify_resume)
-       b       BASED(io_return)        # beware of critical section cleanup
+       jo      io_notify_resume
+       j       io_return               # beware of critical section cleanup
 
 #
 # _TIF_MCCK_PENDING is set, call handler
 #
 io_mcck_pending:
        # TRACE_IRQS_ON already done at io_return
-       l       %r1,BASED(.Ls390_handle_mcck)
+       l       %r1,BASED(.Lhandle_mcck)
        basr    %r14,%r1                # TIF bit will be cleared by handler
        TRACE_IRQS_OFF
-       b       BASED(io_return)
+       j       io_return
 
 #
 # _TIF_NEED_RESCHED is set, call schedule
@@ -643,37 +553,37 @@ io_mcck_pending:
 io_reschedule:
        # TRACE_IRQS_ON already done at io_return
        l       %r1,BASED(.Lschedule)
-       stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
+       ssm     __LC_SVC_NEW_PSW        # reenable interrupts
        basr    %r14,%r1                # call scheduler
-       stnsm   __SF_EMPTY(%r15),0xfc   # disable I/O and ext. interrupts
+       ssm     __LC_PGM_NEW_PSW        # disable I/O and ext. interrupts
        TRACE_IRQS_OFF
-       b       BASED(io_return)
+       j       io_return
 
 #
 # _TIF_SIGPENDING is set, call do_signal
 #
 io_sigpending:
        # TRACE_IRQS_ON already done at io_return
-       stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
-       la      %r2,SP_PTREGS(%r15)     # load pt_regs
        l       %r1,BASED(.Ldo_signal)
+       ssm     __LC_SVC_NEW_PSW        # reenable interrupts
+       lr      %r2,%r11                # pass pointer to pt_regs
        basr    %r14,%r1                # call do_signal
-       stnsm   __SF_EMPTY(%r15),0xfc   # disable I/O and ext. interrupts
+       ssm     __LC_PGM_NEW_PSW        # disable I/O and ext. interrupts
        TRACE_IRQS_OFF
-       b       BASED(io_return)
+       j       io_return
 
 #
 # _TIF_SIGPENDING is set, call do_signal
 #
 io_notify_resume:
        # TRACE_IRQS_ON already done at io_return
-       stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
-       la      %r2,SP_PTREGS(%r15)     # load pt_regs
        l       %r1,BASED(.Ldo_notify_resume)
-       basr    %r14,%r1                # call do_signal
-       stnsm   __SF_EMPTY(%r15),0xfc   # disable I/O and ext. interrupts
+       ssm     __LC_SVC_NEW_PSW        # reenable interrupts
+       lr      %r2,%r11                # pass pointer to pt_regs
+       basr    %r14,%r1                # call do_notify_resume
+       ssm     __LC_PGM_NEW_PSW        # disable I/O and ext. interrupts
        TRACE_IRQS_OFF
-       b       BASED(io_return)
+       j       io_return
 
 /*
  * External interrupt handler routine
@@ -682,23 +592,25 @@ io_notify_resume:
 ENTRY(ext_int_handler)
        stck    __LC_INT_CLOCK
        stpt    __LC_ASYNC_ENTER_TIMER
-       SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
-       CREATE_STACK_FRAME __LC_SAVE_AREA+16
-       mvc     SP_PSW(8,%r15),0(%r12)  # move user PSW to stack
-       l       %r12,__LC_THREAD_INFO   # load pointer to thread_info struct
-       tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
-       bz      BASED(ext_no_vtime)
-       UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
-       UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-       mvc     __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
-ext_no_vtime:
+       stm     %r8,%r15,__LC_SAVE_AREA_ASYNC
+       l       %r12,__LC_THREAD_INFO
+       l       %r13,__LC_SVC_NEW_PSW+4
+       lm      %r8,%r9,__LC_EXT_OLD_PSW
+       tmh     %r8,0x0001              # interrupting from user ?
+       jz      ext_skip
+       UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
+ext_skip:
+       SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
+       stm     %r0,%r7,__PT_R0(%r11)
+       mvc     __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
+       stm     %r8,%r9,__PT_PSW(%r11)
        TRACE_IRQS_OFF
-       la      %r2,SP_PTREGS(%r15)     # address of register-save area
+       lr      %r2,%r11                # pass pointer to pt_regs
        l       %r3,__LC_CPU_ADDRESS    # get cpu address + interruption code
        l       %r4,__LC_EXT_PARAMS     # get external parameters
        l       %r1,BASED(.Ldo_extint)
-       basr    %r14,%r1
-       b       BASED(io_return)
+       basr    %r14,%r1                # call do_extint
+       j       io_return
 
 __critical_end:
 
@@ -710,82 +622,74 @@ ENTRY(mcck_int_handler)
        stck    __LC_MCCK_CLOCK
        spt     __LC_CPU_TIMER_SAVE_AREA        # revalidate cpu timer
        lm      %r0,%r15,__LC_GPREGS_SAVE_AREA  # revalidate gprs
-       SAVE_ALL_BASE __LC_SAVE_AREA+32
-       la      %r12,__LC_MCK_OLD_PSW
+       l       %r12,__LC_THREAD_INFO
+       l       %r13,__LC_SVC_NEW_PSW+4
+       lm      %r8,%r9,__LC_MCK_OLD_PSW
        tm      __LC_MCCK_CODE,0x80     # system damage?
-       bo      BASED(mcck_int_main)    # yes -> rest of mcck code invalid
-       mvc     __LC_MCCK_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA
+       jo      mcck_panic              # yes -> rest of mcck code invalid
+       la      %r14,__LC_CPU_TIMER_SAVE_AREA
+       mvc     __LC_MCCK_ENTER_TIMER(8),0(%r14)
        tm      __LC_MCCK_CODE+5,0x02   # stored cpu timer value valid?
-       bo      BASED(1f)
+       jo      3f
        la      %r14,__LC_SYNC_ENTER_TIMER
        clc     0(8,%r14),__LC_ASYNC_ENTER_TIMER
-       bl      BASED(0f)
+       jl      0f
        la      %r14,__LC_ASYNC_ENTER_TIMER
 0:     clc     0(8,%r14),__LC_EXIT_TIMER
-       bl      BASED(0f)
+       jl      1f
        la      %r14,__LC_EXIT_TIMER
-0:     clc     0(8,%r14),__LC_LAST_UPDATE_TIMER
-       bl      BASED(0f)
+1:     clc     0(8,%r14),__LC_LAST_UPDATE_TIMER
+       jl      2f
        la      %r14,__LC_LAST_UPDATE_TIMER
-0:     spt     0(%r14)
+2:     spt     0(%r14)
        mvc     __LC_MCCK_ENTER_TIMER(8),0(%r14)
-1:     tm      __LC_MCCK_CODE+2,0x09   # mwp + ia of old psw valid?
-       bno     BASED(mcck_int_main)    # no -> skip cleanup critical
-       tm      __LC_MCK_OLD_PSW+1,0x01 # test problem state bit
-       bnz     BASED(mcck_int_main)    # from user -> load async stack
-       clc     __LC_MCK_OLD_PSW+4(4),BASED(.Lcritical_end)
-       bhe     BASED(mcck_int_main)
-       clc     __LC_MCK_OLD_PSW+4(4),BASED(.Lcritical_start)
-       bl      BASED(mcck_int_main)
-       l       %r14,BASED(.Lcleanup_critical)
-       basr    %r14,%r14
-mcck_int_main:
-       l       %r14,__LC_PANIC_STACK   # are we already on the panic stack?
-       slr     %r14,%r15
-       sra     %r14,PAGE_SHIFT
-       be      BASED(0f)
-       l       %r15,__LC_PANIC_STACK   # load panic stack
-0:     s       %r15,BASED(.Lc_spsize)  # make room for registers & psw
-       CREATE_STACK_FRAME __LC_SAVE_AREA+32
-       mvc     SP_PSW(8,%r15),0(%r12)
-       l       %r12,__LC_THREAD_INFO   # load pointer to thread_info struct
-       tm      __LC_MCCK_CODE+2,0x08   # mwp of old psw valid?
-       bno     BASED(mcck_no_vtime)    # no -> skip cleanup critical
-       tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
-       bz      BASED(mcck_no_vtime)
-       UPDATE_VTIME __LC_EXIT_TIMER,__LC_MCCK_ENTER_TIMER,__LC_USER_TIMER
-       UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-       mvc     __LC_LAST_UPDATE_TIMER(8),__LC_MCCK_ENTER_TIMER
-mcck_no_vtime:
-       la      %r2,SP_PTREGS(%r15)     # load pt_regs
-       l       %r1,BASED(.Ls390_mcck)
-       basr    %r14,%r1                # call machine check handler
-       tm      SP_PSW+1(%r15),0x01     # returning to user ?
-       bno     BASED(mcck_return)
+3:     tm      __LC_MCCK_CODE+2,0x09   # mwp + ia of old psw valid?
+       jno     mcck_panic              # no -> skip cleanup critical
+       tmh     %r8,0x0001              # interrupting from user ?
+       jz      mcck_skip
+       UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER
+mcck_skip:
+       SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT
+       mvc     __PT_R0(64,%r11),__LC_GPREGS_SAVE_AREA
+       stm     %r8,%r9,__PT_PSW(%r11)
+       xc      __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
+       l       %r1,BASED(.Ldo_machine_check)
+       lr      %r2,%r11                # pass pointer to pt_regs
+       basr    %r14,%r1                # call s390_do_machine_check
+       tm      __PT_PSW+1(%r11),0x01   # returning to user ?
+       jno     mcck_return
        l       %r1,__LC_KERNEL_STACK   # switch to kernel stack
-       s       %r1,BASED(.Lc_spsize)
-       mvc     SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
-       xc      __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
+       ahi     %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+       mvc     STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
+       xc      __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
+       la      %r11,STACK_FRAME_OVERHEAD(%r1)
        lr      %r15,%r1
-       stosm   __SF_EMPTY(%r15),0x04   # turn dat on
+       ssm     __LC_PGM_NEW_PSW        # turn dat on, keep irqs off
        tm      __TI_flags+3(%r12),_TIF_MCCK_PENDING
-       bno     BASED(mcck_return)
+       jno     mcck_return
        TRACE_IRQS_OFF
-       l       %r1,BASED(.Ls390_handle_mcck)
-       basr    %r14,%r1                # call machine check handler
+       l       %r1,BASED(.Lhandle_mcck)
+       basr    %r14,%r1                # call s390_handle_mcck
        TRACE_IRQS_ON
 mcck_return:
-       mvc     __LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW
+       mvc     __LC_RETURN_MCCK_PSW(8),__PT_PSW(%r11) # move return PSW
        ni      __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
        tm      __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
-       bno     BASED(0f)
-       lm      %r0,%r15,SP_R0(%r15)    # load gprs 0-15
+       jno     0f
+       lm      %r0,%r15,__PT_R0(%r11)
        stpt    __LC_EXIT_TIMER
-       lpsw    __LC_RETURN_MCCK_PSW    # back to caller
-0:     lm      %r0,%r15,SP_R0(%r15)    # load gprs 0-15
-       lpsw    __LC_RETURN_MCCK_PSW    # back to caller
+       lpsw    __LC_RETURN_MCCK_PSW
+0:     lm      %r0,%r15,__PT_R0(%r11)
+       lpsw    __LC_RETURN_MCCK_PSW
 
-       RESTORE_ALL __LC_RETURN_MCCK_PSW,0
+mcck_panic:
+       l       %r14,__LC_PANIC_STACK
+       slr     %r14,%r15
+       sra     %r14,PAGE_SHIFT
+       jz      0f
+       l       %r15,__LC_PANIC_STACK
+0:     ahi     %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+       j       mcck_skip
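
The mcck_panic path above only switches stacks when %r15 does not already point into the page below the lowcore panic stack pointer; otherwise the handler would clobber the frame it is still using. A minimal C sketch of that decision, with frame_size standing in for STACK_FRAME_OVERHEAD + __PT_SIZE (the real values come from asm-offsets, not from this example):

#include <stdint.h>

/* Sketch only: mirrors the stack choice made at mcck_panic, not kernel code. */
static uint32_t mcck_panic_stack(uint32_t sp, uint32_t panic_stack,
                                 unsigned int page_shift, uint32_t frame_size)
{
        if (((panic_stack - sp) >> page_shift) != 0)
                sp = panic_stack;       /* not yet on the panic stack */
        return sp - frame_size;         /* room for stack frame + pt_regs */
}
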
 
 /*
  * Restart interruption handler, kick starter for additional CPUs
@@ -799,18 +703,18 @@ restart_base:
        stck    __LC_LAST_UPDATE_CLOCK
        mvc     __LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1)
        mvc     __LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1)
-       l       %r15,__LC_SAVE_AREA+60  # load ksp
+       l       %r15,__LC_GPREGS_SAVE_AREA+60 # load ksp
        lctl    %c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs
        lam     %a0,%a15,__LC_AREGS_SAVE_AREA
-       lm      %r6,%r15,__SF_GPRS(%r15) # load registers from clone
+       lm      %r6,%r15,__SF_GPRS(%r15)# load registers from clone
        l       %r1,__LC_THREAD_INFO
        mvc     __LC_USER_TIMER(8),__TI_user_timer(%r1)
        mvc     __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1)
        xc      __LC_STEAL_TIMER(8),__LC_STEAL_TIMER
-       stosm   __SF_EMPTY(%r15),0x04   # now we can turn dat on
+       ssm     __LC_PGM_NEW_PSW        # turn dat on, keep irqs off
        basr    %r14,0
        l       %r14,restart_addr-.(%r14)
-       basr    %r14,%r14               # branch to start_secondary
+       basr    %r14,%r14               # call start_secondary
 restart_addr:
        .long   start_secondary
        .align  8
@@ -835,19 +739,19 @@ restart_go:
 # PSW restart interrupt handler
 #
 ENTRY(psw_restart_int_handler)
-       st      %r15,__LC_SAVE_AREA+48(%r0)     # save r15
+       st      %r15,__LC_SAVE_AREA_RESTART
        basr    %r15,0
 0:     l       %r15,.Lrestart_stack-0b(%r15)   # load restart stack
        l       %r15,0(%r15)
-       ahi     %r15,-SP_SIZE                   # make room for pt_regs
-       stm     %r0,%r14,SP_R0(%r15)            # store gprs %r0-%r14 to stack
-       mvc     SP_R15(4,%r15),__LC_SAVE_AREA+48(%r0)# store saved %r15 to stack
-       mvc     SP_PSW(8,%r15),__LC_RST_OLD_PSW(%r0) # store restart old psw
-       xc      __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0
+       ahi     %r15,-__PT_SIZE                 # create pt_regs on stack
+       stm     %r0,%r14,__PT_R0(%r15)
+       mvc     __PT_R15(4,%r15),__LC_SAVE_AREA_RESTART
+       mvc     __PT_PSW(8,%r15),__LC_RST_OLD_PSW # store restart old psw
+       ahi     %r15,-STACK_FRAME_OVERHEAD
+       xc      __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
        basr    %r14,0
 1:     l       %r14,.Ldo_restart-1b(%r14)
        basr    %r14,%r14
-
        basr    %r14,0                          # load disabled wait PSW if
 2:     lpsw    restart_psw_crash-2b(%r14)      # do_restart returns
        .align 4
@@ -869,215 +773,174 @@ restart_psw_crash:
  */
 stack_overflow:
        l       %r15,__LC_PANIC_STACK   # change to panic stack
-       sl      %r15,BASED(.Lc_spsize)
-       mvc     SP_PSW(8,%r15),0(%r12)  # move user PSW to stack
-       stm     %r0,%r11,SP_R0(%r15)    # store gprs %r0-%r11 to kernel stack
-       la      %r1,__LC_SAVE_AREA
-       ch      %r12,BASED(.L0x020)     # old psw addr == __LC_SVC_OLD_PSW ?
-       be      BASED(0f)
-       ch      %r12,BASED(.L0x028)     # old psw addr == __LC_PGM_OLD_PSW ?
-       be      BASED(0f)
-       la      %r1,__LC_SAVE_AREA+16
-0:     mvc     SP_R12(16,%r15),0(%r1)  # move %r12-%r15 to stack
-       xc      __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear back chain
-       l       %r1,BASED(1f)           # branch to kernel_stack_overflow
-       la      %r2,SP_PTREGS(%r15)     # load pt_regs
-       br      %r1
+       ahi     %r15,-__PT_SIZE         # create pt_regs
+       stm     %r0,%r7,__PT_R0(%r15)
+       stm     %r8,%r9,__PT_PSW(%r15)
+       mvc     __PT_R8(32,%r11),0(%r14)
+       lr      %r15,%r11
+       ahi     %r15,-STACK_FRAME_OVERHEAD
+       l       %r1,BASED(1f)
+       xc      __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
+       lr      %r2,%r11                # pass pointer to pt_regs
+       br      %r1                     # branch to kernel_stack_overflow
 1:     .long   kernel_stack_overflow
 #endif
 
-cleanup_table_system_call:
-       .long   system_call + 0x80000000, sysc_do_svc + 0x80000000
-cleanup_table_sysc_tif:
-       .long   sysc_tif + 0x80000000, sysc_restore + 0x80000000
-cleanup_table_sysc_restore:
-       .long   sysc_restore + 0x80000000, sysc_done + 0x80000000
-cleanup_table_io_tif:
-       .long   io_tif + 0x80000000, io_restore + 0x80000000
-cleanup_table_io_restore:
-       .long   io_restore + 0x80000000, io_done + 0x80000000
+cleanup_table:
+       .long   system_call + 0x80000000
+       .long   sysc_do_svc + 0x80000000
+       .long   sysc_tif + 0x80000000
+       .long   sysc_restore + 0x80000000
+       .long   sysc_done + 0x80000000
+       .long   io_tif + 0x80000000
+       .long   io_restore + 0x80000000
+       .long   io_done + 0x80000000
 
 cleanup_critical:
-       clc     4(4,%r12),BASED(cleanup_table_system_call)
-       bl      BASED(0f)
-       clc     4(4,%r12),BASED(cleanup_table_system_call+4)
-       bl      BASED(cleanup_system_call)
-0:
-       clc     4(4,%r12),BASED(cleanup_table_sysc_tif)
-       bl      BASED(0f)
-       clc     4(4,%r12),BASED(cleanup_table_sysc_tif+4)
-       bl      BASED(cleanup_sysc_tif)
-0:
-       clc     4(4,%r12),BASED(cleanup_table_sysc_restore)
-       bl      BASED(0f)
-       clc     4(4,%r12),BASED(cleanup_table_sysc_restore+4)
-       bl      BASED(cleanup_sysc_restore)
-0:
-       clc     4(4,%r12),BASED(cleanup_table_io_tif)
-       bl      BASED(0f)
-       clc     4(4,%r12),BASED(cleanup_table_io_tif+4)
-       bl      BASED(cleanup_io_tif)
-0:
-       clc     4(4,%r12),BASED(cleanup_table_io_restore)
-       bl      BASED(0f)
-       clc     4(4,%r12),BASED(cleanup_table_io_restore+4)
-       bl      BASED(cleanup_io_restore)
-0:
-       br      %r14
+       cl      %r9,BASED(cleanup_table)        # system_call
+       jl      0f
+       cl      %r9,BASED(cleanup_table+4)      # sysc_do_svc
+       jl      cleanup_system_call
+       cl      %r9,BASED(cleanup_table+8)      # sysc_tif
+       jl      0f
+       cl      %r9,BASED(cleanup_table+12)     # sysc_restore
+       jl      cleanup_sysc_tif
+       cl      %r9,BASED(cleanup_table+16)     # sysc_done
+       jl      cleanup_sysc_restore
+       cl      %r9,BASED(cleanup_table+20)     # io_tif
+       jl      0f
+       cl      %r9,BASED(cleanup_table+24)     # io_restore
+       jl      cleanup_io_tif
+       cl      %r9,BASED(cleanup_table+28)     # io_done
+       jl      cleanup_io_restore
+0:     br      %r14
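
The new cleanup_table is just eight consecutive code addresses (each with the 31-bit mode bit 0x80000000 set) bracketing the critical regions; cleanup_critical compares the interrupted address in %r9 against them in order, the first matching range selects a fix-up routine, and everything else falls through to the plain br %r14. The entry64.S variant further below works the same way with clg. A hedged sketch of the lookup in C, with made-up names:

#include <stdint.h>

/* Illustration of the range checks in cleanup_critical; not kernel code. */
enum cleanup {
        CLEANUP_NONE, CLEANUP_SYSTEM_CALL, CLEANUP_SYSC_TIF,
        CLEANUP_SYSC_RESTORE, CLEANUP_IO_TIF, CLEANUP_IO_RESTORE
};

static enum cleanup pick_cleanup(uint32_t addr, const uint32_t t[8])
{
        if (addr < t[0]) return CLEANUP_NONE;           /* before system_call */
        if (addr < t[1]) return CLEANUP_SYSTEM_CALL;    /* ..sysc_do_svc */
        if (addr < t[2]) return CLEANUP_NONE;
        if (addr < t[3]) return CLEANUP_SYSC_TIF;       /* sysc_tif..sysc_restore */
        if (addr < t[4]) return CLEANUP_SYSC_RESTORE;   /* ..sysc_done */
        if (addr < t[5]) return CLEANUP_NONE;
        if (addr < t[6]) return CLEANUP_IO_TIF;         /* io_tif..io_restore */
        if (addr < t[7]) return CLEANUP_IO_RESTORE;     /* ..io_done */
        return CLEANUP_NONE;
}
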
 
 cleanup_system_call:
-       mvc     __LC_RETURN_PSW(8),0(%r12)
-       clc     __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4)
-       bh      BASED(0f)
-       mvc     __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
-       c       %r12,BASED(.Lmck_old_psw)
-       be      BASED(0f)
+       # check if stpt has been executed
+       cl      %r9,BASED(cleanup_system_call_insn)
+       jh      0f
        mvc     __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
-0:     c       %r12,BASED(.Lmck_old_psw)
-       la      %r12,__LC_SAVE_AREA+32
-       be      BASED(0f)
-       la      %r12,__LC_SAVE_AREA+16
-0:     clc     __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8)
-       bhe     BASED(cleanup_vtime)
-       clc     __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn)
-       bh      BASED(0f)
-       mvc     __LC_SAVE_AREA(16),0(%r12)
-0:     st      %r13,4(%r12)
-       l       %r15,__LC_KERNEL_STACK  # problem state -> load ksp
-       s       %r15,BASED(.Lc_spsize)  # make room for registers & psw
-       st      %r15,12(%r12)
-       CREATE_STACK_FRAME __LC_SAVE_AREA
-       mvc     0(4,%r12),__LC_THREAD_INFO
-       l       %r12,__LC_THREAD_INFO
-       mvc     SP_PSW(8,%r15),__LC_SVC_OLD_PSW
-       mvc     SP_SVC_CODE(4,%r15),__LC_SVC_ILC
-       oi      __TI_flags+3(%r12),_TIF_SYSCALL
-cleanup_vtime:
-       clc     __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12)
-       bhe     BASED(cleanup_stime)
-       UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
-cleanup_stime:
-       clc     __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+16)
-       bh      BASED(cleanup_update)
-       UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-cleanup_update:
+       chi     %r11,__LC_SAVE_AREA_ASYNC
+       je      0f
+       mvc     __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
+0:     # check if stm has been executed
+       cl      %r9,BASED(cleanup_system_call_insn+4)
+       jh      0f
+       mvc     __LC_SAVE_AREA_SYNC(32),0(%r11)
+0:     # set up saved registers r12, and r13
+       st      %r12,16(%r11)           # r12 thread-info pointer
+       st      %r13,20(%r11)           # r13 literal-pool pointer
+       # check if the user time calculation has been done
+       cl      %r9,BASED(cleanup_system_call_insn+8)
+       jh      0f
+       l       %r10,__LC_EXIT_TIMER
+       l       %r15,__LC_EXIT_TIMER+4
+       SUB64   %r10,%r15,__LC_SYNC_ENTER_TIMER
+       ADD64   %r10,%r15,__LC_USER_TIMER
+       st      %r10,__LC_USER_TIMER
+       st      %r15,__LC_USER_TIMER+4
+0:     # check if the system time calculation has been done
+       cl      %r9,BASED(cleanup_system_call_insn+12)
+       jh      0f
+       l       %r10,__LC_LAST_UPDATE_TIMER
+       l       %r15,__LC_LAST_UPDATE_TIMER+4
+       SUB64   %r10,%r15,__LC_EXIT_TIMER
+       ADD64   %r10,%r15,__LC_SYSTEM_TIMER
+       st      %r10,__LC_SYSTEM_TIMER
+       st      %r15,__LC_SYSTEM_TIMER+4
+0:     # update accounting time stamp
        mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
-       mvc     __LC_RETURN_PSW+4(4),BASED(cleanup_table_system_call+4)
-       la      %r12,__LC_RETURN_PSW
+       # set up saved register 11
+       l       %r15,__LC_KERNEL_STACK
+       ahi     %r15,-__PT_SIZE
+       st      %r15,12(%r11)           # r11 pt_regs pointer
+       # fill pt_regs
+       mvc     __PT_R8(32,%r15),__LC_SAVE_AREA_SYNC
+       stm     %r0,%r7,__PT_R0(%r15)
+       mvc     __PT_PSW(8,%r15),__LC_SVC_OLD_PSW
+       mvc     __PT_SVC_CODE(4,%r15),__LC_SVC_ILC
+       # setup saved register 15
+       ahi     %r15,-STACK_FRAME_OVERHEAD
+       st      %r15,28(%r11)           # r15 stack pointer
+       # set new psw address and exit
+       l       %r9,BASED(cleanup_table+4)      # sysc_do_svc + 0x80000000
        br      %r14
 cleanup_system_call_insn:
-       .long   sysc_saveall + 0x80000000
        .long   system_call + 0x80000000
-       .long   sysc_vtime + 0x80000000
-       .long   sysc_stime + 0x80000000
-       .long   sysc_update + 0x80000000
+       .long   sysc_stm + 0x80000000
+       .long   sysc_vtime + 0x80000000 + 36
+       .long   sysc_vtime + 0x80000000 + 76
 
 cleanup_sysc_tif:
-       mvc     __LC_RETURN_PSW(4),0(%r12)
-       mvc     __LC_RETURN_PSW+4(4),BASED(cleanup_table_sysc_tif)
-       la      %r12,__LC_RETURN_PSW
+       l       %r9,BASED(cleanup_table+8)      # sysc_tif + 0x80000000
        br      %r14
 
 cleanup_sysc_restore:
-       clc     4(4,%r12),BASED(cleanup_sysc_restore_insn)
-       be      BASED(2f)
-       mvc     __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
-       c       %r12,BASED(.Lmck_old_psw)
-       be      BASED(0f)
-       mvc     __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
-0:     clc     4(4,%r12),BASED(cleanup_sysc_restore_insn+4)
-       be      BASED(2f)
-       mvc     __LC_RETURN_PSW(8),SP_PSW(%r15)
-       c       %r12,BASED(.Lmck_old_psw)
-       la      %r12,__LC_SAVE_AREA+32
-       be      BASED(1f)
-       la      %r12,__LC_SAVE_AREA+16
-1:     mvc     0(16,%r12),SP_R12(%r15)
-       lm      %r0,%r11,SP_R0(%r15)
-       l       %r15,SP_R15(%r15)
-2:     la      %r12,__LC_RETURN_PSW
+       cl      %r9,BASED(cleanup_sysc_restore_insn)
+       jhe     0f
+       l       %r9,12(%r11)            # get saved pointer to pt_regs
+       mvc     __LC_RETURN_PSW(8),__PT_PSW(%r9)
+       mvc     0(32,%r11),__PT_R8(%r9)
+       lm      %r0,%r7,__PT_R0(%r9)
+0:     lm      %r8,%r9,__LC_RETURN_PSW
        br      %r14
 cleanup_sysc_restore_insn:
        .long   sysc_done - 4 + 0x80000000
-       .long   sysc_done - 8 + 0x80000000
 
 cleanup_io_tif:
-       mvc     __LC_RETURN_PSW(4),0(%r12)
-       mvc     __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_tif)
-       la      %r12,__LC_RETURN_PSW
+       l       %r9,BASED(cleanup_table+20)     # io_tif + 0x80000000
        br      %r14
 
 cleanup_io_restore:
-       clc     4(4,%r12),BASED(cleanup_io_restore_insn)
-       be      BASED(1f)
-       mvc     __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
-       clc     4(4,%r12),BASED(cleanup_io_restore_insn+4)
-       be      BASED(1f)
-       mvc     __LC_RETURN_PSW(8),SP_PSW(%r15)
-       mvc     __LC_SAVE_AREA+32(16),SP_R12(%r15)
-       lm      %r0,%r11,SP_R0(%r15)
-       l       %r15,SP_R15(%r15)
-1:     la      %r12,__LC_RETURN_PSW
+       cl      %r9,BASED(cleanup_io_restore_insn)
+       jhe     0f
+       l       %r9,12(%r11)            # get saved r11 pointer to pt_regs
+       mvc     __LC_RETURN_PSW(8),__PT_PSW(%r9)
+       ni      __LC_RETURN_PSW+1,0xfd  # clear wait state bit
+       mvc     0(32,%r11),__PT_R8(%r9)
+       lm      %r0,%r7,__PT_R0(%r9)
+0:     lm      %r8,%r9,__LC_RETURN_PSW
        br      %r14
 cleanup_io_restore_insn:
        .long   io_done - 4 + 0x80000000
-       .long   io_done - 8 + 0x80000000
 
 /*
  * Integer constants
  */
-               .align  4
-.Lc_spsize:    .long   SP_SIZE
-.Lc_overhead:  .long   STACK_FRAME_OVERHEAD
-.Lnr_syscalls: .long   NR_syscalls
-.L0x018:       .short  0x018
-.L0x020:       .short  0x020
-.L0x028:       .short  0x028
-.L0x030:       .short  0x030
-.L0x038:       .short  0x038
-.Lc_1:         .long   1
+       .align  4
+.Lnr_syscalls:         .long   NR_syscalls
 
 /*
  * Symbol constants
  */
-.Ls390_mcck:   .long   s390_do_machine_check
-.Ls390_handle_mcck:
-               .long   s390_handle_mcck
-.Lmck_old_psw: .long   __LC_MCK_OLD_PSW
-.Ldo_IRQ:      .long   do_IRQ
-.Ldo_extint:   .long   do_extint
-.Ldo_signal:   .long   do_signal
-.Ldo_notify_resume:
-               .long   do_notify_resume
-.Lhandle_per:  .long   do_per_trap
-.Ldo_execve:   .long   do_execve
-.Lexecve_tail: .long   execve_tail
-.Ljump_table:  .long   pgm_check_table
-.Lschedule:    .long   schedule
+.Ldo_machine_check:    .long   s390_do_machine_check
+.Lhandle_mcck:         .long   s390_handle_mcck
+.Ldo_IRQ:              .long   do_IRQ
+.Ldo_extint:           .long   do_extint
+.Ldo_signal:           .long   do_signal
+.Ldo_notify_resume:    .long   do_notify_resume
+.Ldo_per_trap:         .long   do_per_trap
+.Ldo_execve:           .long   do_execve
+.Lexecve_tail:         .long   execve_tail
+.Ljump_table:          .long   pgm_check_table
+.Lschedule:            .long   schedule
 #ifdef CONFIG_PREEMPT
-.Lpreempt_schedule_irq:
-               .long   preempt_schedule_irq
+.Lpreempt_irq:         .long   preempt_schedule_irq
 #endif
-.Ltrace_entry: .long   do_syscall_trace_enter
-.Ltrace_exit:  .long   do_syscall_trace_exit
-.Lschedtail:   .long   schedule_tail
-.Lsysc_table:  .long   sys_call_table
+.Ltrace_enter:         .long   do_syscall_trace_enter
+.Ltrace_exit:          .long   do_syscall_trace_exit
+.Lschedule_tail:       .long   schedule_tail
+.Lsys_call_table:      .long   sys_call_table
+.Lsysc_per:            .long   sysc_per + 0x80000000
 #ifdef CONFIG_TRACE_IRQFLAGS
-.Ltrace_irq_on_caller:
-               .long   trace_hardirqs_on_caller
-.Ltrace_irq_off_caller:
-               .long   trace_hardirqs_off_caller
+.Lhardirqs_on:         .long   trace_hardirqs_on_caller
+.Lhardirqs_off:                .long   trace_hardirqs_off_caller
 #endif
 #ifdef CONFIG_LOCKDEP
-.Llockdep_sys_exit:
-               .long   lockdep_sys_exit
+.Llockdep_sys_exit:    .long   lockdep_sys_exit
 #endif
-.Lcritical_start:
-               .long   __critical_start + 0x80000000
-.Lcritical_end:
-               .long   __critical_end + 0x80000000
-.Lcleanup_critical:
-               .long   cleanup_critical
+.Lcritical_start:      .long   __critical_start + 0x80000000
+.Lcritical_length:     .long   __critical_end - __critical_start
 
                .section .rodata, "a"
 #define SYSCALL(esa,esame,emu) .long esa
index 130fb02305c179ac14b6937c23b19c7019df3ac2..73845a9e587cba618750164d03333afc892c49d9 100644 (file)
@@ -1,3 +1,4 @@
+
 /*
  *  arch/s390/kernel/entry64.S
  *    S390 low-level entry points.
 #include <asm/unistd.h>
 #include <asm/page.h>
 
-/*
- * Stack layout for the system_call stack entry.
- * The first few entries are identical to the user_regs_struct.
- */
-SP_PTREGS    = STACK_FRAME_OVERHEAD
-SP_ARGS      = STACK_FRAME_OVERHEAD + __PT_ARGS
-SP_PSW      =  STACK_FRAME_OVERHEAD + __PT_PSW
-SP_R0       =  STACK_FRAME_OVERHEAD + __PT_GPRS
-SP_R1       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 8
-SP_R2       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 16
-SP_R3       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 24
-SP_R4       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 32
-SP_R5       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 40
-SP_R6       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 48
-SP_R7       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 56
-SP_R8       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 64
-SP_R9       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 72
-SP_R10      =  STACK_FRAME_OVERHEAD + __PT_GPRS + 80
-SP_R11      =  STACK_FRAME_OVERHEAD + __PT_GPRS + 88
-SP_R12      =  STACK_FRAME_OVERHEAD + __PT_GPRS + 96
-SP_R13      =  STACK_FRAME_OVERHEAD + __PT_GPRS + 104
-SP_R14      =  STACK_FRAME_OVERHEAD + __PT_GPRS + 112
-SP_R15      =  STACK_FRAME_OVERHEAD + __PT_GPRS + 120
-SP_ORIG_R2   = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
-SP_SVC_CODE  = STACK_FRAME_OVERHEAD + __PT_SVC_CODE
-SP_SIZE      = STACK_FRAME_OVERHEAD + __PT_SIZE
+__PT_R0      = __PT_GPRS
+__PT_R1      = __PT_GPRS + 8
+__PT_R2      = __PT_GPRS + 16
+__PT_R3      = __PT_GPRS + 24
+__PT_R4      = __PT_GPRS + 32
+__PT_R5      = __PT_GPRS + 40
+__PT_R6      = __PT_GPRS + 48
+__PT_R7      = __PT_GPRS + 56
+__PT_R8      = __PT_GPRS + 64
+__PT_R9      = __PT_GPRS + 72
+__PT_R10     = __PT_GPRS + 80
+__PT_R11     = __PT_GPRS + 88
+__PT_R12     = __PT_GPRS + 96
+__PT_R13     = __PT_GPRS + 104
+__PT_R14     = __PT_GPRS + 112
+__PT_R15     = __PT_GPRS + 120
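
The __PT_Rn symbols defined above are simply 8-byte strides into the gprs array of struct pt_regs; only the base offset __PT_GPRS is generated via asm-offsets.c, the per-register names are spelled out so the assembler code stays readable. Purely as an illustration, with a trimmed stand-in struct rather than the kernel's real pt_regs:

#include <stddef.h>

/* Stand-in only: field names and order are assumptions for the example. */
struct pt_regs_sketch {
        unsigned long args[1];
        unsigned long psw_mask;
        unsigned long psw_addr;
        unsigned long gprs[16];
};

#define PT_GPRS_OFF     offsetof(struct pt_regs_sketch, gprs)
#define PT_R(n)         (PT_GPRS_OFF + 8 * (n))   /* analogue of __PT_R0..__PT_R15 */
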
 
 STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
 STACK_SIZE  = 1 << STACK_SHIFT
@@ -59,154 +50,103 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
 
 #define BASED(name) name-system_call(%r13)
 
-       .macro SPP newpp
-#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
-       tm      __LC_MACHINE_FLAGS+6,0x20       # MACHINE_FLAG_SPP
-       jz      .+8
-       .insn   s,0xb2800000,\newpp
-#endif
-       .endm
-
-       .macro  HANDLE_SIE_INTERCEPT
-#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
-       tm      __TI_flags+6(%r12),_TIF_SIE>>8
-       jz      0f
-       SPP     BASED(.Lhost_id)                # set host id
-       clc     SP_PSW+8(8,%r15),BASED(.Lsie_loop)
-       jl      0f
-       clc     SP_PSW+8(8,%r15),BASED(.Lsie_done)
-       jhe     0f
-       mvc     SP_PSW+8(8,%r15),BASED(.Lsie_loop)
-0:
-#endif
-       .endm
-
-#ifdef CONFIG_TRACE_IRQFLAGS
        .macro  TRACE_IRQS_ON
+#ifdef CONFIG_TRACE_IRQFLAGS
        basr    %r2,%r0
        brasl   %r14,trace_hardirqs_on_caller
+#endif
        .endm
 
        .macro  TRACE_IRQS_OFF
+#ifdef CONFIG_TRACE_IRQFLAGS
        basr    %r2,%r0
        brasl   %r14,trace_hardirqs_off_caller
-       .endm
-#else
-#define TRACE_IRQS_ON
-#define TRACE_IRQS_OFF
 #endif
+       .endm
 
-#ifdef CONFIG_LOCKDEP
        .macro  LOCKDEP_SYS_EXIT
-       tm      SP_PSW+1(%r15),0x01     # returning to user ?
-       jz      0f
+#ifdef CONFIG_LOCKDEP
+       tm      __PT_PSW+1(%r11),0x01   # returning to user ?
+       jz      .+10
        brasl   %r14,lockdep_sys_exit
-0:
-       .endm
-#else
-#define LOCKDEP_SYS_EXIT
 #endif
-
-       .macro  UPDATE_VTIME lc_from,lc_to,lc_sum
-       lg      %r10,\lc_from
-       slg     %r10,\lc_to
-       alg     %r10,\lc_sum
-       stg     %r10,\lc_sum
        .endm
 
-/*
- * Register usage in interrupt handlers:
- *    R9  - pointer to current task structure
- *    R13 - pointer to literal pool
- *    R14 - return register for function calls
- *    R15 - kernel stack pointer
- */
+       .macro SPP newpp
+#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
+       tm      __LC_MACHINE_FLAGS+6,0x20       # MACHINE_FLAG_SPP
+       jz      .+8
+       .insn   s,0xb2800000,\newpp
+#endif
+       .endm
 
-       .macro  SAVE_ALL_SVC psworg,savearea
-       stmg    %r11,%r15,\savearea
-       lg      %r15,__LC_KERNEL_STACK  # problem state -> load ksp
-       aghi    %r15,-SP_SIZE           # make room for registers & psw
-       lg      %r11,__LC_LAST_BREAK
+       .macro  HANDLE_SIE_INTERCEPT scratch
+#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
+       tm      __TI_flags+6(%r12),_TIF_SIE>>8
+       jz      .+42
+       tm      __LC_MACHINE_FLAGS+6,0x20       # MACHINE_FLAG_SPP
+       jz      .+8
+       .insn   s,0xb2800000,BASED(.Lhost_id)   # set host id
+       lgr     \scratch,%r9
+       slg     \scratch,BASED(.Lsie_loop)
+       clg     \scratch,BASED(.Lsie_length)
+       jhe     .+10
+       lg      %r9,BASED(.Lsie_loop)
+#endif
        .endm
 
-       .macro  SAVE_ALL_PGM psworg,savearea
-       stmg    %r11,%r15,\savearea
-       tm      \psworg+1,0x01          # test problem state bit
+       .macro  CHECK_STACK stacksize,savearea
 #ifdef CONFIG_CHECK_STACK
-       jnz     1f
-       tml     %r15,STACK_SIZE - CONFIG_STACK_GUARD
-       jnz     2f
-       la      %r12,\psworg
-       j       stack_overflow
-#else
-       jz      2f
+       tml     %r15,\stacksize - CONFIG_STACK_GUARD
+       lghi    %r14,\savearea
+       jz      stack_overflow
 #endif
-1:     lg      %r15,__LC_KERNEL_STACK  # problem state -> load ksp
-2:     aghi    %r15,-SP_SIZE           # make room for registers & psw
-       larl    %r13,system_call
-       lg      %r11,__LC_LAST_BREAK
        .endm
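
CHECK_STACK relies on kernel stacks being stacksize-aligned with a guard area of CONFIG_STACK_GUARD bytes at the low end: if none of the stack-offset bits above the guard are set, %r15 has already run into the guard and the code branches to stack_overflow (with the save-area offset preloaded into %r14). The same test as a minimal C sketch, under those alignment assumptions:

#include <stdint.h>

/* Sketch of the tml test in CHECK_STACK; returns non-zero on overflow. */
static int stack_in_guard(uint64_t sp, uint64_t stacksize, uint64_t guard)
{
        return (sp & (stacksize - guard)) == 0;
}
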
 
-       .macro  SAVE_ALL_ASYNC psworg,savearea
-       stmg    %r11,%r15,\savearea
-       larl    %r13,system_call
-       lg      %r11,__LC_LAST_BREAK
-       la      %r12,\psworg
-       tm      \psworg+1,0x01          # test problem state bit
-       jnz     1f                      # from user -> load kernel stack
-       clc     \psworg+8(8),BASED(.Lcritical_end)
+       .macro  SWITCH_ASYNC savearea,stack,shift
+       tmhh    %r8,0x0001              # interrupting from user ?
+       jnz     1f
+       lgr     %r14,%r9
+       slg     %r14,BASED(.Lcritical_start)
+       clg     %r14,BASED(.Lcritical_length)
        jhe     0f
-       clc     \psworg+8(8),BASED(.Lcritical_start)
-       jl      0f
+       lghi    %r11,\savearea          # inside critical section, do cleanup
        brasl   %r14,cleanup_critical
-       tm      1(%r12),0x01            # retest problem state after cleanup
+       tmhh    %r8,0x0001              # retest problem state after cleanup
        jnz     1f
-0:     lg      %r14,__LC_ASYNC_STACK   # are we already on the async. stack ?
+0:     lg      %r14,\stack             # are we already on the target stack?
        slgr    %r14,%r15
-       srag    %r14,%r14,STACK_SHIFT
-#ifdef CONFIG_CHECK_STACK
+       srag    %r14,%r14,\shift
        jnz     1f
-       tml     %r15,STACK_SIZE - CONFIG_STACK_GUARD
-       jnz     2f
-       j       stack_overflow
-#else
-       jz      2f
-#endif
-1:     lg      %r15,__LC_ASYNC_STACK   # load async stack
-2:     aghi    %r15,-SP_SIZE           # make room for registers & psw
-       .endm
-
-       .macro  CREATE_STACK_FRAME savearea
-       xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
-       stg     %r2,SP_ORIG_R2(%r15)    # store original content of gpr 2
-       mvc     SP_R11(40,%r15),\savearea # move %r11-%r15 to stack
-       stmg    %r0,%r10,SP_R0(%r15)    # store gprs %r0-%r10 to kernel stack
+       CHECK_STACK 1<<\shift,\savearea
+       j       2f
+1:     lg      %r15,\stack             # load target stack
+2:     aghi    %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+       la      %r11,STACK_FRAME_OVERHEAD(%r15)
        .endm
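
SWITCH_ASYNC folds the old SAVE_ALL_ASYNC/CREATE_STACK_FRAME pair into one macro: an interrupt from kernel mode inside the critical section first runs cleanup_critical, then the target stack is loaded unless %r15 already lies in the same 1<<shift block, and finally room for a stack frame plus pt_regs is carved out with %r11 left pointing at the pt_regs area. A rough C sketch of the stack decision, with the frame sizes passed in instead of taken from asm-offsets and the cleanup/CHECK_STACK steps elided:

#include <stdint.h>

/* Sketch only, not the kernel's implementation. */
static uint64_t switch_async(uint64_t sp, uint64_t target_stack,
                             unsigned int shift, int from_user,
                             uint64_t overhead, uint64_t pt_size,
                             uint64_t *pt_regs)
{
        if (from_user || ((target_stack - sp) >> shift) != 0)
                sp = target_stack;      /* load the async/panic stack */
        sp -= overhead + pt_size;       /* aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) */
        *pt_regs = sp + overhead;       /* la %r11,STACK_FRAME_OVERHEAD(%r15) */
        return sp;
}
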
 
-       .macro  RESTORE_ALL psworg,sync
-       mvc     \psworg(16),SP_PSW(%r15) # move user PSW to lowcore
-       .if !\sync
-       ni      \psworg+1,0xfd          # clear wait state bit
-       .endif
-       lg      %r14,__LC_VDSO_PER_CPU
-       lmg     %r0,%r13,SP_R0(%r15)    # load gprs 0-13 of user
-       stpt    __LC_EXIT_TIMER
-       mvc     __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
-       lmg     %r14,%r15,SP_R14(%r15)  # load grps 14-15 of user
-       lpswe   \psworg                 # back to caller
+       .macro UPDATE_VTIME scratch,enter_timer
+       lg      \scratch,__LC_EXIT_TIMER
+       slg     \scratch,\enter_timer
+       alg     \scratch,__LC_USER_TIMER
+       stg     \scratch,__LC_USER_TIMER
+       lg      \scratch,__LC_LAST_UPDATE_TIMER
+       slg     \scratch,__LC_EXIT_TIMER
+       alg     \scratch,__LC_SYSTEM_TIMER
+       stg     \scratch,__LC_SYSTEM_TIMER
+       mvc     __LC_LAST_UPDATE_TIMER(8),\enter_timer
        .endm
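
The rewritten UPDATE_VTIME does both halves of the old accounting in a single macro. Because the CPU timer counts down, "older value minus newer value" is elapsed time: user time is the span between the last exit to user space and this entry, system time the span between the last accounting update and that exit, and the enter timestamp becomes the new update stamp. The same arithmetic as a hedged C sketch, with the lowcore fields modelled as a struct:

#include <stdint.h>

/* Sketch of UPDATE_VTIME; not the kernel's vtime code. */
struct vtime_acct {
        uint64_t user_timer, system_timer, exit_timer, last_update_timer;
};

static void update_vtime(struct vtime_acct *v, uint64_t enter_timer)
{
        v->user_timer   += v->exit_timer - enter_timer;          /* time in user space */
        v->system_timer += v->last_update_timer - v->exit_timer; /* time in kernel */
        v->last_update_timer = enter_timer;                      /* new accounting stamp */
}
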
 
-       .macro  LAST_BREAK
-       srag    %r10,%r11,23
-       jz      0f
-       stg     %r11,__TI_last_break(%r12)
-0:
+       .macro  LAST_BREAK scratch
+       srag    \scratch,%r10,23
+       jz      .+10
+       stg     %r10,__TI_last_break(%r12)
        .endm
 
        .macro REENABLE_IRQS
-       mvc     __SF_EMPTY(1,%r15),SP_PSW(%r15)
-       ni      __SF_EMPTY(%r15),0xbf
-       ssm     __SF_EMPTY(%r15)
+       stg     %r8,__LC_RETURN_PSW
+       ni      __LC_RETURN_PSW,0xbf
+       ssm     __LC_RETURN_PSW
        .endm
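
REENABLE_IRQS now reuses %r8, which still holds the first half of the interrupted PSW: it is stored to __LC_RETURN_PSW, the PER bit (0x40) is cleared from the mask byte, and ssm loads that byte, so I/O and external interrupts come back on exactly when the interrupted context had them enabled. In C terms the mask handling is roughly:

#include <stdint.h>

/* Sketch of the ni __LC_RETURN_PSW,0xbf step; 0xbf clears the PER bit. */
static uint8_t reenable_irqs_mask(uint8_t old_psw_mask_byte)
{
        return old_psw_mask_byte & 0xbf;
}
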
 
        .section .kprobes.text, "ax"
@@ -245,55 +185,66 @@ __critical_start:
 
 ENTRY(system_call)
        stpt    __LC_SYNC_ENTER_TIMER
-sysc_saveall:
-       SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
-       CREATE_STACK_FRAME __LC_SAVE_AREA
-       lg      %r12,__LC_THREAD_INFO   # load pointer to thread_info struct
-       mvc     SP_PSW(16,%r15),__LC_SVC_OLD_PSW
-       mvc     SP_SVC_CODE(4,%r15),__LC_SVC_ILC
-       oi      __TI_flags+7(%r12),_TIF_SYSCALL
+sysc_stmg:
+       stmg    %r8,%r15,__LC_SAVE_AREA_SYNC
+       lg      %r10,__LC_LAST_BREAK
+       lg      %r12,__LC_THREAD_INFO
+       larl    %r13,system_call
+sysc_per:
+       lg      %r15,__LC_KERNEL_STACK
+       aghi    %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+       la      %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
 sysc_vtime:
-       UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
-sysc_stime:
-       UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-sysc_update:
-       mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
-       LAST_BREAK
+       UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER
+       LAST_BREAK %r13
+       stmg    %r0,%r7,__PT_R0(%r11)
+       mvc     __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
+       mvc     __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
+       mvc     __PT_SVC_CODE(4,%r11),__LC_SVC_ILC
 sysc_do_svc:
-       llgh    %r7,SP_SVC_CODE+2(%r15)
-       slag    %r7,%r7,2       # shift and test for svc 0
+       oi      __TI_flags+7(%r12),_TIF_SYSCALL
+       llgh    %r8,__PT_SVC_CODE+2(%r11)
+       slag    %r8,%r8,2                       # shift and test for svc 0
        jnz     sysc_nr_ok
        # svc 0: system call number in %r1
-       llgfr   %r1,%r1         # clear high word in r1
+       llgfr   %r1,%r1                         # clear high word in r1
        cghi    %r1,NR_syscalls
        jnl     sysc_nr_ok
-       sth     %r1,SP_SVC_CODE+2(%r15)
-       slag    %r7,%r1,2       # shift and test for svc 0
+       sth     %r1,__PT_SVC_CODE+2(%r11)
+       slag    %r8,%r1,2
 sysc_nr_ok:
-       larl    %r10,sys_call_table
+       larl    %r10,sys_call_table             # 64 bit system call table
 #ifdef CONFIG_COMPAT
-       tm      __TI_flags+5(%r12),(_TIF_31BIT>>16)  # running in 31 bit mode ?
+       tm      __TI_flags+5(%r12),(_TIF_31BIT>>16)
        jno     sysc_noemu
-       larl    %r10,sys_call_table_emu  # use 31 bit emulation system calls
+       larl    %r10,sys_call_table_emu         # 31 bit system call table
 sysc_noemu:
 #endif
+       xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+       stg     %r2,__PT_ORIG_GPR2(%r11)
+       stg     %r7,STACK_FRAME_OVERHEAD(%r15)
+       lgf     %r9,0(%r8,%r10)                 # get system call add.
        tm      __TI_flags+6(%r12),_TIF_TRACE >> 8
-       mvc     SP_ARGS(8,%r15),SP_R7(%r15)
-       lgf     %r8,0(%r7,%r10) # load address of system call routine
        jnz     sysc_tracesys
-       basr    %r14,%r8        # call sys_xxxx
-       stg     %r2,SP_R2(%r15) # store return value (change R2 on stack)
+       basr    %r14,%r9                        # call sys_xxxx
+       stg     %r2,__PT_R2(%r11)               # store return value
 
 sysc_return:
        LOCKDEP_SYS_EXIT
 sysc_tif:
-       tm      SP_PSW+1(%r15),0x01     # returning to user ?
+       tm      __PT_PSW+1(%r11),0x01           # returning to user ?
        jno     sysc_restore
        tm      __TI_flags+7(%r12),_TIF_WORK_SVC
-       jnz     sysc_work       # there is work to do (signals etc.)
+       jnz     sysc_work                       # check for work
        ni      __TI_flags+7(%r12),255-_TIF_SYSCALL
 sysc_restore:
-       RESTORE_ALL __LC_RETURN_PSW,1
+       lg      %r14,__LC_VDSO_PER_CPU
+       lmg     %r0,%r10,__PT_R0(%r11)
+       mvc     __LC_RETURN_PSW(16),__PT_PSW(%r11)
+       stpt    __LC_EXIT_TIMER
+       mvc     __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+       lmg     %r11,%r15,__PT_R11(%r11)
+       lpswe   __LC_RETURN_PSW
 sysc_done:
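
For orientation: the dispatch above scales the svc number by four because sys_call_table holds 32-bit entries that lgf sign-extends into %r9, and svc 0 passes the real number in %r1, falling back to entry 0 (which returns -ENOSYS) when that number is out of range. A hedged C sketch of the number handling, with nr_syscalls and the table as placeholders:

#include <stdint.h>

/* Sketch of the logic at sysc_do_svc/sysc_nr_ok; not kernel code. */
static uint64_t syscall_address(unsigned int svc_code, uint64_t r1,
                                const int32_t *table, unsigned int nr_syscalls)
{
        unsigned int nr = svc_code;     /* 1..255 come straight from the svc */

        if (nr == 0 && (uint32_t)r1 < nr_syscalls)
                nr = (uint32_t)r1;      /* svc 0: number passed in %r1 */
        return (uint64_t)(int64_t)table[nr];    /* lgf: sign-extend the 32-bit entry */
}
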
 
 #
@@ -317,7 +268,7 @@ sysc_work:
 #
 sysc_reschedule:
        larl    %r14,sysc_return
-       jg      schedule                # return point is sysc_return
+       jg      schedule
 
 #
 # _TIF_MCCK_PENDING is set, call handler
@@ -331,33 +282,33 @@ sysc_mcck_pending:
 #
 sysc_sigpending:
        ni      __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
-       la      %r2,SP_PTREGS(%r15)     # load pt_regs
-       brasl   %r14,do_signal          # call do_signal
+       lgr     %r2,%r11                # pass pointer to pt_regs
+       brasl   %r14,do_signal
        tm      __TI_flags+7(%r12),_TIF_SYSCALL
        jno     sysc_return
-       lmg     %r2,%r6,SP_R2(%r15)     # load svc arguments
-       lghi    %r7,0                   # svc 0 returns -ENOSYS
-       lh      %r1,SP_SVC_CODE+2(%r15) # load new svc number
+       lmg     %r2,%r7,__PT_R2(%r11)   # load svc arguments
+       lghi    %r8,0                   # svc 0 returns -ENOSYS
+       lh      %r1,__PT_SVC_CODE+2(%r11)       # load new svc number
        cghi    %r1,NR_syscalls
        jnl     sysc_nr_ok              # invalid svc number -> do svc 0
-       slag    %r7,%r1,2
+       slag    %r8,%r1,2
        j       sysc_nr_ok              # restart svc
 
 #
 # _TIF_NOTIFY_RESUME is set, call do_notify_resume
 #
 sysc_notify_resume:
-       la      %r2,SP_PTREGS(%r15)     # load pt_regs
+       lgr     %r2,%r11                # pass pointer to pt_regs
        larl    %r14,sysc_return
-       jg      do_notify_resume        # call do_notify_resume
+       jg      do_notify_resume
 
 #
 # _TIF_PER_TRAP is set, call do_per_trap
 #
 sysc_singlestep:
        ni      __TI_flags+7(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP)
-       la      %r2,SP_PTREGS(%r15)     # address of register-save area
-       larl    %r14,sysc_return        # load adr. of system return
+       lgr     %r2,%r11                # pass pointer to pt_regs
+       larl    %r14,sysc_return
        jg      do_per_trap
 
 #
@@ -365,41 +316,41 @@ sysc_singlestep:
 # and after the system call
 #
 sysc_tracesys:
-       la      %r2,SP_PTREGS(%r15)     # load pt_regs
+       lgr     %r2,%r11                # pass pointer to pt_regs
        la      %r3,0
-       llgh    %r0,SP_SVC_CODE+2(%r15)
-       stg     %r0,SP_R2(%r15)
+       llgh    %r0,__PT_SVC_CODE+2(%r11)
+       stg     %r0,__PT_R2(%r11)
        brasl   %r14,do_syscall_trace_enter
        lghi    %r0,NR_syscalls
        clgr    %r0,%r2
        jnh     sysc_tracenogo
-       sllg    %r7,%r2,2               # svc number *4
-       lgf     %r8,0(%r7,%r10)
+       sllg    %r8,%r2,2
+       lgf     %r9,0(%r8,%r10)
 sysc_tracego:
-       lmg     %r3,%r6,SP_R3(%r15)
-       mvc     SP_ARGS(8,%r15),SP_R7(%r15)
-       lg      %r2,SP_ORIG_R2(%r15)
-       basr    %r14,%r8                # call sys_xxx
-       stg     %r2,SP_R2(%r15)         # store return value
+       lmg     %r3,%r7,__PT_R3(%r11)
+       stg     %r7,STACK_FRAME_OVERHEAD(%r15)
+       lg      %r2,__PT_ORIG_GPR2(%r11)
+       basr    %r14,%r9                # call sys_xxx
+       stg     %r2,__PT_R2(%r11)       # store return value
 sysc_tracenogo:
        tm      __TI_flags+6(%r12),_TIF_TRACE >> 8
        jz      sysc_return
-       la      %r2,SP_PTREGS(%r15)     # load pt_regs
-       larl    %r14,sysc_return        # return point is sysc_return
+       lgr     %r2,%r11                # pass pointer to pt_regs
+       larl    %r14,sysc_return
        jg      do_syscall_trace_exit
 
 #
 # a new process exits the kernel with ret_from_fork
 #
 ENTRY(ret_from_fork)
-       lg      %r13,__LC_SVC_NEW_PSW+8
-       lg      %r12,__LC_THREAD_INFO   # load pointer to thread_info struct
-       tm      SP_PSW+1(%r15),0x01     # forking a kernel thread ?
+       la      %r11,STACK_FRAME_OVERHEAD(%r15)
+       lg      %r12,__LC_THREAD_INFO
+       tm      __PT_PSW+1(%r11),0x01   # forking a kernel thread ?
        jo      0f
-       stg     %r15,SP_R15(%r15)       # store stack pointer for new kthread
+       stg     %r15,__PT_R15(%r11)     # store stack pointer for new kthread
 0:     brasl   %r14,schedule_tail
        TRACE_IRQS_ON
-       stosm   24(%r15),0x03           # reenable interrupts
+       ssm     __LC_SVC_NEW_PSW        # reenable interrupts
        j       sysc_tracenogo
 
 #
@@ -409,26 +360,26 @@ ENTRY(ret_from_fork)
 ENTRY(kernel_execve)
        stmg    %r12,%r15,96(%r15)
        lgr     %r14,%r15
-       aghi    %r15,-SP_SIZE
+       aghi    %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
        stg     %r14,__SF_BACKCHAIN(%r15)
-       la      %r12,SP_PTREGS(%r15)
+       la      %r12,STACK_FRAME_OVERHEAD(%r15)
        xc      0(__PT_SIZE,%r12),0(%r12)
        lgr     %r5,%r12
        brasl   %r14,do_execve
        ltgfr   %r2,%r2
        je      0f
-       aghi    %r15,SP_SIZE
+       aghi    %r15,(STACK_FRAME_OVERHEAD + __PT_SIZE)
        lmg     %r12,%r15,96(%r15)
        br      %r14
        # execve succeeded.
-0:     stnsm   __SF_EMPTY(%r15),0xfc   # disable interrupts
+0:     ssm     __LC_PGM_NEW_PSW        # disable I/O and ext. interrupts
        lg      %r15,__LC_KERNEL_STACK  # load ksp
-       aghi    %r15,-SP_SIZE           # make room for registers & psw
-       lg      %r13,__LC_SVC_NEW_PSW+8
-       mvc     SP_PTREGS(__PT_SIZE,%r15),0(%r12)       # copy pt_regs
+       aghi    %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+       la      %r11,STACK_FRAME_OVERHEAD(%r15)
+       mvc     0(__PT_SIZE,%r11),0(%r12)       # copy pt_regs
        lg      %r12,__LC_THREAD_INFO
        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
-       stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
+       ssm     __LC_SVC_NEW_PSW        # reenable interrupts
        brasl   %r14,execve_tail
        j       sysc_return
 
@@ -437,127 +388,72 @@ ENTRY(kernel_execve)
  */
 
 ENTRY(pgm_check_handler)
-/*
- * First we need to check for a special case:
- * Single stepping an instruction that disables the PER event mask will
- * cause a PER event AFTER the mask has been set. Example: SVC or LPSW.
- * For a single stepped SVC the program check handler gets control after
- * the SVC new PSW has been loaded. But we want to execute the SVC first and
- * then handle the PER event. Therefore we update the SVC old PSW to point
- * to the pgm_check_handler and branch to the SVC handler after we checked
- * if we have to load the kernel stack register.
- * For every other possible cause for PER event without the PER mask set
- * we just ignore the PER event (FIXME: is there anything we have to do
- * for LPSW?).
- */
        stpt    __LC_SYNC_ENTER_TIMER
-       tm      __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
-       jnz     pgm_per                  # got per exception -> special case
-       SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
-       CREATE_STACK_FRAME __LC_SAVE_AREA
-       mvc     SP_PSW(16,%r15),__LC_PGM_OLD_PSW
-       lg      %r12,__LC_THREAD_INFO   # load pointer to thread_info struct
-       HANDLE_SIE_INTERCEPT
-       tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
-       jz      pgm_no_vtime
-       UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
-       UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-       mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
-       LAST_BREAK
-pgm_no_vtime:
-       stg     %r11,SP_ARGS(%r15)
-       lgf     %r3,__LC_PGM_ILC        # load program interruption code
-       lg      %r4,__LC_TRANS_EXC_CODE
-       REENABLE_IRQS
-       lghi    %r8,0x7f
-       ngr     %r8,%r3
-       sll     %r8,3
-       larl    %r1,pgm_check_table
-       lg      %r1,0(%r8,%r1)          # load address of handler routine
-       la      %r2,SP_PTREGS(%r15)     # address of register-save area
-       basr    %r14,%r1                # branch to interrupt-handler
-pgm_exit:
-       j       sysc_return
-
-#
-# handle per exception
-#
-pgm_per:
-       tm      __LC_PGM_OLD_PSW,0x40   # test if per event recording is on
-       jnz     pgm_per_std             # ok, normal per event from user space
-# ok its one of the special cases, now we need to find out which one
-       clc     __LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW
-       je      pgm_svcper
-# no interesting special case, ignore PER event
-       lpswe   __LC_PGM_OLD_PSW
-
-#
-# Normal per exception
-#
-pgm_per_std:
-       SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
-       CREATE_STACK_FRAME __LC_SAVE_AREA
-       mvc     SP_PSW(16,%r15),__LC_PGM_OLD_PSW
-       lg      %r12,__LC_THREAD_INFO   # load pointer to thread_info struct
-       HANDLE_SIE_INTERCEPT
-       tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
-       jz      pgm_no_vtime2
-       UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
-       UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-       mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
-       LAST_BREAK
-pgm_no_vtime2:
+       stmg    %r8,%r15,__LC_SAVE_AREA_SYNC
+       lg      %r10,__LC_LAST_BREAK
+       lg      %r12,__LC_THREAD_INFO
+       larl    %r13,system_call
+       lmg     %r8,%r9,__LC_PGM_OLD_PSW
+       HANDLE_SIE_INTERCEPT %r14
+       tmhh    %r8,0x0001              # test problem state bit
+       jnz     1f                      # -> fault in user space
+       tmhh    %r8,0x4000              # PER bit set in old PSW ?
+       jnz     0f                      # -> enabled, can't be a double fault
+       tm      __LC_PGM_ILC+3,0x80     # check for per exception
+       jnz     pgm_svcper              # -> single stepped svc
+0:     CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
+       j       2f
+1:     UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER
+       LAST_BREAK %r14
+       lg      %r15,__LC_KERNEL_STACK
+2:     aghi    %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+       la      %r11,STACK_FRAME_OVERHEAD(%r15)
+       stmg    %r0,%r7,__PT_R0(%r11)
+       mvc     __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
+       stmg    %r8,%r9,__PT_PSW(%r11)
+       stg     %r10,__PT_ARGS(%r11)
+       tm      __LC_PGM_ILC+3,0x80     # check for per exception
+       jz      0f
        lg      %r1,__TI_task(%r12)
-       tm      SP_PSW+1(%r15),0x01     # kernel per event ?
-       jz      kernel_per
-       mvc     __THREAD_per_cause(2,%r1),__LC_PER_CAUSE
+       tmhh    %r8,0x0001              # kernel per event ?
+       jz      pgm_kprobe
+       oi      __TI_flags+7(%r12),_TIF_PER_TRAP
        mvc     __THREAD_per_address(8,%r1),__LC_PER_ADDRESS
+       mvc     __THREAD_per_cause(2,%r1),__LC_PER_CAUSE
        mvc     __THREAD_per_paid(1,%r1),__LC_PER_PAID
-       oi      __TI_flags+7(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
-       lgf     %r3,__LC_PGM_ILC        # load program interruption code
+0:     lgf     %r3,__LC_PGM_ILC        # load program interruption code
        lg      %r4,__LC_TRANS_EXC_CODE
        REENABLE_IRQS
-       lghi    %r8,0x7f
-       ngr     %r8,%r3                 # clear per-event-bit and ilc
-       je      pgm_exit2
-       sll     %r8,3
+       xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+       lghi    %r10,0x7f
+       ngr     %r10,%r3
+       je      sysc_return
+       sll     %r10,3
        larl    %r1,pgm_check_table
-       lg      %r1,0(%r8,%r1)          # load address of handler routine
-       la      %r2,SP_PTREGS(%r15)     # address of register-save area
+       lg      %r1,0(%r10,%r1)         # load address of handler routine
+       lgr     %r2,%r11                # pass pointer to pt_regs
        basr    %r14,%r1                # branch to interrupt-handler
-pgm_exit2:
        j       sysc_return
 
 #
-# it was a single stepped SVC that is causing all the trouble
+# PER event in supervisor state, must be kprobes
 #
-pgm_svcper:
-       SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA
-       CREATE_STACK_FRAME __LC_SAVE_AREA
-       lg      %r12,__LC_THREAD_INFO   # load pointer to thread_info struct
-       mvc     SP_PSW(16,%r15),__LC_SVC_OLD_PSW
-       mvc     SP_SVC_CODE(4,%r15),__LC_SVC_ILC
-       oi      __TI_flags+7(%r12),(_TIF_SYSCALL | _TIF_PER_TRAP)
-       UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
-       UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-       mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
-       LAST_BREAK
-       lg      %r8,__TI_task(%r12)
-       mvc     __THREAD_per_cause(2,%r8),__LC_PER_CAUSE
-       mvc     __THREAD_per_address(8,%r8),__LC_PER_ADDRESS
-       mvc     __THREAD_per_paid(1,%r8),__LC_PER_PAID
-       stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
-       lmg     %r2,%r6,SP_R2(%r15)     # load svc arguments
-       j       sysc_do_svc
+pgm_kprobe:
+       REENABLE_IRQS
+       xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+       lgr     %r2,%r11                # pass pointer to pt_regs
+       brasl   %r14,do_per_trap
+       j       sysc_return
 
 #
-# per was called from kernel, must be kprobes
+# single stepped system call
 #
-kernel_per:
-       REENABLE_IRQS
-       la      %r2,SP_PTREGS(%r15)     # address of register-save area
-       brasl   %r14,do_per_trap
-       j       pgm_exit
+pgm_svcper:
+       oi      __TI_flags+7(%r12),_TIF_PER_TRAP
+       mvc     __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
+       larl    %r14,sysc_per
+       stg     %r14,__LC_RETURN_PSW+8
+       lpswe   __LC_RETURN_PSW         # branch to sysc_per and enable irqs
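
pgm_svcper deals with the PER event that fires only after the SVC new PSW has been loaded for a single-stepped SVC: it sets TIF_PER_TRAP, then builds a return PSW from the SVC new PSW's mask and the address of sysc_per, so the lpswe re-enters the system call path with interrupts enabled and the PER trap is reported after the SVC itself has been handled. Just to illustrate the PSW that gets built (psw_sketch_t is a stand-in, not the kernel's psw_t):

#include <stdint.h>

/* Stand-ins for the lowcore fields and the sysc_per entry label. */
typedef struct { uint64_t mask; uint64_t addr; } psw_sketch_t;

static psw_sketch_t svcper_return_psw(psw_sketch_t svc_new_psw,
                                      uint64_t sysc_per_address)
{
        psw_sketch_t ret;

        ret.mask = svc_new_psw.mask;    /* mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW */
        ret.addr = sysc_per_address;    /* stg %r14,__LC_RETURN_PSW+8 */
        return ret;
}
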
 
 /*
  * IO interrupt handler routine
@@ -565,21 +461,25 @@ kernel_per:
 ENTRY(io_int_handler)
        stck    __LC_INT_CLOCK
        stpt    __LC_ASYNC_ENTER_TIMER
-       SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+40
-       CREATE_STACK_FRAME __LC_SAVE_AREA+40
-       mvc     SP_PSW(16,%r15),0(%r12) # move user PSW to stack
-       lg      %r12,__LC_THREAD_INFO   # load pointer to thread_info struct
-       HANDLE_SIE_INTERCEPT
-       tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
-       jz      io_no_vtime
-       UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
-       UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-       mvc     __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
-       LAST_BREAK
-io_no_vtime:
+       stmg    %r8,%r15,__LC_SAVE_AREA_ASYNC
+       lg      %r10,__LC_LAST_BREAK
+       lg      %r12,__LC_THREAD_INFO
+       larl    %r13,system_call
+       lmg     %r8,%r9,__LC_IO_OLD_PSW
+       HANDLE_SIE_INTERCEPT %r14
+       SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
+       tmhh    %r8,0x0001              # interrupting from user?
+       jz      io_skip
+       UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
+       LAST_BREAK %r14
+io_skip:
+       stmg    %r0,%r7,__PT_R0(%r11)
+       mvc     __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
+       stmg    %r8,%r9,__PT_PSW(%r11)
        TRACE_IRQS_OFF
-       la      %r2,SP_PTREGS(%r15)     # address of register-save area
-       brasl   %r14,do_IRQ             # call standard irq handler
+       xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+       lgr     %r2,%r11                # pass pointer to pt_regs
+       brasl   %r14,do_IRQ
 io_return:
        LOCKDEP_SYS_EXIT
        TRACE_IRQS_ON
@@ -587,7 +487,14 @@ io_tif:
        tm      __TI_flags+7(%r12),_TIF_WORK_INT
        jnz     io_work                 # there is work to do (signals etc.)
 io_restore:
-       RESTORE_ALL __LC_RETURN_PSW,0
+       lg      %r14,__LC_VDSO_PER_CPU
+       lmg     %r0,%r10,__PT_R0(%r11)
+       mvc     __LC_RETURN_PSW(16),__PT_PSW(%r11)
+       ni      __LC_RETURN_PSW+1,0xfd  # clear wait state bit
+       stpt    __LC_EXIT_TIMER
+       mvc     __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+       lmg     %r11,%r15,__PT_R11(%r11)
+       lpswe   __LC_RETURN_PSW
 io_done:
 
 #
@@ -600,7 +507,7 @@ io_done:
 # Before any work can be done, a switch to the kernel stack is required.
 #
 io_work:
-       tm      SP_PSW+1(%r15),0x01     # returning to user ?
+       tm      __PT_PSW+1(%r11),0x01   # returning to user ?
        jo      io_work_user            # yes -> do resched & signal
 #ifdef CONFIG_PREEMPT
        # check for preemptive scheduling
@@ -609,10 +516,11 @@ io_work:
        tm      __TI_flags+7(%r12),_TIF_NEED_RESCHED
        jno     io_restore
        # switch to kernel stack
-       lg      %r1,SP_R15(%r15)
-       aghi    %r1,-SP_SIZE
-       mvc     SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
-       xc      __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
+       lg      %r1,__PT_R15(%r11)
+       aghi    %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+       mvc     STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
+       xc      __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
+       la      %r11,STACK_FRAME_OVERHEAD(%r1)
        lgr     %r15,%r1
        # TRACE_IRQS_ON already done at io_return, call
        # TRACE_IRQS_OFF to keep things symmetrical
@@ -628,9 +536,10 @@ io_work:
 #
 io_work_user:
        lg      %r1,__LC_KERNEL_STACK
-       aghi    %r1,-SP_SIZE
-       mvc     SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
-       xc      __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
+       aghi    %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+       mvc     STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
+       xc      __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
+       la      %r11,STACK_FRAME_OVERHEAD(%r1)
        lgr     %r15,%r1
 
 #
@@ -663,9 +572,9 @@ io_mcck_pending:
 #
 io_reschedule:
        # TRACE_IRQS_ON already done at io_return
-       stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
+       ssm     __LC_SVC_NEW_PSW        # reenable interrupts
        brasl   %r14,schedule           # call scheduler
-       stnsm   __SF_EMPTY(%r15),0xfc   # disable I/O and ext. interrupts
+       ssm     __LC_PGM_NEW_PSW        # disable I/O and ext. interrupts
        TRACE_IRQS_OFF
        j       io_return
 
@@ -674,10 +583,10 @@ io_reschedule:
 #
 io_sigpending:
        # TRACE_IRQS_ON already done at io_return
-       stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
-       la      %r2,SP_PTREGS(%r15)     # load pt_regs
-       brasl   %r14,do_signal          # call do_signal
-       stnsm   __SF_EMPTY(%r15),0xfc   # disable I/O and ext. interrupts
+       ssm     __LC_SVC_NEW_PSW        # reenable interrupts
+       lgr     %r2,%r11                # pass pointer to pt_regs
+       brasl   %r14,do_signal
+       ssm     __LC_PGM_NEW_PSW        # disable I/O and ext. interrupts
        TRACE_IRQS_OFF
        j       io_return
 
@@ -686,10 +595,10 @@ io_sigpending:
 #
 io_notify_resume:
        # TRACE_IRQS_ON already done at io_return
-       stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
-       la      %r2,SP_PTREGS(%r15)     # load pt_regs
-       brasl   %r14,do_notify_resume   # call do_notify_resume
-       stnsm   __SF_EMPTY(%r15),0xfc   # disable I/O and ext. interrupts
+       ssm     __LC_SVC_NEW_PSW        # reenable interrupts
+       lgr     %r2,%r11                # pass pointer to pt_regs
+       brasl   %r14,do_notify_resume
+       ssm     __LC_PGM_NEW_PSW        # disable I/O and ext. interrupts
        TRACE_IRQS_OFF
        j       io_return
 
@@ -699,21 +608,24 @@ io_notify_resume:
 ENTRY(ext_int_handler)
        stck    __LC_INT_CLOCK
        stpt    __LC_ASYNC_ENTER_TIMER
-       SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+40
-       CREATE_STACK_FRAME __LC_SAVE_AREA+40
-       mvc     SP_PSW(16,%r15),0(%r12) # move user PSW to stack
-       lg      %r12,__LC_THREAD_INFO   # load pointer to thread_info struct
-       HANDLE_SIE_INTERCEPT
-       tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
-       jz      ext_no_vtime
-       UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
-       UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-       mvc     __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
-       LAST_BREAK
-ext_no_vtime:
+       stmg    %r8,%r15,__LC_SAVE_AREA_ASYNC
+       lg      %r10,__LC_LAST_BREAK
+       lg      %r12,__LC_THREAD_INFO
+       larl    %r13,system_call
+       lmg     %r8,%r9,__LC_EXT_OLD_PSW
+       HANDLE_SIE_INTERCEPT %r14
+       SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
+       tmhh    %r8,0x0001              # interrupting from user ?
+       jz      ext_skip
+       UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
+       LAST_BREAK %r14
+ext_skip:
+       stmg    %r0,%r7,__PT_R0(%r11)
+       mvc     __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
+       stmg    %r8,%r9,__PT_PSW(%r11)
        TRACE_IRQS_OFF
        lghi    %r1,4096
-       la      %r2,SP_PTREGS(%r15)     # address of register-save area
+       lgr     %r2,%r11                # pass pointer to pt_regs
        llgf    %r3,__LC_CPU_ADDRESS    # get cpu address + interruption code
        llgf    %r4,__LC_EXT_PARAMS     # get external parameter
        lg      %r5,__LC_EXT_PARAMS2-4096(%r1)  # get 64 bit external parameter
@@ -730,81 +642,77 @@ ENTRY(mcck_int_handler)
        la      %r1,4095                # revalidate r1
        spt     __LC_CPU_TIMER_SAVE_AREA-4095(%r1)      # revalidate cpu timer
        lmg     %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
-       stmg    %r11,%r15,__LC_SAVE_AREA+80
+       lg      %r10,__LC_LAST_BREAK
+       lg      %r12,__LC_THREAD_INFO
        larl    %r13,system_call
-       lg      %r11,__LC_LAST_BREAK
-       la      %r12,__LC_MCK_OLD_PSW
+       lmg     %r8,%r9,__LC_MCK_OLD_PSW
+       HANDLE_SIE_INTERCEPT %r14
        tm      __LC_MCCK_CODE,0x80     # system damage?
-       jo      mcck_int_main           # yes -> rest of mcck code invalid
-       la      %r14,4095
-       mvc     __LC_MCCK_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r14)
+       jo      mcck_panic              # yes -> rest of mcck code invalid
+       lghi    %r14,__LC_CPU_TIMER_SAVE_AREA
+       mvc     __LC_MCCK_ENTER_TIMER(8),0(%r14)
        tm      __LC_MCCK_CODE+5,0x02   # stored cpu timer value valid?
-       jo      1f
+       jo      3f
        la      %r14,__LC_SYNC_ENTER_TIMER
        clc     0(8,%r14),__LC_ASYNC_ENTER_TIMER
        jl      0f
        la      %r14,__LC_ASYNC_ENTER_TIMER
 0:     clc     0(8,%r14),__LC_EXIT_TIMER
-       jl      0f
+       jl      1f
        la      %r14,__LC_EXIT_TIMER
-0:     clc     0(8,%r14),__LC_LAST_UPDATE_TIMER
-       jl      0f
+1:     clc     0(8,%r14),__LC_LAST_UPDATE_TIMER
+       jl      2f
        la      %r14,__LC_LAST_UPDATE_TIMER
-0:     spt     0(%r14)
+2:     spt     0(%r14)
        mvc     __LC_MCCK_ENTER_TIMER(8),0(%r14)
-1:     tm      __LC_MCCK_CODE+2,0x09   # mwp + ia of old psw valid?
-       jno     mcck_int_main           # no -> skip cleanup critical
-       tm      __LC_MCK_OLD_PSW+1,0x01 # test problem state bit
-       jnz     mcck_int_main           # from user -> load kernel stack
-       clc     __LC_MCK_OLD_PSW+8(8),BASED(.Lcritical_end)
-       jhe     mcck_int_main
-       clc     __LC_MCK_OLD_PSW+8(8),BASED(.Lcritical_start)
-       jl      mcck_int_main
-       brasl   %r14,cleanup_critical
-mcck_int_main:
-       lg      %r14,__LC_PANIC_STACK   # are we already on the panic stack?
-       slgr    %r14,%r15
-       srag    %r14,%r14,PAGE_SHIFT
-       jz      0f
-       lg      %r15,__LC_PANIC_STACK   # load panic stack
-0:     aghi    %r15,-SP_SIZE           # make room for registers & psw
-       CREATE_STACK_FRAME __LC_SAVE_AREA+80
-       mvc     SP_PSW(16,%r15),0(%r12)
-       lg      %r12,__LC_THREAD_INFO   # load pointer to thread_info struct
-       tm      __LC_MCCK_CODE+2,0x08   # mwp of old psw valid?
-       jno     mcck_no_vtime           # no -> no timer update
-       HANDLE_SIE_INTERCEPT
-       tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
-       jz      mcck_no_vtime
-       UPDATE_VTIME __LC_EXIT_TIMER,__LC_MCCK_ENTER_TIMER,__LC_USER_TIMER
-       UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-       mvc     __LC_LAST_UPDATE_TIMER(8),__LC_MCCK_ENTER_TIMER
-       LAST_BREAK
-mcck_no_vtime:
-       la      %r2,SP_PTREGS(%r15)     # load pt_regs
+3:     tm      __LC_MCCK_CODE+2,0x09   # mwp + ia of old psw valid?
+       jno     mcck_panic              # no -> skip cleanup critical
+       SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_PANIC_STACK,PAGE_SHIFT
+       tm      %r8,0x0001              # interrupting from user ?
+       jz      mcck_skip
+       UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER
+       LAST_BREAK %r14
+mcck_skip:
+       lghi    %r14,__LC_GPREGS_SAVE_AREA
+       mvc     __PT_R0(128,%r11),0(%r14)
+       stmg    %r8,%r9,__PT_PSW(%r11)
+       xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+       lgr     %r2,%r11                # pass pointer to pt_regs
        brasl   %r14,s390_do_machine_check
-       tm      SP_PSW+1(%r15),0x01     # returning to user ?
+       tm      __PT_PSW+1(%r11),0x01   # returning to user ?
        jno     mcck_return
        lg      %r1,__LC_KERNEL_STACK   # switch to kernel stack
-       aghi    %r1,-SP_SIZE
-       mvc     SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
-       xc      __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
+       aghi    %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+       mvc     STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
+       xc      __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
+       la      %r11,STACK_FRAME_OVERHEAD(%r1)
        lgr     %r15,%r1
-       stosm   __SF_EMPTY(%r15),0x04   # turn dat on
+       ssm     __LC_PGM_NEW_PSW        # turn dat on, keep irqs off
        tm      __TI_flags+7(%r12),_TIF_MCCK_PENDING
        jno     mcck_return
        TRACE_IRQS_OFF
        brasl   %r14,s390_handle_mcck
        TRACE_IRQS_ON
 mcck_return:
-       mvc     __LC_RETURN_MCCK_PSW(16),SP_PSW(%r15) # move return PSW
+       lg      %r14,__LC_VDSO_PER_CPU
+       lmg     %r0,%r10,__PT_R0(%r11)
+       mvc     __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
        ni      __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
-       lmg     %r0,%r15,SP_R0(%r15)    # load gprs 0-15
        tm      __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
        jno     0f
        stpt    __LC_EXIT_TIMER
-0:     lpswe   __LC_RETURN_MCCK_PSW    # back to caller
-mcck_done:
+       mvc     __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+0:     lmg     %r11,%r15,__PT_R11(%r11)
+       lpswe   __LC_RETURN_MCCK_PSW
+
+mcck_panic:
+       lg      %r14,__LC_PANIC_STACK
+       slgr    %r14,%r15
+       srag    %r14,%r14,PAGE_SHIFT
+       jz      0f
+       lg      %r15,__LC_PANIC_STACK
+0:     aghi    %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+       j       mcck_skip
 
 /*
  * Restart interruption handler, kick starter for additional CPUs
@@ -818,17 +726,18 @@ restart_base:
        stck    __LC_LAST_UPDATE_CLOCK
        mvc     __LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1)
        mvc     __LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1)
-       lg      %r15,__LC_SAVE_AREA+120 # load ksp
+       lghi    %r10,__LC_GPREGS_SAVE_AREA
+       lg      %r15,120(%r10)          # load ksp
        lghi    %r10,__LC_CREGS_SAVE_AREA
-       lctlg   %c0,%c15,0(%r10) # get new ctl regs
+       lctlg   %c0,%c15,0(%r10)        # get new ctl regs
        lghi    %r10,__LC_AREGS_SAVE_AREA
        lam     %a0,%a15,0(%r10)
-       lmg     %r6,%r15,__SF_GPRS(%r15) # load registers from clone
+       lmg     %r6,%r15,__SF_GPRS(%r15)# load registers from clone
        lg      %r1,__LC_THREAD_INFO
        mvc     __LC_USER_TIMER(8),__TI_user_timer(%r1)
        mvc     __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1)
        xc      __LC_STEAL_TIMER(8),__LC_STEAL_TIMER
-       stosm   __SF_EMPTY(%r15),0x04   # now we can turn dat on
+       ssm     __LC_PGM_NEW_PSW        # turn dat on, keep irqs off
        brasl   %r14,start_secondary
        .align  8
 restart_vtime:
@@ -852,16 +761,16 @@ restart_go:
 # PSW restart interrupt handler
 #
 ENTRY(psw_restart_int_handler)
-       stg     %r15,__LC_SAVE_AREA+120(%r0)    # save r15
+       stg     %r15,__LC_SAVE_AREA_RESTART
        larl    %r15,restart_stack              # load restart stack
        lg      %r15,0(%r15)
-       aghi    %r15,-SP_SIZE                   # make room for pt_regs
-       stmg    %r0,%r14,SP_R0(%r15)            # store gprs %r0-%r14 to stack
-       mvc     SP_R15(8,%r15),__LC_SAVE_AREA+120(%r0)# store saved %r15 to stack
-       mvc     SP_PSW(16,%r15),__LC_RST_OLD_PSW(%r0)# store restart old psw
-       xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0
+       aghi    %r15,-__PT_SIZE                 # create pt_regs on stack
+       stmg    %r0,%r14,__PT_R0(%r15)
+       mvc     __PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
+       mvc     __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw
+       aghi    %r15,-STACK_FRAME_OVERHEAD
+       xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
        brasl   %r14,do_restart
-
        larl    %r14,restart_psw_crash          # load disabled wait PSW if
        lpswe   0(%r14)                         # do_restart returns
        .align 8
@@ -877,172 +786,153 @@ restart_psw_crash:
  * Setup a pt_regs so that show_trace can provide a good call trace.
  */
 stack_overflow:
-       lg      %r15,__LC_PANIC_STACK   # change to panic stack
-       aghi    %r15,-SP_SIZE
-       mvc     SP_PSW(16,%r15),0(%r12) # move user PSW to stack
-       stmg    %r0,%r10,SP_R0(%r15)    # store gprs %r0-%r10 to kernel stack
-       la      %r1,__LC_SAVE_AREA
-       chi     %r12,__LC_SVC_OLD_PSW
-       je      0f
-       chi     %r12,__LC_PGM_OLD_PSW
-       je      0f
-       la      %r1,__LC_SAVE_AREA+40
-0:     mvc     SP_R11(40,%r15),0(%r1)  # move %r11-%r15 to stack
-       mvc     SP_ARGS(8,%r15),__LC_LAST_BREAK
-       xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain
-       la      %r2,SP_PTREGS(%r15)     # load pt_regs
+       lg      %r11,__LC_PANIC_STACK   # change to panic stack
+       aghi    %r11,-__PT_SIZE         # create pt_regs
+       stmg    %r0,%r7,__PT_R0(%r11)
+       stmg    %r8,%r9,__PT_PSW(%r11)
+       mvc     __PT_R8(64,%r11),0(%r14)
+       stg     %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
+       lgr     %r15,%r11
+       aghi    %r15,-STACK_FRAME_OVERHEAD
+       xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+       lgr     %r2,%r11                # pass pointer to pt_regs
        jg      kernel_stack_overflow
 #endif
 
-cleanup_table_system_call:
-       .quad   system_call, sysc_do_svc
-cleanup_table_sysc_tif:
-       .quad   sysc_tif, sysc_restore
-cleanup_table_sysc_restore:
-       .quad   sysc_restore, sysc_done
-cleanup_table_io_tif:
-       .quad   io_tif, io_restore
-cleanup_table_io_restore:
-       .quad   io_restore, io_done
+       .align  8
+cleanup_table:
+       .quad   system_call
+       .quad   sysc_do_svc
+       .quad   sysc_tif
+       .quad   sysc_restore
+       .quad   sysc_done
+       .quad   io_tif
+       .quad   io_restore
+       .quad   io_done
 
 cleanup_critical:
-       clc     8(8,%r12),BASED(cleanup_table_system_call)
+       clg     %r9,BASED(cleanup_table)        # system_call
        jl      0f
-       clc     8(8,%r12),BASED(cleanup_table_system_call+8)
+       clg     %r9,BASED(cleanup_table+8)      # sysc_do_svc
        jl      cleanup_system_call
-0:
-       clc     8(8,%r12),BASED(cleanup_table_sysc_tif)
+       clg     %r9,BASED(cleanup_table+16)     # sysc_tif
        jl      0f
-       clc     8(8,%r12),BASED(cleanup_table_sysc_tif+8)
+       clg     %r9,BASED(cleanup_table+24)     # sysc_restore
        jl      cleanup_sysc_tif
-0:
-       clc     8(8,%r12),BASED(cleanup_table_sysc_restore)
-       jl      0f
-       clc     8(8,%r12),BASED(cleanup_table_sysc_restore+8)
+       clg     %r9,BASED(cleanup_table+32)     # sysc_done
        jl      cleanup_sysc_restore
-0:
-       clc     8(8,%r12),BASED(cleanup_table_io_tif)
+       clg     %r9,BASED(cleanup_table+40)     # io_tif
        jl      0f
-       clc     8(8,%r12),BASED(cleanup_table_io_tif+8)
+       clg     %r9,BASED(cleanup_table+48)     # io_restore
        jl      cleanup_io_tif
-0:
-       clc     8(8,%r12),BASED(cleanup_table_io_restore)
-       jl      0f
-       clc     8(8,%r12),BASED(cleanup_table_io_restore+8)
+       clg     %r9,BASED(cleanup_table+56)     # io_done
        jl      cleanup_io_restore
-0:
-       br      %r14
+0:     br      %r14
+
 
 cleanup_system_call:
-       mvc     __LC_RETURN_PSW(16),0(%r12)
-       clc     __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8)
+       # check if stpt has been executed
+       clg     %r9,BASED(cleanup_system_call_insn)
        jh      0f
-       mvc     __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
-       cghi    %r12,__LC_MCK_OLD_PSW
-       je      0f
        mvc     __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
-0:     cghi    %r12,__LC_MCK_OLD_PSW
-       la      %r12,__LC_SAVE_AREA+80
+       cghi    %r11,__LC_SAVE_AREA_ASYNC
        je      0f
-       la      %r12,__LC_SAVE_AREA+40
-0:     clc     __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16)
-       jhe     cleanup_vtime
-       clc     __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn)
+       mvc     __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
+0:     # check if stmg has been executed
+       clg     %r9,BASED(cleanup_system_call_insn+8)
        jh      0f
-       mvc     __LC_SAVE_AREA(40),0(%r12)
-0:     lg      %r15,__LC_KERNEL_STACK  # problem state -> load ksp
-       aghi    %r15,-SP_SIZE           # make room for registers & psw
-       stg     %r15,32(%r12)
-       stg     %r11,0(%r12)
-       CREATE_STACK_FRAME __LC_SAVE_AREA
-       mvc     8(8,%r12),__LC_THREAD_INFO
-       lg      %r12,__LC_THREAD_INFO
-       mvc     SP_PSW(16,%r15),__LC_SVC_OLD_PSW
-       mvc     SP_SVC_CODE(4,%r15),__LC_SVC_ILC
-       oi      __TI_flags+7(%r12),_TIF_SYSCALL
-cleanup_vtime:
-       clc     __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24)
-       jhe     cleanup_stime
-       UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
-cleanup_stime:
-       clc     __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+32)
-       jh      cleanup_update
-       UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-cleanup_update:
+       mvc     __LC_SAVE_AREA_SYNC(64),0(%r11)
+0:     # check if base register setup + TIF bit load has been done
+       clg     %r9,BASED(cleanup_system_call_insn+16)
+       jhe     0f
+       # set up saved registers r10 and r12
+       stg     %r10,16(%r11)           # r10 last break
+       stg     %r12,32(%r11)           # r12 thread-info pointer
+0:     # check if the user time update has been done
+       clg     %r9,BASED(cleanup_system_call_insn+24)
+       jh      0f
+       lg      %r15,__LC_EXIT_TIMER
+       slg     %r15,__LC_SYNC_ENTER_TIMER
+       alg     %r15,__LC_USER_TIMER
+       stg     %r15,__LC_USER_TIMER
+0:     # check if the system time update has been done
+       clg     %r9,BASED(cleanup_system_call_insn+32)
+       jh      0f
+       lg      %r15,__LC_LAST_UPDATE_TIMER
+       slg     %r15,__LC_EXIT_TIMER
+       alg     %r15,__LC_SYSTEM_TIMER
+       stg     %r15,__LC_SYSTEM_TIMER
+0:     # update accounting time stamp
        mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
-       srag    %r12,%r11,23
-       lg      %r12,__LC_THREAD_INFO
+       # do LAST_BREAK
+       lg      %r9,16(%r11)
+       srag    %r9,%r9,23
        jz      0f
-       stg     %r11,__TI_last_break(%r12)
-0:     mvc     __LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8)
-       la      %r12,__LC_RETURN_PSW
+       mvc     __TI_last_break(8,%r12),16(%r11)
+0:     # set up saved register r11
+       lg      %r15,__LC_KERNEL_STACK
+       aghi    %r15,-__PT_SIZE
+       stg     %r15,24(%r11)           # r11 pt_regs pointer
+       # fill pt_regs
+       mvc     __PT_R8(64,%r15),__LC_SAVE_AREA_SYNC
+       stmg    %r0,%r7,__PT_R0(%r15)
+       mvc     __PT_PSW(16,%r15),__LC_SVC_OLD_PSW
+       mvc     __PT_SVC_CODE(4,%r15),__LC_SVC_ILC
+       # setup saved register r15
+       aghi    %r15,-STACK_FRAME_OVERHEAD
+       stg     %r15,56(%r11)           # r15 stack pointer
+       # set new psw address and exit
+       larl    %r9,sysc_do_svc
        br      %r14
 cleanup_system_call_insn:
-       .quad   sysc_saveall
        .quad   system_call
-       .quad   sysc_vtime
-       .quad   sysc_stime
-       .quad   sysc_update
+       .quad   sysc_stmg
+       .quad   sysc_per
+       .quad   sysc_vtime+18
+       .quad   sysc_vtime+42
 
 cleanup_sysc_tif:
-       mvc     __LC_RETURN_PSW(8),0(%r12)
-       mvc     __LC_RETURN_PSW+8(8),BASED(cleanup_table_sysc_tif)
-       la      %r12,__LC_RETURN_PSW
+       larl    %r9,sysc_tif
        br      %r14
 
 cleanup_sysc_restore:
-       clc     8(8,%r12),BASED(cleanup_sysc_restore_insn)
-       je      2f
-       clc     8(8,%r12),BASED(cleanup_sysc_restore_insn+8)
-       jhe     0f
-       mvc     __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
-       cghi    %r12,__LC_MCK_OLD_PSW
+       clg     %r9,BASED(cleanup_sysc_restore_insn)
        je      0f
-       mvc     __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
-0:     mvc     __LC_RETURN_PSW(16),SP_PSW(%r15)
-       cghi    %r12,__LC_MCK_OLD_PSW
-       la      %r12,__LC_SAVE_AREA+80
-       je      1f
-       la      %r12,__LC_SAVE_AREA+40
-1:     mvc     0(40,%r12),SP_R11(%r15)
-       lmg     %r0,%r10,SP_R0(%r15)
-       lg      %r15,SP_R15(%r15)
-2:     la      %r12,__LC_RETURN_PSW
+       lg      %r9,24(%r11)            # get saved pointer to pt_regs
+       mvc     __LC_RETURN_PSW(16),__PT_PSW(%r9)
+       mvc     0(64,%r11),__PT_R8(%r9)
+       lmg     %r0,%r7,__PT_R0(%r9)
+0:     lmg     %r8,%r9,__LC_RETURN_PSW
        br      %r14
 cleanup_sysc_restore_insn:
        .quad   sysc_done - 4
-       .quad   sysc_done - 16
 
 cleanup_io_tif:
-       mvc     __LC_RETURN_PSW(8),0(%r12)
-       mvc     __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_tif)
-       la      %r12,__LC_RETURN_PSW
+       larl    %r9,io_tif
        br      %r14
 
 cleanup_io_restore:
-       clc     8(8,%r12),BASED(cleanup_io_restore_insn)
-       je      1f
-       clc     8(8,%r12),BASED(cleanup_io_restore_insn+8)
-       jhe     0f
-       mvc     __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
-0:     mvc     __LC_RETURN_PSW(16),SP_PSW(%r15)
-       mvc     __LC_SAVE_AREA+80(40),SP_R11(%r15)
-       lmg     %r0,%r10,SP_R0(%r15)
-       lg      %r15,SP_R15(%r15)
-1:     la      %r12,__LC_RETURN_PSW
+       clg     %r9,BASED(cleanup_io_restore_insn)
+       je      0f
+       lg      %r9,24(%r11)            # get saved r11 pointer to pt_regs
+       mvc     __LC_RETURN_PSW(16),__PT_PSW(%r9)
+       ni      __LC_RETURN_PSW+1,0xfd  # clear wait state bit
+       mvc     0(64,%r11),__PT_R8(%r9)
+       lmg     %r0,%r7,__PT_R0(%r9)
+0:     lmg     %r8,%r9,__LC_RETURN_PSW
        br      %r14
 cleanup_io_restore_insn:
        .quad   io_done - 4
-       .quad   io_done - 16
 
 /*
  * Integer constants
  */
-               .align  4
+       .align  8
 .Lcritical_start:
-               .quad   __critical_start
-.Lcritical_end:
-               .quad   __critical_end
+       .quad   __critical_start
+.Lcritical_length:
+       .quad   __critical_end - __critical_start
+
 
 #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
 /*
@@ -1094,8 +984,8 @@ sie_fault:
        .align  8
 .Lsie_loop:
        .quad   sie_loop
-.Lsie_done:
-       .quad   sie_done
+.Lsie_length:
+       .quad   sie_done - sie_loop
 .Lhost_id:
        .quad   0
 
arch/s390/kernel/head.S
index 900068d2bf929d3222ed9f82aceaccc2edb18660..c27a0727f9304cecf518aa310c70c33260fc3245 100644 (file)
@@ -329,8 +329,8 @@ iplstart:
 #
 # reset files in VM reader
 #
-       stidp   __LC_SAVE_AREA          # store cpuid
-       tm      __LC_SAVE_AREA,0xff     # running VM ?
+       stidp   __LC_SAVE_AREA_SYNC     # store cpuid
+       tm      __LC_SAVE_AREA_SYNC,0xff# running VM ?
        bno     .Lnoreset
        la      %r2,.Lreset
        lhi     %r3,26
arch/s390/kernel/reipl64.S
index 732a793ec53a65a173d17245cfeadd0bc970fa24..36b32658fb241146d86d672fd498781b3180e441 100644 (file)
 #
 ENTRY(store_status)
        /* Save register one and load save area base */
-       stg     %r1,__LC_SAVE_AREA+120(%r0)
+       stg     %r1,__LC_SAVE_AREA_RESTART
        lghi    %r1,SAVE_AREA_BASE
        /* General purpose registers */
        stmg    %r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-       lg      %r2,__LC_SAVE_AREA+120(%r0)
+       lg      %r2,__LC_SAVE_AREA_RESTART
        stg     %r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1)
        /* Control registers */
        stctg   %c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
arch/s390/kernel/smp.c
index 9cf01e455e50daf701fc1596199ce262ec9d7777..109e7422bb207f81fdce012d9245791ebc888794 100644 (file)
@@ -654,7 +654,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
                                     - sizeof(struct stack_frame));
        memset(sf, 0, sizeof(struct stack_frame));
        sf->gprs[9] = (unsigned long) sf;
-       cpu_lowcore->save_area[15] = (unsigned long) sf;
+       cpu_lowcore->gpregs_save_area[15] = (unsigned long) sf;
        __ctl_store(cpu_lowcore->cregs_save_area, 0, 15);
        atomic_inc(&init_mm.context.attach_count);
        asm volatile(