From 5276597a467415e7d56034ba587925385f78662c Mon Sep 17 00:00:00 2001
From: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Date: Fri, 4 May 2012 17:20:44 +0100
Subject: [PATCH] ARM: kernel: fix MPIDR cpu_{suspend}/{resume} usage

The current version of cpu_{suspend}/{resume} relies on the 8 LSBs of
the MPIDR register to index the context pointer saved and restored on
CPU shutdown. This approach breaks as soon as platforms with populated
MPIDR affinity levels 1 and 2 are deployed, since the MPIDR can no
longer be used as a linear index.

There are multiple solutions to this problem, each with pros and cons.

This patch changes cpu_{suspend}/{resume} so that the CPU logical id is
used to retrieve an index into the context pointers array.

Performance is impacted on both the save and restore paths. On the save
path the CPU logical id has to be retrieved from thread_info; since
caches are on, the performance hit should be negligible.

In the resume code path the MMU is off and so are the caches. The patch
adds a trivial loop that scans the cpu_logical_map array, comparing each
entry against the resuming CPU's MPIDR to retrieve its logical index.
Since everything runs out of strongly-ordered memory, the performance
hit in the resume code path must be measured and weighed; it worsens as
the number of CPUs increases, since the lookup is a linear search
(though this can be improved).

On the up side, the logical index approach is by far the easiest
solution in terms of coding and makes dynamic changes to the cpu
mapping trivial at run-time. Any run-time change to cpu_logical_map
(e.g. by an in-kernel switcher) must be cleaned from the caches, since
this data has to be retrieved with the MMU off, when caches are not
searched.

Tested on TC2 and fast models.
---
 arch/arm/kernel/sleep.S | 32 ++++++++++++++++++++++++++------
 1 file changed, 26 insertions(+), 6 deletions(-)

diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index 987dcf33415c..c8952daf4104 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -4,6 +4,7 @@
 #include <asm/assembler.h>
 #include <asm/glue-cache.h>
 #include <asm/glue-proc.h>
+#include "entry-header.S"
 	.text
 /*
  * Save CPU state for a suspend.  This saves the CPU general purpose
@@ -30,9 +31,8 @@ ENTRY(__cpu_suspend)
 	mov	r2, r5			@ virtual SP
 	ldr	r3, =sleep_save_sp
 #ifdef CONFIG_SMP
-	ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
-	ALT_UP(mov lr, #0)
-	and	lr, lr, #15
+	get_thread_info r5
+	ldr	lr, [r5, #TI_CPU]	@ cpu logical index
 	add	r3, r3, lr, lsl #2
 #endif
 	bl	__cpu_suspend_save
@@ -82,10 +82,13 @@ ENDPROC(cpu_resume_after_mmu)
 	.align
 ENTRY(cpu_resume)
 #ifdef CONFIG_SMP
+	mov	r1, #0			@ fall-back logical index for UP
+	ALT_SMP(mrc p15, 0, r0, c0, c0, 5)
+	ALT_UP_B(1f)
+	bfc	r0, #24, #8
+	bl	cpu_logical_index	@ return logical index in r1
+1:
 	adr	r0, sleep_save_sp
-	ALT_SMP(mrc p15, 0, r1, c0, c0, 5)
-	ALT_UP(mov r1, #0)
-	and	r1, r1, #15
 	ldr	r0, [r0, r1, lsl #2]	@ stack phys addr
 #else
 	ldr	r0, sleep_save_sp	@ stack phys addr
@@ -102,3 +105,20 @@ sleep_save_sp:
 	.rept	CONFIG_NR_CPUS
 	.long	0			@ preserve stack phys ptr here
 	.endr
+
+#ifdef CONFIG_SMP
+cpu_logical_index:
+	adr	r3, cpu_map_ptr
+	ldr	r2, [r3]
+	add	r3, r3, r2		@ virt_to_phys(__cpu_logical_map)
+	mov	r1, #0
+1:
+	ldr	r2, [r3, r1, lsl #2]
+	cmp	r2, r0
+	moveq	pc, lr
+	add	r1, r1, #1
+	b	1b
+
+cpu_map_ptr:
+	.long	__cpu_logical_map - .
+#endif
-- 
2.34.1
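
Note (not part of the patch): as a reading aid, the cpu_logical_index
assembly routine added above behaves like the following standalone C
model. The map contents, the NR_CPUS value, and the main() harness are
illustrative assumptions, not taken from the kernel sources:

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4	/* stand-in for CONFIG_NR_CPUS; illustrative */

/* Model of __cpu_logical_map: logical cpu -> MPIDR affinity bits.
 * The values below mimic a two-cluster layout and are illustrative. */
static uint32_t cpu_logical_map[NR_CPUS] = { 0x000, 0x001, 0x100, 0x101 };

/*
 * C model of the cpu_logical_index assembly routine: clear the top
 * byte of the MPIDR (the "bfc r0, #24, #8" above) and linearly scan
 * cpu_logical_map until an entry matches.  Like the assembly, it has
 * no bound check and assumes the MPIDR is present in the map, which
 * is why the commit message flags the search cost for measurement.
 */
static unsigned int cpu_logical_index(uint32_t mpidr)
{
	unsigned int i = 0;

	mpidr &= 0x00ffffff;	/* keep affinity levels 0-2 */
	while (cpu_logical_map[i] != mpidr)
		i++;
	return i;
}

int main(void)
{
	/* 0x80000101: flag bits set in the top byte, Aff1 = 1, Aff0 = 1;
	 * masking leaves 0x101, which maps to logical CPU 3 above. */
	printf("logical index = %u\n", cpu_logical_index(0x80000101));
	return 0;
}

The "cpu_map_ptr: .long __cpu_logical_map - ." constant is what makes
the assembly version work with the MMU off: adr yields the physical,
PC-relative address of cpu_map_ptr, and adding the stored link-time
offset lands on the physical location of __cpu_logical_map, assuming a
constant virtual-to-physical offset for the kernel image.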