From: Alex Shi <alex.shi@linaro.org>
Date: Mon, 12 Dec 2016 14:16:26 +0000 (+0800)
Subject: Merge remote-tracking branch 'lts/linux-4.4.y' into linux-linaro-lsk-v4.4
X-Git-Tag: firefly_0821_release~176^2~4^2~9
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=a057484ab40ff81f22a94bb62c035c78b5abd940;p=firefly-linux-kernel-4.4.55.git

Merge remote-tracking branch 'lts/linux-4.4.y' into linux-linaro-lsk-v4.4

Conflicts:
	arch/arm64/include/asm/processor.h
	  (also changed cpu_enable_uao() to return int, matching the new
	   enable() callback type)
	arch/arm64/kernel/suspend.c
	  (fixed a mismatched comment)
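
The return-type change is needed because the stable branch now runs the
enable() callbacks through stop_machine() rather than on_each_cpu(), and
the two APIs take different callback types (as declared in
include/linux/smp.h and include/linux/stop_machine.h):

	/* on_each_cpu(): handler runs in IPI (interrupt) context */
	typedef void (*smp_call_func_t)(void *info);

	/* stop_machine(): callback runs in the stopper thread's process
	 * context, and its return value is propagated to the caller */
	typedef int (*cpu_stop_fn_t)(void *arg);

LSK's cpu_enable_uao() therefore has to return int as well, so that it
can be passed to stop_machine() just like cpu_enable_pan().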
---

a057484ab40ff81f22a94bb62c035c78b5abd940
diff --cc arch/arm64/include/asm/processor.h
index cef1cf398356,d08559528927..4be934fde409
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@@ -190,7 -186,6 +190,7 @@@ static inline void spin_lock_prefetch(c
  
  #endif
  
- void cpu_enable_pan(void *__unused);
- void cpu_enable_uao(void *__unused);
+ int cpu_enable_pan(void *__unused);
++int cpu_enable_uao(void *__unused);
  
  #endif /* __ASM_PROCESSOR_H */
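
The prototypes above are consumed through the enable hook of the
cpufeature table. A minimal sketch of the relevant field, assuming this
tree's struct arm64_cpu_capabilities layout (remaining fields omitted):

	struct arm64_cpu_capabilities {
		const char *desc;
		u16 capability;
		bool (*matches)(const struct arm64_cpu_capabilities *);
		int (*enable)(void *);	/* was: void (*enable)(void *) */
	};
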
diff --cc arch/arm64/kernel/cpufeature.c
index 24ecbeb733ed,2735bf814592..eda7d5915fbb
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@@ -821,9 -764,15 +823,15 @@@ enable_cpu_capabilities(const struct ar
  {
  	int i;
  
 -	for (i = 0; caps[i].desc; i++)
 +	for (i = 0; caps[i].matches; i++)
  		if (caps[i].enable && cpus_have_cap(caps[i].capability))
- 			on_each_cpu(caps[i].enable, NULL, true);
+ 			/*
+ 			 * Use stop_machine() as it schedules the work allowing
+ 			 * us to modify PSTATE, instead of on_each_cpu() which
+ 			 * uses an IPI, giving us a PSTATE that disappears when
+ 			 * we return.
+ 			 */
+ 			stop_machine(caps[i].enable, NULL, cpu_online_mask);
  }
  
  #ifdef CONFIG_HOTPLUG_CPU
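
The comment in the hunk above is the crux of the stable fix: an IPI
handler runs in interrupt context, so a PSTATE bit set there is lost as
soon as the exception returns and PSTATE is restored from the saved
SPSR. stop_machine() instead runs the callback in each CPU's stopper
thread, in process context, so the modified PSTATE stays live after the
callback returns. A sketch of a callback following the new convention
(cpu_enable_foo is illustrative, not a function in this tree):

	/* Illustrative enable() callback for stop_machine() */
	static int cpu_enable_foo(void *__unused)
	{
		/* Process context: the PSTATE.PAN update survives the
		 * return from this function. */
		asm(SET_PSTATE_PAN(1));
		return 0;	/* stop_machine() propagates this value */
	}

	/* Run the callback on every online CPU: */
	stop_machine(cpu_enable_foo, NULL, cpu_online_mask);
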
diff --cc arch/arm64/kernel/suspend.c
index b616e365cee3,00c1372bf57b..5a0b1088c17c
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@@ -83,21 -80,52 +85,26 @@@ int cpu_suspend(unsigned long arg, int 
  	 */
  	pause_graph_tracing();
  
 -	/*
 -	 * mm context saved on the stack, it will be restored when
 -	 * the cpu comes out of reset through the identity mapped
 -	 * page tables, so that the thread address space is properly
 -	 * set-up on function return.
 -	 */
 -	ret = __cpu_suspend_enter(arg, fn);
 -	if (ret == 0) {
 -		/*
 -		 * We are resuming from reset with TTBR0_EL1 set to the
 -		 * idmap to enable the MMU; set the TTBR0 to the reserved
 -		 * page tables to prevent speculative TLB allocations, flush
 -		 * the local tlb and set the default tcr_el1.t0sz so that
 -		 * the TTBR0 address space set-up is properly restored.
 -		 * If the current active_mm != &init_mm we entered cpu_suspend
 -		 * with mappings in TTBR0 that must be restored, so we switch
 -		 * them back to complete the address space configuration
 -		 * restoration before returning.
 -		 */
 -		cpu_set_reserved_ttbr0();
 -		local_flush_tlb_all();
 -		cpu_set_default_tcr_t0sz();
 -
 -		if (mm != &init_mm)
 -			cpu_switch_mm(mm->pgd, mm);
 -
 -		/*
 -		 * Restore per-cpu offset before any kernel
 -		 * subsystem relying on it has a chance to run.
 -		 */
 -		set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
 +	if (__cpu_suspend_enter(&state)) {
 +		/* Call the suspend finisher */
 +		ret = fn(arg);
  
  		/*
- 		 * Never gets here, unless the suspend finisher fails.
- 		 * Successful cpu_suspend() should return from cpu_resume(),
- 		 * returning through this code path is considered an error
- 		 * If the return value is set to 0 force ret = -EOPNOTSUPP
- 		 * to make sure a proper error condition is propagated
+ 		 * PSTATE was not saved over suspend/resume, re-enable any
+ 		 * detected features that might not have been set correctly.
+ 		 */
+ 		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
+ 				CONFIG_ARM64_PAN));
+ 
+ 		/*
+ 		 * Never gets here unless the suspend finisher fails:
+ 		 * a successful cpu_suspend() returns through cpu_resume(),
+ 		 * so force an error code to propagate the failure.
  		 */
 -		if (hw_breakpoint_restore)
 -			hw_breakpoint_restore(NULL);
 +		if (!ret)
 +			ret = -EOPNOTSUPP;
 +	} else {
 +		__cpu_suspend_exit();
  	}
  
  	unpause_graph_tracing();
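
In the resolved cpu_suspend(), __cpu_suspend_enter() behaves like
setjmp(): it returns non-zero on the way into suspend, after saving the
CPU context into 'state', and zero when the CPU comes back through
cpu_resume() with that context restored. The inline restore work that
the stable branch carried (resetting TTBR0, restoring the per-cpu
offset, PSTATE and breakpoint state) is assumed here to live in
__cpu_suspend_exit() on the LSK side, which is why the merge can drop
those lines. A simplified sketch of the control flow:

	if (__cpu_suspend_enter(&state)) {
		/* Save path: ask firmware to power down. A successful
		 * suspend never returns here; it resumes via
		 * cpu_resume() instead. */
		ret = fn(arg);
		if (!ret)
			ret = -EOPNOTSUPP;	/* returning at all is an error */
	} else {
		/* Resume path: re-initialise state that was not
		 * preserved across suspend. */
		__cpu_suspend_exit();
	}
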
diff --cc arch/arm64/mm/fault.c
index 6c16e4963b39,247bae758e1e..18e5a2c3d554
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@@ -638,24 -606,18 +640,32 @@@ asmlinkage int __exception do_debug_exc
  
  	return 0;
  }
 +NOKPROBE_SYMBOL(do_debug_exception);
  
  #ifdef CONFIG_ARM64_PAN
- void cpu_enable_pan(void *__unused)
+ int cpu_enable_pan(void *__unused)
  {
+ 	/*
+ 	 * We modify PSTATE. This won't work from irq context as the PSTATE
+ 	 * is discarded once we return from the exception.
+ 	 */
+ 	WARN_ON_ONCE(in_interrupt());
+ 
  	config_sctlr_el1(SCTLR_EL1_SPAN, 0);
+ 	asm(SET_PSTATE_PAN(1));
+ 	return 0;
  }
  #endif /* CONFIG_ARM64_PAN */
 +
 +#ifdef CONFIG_ARM64_UAO
 +/*
 + * Kernel threads have fs=KERNEL_DS by default, and don't need to call
 + * set_fs(), devtmpfs in particular relies on this behaviour.
 + * We need to enable the feature at runtime (instead of adding it to
 + * PSR_MODE_EL1h) as the feature may not be implemented by the cpu.
 + */
- void cpu_enable_uao(void *__unused)
++int cpu_enable_uao(void *__unused)
 +{
 +	asm(SET_PSTATE_UAO(1));
++	return 0;
 +}
 +#endif /* CONFIG_ARM64_UAO */
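
With the return-type fix above, cpu_enable_uao() follows the same
convention as cpu_enable_pan() and can be wired into the cpufeature
table. A sketch of the corresponding entry, with field values taken
from the upstream UAO patches (they may differ slightly in this tree):

	{
		.desc = "User Access Override",
		.capability = ARM64_HAS_UAO,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64MMFR2_EL1,
		.field_pos = ID_AA64MMFR2_UAO_SHIFT,
		.min_field_value = 1,
		.enable = cpu_enable_uao,	/* invoked via stop_machine() */
	},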