Merge remote-tracking branch 'lts/linux-4.4.y' into linux-linaro-lsk-v4.4
author Alex Shi <alex.shi@linaro.org>
Mon, 12 Dec 2016 14:16:26 +0000 (22:16 +0800)
committer Alex Shi <alex.shi@linaro.org>
Mon, 12 Dec 2016 14:16:26 +0000 (22:16 +0800)
Conflicts:
also changed cpu_enable_uao in arch/arm64/include/asm/processor.h
fixed a mismatched comment in arch/arm64/kernel/suspend.c

arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/processor.h
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/suspend.c
arch/arm64/mm/fault.c

Simple merge
index cef1cf398356f1f61ceea854fc564f6df1d316cc,d085595289271bbcc663c70129ab7649de1dd3a8..4be934fde40906cac5d3b6e6f2c34ea8f35d695a
@@@ -190,7 -186,6 +190,7 @@@ static inline void spin_lock_prefetch(c
  
  #endif
  
- void cpu_enable_pan(void *__unused);
- void cpu_enable_uao(void *__unused);
+ int cpu_enable_pan(void *__unused);
++int cpu_enable_uao(void *__unused);
  
  #endif /* __ASM_PROCESSOR_H */
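
(Background note, not part of the merge: the void -> int change matches the
callback type expected by stop_machine(), which the cpufeature.c hunk below
switches to. A minimal sketch of that contract; example_enable is a
hypothetical hook name:)

	#include <linux/stop_machine.h>

	/* stop_machine() callbacks are cpu_stop_fn_t: int (*)(void *). */
	static int example_enable(void *__unused)
	{
		/*
		 * Runs in process context on each online CPU with the
		 * machine stopped, so PSTATE changes made here survive.
		 */
		return 0;	/* 0 reports success back to stop_machine() */
	}

	/* usage: stop_machine(example_enable, NULL, cpu_online_mask); */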
index 24ecbeb733ed354f24be2d11fcfb9eba3ddd939c,2735bf8145926c5c8c03f2d58a7ec08646bcee2e..eda7d5915fbb209bf4057cd1f621beadd8c02606
@@@ -821,9 -764,15 +823,15 @@@ enable_cpu_capabilities(const struct ar
  {
        int i;
  
 -      for (i = 0; caps[i].desc; i++)
 +      for (i = 0; caps[i].matches; i++)
                if (caps[i].enable && cpus_have_cap(caps[i].capability))
-                       on_each_cpu(caps[i].enable, NULL, true);
+                       /*
+                        * Use stop_machine() as it schedules the work allowing
+                        * us to modify PSTATE, instead of on_each_cpu() which
+                        * uses an IPI, giving us a PSTATE that disappears when
+                        * we return.
+                        */
+                       stop_machine(caps[i].enable, NULL, cpu_online_mask);
  }
  
  #ifdef CONFIG_HOTPLUG_CPU
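
(Illustration only, not in the merge: why the on_each_cpu() path loses the
update. broken_enable is a hypothetical handler; an IPI handler runs in
interrupt context, and the exception return restores the interrupted
PSTATE, discarding any SET_PSTATE_* write made inside it:)

	static void broken_enable(void *__unused)
	{
		asm(SET_PSTATE_PAN(1));	/* sets PAN in the IRQ's own PSTATE... */
	}	/* ...which is thrown away on the eret back to the interrupted code */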
index b616e365cee33fdb2e61520943e2d8a9ae372861,00c1372bf57ba19e74d6bb87cc0fdc8e6ead8837..5a0b1088c17c0fa0d9998ee03a67791d111b2e75
@@@ -83,21 -80,52 +85,26 @@@ int cpu_suspend(unsigned long arg, int 
         */
        pause_graph_tracing();
  
 -      /*
 -       * mm context saved on the stack, it will be restored when
 -       * the cpu comes out of reset through the identity mapped
 -       * page tables, so that the thread address space is properly
 -       * set-up on function return.
 -       */
 -      ret = __cpu_suspend_enter(arg, fn);
 -      if (ret == 0) {
 -              /*
 -               * We are resuming from reset with TTBR0_EL1 set to the
 -               * idmap to enable the MMU; set the TTBR0 to the reserved
 -               * page tables to prevent speculative TLB allocations, flush
 -               * the local tlb and set the default tcr_el1.t0sz so that
 -               * the TTBR0 address space set-up is properly restored.
 -               * If the current active_mm != &init_mm we entered cpu_suspend
 -               * with mappings in TTBR0 that must be restored, so we switch
 -               * them back to complete the address space configuration
 -               * restoration before returning.
 -               */
 -              cpu_set_reserved_ttbr0();
 -              local_flush_tlb_all();
 -              cpu_set_default_tcr_t0sz();
 -
 -              if (mm != &init_mm)
 -                      cpu_switch_mm(mm->pgd, mm);
 -
 -              /*
 -               * Restore per-cpu offset before any kernel
 -               * subsystem relying on it has a chance to run.
 -               */
 -              set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
 +      if (__cpu_suspend_enter(&state)) {
 +              /* Call the suspend finisher */
 +              ret = fn(arg);
  
                /*
-                * Never gets here, unless the suspend finisher fails.
-                * Successful cpu_suspend() should return from cpu_resume(),
-                * returning through this code path is considered an error
-                * If the return value is set to 0 force ret = -EOPNOTSUPP
-                * to make sure a proper error condition is propagated
+                * PSTATE was not saved over suspend/resume, re-enable any
+                * detected features that might not have been set correctly.
+                */
+               asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
+                               CONFIG_ARM64_PAN));
+               /*
+                * Restore HW breakpoint registers to sane values
+                * before debug exceptions are possibly reenabled
+                * through local_dbg_restore.
                 */
 -              if (hw_breakpoint_restore)
 -                      hw_breakpoint_restore(NULL);
 +              if (!ret)
 +                      ret = -EOPNOTSUPP;
 +      } else {
 +              __cpu_suspend_exit();
        }
  
        unpause_graph_tracing();
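
(Reading aid, not part of the diff: in outline, the merged cpu_suspend()
body between the graph-tracing calls becomes:)

	if (__cpu_suspend_enter(&state)) {
		ret = fn(arg);	/* suspend finisher; normally never returns */

		/* PSTATE is not preserved over suspend/resume: re-assert PAN */
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
				CONFIG_ARM64_PAN));

		if (!ret)	/* returning here means the finisher failed */
			ret = -EOPNOTSUPP;
	} else {
		__cpu_suspend_exit();	/* resume path: restore cpu state */
	}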
index 6c16e4963b39092a30ad444f1862d39d270538ae,247bae758e1e7ec37b34f26d92e63d5e96745e62..18e5a2c3d5546ff0bf7c1e809d2817ad53ac348a
@@@ -638,24 -606,18 +640,32 @@@ asmlinkage int __exception do_debug_exc
  
        return 0;
  }
 +NOKPROBE_SYMBOL(do_debug_exception);
  
  #ifdef CONFIG_ARM64_PAN
- void cpu_enable_pan(void *__unused)
+ int cpu_enable_pan(void *__unused)
  {
+       /*
+        * We modify PSTATE. This won't work from irq context as the PSTATE
+        * is discarded once we return from the exception.
+        */
+       WARN_ON_ONCE(in_interrupt());
        config_sctlr_el1(SCTLR_EL1_SPAN, 0);
+       asm(SET_PSTATE_PAN(1));
+       return 0;
  }
  #endif /* CONFIG_ARM64_PAN */
- void cpu_enable_uao(void *__unused)
 +
 +#ifdef CONFIG_ARM64_UAO
 +/*
 + * Kernel threads have fs=KERNEL_DS by default, and don't need to call
 + * set_fs(), devtmpfs in particular relies on this behaviour.
 + * We need to enable the feature at runtime (instead of adding it to
 + * PSR_MODE_EL1h) as the feature may not be implemented by the cpu.
 + */
++int cpu_enable_uao(void *__unused)
 +{
 +      asm(SET_PSTATE_UAO(1));
++      return 0;
 +}
 +#endif /* CONFIG_ARM64_UAO */
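
(Sketch of the resolved arch/arm64/mm/fault.c hooks, assuming the hunks
above apply as shown and with the previously missing return 0; supplied
in cpu_enable_uao():)

	#ifdef CONFIG_ARM64_PAN
	int cpu_enable_pan(void *__unused)
	{
		/* PSTATE writes are lost if made from irq context, hence the check */
		WARN_ON_ONCE(in_interrupt());
		config_sctlr_el1(SCTLR_EL1_SPAN, 0);
		asm(SET_PSTATE_PAN(1));
		return 0;
	}
	#endif /* CONFIG_ARM64_PAN */

	#ifdef CONFIG_ARM64_UAO
	int cpu_enable_uao(void *__unused)
	{
		asm(SET_PSTATE_UAO(1));
		return 0;
	}
	#endif /* CONFIG_ARM64_UAO */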