Merge branch 'v4.4/topic/hibernate' into linux-linaro-lsk-v4.4
author: Alex Shi <alex.shi@linaro.org>
Thu, 17 Nov 2016 06:14:11 +0000 (14:14 +0800)
committer: Alex Shi <alex.shi@linaro.org>
Thu, 17 Nov 2016 06:14:11 +0000 (14:14 +0800)
Conflicts:
arch/arm64/include/asm/kvm_asm.h
arch/arm64/include/asm/memory.h
arch/arm64/kernel/head.S
arch/arm64/kvm/hyp.S

1  2 
arch/arm64/include/asm/kvm_asm.h

index 054ac25e7c2e7c7d9b9f2c3ef32286c35ed9c254,fca51486cd3700ba7df7efa6c8c70602bcdd9996..36a30c80032d46b8bd51b0aaee3eb2f5c11599e5
  #define KVM_ARM64_DEBUG_DIRTY_SHIFT   0
  #define KVM_ARM64_DEBUG_DIRTY         (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
  
 -#define kvm_ksym_ref(sym)             ((void *)&sym + kvm_ksym_shift)
 +#define kvm_ksym_ref(sym)             phys_to_virt((u64)&sym - kimage_voffset)
  
  #ifndef __ASSEMBLY__
+ #if __GNUC__ > 4
+ #define kvm_ksym_shift                        (PAGE_OFFSET - KIMAGE_VADDR)
+ #else
+ /*
+  * GCC versions 4.9 and older will fold the constant below into the addend of
+  * the reference to 'sym' above if kvm_ksym_shift is declared static or if the
+  * constant is used directly. However, since we use the small code model for
+  * the core kernel, the reference to 'sym' will be emitted as a adrp/add pair,
+  * with a +/- 4 GB range, resulting in linker relocation errors if the shift
+  * is sufficiently large. So prevent the compiler from folding the shift into
+  * the addend, by making the shift a variable with external linkage.
+  */
+ __weak u64 kvm_ksym_shift = PAGE_OFFSET - KIMAGE_VADDR;
+ #endif
  struct kvm;
  struct kvm_vcpu;