Merge remote-tracking branch 'v4.4/topic/mm-kaslr-pax_usercopy' into linux-linaro...
author    Alex Shi <alex.shi@linaro.org>
Sat, 27 Aug 2016 03:27:14 +0000 (11:27 +0800)
committer Alex Shi <alex.shi@linaro.org>
Sat, 27 Aug 2016 03:27:14 +0000 (11:27 +0800)
38 files changed:
arch/Kconfig
arch/arm/Kconfig
arch/arm/include/asm/uaccess.h
arch/arm64/Kconfig
arch/arm64/include/asm/uaccess.h
arch/arm64/kernel/arm64ksyms.c
arch/arm64/lib/copy_from_user.S
arch/arm64/lib/copy_to_user.S
arch/ia64/Kconfig
arch/ia64/include/asm/uaccess.h
arch/powerpc/Kconfig
arch/powerpc/include/asm/uaccess.h
arch/s390/Kconfig
arch/s390/lib/uaccess.c
arch/sparc/Kconfig
arch/sparc/include/asm/uaccess_32.h
arch/sparc/include/asm/uaccess_64.h
arch/x86/Kconfig
arch/x86/include/asm/thread_info.h
arch/x86/include/asm/uaccess.h
arch/x86/include/asm/uaccess_32.h
arch/x86/include/asm/uaccess_64.h
drivers/gpu/drm/i915/i915_gem_execbuffer.c
include/linux/mmzone.h
include/linux/slab.h
include/linux/thread_info.h
include/linux/uaccess.h
init/Kconfig
kernel/events/uprobes.c
kernel/futex.c
lib/strncpy_from_user.c
lib/strnlen_user.c
mm/Makefile
mm/maccess.c
mm/slab.c
mm/slub.c
mm/usercopy.c [new file with mode: 0644]
security/Kconfig

index 4e949e58b1928363232abac3a69a25413e90652e..d4d9845530f1df6cb70117a1eca282d31fb4acbb 100644 (file)
@@ -423,6 +423,15 @@ config CC_STACKPROTECTOR_STRONG
 
 endchoice
 
+config HAVE_ARCH_WITHIN_STACK_FRAMES
+       bool
+       help
+         An architecture should select this if it can walk the kernel stack
+         frames to determine if an object is part of either the arguments
+         or local variables (i.e. that it excludes saved return addresses,
+         and similar) by implementing an inline arch_within_stack_frames(),
+         which is used by CONFIG_HARDENED_USERCOPY.
+
 config HAVE_CONTEXT_TRACKING
        bool
        help
index 34e1569a11ee322a0a020bdfe0c9b801b6b58c12..51f1775e3adbea19c648036e215ee512e682d1f3 100644 (file)
@@ -35,6 +35,7 @@ config ARM
        select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
        select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32
        select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32
+       select HAVE_ARCH_HARDENED_USERCOPY
        select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
        select HAVE_ARCH_TRACEHOOK
        select HAVE_BPF_JIT
index 35c9db857ebe9c7d53715ec42518a6e9fbe1dc6e..7fb59199c6bbbebc03fafbff1c8083aa5892de91 100644 (file)
@@ -496,7 +496,10 @@ arm_copy_from_user(void *to, const void __user *from, unsigned long n);
 static inline unsigned long __must_check
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-       unsigned int __ua_flags = uaccess_save_and_enable();
+       unsigned int __ua_flags;
+
+       check_object_size(to, n, false);
+       __ua_flags = uaccess_save_and_enable();
        n = arm_copy_from_user(to, from, n);
        uaccess_restore(__ua_flags);
        return n;
@@ -511,11 +514,15 @@ static inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 #ifndef CONFIG_UACCESS_WITH_MEMCPY
-       unsigned int __ua_flags = uaccess_save_and_enable();
+       unsigned int __ua_flags;
+
+       check_object_size(from, n, true);
+       __ua_flags = uaccess_save_and_enable();
        n = arm_copy_to_user(to, from, n);
        uaccess_restore(__ua_flags);
        return n;
 #else
+       check_object_size(from, n, true);
        return arm_copy_to_user(to, from, n);
 #endif
 }
index 97583a1878dbbb91b1ee8f39adb8e426392324d0..8dbe3cba855c185012aed6a86cc49acef526f363 100644 (file)
@@ -49,6 +49,7 @@ config ARM64
        select HAVE_ALIGNED_STRUCT_PAGE if SLUB
        select HAVE_ARCH_AUDITSYSCALL
        select HAVE_ARCH_BITREVERSE
+       select HAVE_ARCH_HARDENED_USERCOPY
        select HAVE_ARCH_HUGE_VMAP
        select HAVE_ARCH_JUMP_LABEL
        select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
index 0685d74572af788b05d44658c7dba7e4fc3742ba..c3d445b42351e1a529d94c4ba44e9c4d37544821 100644 (file)
@@ -269,24 +269,39 @@ do {                                                                      \
                -EFAULT;                                                \
 })
 
-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
+extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
 extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n);
 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
 
+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+       check_object_size(to, n, false);
+       return __arch_copy_from_user(to, from, n);
+}
+
+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+       check_object_size(from, n, true);
+       return __arch_copy_to_user(to, from, n);
+}
+
 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-       if (access_ok(VERIFY_READ, from, n))
-               n = __copy_from_user(to, from, n);
-       else /* security hole - plug it */
+       if (access_ok(VERIFY_READ, from, n)) {
+               check_object_size(to, n, false);
+               n = __arch_copy_from_user(to, from, n);
+       } else /* security hole - plug it */
                memset(to, 0, n);
        return n;
 }
 
 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-       if (access_ok(VERIFY_WRITE, to, n))
-               n = __copy_to_user(to, from, n);
+       if (access_ok(VERIFY_WRITE, to, n)) {
+               check_object_size(from, n, true);
+               n = __arch_copy_to_user(to, from, n);
+       }
        return n;
 }
 
index 3b6d8cc9dfe00ce14b764a3102ad0cd73f546c82..c654df05b7d71c38e4580fc80b4e338ce1a0599c 100644 (file)
@@ -33,8 +33,8 @@ EXPORT_SYMBOL(copy_page);
 EXPORT_SYMBOL(clear_page);
 
        /* user mem (segment) */
-EXPORT_SYMBOL(__copy_from_user);
-EXPORT_SYMBOL(__copy_to_user);
+EXPORT_SYMBOL(__arch_copy_from_user);
+EXPORT_SYMBOL(__arch_copy_to_user);
 EXPORT_SYMBOL(__clear_user);
 EXPORT_SYMBOL(__copy_in_user);
 
index 17e8306dca294ecf37fc27355956eb6bcfc5d92e..0b90497d4424c59d0a9ce2dcf5642012f452d3c8 100644 (file)
@@ -66,7 +66,7 @@
        .endm
 
 end    .req    x5
-ENTRY(__copy_from_user)
+ENTRY(__arch_copy_from_user)
 ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
            CONFIG_ARM64_PAN)
        add     end, x0, x2
@@ -75,7 +75,7 @@ ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
            CONFIG_ARM64_PAN)
        mov     x0, #0                          // Nothing to copy
        ret
-ENDPROC(__copy_from_user)
+ENDPROC(__arch_copy_from_user)
 
        .section .fixup,"ax"
        .align  2
index 21faae60f9887ecbbfccb7ba1fb918839d47a291..7a7efe25503452bdfe8e4108b5f4aa0ad9495da5 100644 (file)
@@ -65,7 +65,7 @@
        .endm
 
 end    .req    x5
-ENTRY(__copy_to_user)
+ENTRY(__arch_copy_to_user)
 ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
            CONFIG_ARM64_PAN)
        add     end, x0, x2
@@ -74,7 +74,7 @@ ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
            CONFIG_ARM64_PAN)
        mov     x0, #0
        ret
-ENDPROC(__copy_to_user)
+ENDPROC(__arch_copy_to_user)
 
        .section .fixup,"ax"
        .align  2
index eb0249e3798112615fd5774d6f30229aa6241e53..2c86a4ef67424c0495c5438854d1794198a74b3f 100644 (file)
@@ -53,6 +53,7 @@ config IA64
        select MODULES_USE_ELF_RELA
        select ARCH_USE_CMPXCHG_LOCKREF
        select HAVE_ARCH_AUDITSYSCALL
+       select HAVE_ARCH_HARDENED_USERCOPY
        default y
        help
          The Itanium Processor Family is Intel's 64-bit successor to
index 4f3fb6ccbf2139b3e23798df6615e34ec5e4d09a..3d6b840c5c99aeba08852ab70ce40d1a56a65c0c 100644 (file)
@@ -241,12 +241,18 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
 static inline unsigned long
 __copy_to_user (void __user *to, const void *from, unsigned long count)
 {
+       if (!__builtin_constant_p(count))
+               check_object_size(from, count, true);
+
        return __copy_user(to, (__force void __user *) from, count);
 }
 
 static inline unsigned long
 __copy_from_user (void *to, const void __user *from, unsigned long count)
 {
+       if (!__builtin_constant_p(count))
+               check_object_size(to, count, false);
+
        return __copy_user((__force void __user *) to, from, count);
 }
 
@@ -258,8 +264,11 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
        const void *__cu_from = (from);                                                 \
        long __cu_len = (n);                                                            \
                                                                                        \
-       if (__access_ok(__cu_to, __cu_len, get_fs()))                                   \
-               __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len);   \
+       if (__access_ok(__cu_to, __cu_len, get_fs())) {                                 \
+               if (!__builtin_constant_p(n))                                           \
+                       check_object_size(__cu_from, __cu_len, true);                   \
+               __cu_len = __copy_user(__cu_to, (__force void __user *)  __cu_from, __cu_len);  \
+       }                                                                               \
        __cu_len;                                                                       \
 })
 
@@ -270,8 +279,11 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
        long __cu_len = (n);                                                            \
                                                                                        \
        __chk_user_ptr(__cu_from);                                                      \
-       if (__access_ok(__cu_from, __cu_len, get_fs()))                                 \
+       if (__access_ok(__cu_from, __cu_len, get_fs())) {                               \
+               if (!__builtin_constant_p(n))                                           \
+                       check_object_size(__cu_to, __cu_len, false);                    \
                __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len);   \
+       }                                                                               \
        __cu_len;                                                                       \
 })
 
index db49e0d796b1bf50642365aa92d63c5a83a07320..ec7b8f1e4822c66a4ced8bad6cd52f869ad413b2 100644 (file)
@@ -160,6 +160,7 @@ config PPC
        select EDAC_ATOMIC_SCRUB
        select ARCH_HAS_DMA_SET_COHERENT_MASK
        select HAVE_ARCH_SECCOMP_FILTER
+       select HAVE_ARCH_HARDENED_USERCOPY
 
 config GENERIC_CSUM
        def_bool CPU_LITTLE_ENDIAN
index 2a8ebae0936beb0f6b3ec46eafaf979f0d8ddedd..b39a6937005776470de5c6ee3b4b3a26d4a954cc 100644 (file)
@@ -325,10 +325,15 @@ static inline unsigned long copy_from_user(void *to,
 {
        unsigned long over;
 
-       if (access_ok(VERIFY_READ, from, n))
+       if (access_ok(VERIFY_READ, from, n)) {
+               if (!__builtin_constant_p(n))
+                       check_object_size(to, n, false);
                return __copy_tofrom_user((__force void __user *)to, from, n);
+       }
        if ((unsigned long)from < TASK_SIZE) {
                over = (unsigned long)from + n - TASK_SIZE;
+               if (!__builtin_constant_p(n - over))
+                       check_object_size(to, n - over, false);
                return __copy_tofrom_user((__force void __user *)to, from,
                                n - over) + over;
        }
@@ -340,10 +345,15 @@ static inline unsigned long copy_to_user(void __user *to,
 {
        unsigned long over;
 
-       if (access_ok(VERIFY_WRITE, to, n))
+       if (access_ok(VERIFY_WRITE, to, n)) {
+               if (!__builtin_constant_p(n))
+                       check_object_size(from, n, true);
                return __copy_tofrom_user(to, (__force void __user *)from, n);
+       }
        if ((unsigned long)to < TASK_SIZE) {
                over = (unsigned long)to + n - TASK_SIZE;
+               if (!__builtin_constant_p(n))
+                       check_object_size(from, n - over, true);
                return __copy_tofrom_user(to, (__force void __user *)from,
                                n - over) + over;
        }
@@ -387,6 +397,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
                if (ret == 0)
                        return 0;
        }
+
+       if (!__builtin_constant_p(n))
+               check_object_size(to, n, false);
+
        return __copy_tofrom_user((__force void __user *)to, from, n);
 }
 
@@ -413,6 +427,9 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
                if (ret == 0)
                        return 0;
        }
+       if (!__builtin_constant_p(n))
+               check_object_size(from, n, true);
+
        return __copy_tofrom_user(to, (__force const void __user *)from, n);
 }
 
index 3a55f493c7da72b9ee7ee7ad7c8c76d9e01bed8d..60530fd93d6d11368731fedfe145afb6826c7e59 100644 (file)
@@ -117,6 +117,7 @@ config S390
        select HAVE_ALIGNED_STRUCT_PAGE if SLUB
        select HAVE_ARCH_AUDITSYSCALL
        select HAVE_ARCH_EARLY_PFN_TO_NID
+       select HAVE_ARCH_HARDENED_USERCOPY
        select HAVE_ARCH_JUMP_LABEL
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_SOFT_DIRTY
index ae4de559e3a04288c6be111de684b3355f35109b..6986c20166f028bd859fa38af033d076a93eb3f3 100644 (file)
@@ -104,6 +104,7 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
 
 unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+       check_object_size(to, n, false);
        if (static_branch_likely(&have_mvcos))
                return copy_from_user_mvcos(to, from, n);
        return copy_from_user_mvcp(to, from, n);
@@ -177,6 +178,7 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
 
 unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+       check_object_size(from, n, true);
        if (static_branch_likely(&have_mvcos))
                return copy_to_user_mvcos(to, from, n);
        return copy_to_user_mvcs(to, from, n);
index 56442d2d7bbca4181e8897bfa5d9719dbded43d1..3736be630113b4e1ae8fd26ff45cff32d03d47a9 100644 (file)
@@ -43,6 +43,7 @@ config SPARC
        select ODD_RT_SIGACTION
        select OLD_SIGSUSPEND
        select ARCH_HAS_SG_CHAIN
+       select HAVE_ARCH_HARDENED_USERCOPY
 
 config SPARC32
        def_bool !64BIT
index 64ee103dc29da142305d93d26fd44cf4a62ae699..4cfb77913cd2ef388c9c9bc316fc1eb014e7d2ca 100644 (file)
@@ -313,22 +313,28 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
 
 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-       if (n && __access_ok((unsigned long) to, n))
+       if (n && __access_ok((unsigned long) to, n)) {
+               if (!__builtin_constant_p(n))
+                       check_object_size(from, n, true);
                return __copy_user(to, (__force void __user *) from, n);
-       else
+       } else
                return n;
 }
 
 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+       if (!__builtin_constant_p(n))
+               check_object_size(from, n, true);
        return __copy_user(to, (__force void __user *) from, n);
 }
 
 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-       if (n && __access_ok((unsigned long) from, n))
+       if (n && __access_ok((unsigned long) from, n)) {
+               if (!__builtin_constant_p(n))
+                       check_object_size(to, n, false);
                return __copy_user((__force void __user *) to, from, n);
-       else
+       } else
                return n;
 }
 
index ea6e9a20f3ffb5a80156b1766ac2112c3c0d2bdc..6069e9040388330d81692d9cccfb529f1fbf8986 100644 (file)
@@ -250,8 +250,12 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
 static inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long size)
 {
-       unsigned long ret = ___copy_from_user(to, from, size);
+       unsigned long ret;
 
+       if (!__builtin_constant_p(size))
+               check_object_size(to, size, false);
+
+       ret = ___copy_from_user(to, from, size);
        if (unlikely(ret))
                ret = copy_from_user_fixup(to, from, size);
 
@@ -267,8 +271,11 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
 static inline unsigned long __must_check
 copy_to_user(void __user *to, const void *from, unsigned long size)
 {
-       unsigned long ret = ___copy_to_user(to, from, size);
+       unsigned long ret;
 
+       if (!__builtin_constant_p(size))
+               check_object_size(from, size, true);
+       ret = ___copy_to_user(to, from, size);
        if (unlikely(ret))
                ret = copy_to_user_fixup(to, from, size);
        return ret;
index 9d5e3a27bef202a9c8e38da85e6dcd1274cb1627..924bbffc56f090081e451ffd37ae23552e597076 100644 (file)
@@ -77,6 +77,7 @@ config X86
        select HAVE_ALIGNED_STRUCT_PAGE         if SLUB
        select HAVE_AOUT                        if X86_32
        select HAVE_ARCH_AUDITSYSCALL
+       select HAVE_ARCH_HARDENED_USERCOPY
        select HAVE_ARCH_HUGE_VMAP              if X86_64 || X86_PAE
        select HAVE_ARCH_JUMP_LABEL
        select HAVE_ARCH_KASAN                  if X86_64 && SPARSEMEM_VMEMMAP
@@ -86,7 +87,7 @@ config X86
        select HAVE_ARCH_SOFT_DIRTY             if X86_64
        select HAVE_ARCH_TRACEHOOK
        select HAVE_ARCH_TRANSPARENT_HUGEPAGE
-       select HAVE_BPF_JIT                     if X86_64
+       select HAVE_ARCH_WITHIN_STACK_FRAMES
        select HAVE_CC_STACKPROTECTOR
        select HAVE_CMPXCHG_DOUBLE
        select HAVE_CMPXCHG_LOCAL
index c7b551028740f18a5360070a6ccb5dc9ad714c5e..0c977fc124a77b7a766cce85de4c9e130d699689 100644 (file)
@@ -177,6 +177,50 @@ static inline unsigned long current_stack_pointer(void)
        return sp;
 }
 
+/*
+ * Walks up the stack frames to make sure that the specified object is
+ * entirely contained by a single stack frame.
+ *
+ * Returns:
+ *              1 if within a frame
+ *             -1 if placed across a frame boundary (or outside stack)
+ *              0 unable to determine (no frame pointers, etc)
+ */
+static inline int arch_within_stack_frames(const void * const stack,
+                                          const void * const stackend,
+                                          const void *obj, unsigned long len)
+{
+#if defined(CONFIG_FRAME_POINTER)
+       const void *frame = NULL;
+       const void *oldframe;
+
+       oldframe = __builtin_frame_address(1);
+       if (oldframe)
+               frame = __builtin_frame_address(2);
+       /*
+        * low ----------------------------------------------> high
+        * [saved bp][saved ip][args][local vars][saved bp][saved ip]
+        *                     ^----------------^
+        *               allow copies only within here
+        */
+       while (stack <= frame && frame < stackend) {
+               /*
+                * If obj + len extends past the last frame, this
+                * check won't pass and the next frame will be 0,
+                * causing us to bail out and correctly report
+                * the copy as invalid.
+                */
+               if (obj + len <= frame)
+                       return obj >= oldframe + 2 * sizeof(void *) ? 1 : -1;
+               oldframe = frame;
+               frame = *(const void * const *)frame;
+       }
+       return -1;
+#else
+       return 0;
+#endif
+}
+
 #else /* !__ASSEMBLY__ */
 
 #ifdef CONFIG_X86_64
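
A rough sketch of how the tri-state result above (1 / -1 / 0) is consumed: the new mm/usercopy.c added at the end of this diff (its tail is truncated in this view) first classifies the object against the task stack and only then asks the architecture to walk its frames. The helper below is an illustration along those lines, reusing the NOT_STACK/BAD_STACK/GOOD_STACK names from mm/usercopy.c; it is not part of this patch.

/* Illustrative consumer of arch_within_stack_frames(). */
static inline int check_stack_object(const void *obj, unsigned long len)
{
        const void * const stack = task_stack_page(current);
        const void * const stackend = stack + THREAD_SIZE;
        int ret;

        /* Object is not on the stack at all. */
        if (obj + len <= stack || stackend <= obj)
                return NOT_STACK;

        /* Object partially overlaps the stack boundary: always reject. */
        if (obj + len > stackend || obj < stack)
                return BAD_STACK;

        /* 1 = within a single frame, -1 = crosses frames, 0 = unknown. */
        ret = arch_within_stack_frames(stack, stackend, obj, len);
        if (ret)
                return ret;

        return GOOD_STACK;
}
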
index 09b1b0ab94b7653f7ed7019eb7e35b518582f825..dd73cf90fb18f60b7b3c12370600bd53d3a664ea 100644 (file)
@@ -134,6 +134,9 @@ extern int __get_user_4(void);
 extern int __get_user_8(void);
 extern int __get_user_bad(void);
 
+#define __uaccess_begin() stac()
+#define __uaccess_end()   clac()
+
 /*
  * This is a type: either unsigned long, if the argument fits into
  * that type, or otherwise unsigned long long.
@@ -193,10 +196,10 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
 
 #ifdef CONFIG_X86_32
 #define __put_user_asm_u64(x, addr, err, errret)                       \
-       asm volatile(ASM_STAC "\n"                                      \
+       asm volatile("\n"                                               \
                     "1:        movl %%eax,0(%2)\n"                     \
                     "2:        movl %%edx,4(%2)\n"                     \
-                    "3: " ASM_CLAC "\n"                                \
+                    "3:"                                               \
                     ".section .fixup,\"ax\"\n"                         \
                     "4:        movl %3,%0\n"                           \
                     "  jmp 3b\n"                                       \
@@ -207,10 +210,10 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
                     : "A" (x), "r" (addr), "i" (errret), "0" (err))
 
 #define __put_user_asm_ex_u64(x, addr)                                 \
-       asm volatile(ASM_STAC "\n"                                      \
+       asm volatile("\n"                                               \
                     "1:        movl %%eax,0(%1)\n"                     \
                     "2:        movl %%edx,4(%1)\n"                     \
-                    "3: " ASM_CLAC "\n"                                \
+                    "3:"                                               \
                     _ASM_EXTABLE_EX(1b, 2b)                            \
                     _ASM_EXTABLE_EX(2b, 3b)                            \
                     : : "A" (x), "r" (addr))
@@ -304,6 +307,10 @@ do {                                                                       \
        }                                                               \
 } while (0)
 
+/*
+ * This doesn't do __uaccess_begin/end - the exception handling
+ * around it must do that.
+ */
 #define __put_user_size_ex(x, ptr, size)                               \
 do {                                                                   \
        __chk_user_ptr(ptr);                                            \
@@ -358,9 +365,9 @@ do {                                                                        \
 } while (0)
 
 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)      \
-       asm volatile(ASM_STAC "\n"                                      \
+       asm volatile("\n"                                               \
                     "1:        mov"itype" %2,%"rtype"1\n"              \
-                    "2: " ASM_CLAC "\n"                                \
+                    "2:\n"                                             \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:        mov %3,%0\n"                            \
                     "  xor"itype" %"rtype"1,%"rtype"1\n"               \
@@ -370,6 +377,10 @@ do {                                                                       \
                     : "=r" (err), ltype(x)                             \
                     : "m" (__m(addr)), "i" (errret), "0" (err))
 
+/*
+ * This doesn't do __uaccess_begin/end - the exception handling
+ * around it must do that.
+ */
 #define __get_user_size_ex(x, ptr, size)                               \
 do {                                                                   \
        __chk_user_ptr(ptr);                                            \
@@ -400,7 +411,9 @@ do {                                                                        \
 #define __put_user_nocheck(x, ptr, size)                       \
 ({                                                             \
        int __pu_err;                                           \
+       __uaccess_begin();                                      \
        __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
+       __uaccess_end();                                        \
        __builtin_expect(__pu_err, 0);                          \
 })
 
@@ -408,7 +421,9 @@ do {                                                                        \
 ({                                                                     \
        int __gu_err;                                                   \
        unsigned long __gu_val;                                         \
+       __uaccess_begin();                                              \
        __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);    \
+       __uaccess_end();                                                \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
        __builtin_expect(__gu_err, 0);                                  \
 })
@@ -423,9 +438,9 @@ struct __large_struct { unsigned long buf[100]; };
  * aliasing issues.
  */
 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)      \
-       asm volatile(ASM_STAC "\n"                                      \
+       asm volatile("\n"                                               \
                     "1:        mov"itype" %"rtype"1,%2\n"              \
-                    "2: " ASM_CLAC "\n"                                \
+                    "2:\n"                                             \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:        mov %3,%0\n"                            \
                     "  jmp 2b\n"                                       \
@@ -445,11 +460,11 @@ struct __large_struct { unsigned long buf[100]; };
  */
 #define uaccess_try    do {                                            \
        current_thread_info()->uaccess_err = 0;                         \
-       stac();                                                         \
+       __uaccess_begin();                                              \
        barrier();
 
 #define uaccess_catch(err)                                             \
-       clac();                                                         \
+       __uaccess_end();                                                \
        (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0);    \
 } while (0)
 
@@ -547,12 +562,13 @@ extern void __cmpxchg_wrong_size(void)
        __typeof__(ptr) __uval = (uval);                                \
        __typeof__(*(ptr)) __old = (old);                               \
        __typeof__(*(ptr)) __new = (new);                               \
+       __uaccess_begin();                                              \
        switch (size) {                                                 \
        case 1:                                                         \
        {                                                               \
-               asm volatile("\t" ASM_STAC "\n"                         \
+               asm volatile("\n"                                       \
                        "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"          \
-                       "2:\t" ASM_CLAC "\n"                            \
+                       "2:\n"                                          \
                        "\t.section .fixup, \"ax\"\n"                   \
                        "3:\tmov     %3, %0\n"                          \
                        "\tjmp     2b\n"                                \
@@ -566,9 +582,9 @@ extern void __cmpxchg_wrong_size(void)
        }                                                               \
        case 2:                                                         \
        {                                                               \
-               asm volatile("\t" ASM_STAC "\n"                         \
+               asm volatile("\n"                                       \
                        "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"          \
-                       "2:\t" ASM_CLAC "\n"                            \
+                       "2:\n"                                          \
                        "\t.section .fixup, \"ax\"\n"                   \
                        "3:\tmov     %3, %0\n"                          \
                        "\tjmp     2b\n"                                \
@@ -582,9 +598,9 @@ extern void __cmpxchg_wrong_size(void)
        }                                                               \
        case 4:                                                         \
        {                                                               \
-               asm volatile("\t" ASM_STAC "\n"                         \
+               asm volatile("\n"                                       \
                        "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"          \
-                       "2:\t" ASM_CLAC "\n"                            \
+                       "2:\n"                                          \
                        "\t.section .fixup, \"ax\"\n"                   \
                        "3:\tmov     %3, %0\n"                          \
                        "\tjmp     2b\n"                                \
@@ -601,9 +617,9 @@ extern void __cmpxchg_wrong_size(void)
                if (!IS_ENABLED(CONFIG_X86_64))                         \
                        __cmpxchg_wrong_size();                         \
                                                                        \
-               asm volatile("\t" ASM_STAC "\n"                         \
+               asm volatile("\n"                                       \
                        "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"          \
-                       "2:\t" ASM_CLAC "\n"                            \
+                       "2:\n"                                          \
                        "\t.section .fixup, \"ax\"\n"                   \
                        "3:\tmov     %3, %0\n"                          \
                        "\tjmp     2b\n"                                \
@@ -618,6 +634,7 @@ extern void __cmpxchg_wrong_size(void)
        default:                                                        \
                __cmpxchg_wrong_size();                                 \
        }                                                               \
+       __uaccess_end();                                                \
        *__uval = __old;                                                \
        __ret;                                                          \
 })
@@ -714,9 +731,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
         * case, and do only runtime checking for non-constant sizes.
         */
 
-       if (likely(sz < 0 || sz >= n))
+       if (likely(sz < 0 || sz >= n)) {
+               check_object_size(to, n, false);
                n = _copy_from_user(to, from, n);
-       else if(__builtin_constant_p(n))
+       } else if (__builtin_constant_p(n))
                copy_from_user_overflow();
        else
                __copy_from_user_overflow(sz, n);
@@ -732,9 +750,10 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
        might_fault();
 
        /* See the comment in copy_from_user() above. */
-       if (likely(sz < 0 || sz >= n))
+       if (likely(sz < 0 || sz >= n)) {
+               check_object_size(from, n, true);
                n = _copy_to_user(to, from, n);
-       else if(__builtin_constant_p(n))
+       } else if (__builtin_constant_p(n))
                copy_to_user_overflow();
        else
                __copy_to_user_overflow(sz, n);
@@ -745,5 +764,30 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
 #undef __copy_from_user_overflow
 #undef __copy_to_user_overflow
 
+/*
+ * The "unsafe" user accesses aren't really "unsafe", but the naming
+ * is a big fat warning: you have to not only do the access_ok()
+ * checking before using them, but you have to surround them with the
+ * user_access_begin/end() pair.
+ */
+#define user_access_begin()    __uaccess_begin()
+#define user_access_end()      __uaccess_end()
+
+#define unsafe_put_user(x, ptr)                                                \
+({                                                                             \
+       int __pu_err;                                                           \
+       __put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT);         \
+       __builtin_expect(__pu_err, 0);                                          \
+})
+
+#define unsafe_get_user(x, ptr)                                                \
+({                                                                             \
+       int __gu_err;                                                           \
+       unsigned long __gu_val;                                                 \
+       __get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT);    \
+       (x) = (__force __typeof__(*(ptr)))__gu_val;                             \
+       __builtin_expect(__gu_err, 0);                                          \
+})
+
 #endif /* _ASM_X86_UACCESS_H */
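
The comment above spells out the contract for the new unsafe_*() accessors; a minimal usage sketch (the function and its arguments are invented for illustration) looks like the following: the caller performs its own access_ok(), then brackets the raw accesses with user_access_begin()/user_access_end(), which on x86 expand to stac()/clac().

/* Illustrative only: read two consecutive u32s with the unsafe accessors. */
static int demo_read_pair(const u32 __user *uptr, u32 *a, u32 *b)
{
        if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(u32)))
                return -EFAULT;

        user_access_begin();
        if (unsafe_get_user(*a, uptr))
                goto fault;
        if (unsafe_get_user(*b, uptr + 1))
                goto fault;
        user_access_end();
        return 0;

fault:
        user_access_end();
        return -EFAULT;
}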
 
index f5dcb5204dcd5b27e8b8e9a1b87612a28cda10c6..7d3bdd1ed6977b5e1f69dc8ba3e3d6cfa8f861a3 100644 (file)
@@ -33,38 +33,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
  * the specified block with access_ok() before calling this function.
  * The caller should also make sure he pins the user space address
  * so that we don't result in page fault and sleep.
- *
- * Here we special-case 1, 2 and 4-byte copy_*_user invocations.  On a fault
- * we return the initial request size (1, 2 or 4), as copy_*_user should do.
- * If a store crosses a page boundary and gets a fault, the x86 will not write
- * anything, so this is accurate.
  */
-
 static __always_inline unsigned long __must_check
 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 {
-       if (__builtin_constant_p(n)) {
-               unsigned long ret;
-
-               switch (n) {
-               case 1:
-                       __put_user_size(*(u8 *)from, (u8 __user *)to,
-                                       1, ret, 1);
-                       return ret;
-               case 2:
-                       __put_user_size(*(u16 *)from, (u16 __user *)to,
-                                       2, ret, 2);
-                       return ret;
-               case 4:
-                       __put_user_size(*(u32 *)from, (u32 __user *)to,
-                                       4, ret, 4);
-                       return ret;
-               case 8:
-                       __put_user_size(*(u64 *)from, (u64 __user *)to,
-                                       8, ret, 8);
-                       return ret;
-               }
-       }
+       check_object_size(from, n, true);
        return __copy_to_user_ll(to, from, n);
 }
 
@@ -93,26 +66,6 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
 static __always_inline unsigned long
 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 {
-       /* Avoid zeroing the tail if the copy fails..
-        * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
-        * but as the zeroing behaviour is only significant when n is not
-        * constant, that shouldn't be a problem.
-        */
-       if (__builtin_constant_p(n)) {
-               unsigned long ret;
-
-               switch (n) {
-               case 1:
-                       __get_user_size(*(u8 *)to, from, 1, ret, 1);
-                       return ret;
-               case 2:
-                       __get_user_size(*(u16 *)to, from, 2, ret, 2);
-                       return ret;
-               case 4:
-                       __get_user_size(*(u32 *)to, from, 4, ret, 4);
-                       return ret;
-               }
-       }
        return __copy_from_user_ll_nozero(to, from, n);
 }
 
@@ -143,18 +96,25 @@ static __always_inline unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
        might_fault();
+       check_object_size(to, n, false);
        if (__builtin_constant_p(n)) {
                unsigned long ret;
 
                switch (n) {
                case 1:
+                       __uaccess_begin();
                        __get_user_size(*(u8 *)to, from, 1, ret, 1);
+                       __uaccess_end();
                        return ret;
                case 2:
+                       __uaccess_begin();
                        __get_user_size(*(u16 *)to, from, 2, ret, 2);
+                       __uaccess_end();
                        return ret;
                case 4:
+                       __uaccess_begin();
                        __get_user_size(*(u32 *)to, from, 4, ret, 4);
+                       __uaccess_end();
                        return ret;
                }
        }
@@ -170,13 +130,19 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
 
                switch (n) {
                case 1:
+                       __uaccess_begin();
                        __get_user_size(*(u8 *)to, from, 1, ret, 1);
+                       __uaccess_end();
                        return ret;
                case 2:
+                       __uaccess_begin();
                        __get_user_size(*(u16 *)to, from, 2, ret, 2);
+                       __uaccess_end();
                        return ret;
                case 4:
+                       __uaccess_begin();
                        __get_user_size(*(u32 *)to, from, 4, ret, 4);
+                       __uaccess_end();
                        return ret;
                }
        }
index f2f9b39b274ab0c2f81ab6b388f78ba5c881ec5e..2957c8237c28d6e46283bca9dd58a6ec528f1913 100644 (file)
@@ -53,38 +53,53 @@ int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
 {
        int ret = 0;
 
+       check_object_size(dst, size, false);
        if (!__builtin_constant_p(size))
                return copy_user_generic(dst, (__force void *)src, size);
        switch (size) {
-       case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
+       case 1:
+               __uaccess_begin();
+               __get_user_asm(*(u8 *)dst, (u8 __user *)src,
                              ret, "b", "b", "=q", 1);
+               __uaccess_end();
                return ret;
-       case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
+       case 2:
+               __uaccess_begin();
+               __get_user_asm(*(u16 *)dst, (u16 __user *)src,
                              ret, "w", "w", "=r", 2);
+               __uaccess_end();
                return ret;
-       case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
+       case 4:
+               __uaccess_begin();
+               __get_user_asm(*(u32 *)dst, (u32 __user *)src,
                              ret, "l", "k", "=r", 4);
+               __uaccess_end();
                return ret;
-       case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
+       case 8:
+               __uaccess_begin();
+               __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                              ret, "q", "", "=r", 8);
+               __uaccess_end();
                return ret;
        case 10:
+               __uaccess_begin();
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 10);
-               if (unlikely(ret))
-                       return ret;
-               __get_user_asm(*(u16 *)(8 + (char *)dst),
-                              (u16 __user *)(8 + (char __user *)src),
-                              ret, "w", "w", "=r", 2);
+               if (likely(!ret))
+                       __get_user_asm(*(u16 *)(8 + (char *)dst),
+                                      (u16 __user *)(8 + (char __user *)src),
+                                      ret, "w", "w", "=r", 2);
+               __uaccess_end();
                return ret;
        case 16:
+               __uaccess_begin();
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 16);
-               if (unlikely(ret))
-                       return ret;
-               __get_user_asm(*(u64 *)(8 + (char *)dst),
-                              (u64 __user *)(8 + (char __user *)src),
-                              ret, "q", "", "=r", 8);
+               if (likely(!ret))
+                       __get_user_asm(*(u64 *)(8 + (char *)dst),
+                                      (u64 __user *)(8 + (char __user *)src),
+                                      ret, "q", "", "=r", 8);
+               __uaccess_end();
                return ret;
        default:
                return copy_user_generic(dst, (__force void *)src, size);
@@ -103,38 +118,55 @@ int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
 {
        int ret = 0;
 
+       check_object_size(src, size, true);
        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst, src, size);
        switch (size) {
-       case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
+       case 1:
+               __uaccess_begin();
+               __put_user_asm(*(u8 *)src, (u8 __user *)dst,
                              ret, "b", "b", "iq", 1);
+               __uaccess_end();
                return ret;
-       case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
+       case 2:
+               __uaccess_begin();
+               __put_user_asm(*(u16 *)src, (u16 __user *)dst,
                              ret, "w", "w", "ir", 2);
+               __uaccess_end();
                return ret;
-       case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
+       case 4:
+               __uaccess_begin();
+               __put_user_asm(*(u32 *)src, (u32 __user *)dst,
                              ret, "l", "k", "ir", 4);
+               __uaccess_end();
                return ret;
-       case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
+       case 8:
+               __uaccess_begin();
+               __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                              ret, "q", "", "er", 8);
+               __uaccess_end();
                return ret;
        case 10:
+               __uaccess_begin();
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "er", 10);
-               if (unlikely(ret))
-                       return ret;
-               asm("":::"memory");
-               __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
-                              ret, "w", "w", "ir", 2);
+               if (likely(!ret)) {
+                       asm("":::"memory");
+                       __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
+                                      ret, "w", "w", "ir", 2);
+               }
+               __uaccess_end();
                return ret;
        case 16:
+               __uaccess_begin();
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "er", 16);
-               if (unlikely(ret))
-                       return ret;
-               asm("":::"memory");
-               __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
-                              ret, "q", "", "er", 8);
+               if (likely(!ret)) {
+                       asm("":::"memory");
+                       __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
+                                      ret, "q", "", "er", 8);
+               }
+               __uaccess_end();
                return ret;
        default:
                return copy_user_generic((__force void *)dst, src, size);
@@ -160,39 +192,47 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
        switch (size) {
        case 1: {
                u8 tmp;
+               __uaccess_begin();
                __get_user_asm(tmp, (u8 __user *)src,
                               ret, "b", "b", "=q", 1);
                if (likely(!ret))
                        __put_user_asm(tmp, (u8 __user *)dst,
                                       ret, "b", "b", "iq", 1);
+               __uaccess_end();
                return ret;
        }
        case 2: {
                u16 tmp;
+               __uaccess_begin();
                __get_user_asm(tmp, (u16 __user *)src,
                               ret, "w", "w", "=r", 2);
                if (likely(!ret))
                        __put_user_asm(tmp, (u16 __user *)dst,
                                       ret, "w", "w", "ir", 2);
+               __uaccess_end();
                return ret;
        }
 
        case 4: {
                u32 tmp;
+               __uaccess_begin();
                __get_user_asm(tmp, (u32 __user *)src,
                               ret, "l", "k", "=r", 4);
                if (likely(!ret))
                        __put_user_asm(tmp, (u32 __user *)dst,
                                       ret, "l", "k", "ir", 4);
+               __uaccess_end();
                return ret;
        }
        case 8: {
                u64 tmp;
+               __uaccess_begin();
                __get_user_asm(tmp, (u64 __user *)src,
                               ret, "q", "", "=r", 8);
                if (likely(!ret))
                        __put_user_asm(tmp, (u64 __user *)dst,
                                       ret, "q", "", "er", 8);
+               __uaccess_end();
                return ret;
        }
        default:
index 6ed7d63a0688384830894f4a455463bd7e9e060c..201947b4377c769d9393d718fc5b35c5000328ed 100644 (file)
@@ -513,9 +513,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
                                return ret;
 
                        if (r->presumed_offset != offset &&
-                           __copy_to_user_inatomic(&user_relocs->presumed_offset,
-                                                   &r->presumed_offset,
-                                                   sizeof(r->presumed_offset))) {
+                           __put_user(r->presumed_offset, &user_relocs->presumed_offset)) {
                                return -EFAULT;
                        }
 
index e23a9e704536278dad66bc5e5d1f9f798036b8be..bab4053fb795b0a054616d8c1dc81d2ea509e441 100644 (file)
@@ -65,8 +65,10 @@ enum {
 
 #ifdef CONFIG_CMA
 #  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
+#  define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
 #else
 #  define is_migrate_cma(migratetype) false
+#  define is_migrate_cma_page(_page) false
 #endif
 
 #define for_each_migratetype_order(order, type) \
index 2037a861e3679910152a98ba98a667b395c6773c..4ef384b172e0e5f307edfd69abd5d100d339c913 100644 (file)
@@ -144,6 +144,18 @@ void kfree(const void *);
 void kzfree(const void *);
 size_t ksize(const void *);
 
+#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
+const char *__check_heap_object(const void *ptr, unsigned long n,
+                               struct page *page);
+#else
+static inline const char *__check_heap_object(const void *ptr,
+                                             unsigned long n,
+                                             struct page *page)
+{
+       return NULL;
+}
+#endif
+
 /*
  * Some archs want to perform DMA into kmalloc caches and need a guaranteed
  * alignment larger than the alignment of a 64-bit integer.
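
For orientation, the __check_heap_object() declared above is only reached for slab-backed pages; the dispatch lives in the new mm/usercopy.c (truncated at the end of this view). An abridged sketch of that path, with the non-slab and cross-page handling left out and the helper name invented for the example:

/* Abridged sketch: hand slab pages to the allocator-specific check.
 * Returns NULL when the copy is acceptable, otherwise a name describing
 * the rejected region. */
static inline const char *demo_check_heap_object(const void *ptr, unsigned long n)
{
        struct page *page;

        if (!virt_addr_valid(ptr))
                return NULL;            /* not a linear-map address */

        page = virt_to_head_page(ptr);
        if (PageSlab(page))
                return __check_heap_object(ptr, n, page); /* slab.c/slub.c below */

        return NULL;                    /* non-slab pages handled separately */
}
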
index ff307b548ed3c91a0f1cd05e486789305cefb43f..0ae29ff9ccfde0dfbbcee5ee9c19032064fb45aa 100644 (file)
@@ -145,6 +145,30 @@ static inline bool test_and_clear_restore_sigmask(void)
 #error "no set_restore_sigmask() provided and default one won't work"
 #endif
 
+#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
+static inline int arch_within_stack_frames(const void * const stack,
+                                          const void * const stackend,
+                                          const void *obj, unsigned long len)
+{
+       return 0;
+}
+#endif
+
+#ifdef CONFIG_HARDENED_USERCOPY
+extern void __check_object_size(const void *ptr, unsigned long n,
+                                       bool to_user);
+
+static inline void check_object_size(const void *ptr, unsigned long n,
+                                    bool to_user)
+{
+       __check_object_size(ptr, n, to_user);
+}
+#else
+static inline void check_object_size(const void *ptr, unsigned long n,
+                                    bool to_user)
+{ }
+#endif /* CONFIG_HARDENED_USERCOPY */
+
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_THREAD_INFO_H */
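
As a concrete, entirely hypothetical illustration of what the check_object_size() hook added above provides at copy_to_user()/copy_from_user() call sites: a driver that lets userspace pick the length could otherwise walk past its kmalloc() buffer, and with CONFIG_HARDENED_USERCOPY the copy is refused before any bytes beyond the slab object are exposed. All identifiers below are made up for the example.

/* Hypothetical character-device read; demo_buf is assumed to be a
 * PAGE_SIZE kmalloc() allocation set up elsewhere. */
static char *demo_buf;

static ssize_t demo_read(struct file *file, char __user *ubuf,
                         size_t len, loff_t *ppos)
{
        /* Without this clamp, an oversized len is caught by
         * check_object_size() inside copy_to_user(), which rejects
         * copies that run past the bounds of demo_buf's slab object. */
        if (len > PAGE_SIZE)
                len = PAGE_SIZE;

        if (copy_to_user(ubuf, demo_buf, len))
                return -EFAULT;
        return len;
}
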
index 558129af828a7eb97ad64b1531ac2a4e3f71174d..349557825428e9b3d815e0bc2781d5d79b172d47 100644 (file)
@@ -111,4 +111,11 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
 #define probe_kernel_address(addr, retval)             \
        probe_kernel_read(&retval, addr, sizeof(retval))
 
+#ifndef user_access_begin
+#define user_access_begin() do { } while (0)
+#define user_access_end() do { } while (0)
+#define unsafe_get_user(x, ptr) __get_user(x, ptr)
+#define unsafe_put_user(x, ptr) __put_user(x, ptr)
+#endif
+
 #endif         /* __LINUX_UACCESS_H__ */
index 235c7a2c0d2004f1121b7d98f4140683ab45ef53..e1d1d6936f9228ad1580144563c27a8b2d8d2dd1 100644 (file)
@@ -1719,6 +1719,7 @@ choice
 
 config SLAB
        bool "SLAB"
+       select HAVE_HARDENED_USERCOPY_ALLOCATOR
        help
          The regular slab allocator that is established and known to work
          well in all environments. It organizes cache hot objects in
@@ -1726,6 +1727,7 @@ config SLAB
 
 config SLUB
        bool "SLUB (Unqueued Allocator)"
+       select HAVE_HARDENED_USERCOPY_ALLOCATOR
        help
           SLUB is a slab allocator that minimizes cache line usage
           instead of managing queues of cached objects (SLAB approach).
index 7dad84913abfb06df2495fae555e7f8bcac2b104..4dcc16991b6707564091f2d9a28d3ab62504a4ca 100644 (file)
@@ -1692,8 +1692,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
        int result;
 
        pagefault_disable();
-       result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
-                                                       sizeof(opcode));
+       result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr);
        pagefault_enable();
 
        if (likely(result == 0))
index 9d8163afd87ca7605ef85d2ca64d3c4521838fae..e8af73cc51a7cd6bf453aaf03e5ccd5842aacca2 100644 (file)
@@ -681,7 +681,7 @@ static int get_futex_value_locked(u32 *dest, u32 __user *from)
        int ret;
 
        pagefault_disable();
-       ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
+       ret = __get_user(*dest, from);
        pagefault_enable();
 
        return ret ? -EFAULT : 0;
index e0af6ff73d146cfa3356080ebcb1bbea36cd1f96..33840324138c21fc427c5c8dcbda66cb64ab3636 100644 (file)
@@ -39,7 +39,7 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src, long
                unsigned long c, data;
 
                /* Fall back to byte-at-a-time if we get a page fault */
-               if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
+               if (unlikely(unsafe_get_user(c,(unsigned long __user *)(src+res))))
                        break;
                *(unsigned long *)(dst+res) = c;
                if (has_zero(c, &data, &constants)) {
@@ -55,7 +55,7 @@ byte_at_a_time:
        while (max) {
                char c;
 
-               if (unlikely(__get_user(c,src+res)))
+               if (unlikely(unsafe_get_user(c,src+res)))
                        return -EFAULT;
                dst[res] = c;
                if (!c)
@@ -107,7 +107,12 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
        src_addr = (unsigned long)src;
        if (likely(src_addr < max_addr)) {
                unsigned long max = max_addr - src_addr;
-               return do_strncpy_from_user(dst, src, count, max);
+               long retval;
+
+               user_access_begin();
+               retval = do_strncpy_from_user(dst, src, count, max);
+               user_access_end();
+               return retval;
        }
        return -EFAULT;
 }
index 3a5f2b366d84ed209a012cf62491ca30f6a8bca8..2625943625d7fb229e6e2cf104e5d84c95246ffa 100644 (file)
@@ -45,7 +45,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
        src -= align;
        max += align;
 
-       if (unlikely(__get_user(c,(unsigned long __user *)src)))
+       if (unlikely(unsafe_get_user(c,(unsigned long __user *)src)))
                return 0;
        c |= aligned_byte_mask(align);
 
@@ -61,7 +61,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
                if (unlikely(max <= sizeof(unsigned long)))
                        break;
                max -= sizeof(unsigned long);
-               if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
+               if (unlikely(unsafe_get_user(c,(unsigned long __user *)(src+res))))
                        return 0;
        }
        res -= align;
@@ -112,7 +112,12 @@ long strnlen_user(const char __user *str, long count)
        src_addr = (unsigned long)str;
        if (likely(src_addr < max_addr)) {
                unsigned long max = max_addr - src_addr;
-               return do_strnlen_user(str, count, max);
+               long retval;
+
+               user_access_begin();
+               retval = do_strnlen_user(str, count, max);
+               user_access_end();
+               return retval;
        }
        return 0;
 }
@@ -141,7 +146,12 @@ long strlen_user(const char __user *str)
        src_addr = (unsigned long)str;
        if (likely(src_addr < max_addr)) {
                unsigned long max = max_addr - src_addr;
-               return do_strnlen_user(str, ~0ul, max);
+               long retval;
+
+               user_access_begin();
+               retval = do_strnlen_user(str, ~0ul, max);
+               user_access_end();
+               return retval;
        }
        return 0;
 }
index 2ed43191fc3bf78f46f111e88fa9d5a01b8c661a..8b532c94008f2231b2d3d01fc583ff88bb3822ca 100644 (file)
@@ -5,6 +5,9 @@
 KASAN_SANITIZE_slab_common.o := n
 KASAN_SANITIZE_slub.o := n
 
+# Since __builtin_frame_address does work as used, disable the warning.
+CFLAGS_usercopy.o += $(call cc-disable-warning, frame-address)
+
 mmu-y                  := nommu.o
 mmu-$(CONFIG_MMU)      := gup.o highmem.o memory.o mincore.o \
                           mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
@@ -81,3 +84,4 @@ obj-$(CONFIG_CMA_DEBUGFS) += cma_debug.o
 obj-$(CONFIG_USERFAULTFD) += userfaultfd.o
 obj-$(CONFIG_IDLE_PAGE_TRACKING) += page_idle.o
 obj-$(CONFIG_FRAME_VECTOR) += frame_vector.o
+obj-$(CONFIG_HARDENED_USERCOPY) += usercopy.o
index d159b1c96e484d902f6edb34ebb5a83a3977bd57..78f9274dd49d06f11f87beae9520a0e1267dc3ff 100644 (file)
@@ -96,8 +96,7 @@ long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count)
        pagefault_disable();
 
        do {
-               ret = __copy_from_user_inatomic(dst++,
-                                               (const void __user __force *)src++, 1);
+               ret = __get_user(*dst++, (const char __user __force *)src++);
        } while (dst[-1] && ret == 0 && src - unsafe_addr < count);
 
        dst[-1] = '\0';
index 4765c97ce6900d98b8ce2968cf9ff62a176f6e42..24a615d42d74f9bacd69e45372b871eeba89dbd1 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4228,6 +4228,36 @@ static int __init slab_proc_init(void)
 module_init(slab_proc_init);
 #endif
 
+#ifdef CONFIG_HARDENED_USERCOPY
+/*
+ * Rejects objects that are incorrectly sized.
+ *
+ * Returns NULL if the check passes; otherwise returns the name of the
+ * cache the object belongs to, so the caller can report the error.
+ */
+const char *__check_heap_object(const void *ptr, unsigned long n,
+                               struct page *page)
+{
+       struct kmem_cache *cachep;
+       unsigned int objnr;
+       unsigned long offset;
+
+       /* Find and validate object. */
+       cachep = page->slab_cache;
+       objnr = obj_to_index(cachep, page, (void *)ptr);
+       BUG_ON(objnr >= cachep->num);
+
+       /* Find offset within object. */
+       offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
+
+       /* Allow address range falling entirely within object size. */
+       if (offset <= cachep->object_size && n <= cachep->object_size - offset)
+               return NULL;
+
+       return cachep->name;
+}
+#endif /* CONFIG_HARDENED_USERCOPY */
+
 /**
  * ksize - get the actual amount of memory allocated for a given object
  * @objp: Pointer to the object
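
The range test added above is deliberately written as "n <= cachep->object_size - offset" only after "offset <= cachep->object_size" has been established: the first test keeps the subtraction from wrapping, and phrasing the second this way sidesteps the unsigned overflow a naive "offset + n <= object_size" comparison could hit with a huge, attacker-chosen n. A minimal standalone sketch of the same test (range_within_object() and the numbers are illustrative only, not kernel code):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/*
 * [offset, offset + n) must lie entirely inside an object of object_size
 * bytes.  Checking "offset <= object_size" first keeps "object_size - offset"
 * from wrapping; comparing n against that remainder avoids the overflow a
 * naive "offset + n <= object_size" would allow for very large n.
 */
static bool range_within_object(size_t offset, size_t n, size_t object_size)
{
	return offset <= object_size && n <= object_size - offset;
}

int main(void)
{
	assert(range_within_object(16, 48, 64));           /* fits exactly  */
	assert(!range_within_object(16, 49, 64));          /* one byte over */
	assert(!range_within_object(16, (size_t)-1, 64));  /* would wrap    */
	return 0;
}
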
index 65d5f92d51d27ec1e0993cbe194eaaaae9bcd503..fbadb3753d4d1b098855bcefa20d765c992b85bc 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3585,6 +3585,46 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
 
+#ifdef CONFIG_HARDENED_USERCOPY
+/*
+ * Rejects objects that are incorrectly sized.
+ *
+ * Returns NULL if the check passes; otherwise returns the name of the
+ * cache the object belongs to, so the caller can report the error.
+ */
+const char *__check_heap_object(const void *ptr, unsigned long n,
+                               struct page *page)
+{
+       struct kmem_cache *s;
+       unsigned long offset;
+       size_t object_size;
+
+       /* Find object and usable object size. */
+       s = page->slab_cache;
+       object_size = slab_ksize(s);
+
+       /* Reject impossible pointers. */
+       if (ptr < page_address(page))
+               return s->name;
+
+       /* Find offset within object. */
+       offset = (ptr - page_address(page)) % s->size;
+
+       /* Adjust for redzone and reject if within the redzone. */
+       if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) {
+               if (offset < s->red_left_pad)
+                       return s->name;
+               offset -= s->red_left_pad;
+       }
+
+       /* Allow address range falling entirely within object size. */
+       if (offset <= object_size && n <= object_size - offset)
+               return NULL;
+
+       return s->name;
+}
+#endif /* CONFIG_HARDENED_USERCOPY */
+
 static size_t __ksize(const void *object)
 {
        struct page *page;
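
For the SLUB variant above, the object offset is recovered from the pointer's position within the slab page: (ptr - page_address(page)) % s->size gives the byte offset into the object's stride, and the left red zone is stripped off before the size comparison. A small standalone sketch with made-up numbers (the 128-byte stride, 64-byte object size and 16-byte red zone are hypothetical, not taken from a real cache):

#include <stdio.h>

int main(void)
{
	/* Hypothetical cache layout: 128-byte stride, 64 usable bytes,
	 * 16-byte left red zone when SLAB_RED_ZONE debugging is enabled. */
	unsigned long page_base   = 0x1000;  /* page_address(page)            */
	unsigned long ptr         = 0x10a0;  /* pointer handed to the copy    */
	unsigned long stride      = 128;     /* s->size                       */
	unsigned long object_size = 64;      /* slab_ksize(s)                 */
	unsigned long red_left    = 16;      /* s->red_left_pad               */

	unsigned long offset = (ptr - page_base) % stride;  /* 160 % 128 = 32 */

	if (offset < red_left)
		return 1;          /* would be rejected: inside the red zone  */
	offset -= red_left;        /* 32 - 16 = 16 bytes into the object      */

	/* A 48-byte copy passes (48 <= 64 - 16); a 49-byte copy would not.   */
	printf("offset within object = %lu\n", offset);
	return 0;
}
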
diff --git a/mm/usercopy.c b/mm/usercopy.c
new file mode 100644 (file)
index 0000000..8ebae91
--- /dev/null
@@ -0,0 +1,268 @@
+/*
+ * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
+ * which are designed to protect kernel memory from needless exposure
+ * and overwrite under many unintended conditions. This code is based
+ * on PAX_USERCOPY, which is:
+ *
+ * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
+ * Security Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/sections.h>
+
+enum {
+       BAD_STACK = -1,
+       NOT_STACK = 0,
+       GOOD_FRAME,
+       GOOD_STACK,
+};
+
+/*
+ * Checks whether the region described by the given pointer and length is
+ * contained within the current stack frame (when that can be determined).
+ *
+ * Returns:
+ *     NOT_STACK: not at all on the stack
+ *     GOOD_FRAME: fully within a valid stack frame
+ *     GOOD_STACK: fully on the stack (when frame checking is not possible)
+ *     BAD_STACK: error condition (invalid stack position or bad stack frame)
+ */
+static noinline int check_stack_object(const void *obj, unsigned long len)
+{
+       const void * const stack = task_stack_page(current);
+       const void * const stackend = stack + THREAD_SIZE;
+       int ret;
+
+       /* Object is not on the stack at all. */
+       if (obj + len <= stack || stackend <= obj)
+               return NOT_STACK;
+
+       /*
+        * Reject: object partially overlaps the stack (passing the
+        * check above means at least one end is within the stack,
+        * so if this check fails, the other end is outside the stack).
+        */
+       if (obj < stack || stackend < obj + len)
+               return BAD_STACK;
+
+       /* Check if object is safely within a valid frame. */
+       ret = arch_within_stack_frames(stack, stackend, obj, len);
+       if (ret)
+               return ret;
+
+       return GOOD_STACK;
+}
+
+static void report_usercopy(const void *ptr, unsigned long len,
+                           bool to_user, const char *type)
+{
+       pr_emerg("kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
+               to_user ? "exposure" : "overwrite",
+               to_user ? "from" : "to", ptr, type ? : "unknown", len);
+       /*
+        * For greater effect, it would be nice to call do_group_exit(),
+        * but BUG() actually hooks all the lock-breaking and per-arch
+        * Oops code, so that is used here instead.
+        */
+       BUG();
+}
+
+/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
+static bool overlaps(const void *ptr, unsigned long n, unsigned long low,
+                    unsigned long high)
+{
+       unsigned long check_low = (uintptr_t)ptr;
+       unsigned long check_high = check_low + n;
+
+       /* Does not overlap if entirely above or entirely below. */
+       if (check_low >= high || check_high < low)
+               return false;
+
+       return true;
+}
+
+/* Is this address range in the kernel text area? */
+static inline const char *check_kernel_text_object(const void *ptr,
+                                                  unsigned long n)
+{
+       unsigned long textlow = (unsigned long)_stext;
+       unsigned long texthigh = (unsigned long)_etext;
+       unsigned long textlow_linear, texthigh_linear;
+
+       if (overlaps(ptr, n, textlow, texthigh))
+               return "<kernel text>";
+
+       /*
+        * Some architectures have virtual memory mappings with a secondary
+        * mapping of the kernel text, i.e. there is more than one virtual
+        * kernel address that points to the kernel image. This usually
+        * happens when there is a separate linear physical memory mapping,
+        * in which case __pa() is not simply the inverse of __va(). This
+        * can be detected and checked:
+        */
+       textlow_linear = (unsigned long)__va(__pa(textlow));
+       /* No different mapping: we're done. */
+       if (textlow_linear == textlow)
+               return NULL;
+
+       /* Check the secondary mapping... */
+       texthigh_linear = (unsigned long)__va(__pa(texthigh));
+       if (overlaps(ptr, n, textlow_linear, texthigh_linear))
+               return "<linear kernel text>";
+
+       return NULL;
+}
+
+static inline const char *check_bogus_address(const void *ptr, unsigned long n)
+{
+       /* Reject if object wraps past end of memory. */
+       if (ptr + n < ptr)
+               return "<wrapped address>";
+
+       /* Reject if NULL or ZERO-allocation. */
+       if (ZERO_OR_NULL_PTR(ptr))
+               return "<null>";
+
+       return NULL;
+}
+
+static inline const char *check_heap_object(const void *ptr, unsigned long n,
+                                           bool to_user)
+{
+       struct page *page, *endpage;
+       const void *end = ptr + n - 1;
+       bool is_reserved, is_cma;
+
+       /*
+        * Some architectures (arm64) return true for virt_addr_valid() on
+        * vmalloced addresses. Work around this by checking for vmalloc
+        * first.
+        */
+       if (is_vmalloc_addr(ptr))
+               return NULL;
+
+       if (!virt_addr_valid(ptr))
+               return NULL;
+
+       page = virt_to_head_page(ptr);
+
+       /* Check slab allocator for flags and size. */
+       if (PageSlab(page))
+               return __check_heap_object(ptr, n, page);
+
+       /*
+        * Sometimes the kernel data regions are not marked Reserved (see
+        * check below). And sometimes [_sdata,_edata) does not cover
+        * rodata and/or bss, so check each range explicitly.
+        */
+
+       /* Allow reads of kernel rodata region (if not marked as Reserved). */
+       if (ptr >= (const void *)__start_rodata &&
+           end <= (const void *)__end_rodata) {
+               if (!to_user)
+                       return "<rodata>";
+               return NULL;
+       }
+
+       /* Allow kernel data region (if not marked as Reserved). */
+       if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
+               return NULL;
+
+       /* Allow kernel bss region (if not marked as Reserved). */
+       if (ptr >= (const void *)__bss_start &&
+           end <= (const void *)__bss_stop)
+               return NULL;
+
+       /* Is the object wholly within one base page? */
+       if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
+                  ((unsigned long)end & (unsigned long)PAGE_MASK)))
+               return NULL;
+
+       /* Allow if start and end are inside the same compound page. */
+       endpage = virt_to_head_page(end);
+       if (likely(endpage == page))
+               return NULL;
+
+       /*
+        * Reject if range is entirely either Reserved (i.e. special or
+        * device memory), or CMA. Otherwise, reject since the object spans
+        * several independently allocated pages.
+        */
+       is_reserved = PageReserved(page);
+       is_cma = is_migrate_cma_page(page);
+       if (!is_reserved && !is_cma)
+               goto reject;
+
+       for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
+               page = virt_to_head_page(ptr);
+               if (is_reserved && !PageReserved(page))
+                       goto reject;
+               if (is_cma && !is_migrate_cma_page(page))
+                       goto reject;
+       }
+
+       return NULL;
+
+reject:
+       return "<spans multiple pages>";
+}
+
+/*
+ * Validates that the given object is:
+ * - not bogus address
+ * - known-safe heap or stack object
+ * - not in kernel text
+ */
+void __check_object_size(const void *ptr, unsigned long n, bool to_user)
+{
+       const char *err;
+
+       /* Skip all tests if size is zero. */
+       if (!n)
+               return;
+
+       /* Check for invalid addresses. */
+       err = check_bogus_address(ptr, n);
+       if (err)
+               goto report;
+
+       /* Check for bad heap object. */
+       err = check_heap_object(ptr, n, to_user);
+       if (err)
+               goto report;
+
+       /* Check for bad stack object. */
+       switch (check_stack_object(ptr, n)) {
+       case NOT_STACK:
+               /* Object is not touching the current process stack. */
+               break;
+       case GOOD_FRAME:
+       case GOOD_STACK:
+               /*
+                * Object is either in the correct frame (when it
+                * is possible to check) or just generally on the
+                * process stack (when frame checking is not available).
+                */
+               return;
+       default:
+               err = "<process stack>";
+               goto report;
+       }
+
+       /* Check for object in kernel to avoid text exposure. */
+       err = check_kernel_text_object(ptr, n);
+       if (!err)
+               return;
+
+report:
+       report_usercopy(ptr, n, to_user, err);
+}
+EXPORT_SYMBOL(__check_object_size);
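
As a usage illustration, the class of flaw __check_object_size() is meant to trap looks roughly like the following hypothetical driver-style snippet (example_read() and the 64-byte allocation are made up, not part of this series). Without CONFIG_HARDENED_USERCOPY an oversized copy_to_user() here would expose whatever follows the 64-byte object in the slab; with it, __check_heap_object() returns the cache name and report_usercopy() triggers BUG() instead.

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/* Hypothetical example: "len" comes from userspace and is never clamped. */
static long example_read(char __user *ubuf, size_t len)
{
	char *buf = kmalloc(64, GFP_KERNEL);
	long ret = 0;

	if (!buf)
		return -ENOMEM;
	memset(buf, 0, 64);

	/* BUG: "len" should be clamped to 64; a larger value walks off the
	 * heap object, which the hardened copy_to_user() path now rejects. */
	if (copy_to_user(ubuf, buf, len))
		ret = -EFAULT;

	kfree(buf);
	return ret;
}
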
index e45237897b435f8fbf64950f580b9730f79eb1f4..46c00a674eecc316ba4be08d49b036010269d500 100644 (file)
@@ -118,6 +118,34 @@ config LSM_MMAP_MIN_ADDR
          this low address space will need the permission specific to the
          systems running LSM.
 
+config HAVE_HARDENED_USERCOPY_ALLOCATOR
+       bool
+       help
+         The heap allocator implements __check_heap_object() for
+         validating memory ranges against heap object sizes in
+         support of CONFIG_HARDENED_USERCOPY.
+
+config HAVE_ARCH_HARDENED_USERCOPY
+       bool
+       help
+         The architecture supports CONFIG_HARDENED_USERCOPY by
+         calling check_object_size() just before performing the
+         userspace copies in the low-level implementation of
+         copy_to_user() and copy_from_user().
+
+config HARDENED_USERCOPY
+       bool "Harden memory copies between kernel and userspace"
+       depends on HAVE_ARCH_HARDENED_USERCOPY
+       select BUG
+       help
+         This option checks for obviously wrong memory regions when
+         copying memory to/from the kernel (via the copy_to_user() and
+         copy_from_user() functions) by rejecting memory ranges that
+         are larger than the specified heap object, span multiple
+         separately allocated pages, are not on the process stack,
+         or are part of the kernel text. This kills entire classes
+         of heap overflow exploits and similar kernel memory exposures.
+
 source security/selinux/Kconfig
 source security/smack/Kconfig
 source security/tomoyo/Kconfig