/* include/asm-x86/uaccess_64.h (firefly-linux-kernel-4.4.55.git) */
#ifndef __X86_64_UACCESS_H
#define __X86_64_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/prefetch.h>
#include <asm/page.h>

#define ARCH_HAS_SEARCH_EXTABLE

/*
 * Out-of-line put_user() helpers: __put_user_x() passes the value in %rax
 * and the user pointer in %rcx; the helper reports 0 or -EFAULT back in
 * %eax and clobbers %ebx.
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
extern void __put_user_bad(void);

#define __put_user_x(size, ret, x, ptr)                                 \
        asm volatile("call __put_user_" #size                           \
                     :"=a" (ret)                                        \
                     :"c" (ptr),"a" (x)                                 \
                     :"ebx")

#define __get_user(x, ptr)                                              \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr)                                              \
        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user

#define put_user(x, ptr)                                        \
({                                                              \
        int __pu_err;                                           \
        typeof(*(ptr)) __user *__pu_addr = (ptr);               \
        __chk_user_ptr(ptr);                                    \
        switch (sizeof(*(ptr))) {                               \
        case 1:                                                 \
                __put_user_x(1, __pu_err, x, __pu_addr);        \
                break;                                          \
        case 2:                                                 \
                __put_user_x(2, __pu_err, x, __pu_addr);        \
                break;                                          \
        case 4:                                                 \
                __put_user_x(4, __pu_err, x, __pu_addr);        \
                break;                                          \
        case 8:                                                 \
                __put_user_x(8, __pu_err, x, __pu_addr);        \
                break;                                          \
        default:                                                \
                __put_user_bad();                               \
        }                                                       \
        __pu_err;                                               \
})
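
/*
 * Illustrative usage sketch, not part of the original header; the helper
 * name below is hypothetical.  The sizeof() switch above routes a 4-byte
 * store to __put_user_4, an 8-byte store to __put_user_8, and so on, and
 * the macro evaluates to 0 on success or -EFAULT on a faulting access.
 */
#if 0
static int example_toggle_flag(u32 __user *uptr)
{
        u32 val;

        if (get_user(val, uptr))                /* fetch the current value */
                return -EFAULT;
        return put_user(val ^ 0x1, uptr);       /* store the toggled value */
}
#endif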

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len);

__must_check unsigned long
copy_to_user(void __user *to, const void *from, unsigned len);
__must_check unsigned long
copy_from_user(void *to, const void __user *from, unsigned len);
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);

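/*
 * Illustrative calling-pattern sketch, not part of the original header;
 * the handler and buffer names are hypothetical.  copy_from_user() and
 * copy_to_user() return the number of bytes that could *not* be copied,
 * so any non-zero result is normally turned into -EFAULT.
 */
#if 0
static ssize_t example_write(struct file *file, const char __user *buf,
                             size_t count, loff_t *ppos)
{
        char kbuf[64];

        if (count > sizeof(kbuf))
                count = sizeof(kbuf);
        if (copy_from_user(kbuf, buf, count))
                return -EFAULT;
        /* ... consume kbuf here ... */
        return count;
}
#endif
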
static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
        int ret = 0;
        if (!__builtin_constant_p(size))
                return copy_user_generic(dst, (__force void *)src, size);
        /*
         * The final __get_user_asm() argument is the value left in ret when
         * the access faults, i.e. the number of bytes not copied.
         */
        switch (size) {
        case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
                              ret, "b", "b", "=q", 1);
                return ret;
        case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
                              ret, "w", "w", "=r", 2);
                return ret;
        case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
                              ret, "l", "k", "=r", 4);
                return ret;
        case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
                              ret, "q", "", "=r", 8);
                return ret;
        case 10:
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 10);
                if (unlikely(ret))
                        return ret;
                __get_user_asm(*(u16 *)(8 + (char *)dst),
                               (u16 __user *)(8 + (char __user *)src),
                               ret, "w", "w", "=r", 2);
                return ret;
        case 16:
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 16);
                if (unlikely(ret))
                        return ret;
                __get_user_asm(*(u64 *)(8 + (char *)dst),
                               (u64 __user *)(8 + (char __user *)src),
                               ret, "q", "", "=r", 8);
                return ret;
        default:
                return copy_user_generic(dst, (__force void *)src, size);
        }
}
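
/*
 * Illustrative usage sketch, not part of the original header; the struct
 * and function names are hypothetical.  __copy_from_user() skips the
 * access_ok() check, so the caller is expected to validate the pointer
 * first.  With a compile-time-constant size the switch above replaces the
 * copy_user_generic() call with one or two inlined mov instructions.
 */
#if 0
struct example_args {
        u64 addr;
        u32 len;
};                                      /* padded to 16 bytes */

static int example_fetch_args(struct example_args *kargs,
                              const struct example_args __user *uargs)
{
        if (!access_ok(VERIFY_READ, uargs, sizeof(*uargs)))
                return -EFAULT;
        /* sizeof(*uargs) == 16, so this takes the "case 16" path above */
        return __copy_from_user(kargs, uargs, sizeof(*uargs)) ? -EFAULT : 0;
}
#endif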

static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
        int ret = 0;
        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst, src, size);
        switch (size) {
        case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
                              ret, "b", "b", "iq", 1);
                return ret;
        case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
                              ret, "w", "w", "ir", 2);
                return ret;
        case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
                              ret, "l", "k", "ir", 4);
                return ret;
        case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
                              ret, "q", "", "ir", 8);
                return ret;
        case 10:
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "ir", 10);
                if (unlikely(ret))
                        return ret;
                /* compiler barrier between the two user stores */
                asm("":::"memory");
                /* 4[(u16 *)src] is ((u16 *)src)[4], i.e. bytes 8-9 of src */
                __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
                               ret, "w", "w", "ir", 2);
                return ret;
        case 16:
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "ir", 16);
                if (unlikely(ret))
                        return ret;
                /* compiler barrier between the two user stores */
                asm("":::"memory");
                /* 1[(u64 *)src] is ((u64 *)src)[1], i.e. bytes 8-15 of src */
                __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
                               ret, "q", "", "ir", 8);
                return ret;
        default:
                return copy_user_generic((__force void *)dst, src, size);
        }
}
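
/*
 * Illustrative mirror-image sketch for __copy_to_user(), not part of the
 * original header; the names are hypothetical and the destination pointer
 * is assumed to have been validated with access_ok() by the caller.  A
 * constant 8-byte size takes the single-mov "case 8" path above.
 */
#if 0
static int example_return_result(u64 __user *ures, u64 value)
{
        if (!access_ok(VERIFY_WRITE, ures, sizeof(*ures)))
                return -EFAULT;
        return __copy_to_user(ures, &value, sizeof(value)) ? -EFAULT : 0;
}
#endif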

static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
        int ret = 0;
        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst,
                                         (__force void *)src, size);
        switch (size) {
        case 1: {
                u8 tmp;
                __get_user_asm(tmp, (u8 __user *)src,
                               ret, "b", "b", "=q", 1);
                if (likely(!ret))
                        __put_user_asm(tmp, (u8 __user *)dst,
                                       ret, "b", "b", "iq", 1);
                return ret;
        }
        case 2: {
                u16 tmp;
                __get_user_asm(tmp, (u16 __user *)src,
                               ret, "w", "w", "=r", 2);
                if (likely(!ret))
                        __put_user_asm(tmp, (u16 __user *)dst,
                                       ret, "w", "w", "ir", 2);
                return ret;
        }

        case 4: {
                u32 tmp;
                __get_user_asm(tmp, (u32 __user *)src,
                               ret, "l", "k", "=r", 4);
                if (likely(!ret))
                        __put_user_asm(tmp, (u32 __user *)dst,
                                       ret, "l", "k", "ir", 4);
                return ret;
        }
        case 8: {
                u64 tmp;
                __get_user_asm(tmp, (u64 __user *)src,
                               ret, "q", "", "=r", 8);
                if (likely(!ret))
                        __put_user_asm(tmp, (u64 __user *)dst,
                                       ret, "q", "", "ir", 8);
                return ret;
        }
        default:
                return copy_user_generic((__force void *)dst,
                                         (__force void *)src, size);
        }
}

__must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long
__strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long strnlen_user(const char __user *str, long n);
__must_check long __strnlen_user(const char __user *str, long n);
__must_check long strlen_user(const char __user *str);
__must_check unsigned long clear_user(void __user *mem, unsigned long len);
__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
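
/*
 * Illustrative usage sketch, not part of the original header; the helper
 * name is hypothetical.  strncpy_from_user() copies a NUL-terminated
 * string from user space and returns the length copied (excluding the
 * NUL), returning count itself when no NUL was found within count bytes
 * and a negative error on a fault.
 */
#if 0
static long example_get_name(char *kname, const char __user *uname,
                             long maxlen)
{
        long len = strncpy_from_user(kname, uname, maxlen);

        if (len < 0)
                return len;             /* -EFAULT */
        if (len == maxlen)
                return -ENAMETOOLONG;   /* name did not fit */
        return len;
}
#endif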

__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
                                            unsigned size);

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
        return copy_user_generic((__force void *)dst, src, size);
}

#define ARCH_HAS_NOCACHE_UACCESS 1
extern long __copy_user_nocache(void *dst, const void __user *src,
                                unsigned size, int zerorest);

static inline int __copy_from_user_nocache(void *dst, const void __user *src,
                                           unsigned size)
{
        might_sleep();
        return __copy_user_nocache(dst, src, size, 1);
}

static inline int __copy_from_user_inatomic_nocache(void *dst,
                                                    const void __user *src,
                                                    unsigned size)
{
        return __copy_user_nocache(dst, src, size, 0);
}

#endif /* __X86_64_UACCESS_H */