Merge tag 'stable/for-linus-3.16-rc1-tag' of git://git.kernel.org/pub/scm/linux/kerne...
[firefly-linux-kernel-4.4.55.git] / arch / x86 / include / asm / xsave.h
#ifndef __ASM_X86_XSAVE_H
#define __ASM_X86_XSAVE_H

#include <linux/types.h>
#include <asm/processor.h>

/* CPUID leaf that enumerates XSAVE features and per-component state sizes */
#define XSTATE_CPUID            0x0000000d

/*
 * Bits of the XCR0 / xstate feature mask, one per processor state
 * component (x87 FPU, SSE XMM regs, AVX YMM upper halves, MPX bound
 * registers/config, and the three AVX-512 components).
 */
#define XSTATE_FP               0x1
#define XSTATE_SSE              0x2
#define XSTATE_YMM              0x4
#define XSTATE_BNDREGS          0x8
#define XSTATE_BNDCSR           0x10
#define XSTATE_OPMASK           0x20
#define XSTATE_ZMM_Hi256        0x40
#define XSTATE_Hi16_ZMM         0x80

#define XSTATE_FPSSE    (XSTATE_FP | XSTATE_SSE)
/* Bit 63 of XCR0 is reserved for future expansion */
#define XSTATE_EXTEND_MASK      (~(XSTATE_FPSSE | (1ULL << 63)))

/* Size of the legacy FXSAVE area at the start of the xsave buffer */
#define FXSAVE_SIZE     512

/* The 64-byte xsave header immediately follows the legacy FXSAVE area */
#define XSAVE_HDR_SIZE      64
#define XSAVE_HDR_OFFSET    FXSAVE_SIZE

/* AVX YMM upper-half state: 16 x 16 bytes, right after the header */
#define XSAVE_YMM_SIZE      256
#define XSAVE_YMM_OFFSET    (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)

/* Supported features which support lazy state saving */
#define XSTATE_LAZY     (XSTATE_FP | XSTATE_SSE | XSTATE_YMM                  \
                        | XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)

/* Supported features which require eager state saving */
#define XSTATE_EAGER    (XSTATE_BNDREGS | XSTATE_BNDCSR)

/* All currently supported features */
#define XCNTXT_MASK     (XSTATE_LAZY | XSTATE_EAGER)

/*
 * On 64-bit, prepend REX.W (0x48) to the hand-encoded opcodes below so
 * the 64-bit forms (XSAVE64/XRSTOR64 etc.) are used; the instructions
 * are encoded as raw bytes because old assemblers lack the mnemonics.
 */
#ifdef CONFIG_X86_64
#define REX_PREFIX      "0x48, "
#else
#define REX_PREFIX
#endif

/* Size in bytes of the xsave area for the enabled feature set */
extern unsigned int xstate_size;
/* Mask of xstate features enabled in XCR0 */
extern u64 pcntxt_mask;
/* Software-reserved bytes of the FXSAVE area used to flag xstate format */
extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
/* Buffer holding the init (reset) values of all state components */
extern struct xsave_struct *init_xstate_buf;

extern void xsave_init(void);
extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask);
extern int init_fpu(struct task_struct *child);
/*
 * Restore all extended state from the kernel buffer @fx via XRSTOR
 * (hand-encoded 0x0f 0xae /5 with operand in %rdi/%edi, REX.W-prefixed
 * on 64-bit).  eax:edx = -1:-1 requests every enabled state component.
 *
 * Returns 0 on success, or -1 if the instruction faulted: the exception
 * table entry sends a fault at 1: to the fixup at 3:, which sets
 * %[err] = -1 and resumes after the instruction at 2:.
 */
static inline int fpu_xrstor_checking(struct xsave_struct *fx)
{
        int err;

        asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
                     "2:\n"
                     ".section .fixup,\"ax\"\n"
                     "3:  movl $-1,%[err]\n"
                     "    jmp  2b\n"
                     ".previous\n"
                     _ASM_EXTABLE(1b, 3b)
                     : [err] "=r" (err)
                     : "D" (fx), "m" (*fx), "a" (-1), "d" (-1), "0" (0)
                     : "memory");

        return err;
}
72
/*
 * Save all extended state to the user-space buffer @buf via XSAVE
 * (hand-encoded 0x0f 0xae /4, operand in %rdi/%edi).  The xsave header
 * is cleared first so that reserved header fields reach user space as
 * zero.  ASM_STAC/ASM_CLAC bracket the user-memory access for SMAP.
 *
 * Returns 0 on success, -EFAULT if clearing the header failed, or -1 if
 * the XSAVE instruction itself faulted (exception-table fixup at 3:).
 */
static inline int xsave_user(struct xsave_struct __user *buf)
{
        int err;

        /*
         * Clear the xsave header first, so that reserved fields are
         * initialized to zero.
         */
        err = __clear_user(&buf->xsave_hdr, sizeof(buf->xsave_hdr));
        if (unlikely(err))
                return -EFAULT;

        __asm__ __volatile__(ASM_STAC "\n"
                             "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
                             "2: " ASM_CLAC "\n"
                             ".section .fixup,\"ax\"\n"
                             "3:  movl $-1,%[err]\n"
                             "    jmp  2b\n"
                             ".previous\n"
                             _ASM_EXTABLE(1b,3b)
                             : [err] "=r" (err)
                             : "D" (buf), "a" (-1), "d" (-1), "0" (0)
                             : "memory");
        return err;
}
98
/*
 * Restore extended state from the user-space buffer @buf via XRSTOR
 * (hand-encoded 0x0f 0xae /5).  @mask selects which state components to
 * restore; it is split into edx:eax (hmask:lmask) as the instruction
 * requires.  ASM_STAC/ASM_CLAC bracket the user access for SMAP.
 *
 * Returns 0 on success, -1 if the instruction faulted (exception-table
 * fixup at 3: sets %[err] and resumes at 2:).
 */
static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
{
        int err;
        /* XRSTOR takes a plain pointer; strip the __user qualifier */
        struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
        u32 lmask = mask;
        u32 hmask = mask >> 32;

        __asm__ __volatile__(ASM_STAC "\n"
                             "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
                             "2: " ASM_CLAC "\n"
                             ".section .fixup,\"ax\"\n"
                             "3:  movl $-1,%[err]\n"
                             "    jmp  2b\n"
                             ".previous\n"
                             _ASM_EXTABLE(1b,3b)
                             : [err] "=r" (err)
                             : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
                             : "memory");       /* memory required? */
        return err;
}
119
/*
 * Restore the state components selected by @mask from the kernel buffer
 * @fx via XRSTOR, with the mask split into edx:eax.  No exception-table
 * fixup: a fault here is a kernel bug and will oops.
 */
static inline void xrstor_state(struct xsave_struct *fx, u64 mask)
{
        u32 lmask = mask;
        u32 hmask = mask >> 32;

        asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
                     : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
                     :   "memory");
}
129
/*
 * Save the state components selected by @mask into the kernel buffer
 * @fx via XSAVE (hand-encoded 0x0f 0xae /4), mask in edx:eax.  Like
 * xrstor_state(), no fault fixup is provided.
 */
static inline void xsave_state(struct xsave_struct *fx, u64 mask)
{
        u32 lmask = mask;
        u32 hmask = mask >> 32;

        asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x27\n\t"
                     : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
                     :   "memory");
}
139
/*
 * Save @fpu's extended state with XSAVEOPT (opcode /6, 0x37) when the
 * CPU supports it, falling back to plain XSAVE (/4, 0x27) otherwise via
 * the alternatives mechanism; eax:edx = -1:-1 saves all enabled
 * components.  The opcodes are hand-encoded because old binutils lack
 * the xsaveopt mnemonic; forcing the operand into %rdi/%edi ("D") picks
 * an addressing mode that doesn't require extended registers, so the
 * same encoded bytes work in both alternative slots.
 */
static inline void fpu_xsave(struct fpu *fpu)
{
        alternative_input(
                ".byte " REX_PREFIX "0x0f,0xae,0x27",
                ".byte " REX_PREFIX "0x0f,0xae,0x37",
                X86_FEATURE_XSAVEOPT,
                [fx] "D" (&fpu->state->xsave), "a" (-1), "d" (-1) :
                "memory");
}
151 #endif