7df08075fc7a511feb2d912601d63bd858b9a67f
[firefly-linux-kernel-4.4.55.git] / arch/arm64/kernel/insn.c
/*
 * Copyright (C) 2013 Huawei Ltd.
 * Author: Jiang Liu <liuj97@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/stop_machine.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/insn.h>

/*
 * A64 encoding classes are selected by instruction bits [28:25]; this table
 * maps that 4-bit field to an aarch64_insn_encoding_class value.
 */
static int aarch64_insn_encoding_class[] = {
        AARCH64_INSN_CLS_UNKNOWN,
        AARCH64_INSN_CLS_UNKNOWN,
        AARCH64_INSN_CLS_UNKNOWN,
        AARCH64_INSN_CLS_UNKNOWN,
        AARCH64_INSN_CLS_LDST,
        AARCH64_INSN_CLS_DP_REG,
        AARCH64_INSN_CLS_LDST,
        AARCH64_INSN_CLS_DP_FPSIMD,
        AARCH64_INSN_CLS_DP_IMM,
        AARCH64_INSN_CLS_DP_IMM,
        AARCH64_INSN_CLS_BR_SYS,
        AARCH64_INSN_CLS_BR_SYS,
        AARCH64_INSN_CLS_LDST,
        AARCH64_INSN_CLS_DP_REG,
        AARCH64_INSN_CLS_LDST,
        AARCH64_INSN_CLS_DP_FPSIMD,
};

enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
{
        return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
}
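
/*
 * Illustrative sketch (not part of the original file): decoding the class
 * of a known instruction. 0x14000000 encodes "b ." in A64; bits [28:25]
 * are 0b1010 (10), which selects AARCH64_INSN_CLS_BR_SYS in the table
 * above.
 */
static void __maybe_unused aarch64_insn_class_example(void)
{
        u32 insn = 0x14000000;  /* b . */

        if (aarch64_get_insn_class(insn) == AARCH64_INSN_CLS_BR_SYS)
                pr_info("b . is a branch/system instruction\n");
}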

/* NOP is an alias of HINT */
bool __kprobes aarch64_insn_is_nop(u32 insn)
{
        if (!aarch64_insn_is_hint(insn))
                return false;

        switch (insn & 0xFE0) {
        case AARCH64_INSN_HINT_YIELD:
        case AARCH64_INSN_HINT_WFE:
        case AARCH64_INSN_HINT_WFI:
        case AARCH64_INSN_HINT_SEV:
        case AARCH64_INSN_HINT_SEVL:
                return false;
        default:
                return true;
        }
}
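
/*
 * Illustrative sketch (not part of the original file): NOP is HINT #0,
 * encoded as 0xd503201f, so its CRm:op2 field (insn & 0xFE0) is zero and
 * none of the YIELD/WFE/WFI/SEV/SEVL cases above match.
 */
static void __maybe_unused aarch64_insn_nop_example(void)
{
        if (aarch64_insn_is_nop(0xd503201f))    /* nop */
                pr_info("0xd503201f decodes as NOP\n");
}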

/*
 * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
 * little-endian.
 */
int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
{
        int ret;
        u32 val;

        ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
        if (!ret)
                *insnp = le32_to_cpu(val);

        return ret;
}

int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
        insn = cpu_to_le32(insn);
        return probe_kernel_write(addr, &insn, AARCH64_INSN_SIZE);
}
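
/*
 * Illustrative sketch (not part of the original file): a read-modify-write
 * of one kernel text word. 'addr' is a hypothetical, word-aligned kernel
 * text address supplied by the caller. A real caller would normally use
 * aarch64_insn_patch_text_nosync() below, which also flushes the I-cache.
 */
static int __maybe_unused aarch64_insn_rmw_example(void *addr)
{
        u32 insn;
        int ret;

        ret = aarch64_insn_read(addr, &insn);   /* fetch, LE -> CPU order */
        if (ret)
                return ret;

        if (!aarch64_insn_is_nop(insn))
                ret = aarch64_insn_write(addr, 0xd503201f);     /* plant a NOP */

        return ret;
}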

static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
{
        if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
                return false;

        return  aarch64_insn_is_b(insn) ||
                aarch64_insn_is_bl(insn) ||
                aarch64_insn_is_svc(insn) ||
                aarch64_insn_is_hvc(insn) ||
                aarch64_insn_is_smc(insn) ||
                aarch64_insn_is_brk(insn) ||
                aarch64_insn_is_nop(insn);
}

/*
 * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
 * Section B2.6.5 "Concurrent modification and execution of instructions":
 * Concurrent modification and execution of instructions can lead to the
 * resulting instruction performing any behavior that can be achieved by
 * executing any sequence of instructions that can be executed from the
 * same Exception level, except where the instruction before modification
 * and the instruction after modification are both B, BL, NOP, BKPT, SVC,
 * HVC, or SMC instructions.
 */
bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
{
        return __aarch64_insn_hotpatch_safe(old_insn) &&
               __aarch64_insn_hotpatch_safe(new_insn);
}
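
/*
 * Illustrative sketch (not part of the original file): a B -> NOP rewrite
 * is hotpatch-safe (both encodings are in the B2.6.5 allow-list above),
 * while rewriting an ADD is not.
 */
static void __maybe_unused aarch64_insn_hotpatch_example(void)
{
        u32 b_insn = 0x14000002;        /* b .+8 */
        u32 nop = 0xd503201f;           /* nop */
        u32 add = 0x91000421;           /* add x1, x1, #1 */

        if (aarch64_insn_hotpatch_safe(b_insn, nop))
                pr_info("B -> NOP can be patched live\n");
        if (!aarch64_insn_hotpatch_safe(add, nop))
                pr_info("ADD -> NOP needs stop_machine()\n");
}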

int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
        u32 *tp = addr;
        int ret;

        /* A64 instructions must be word aligned */
        if ((uintptr_t)tp & 0x3)
                return -EINVAL;

        ret = aarch64_insn_write(tp, insn);
        if (ret == 0)
                flush_icache_range((uintptr_t)tp,
                                   (uintptr_t)tp + AARCH64_INSN_SIZE);

        return ret;
}
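
/*
 * Illustrative sketch (not part of the original file): replacing one
 * instruction while no other CPU can execute it, e.g. from early boot or
 * under stop_machine(). 'addr' is a hypothetical kernel text address.
 */
static int __maybe_unused aarch64_patch_one_nop(void *addr)
{
        return aarch64_insn_patch_text_nosync(addr, 0xd503201f);  /* nop */
}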

struct aarch64_insn_patch {
        void            **text_addrs;
        u32             *new_insns;
        int             insn_cnt;
        atomic_t        cpu_count;
};

static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
        int i, ret = 0;
        struct aarch64_insn_patch *pp = arg;

        /* The first CPU becomes master */
        if (atomic_inc_return(&pp->cpu_count) == 1) {
                for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
                        ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
                                                             pp->new_insns[i]);
                /*
                 * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
                 * which ends with a "dsb; isb" pair guaranteeing global
                 * visibility.
                 */
                atomic_set(&pp->cpu_count, -1);
        } else {
                while (atomic_read(&pp->cpu_count) != -1)
                        cpu_relax();
                isb();
        }

        return ret;
}
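
/*
 * Note (added for exposition): the rendezvous above works in two phases.
 * Every CPU increments cpu_count on entry; the CPU that sees the count
 * become 1 patches the text, then publishes completion by setting
 * cpu_count to -1. All other CPUs spin inside stop_machine() until they
 * observe -1, then execute an isb() so their pipelines refetch the
 * patched instructions.
 */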

int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
{
        struct aarch64_insn_patch patch = {
                .text_addrs = addrs,
                .new_insns = insns,
                .insn_cnt = cnt,
                .cpu_count = ATOMIC_INIT(0),
        };

        if (cnt <= 0)
                return -EINVAL;

        return stop_machine(aarch64_insn_patch_text_cb, &patch,
                            cpu_online_mask);
}
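
/*
 * Illustrative sketch (not part of the original file): patching two
 * instructions atomically with respect to all online CPUs. The addresses
 * and encodings are hypothetical placeholders.
 */
static int __maybe_unused aarch64_patch_two_insns(void *addr0, void *addr1)
{
        void *addrs[] = { addr0, addr1 };
        u32 insns[] = { 0xd503201f, 0xd503201f };       /* two NOPs */

        return aarch64_insn_patch_text_sync(addrs, insns, 2);
}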

int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
        int ret;
        u32 insn;

        /* Unsafe to patch multiple instructions without synchronization */
        if (cnt == 1) {
                ret = aarch64_insn_read(addrs[0], &insn);
                if (ret)
                        return ret;

                if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
                        /*
                         * The ARMv8 architecture doesn't guarantee that all
                         * CPUs see the new instruction after returning from
                         * aarch64_insn_patch_text_nosync(), so send IPIs to
                         * all other CPUs to achieve instruction
                         * synchronization.
                         */
                        ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
                        kick_all_cpus_sync();
                        return ret;
                }
        }

        return aarch64_insn_patch_text_sync(addrs, insns, cnt);
}
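
/*
 * Illustrative sketch (not part of the original file): a single B -> NOP
 * rewrite takes the fast path above (nosync write plus IPI); anything
 * else, or any multi-instruction batch, falls back to stop_machine().
 * 'addr' is a hypothetical kernel text address.
 */
static int __maybe_unused aarch64_patch_branch_to_nop(void *addr)
{
        void *addrs[] = { addr };
        u32 insns[] = { 0xd503201f };   /* nop */

        return aarch64_insn_patch_text(addrs, insns, 1);
}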

u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
                                  u32 insn, u64 imm)
{
        u32 immlo, immhi, lomask, himask, mask;
        int shift;

        switch (type) {
        case AARCH64_INSN_IMM_ADR:
                lomask = 0x3;
                himask = 0x7ffff;
                immlo = imm & lomask;
                imm >>= 2;
                immhi = imm & himask;
                imm = (immlo << 24) | (immhi);
                mask = (lomask << 24) | (himask);
                shift = 5;
                break;
        case AARCH64_INSN_IMM_26:
                mask = BIT(26) - 1;
                shift = 0;
                break;
        case AARCH64_INSN_IMM_19:
                mask = BIT(19) - 1;
                shift = 5;
                break;
        case AARCH64_INSN_IMM_16:
                mask = BIT(16) - 1;
                shift = 5;
                break;
        case AARCH64_INSN_IMM_14:
                mask = BIT(14) - 1;
                shift = 5;
                break;
        case AARCH64_INSN_IMM_12:
                mask = BIT(12) - 1;
                shift = 10;
                break;
        case AARCH64_INSN_IMM_9:
                mask = BIT(9) - 1;
                shift = 12;
                break;
        default:
                pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
                        type);
                return 0;
        }

        /* Update the immediate field. */
        insn &= ~(mask << shift);
        insn |= (imm & mask) << shift;

        return insn;
}
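
/*
 * Illustrative sketch (not part of the original file): building "b .+8".
 * The B opcode is 0x14000000 and the branch offset is (target - pc) / 4
 * in the 26-bit immediate, so an offset of 8 bytes encodes as imm = 2 and
 * the result is 0x14000002. The ADR case splits its 21-bit immediate the
 * same way, placing immlo at bits [30:29] and immhi at bits [23:5].
 */
static u32 __maybe_unused aarch64_insn_gen_b_pc_plus_8(void)
{
        return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26,
                                             0x14000000, 8 >> 2);
}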