/*
 * Just-In-Time compiler for BPF filters on 32bit ARM
 *
 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/moduleloader.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/hwcap.h>

#include "bpf_jit_32.h"
/*
 * ABI:
 *
 * r0	scratch register
 * r4	BPF register A
 * r5	BPF register X
 * r6	pointer to the skb
 * r7	skb->data
 * r8	skb_headlen(skb)
 */
#define r_scratch	ARM_R0
/* r1-r3 are (also) used for the unaligned loads on the non-ARMv7 slowpath */
#define r_off		ARM_R1
#define r_A		ARM_R4
#define r_X		ARM_R5
#define r_skb		ARM_R6
#define r_skb_data	ARM_R7
#define r_skb_hl	ARM_R8
#define SCRATCH_SP_OFFSET	0
#define SCRATCH_OFF(k)		(SCRATCH_SP_OFFSET + (k))
#define SEEN_MEM		((1 << BPF_MEMWORDS) - 1)
#define SEEN_MEM_WORD(k)	(1 << (k))
#define SEEN_X			(1 << BPF_MEMWORDS)
#define SEEN_CALL		(1 << (BPF_MEMWORDS + 1))
#define SEEN_SKB		(1 << (BPF_MEMWORDS + 2))
#define SEEN_DATA		(1 << (BPF_MEMWORDS + 3))
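/*
 * Illustrative note (added commentary): with BPF_MEMWORDS == 16, the low
 * 16 bits of ctx->seen track which scratch words M[0..15] the filter
 * touches, and the next bits flag use of X, helper calls, the skb
 * pointer and packet data.  A filter that stores to M[2] and reads X
 * ends up with seen == SEEN_MEM_WORD(2) | SEEN_X == 0x10004.
 */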
#define FLAG_NEED_X_RESET	(1 << 0)
struct jit_ctx {
	const struct sk_filter *skf;
	unsigned idx;
	unsigned prologue_bytes;
	int ret0_fp_idx;
	u32 seen;
	u32 flags;
	u32 *offsets;
	u32 *target;
#if __LINUX_ARM_ARCH__ < 7
	u16 epilogue_bytes;
	u16 imm_count;
	u32 *imms;
#endif
};

int bpf_jit_enable __read_mostly;
static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
{
	u8 ret;
	int err;

	err = skb_copy_bits(skb, offset, &ret, 1);

	return (u64)err << 32 | ret;
}

static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
{
	u16 ret;
	int err;

	err = skb_copy_bits(skb, offset, &ret, 2);

	return (u64)err << 32 | ntohs(ret);
}

static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
{
	u32 ret;
	int err;

	err = skb_copy_bits(skb, offset, &ret, 4);

	return (u64)err << 32 | ntohl(ret);
}
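/*
 * Note (added commentary): the helpers above pack the skb_copy_bits()
 * error code into the upper 32 bits of the return value and the
 * byte-swapped payload into the lower 32 bits.  Since the EABI returns
 * u64 in r0/r1, the generated code can test r1 for errors and use r0 as
 * the loaded value after a single call.
 */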
/*
 * Wrapper that handles both OABI and EABI and assures Thumb2 interworking
 * (where the assembly routines like __aeabi_uidiv could cause problems).
 */
static u32 jit_udiv(u32 dividend, u32 divisor)
{
	return dividend / divisor;
}
static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
{
	/* store only on the real pass; both passes count instructions */
	if (ctx->target != NULL)
		ctx->target[ctx->idx] = inst | (cond << 28);

	ctx->idx++;
}
/*
 * Emit an instruction that will be executed unconditionally.
 */
static inline void emit(u32 inst, struct jit_ctx *ctx)
{
	_emit(ARM_COND_AL, inst, ctx);
}
static u16 saved_regs(struct jit_ctx *ctx)
{
	u16 ret = 0;

	if ((ctx->skf->len > 1) ||
	    (ctx->skf->insns[0].code == BPF_S_RET_A))
		ret |= 1 << r_A;

#ifdef CONFIG_FRAME_POINTER
	ret |= (1 << ARM_FP) | (1 << ARM_IP) | (1 << ARM_LR) | (1 << ARM_PC);
#else
	if (ctx->seen & SEEN_CALL)
		ret |= 1 << ARM_LR;
#endif
	if (ctx->seen & (SEEN_DATA | SEEN_SKB))
		ret |= 1 << r_skb;
	if (ctx->seen & SEEN_DATA)
		ret |= (1 << r_skb_data) | (1 << r_skb_hl);
	if (ctx->seen & SEEN_X)
		ret |= 1 << r_X;

	return ret;
}
static inline int mem_words_used(struct jit_ctx *ctx)
{
	/* yes, we do waste some stack space IF there are "holes" in the set */
	return fls(ctx->seen & SEEN_MEM);
}
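/*
 * Worked example (added commentary): if the filter uses M[0] and M[2]
 * only, seen & SEEN_MEM == 0b101 and fls() returns 3, so three words are
 * reserved on the stack even though M[1] is never touched.
 */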
static inline bool is_load_to_a(u16 inst)
{
	switch (inst) {
	case BPF_S_LD_W_LEN:
	case BPF_S_LD_W_ABS:
	case BPF_S_LD_H_ABS:
	case BPF_S_LD_B_ABS:
	case BPF_S_ANC_CPU:
	case BPF_S_ANC_IFINDEX:
	case BPF_S_ANC_MARK:
	case BPF_S_ANC_PROTOCOL:
	case BPF_S_ANC_RXHASH:
	case BPF_S_ANC_QUEUE:
		return true;
	default:
		return false;
	}
}
static void build_prologue(struct jit_ctx *ctx)
{
	u16 reg_set = saved_regs(ctx);
	u16 first_inst = ctx->skf->insns[0].code;
	u16 off;

#ifdef CONFIG_FRAME_POINTER
	emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
	emit(ARM_PUSH(reg_set), ctx);
	emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
#else
	if (reg_set)
		emit(ARM_PUSH(reg_set), ctx);
#endif

	if (ctx->seen & (SEEN_DATA | SEEN_SKB))
		emit(ARM_MOV_R(r_skb, ARM_R0), ctx);

	if (ctx->seen & SEEN_DATA) {
		off = offsetof(struct sk_buff, data);
		emit(ARM_LDR_I(r_skb_data, r_skb, off), ctx);
		/* headlen = len - data_len */
		off = offsetof(struct sk_buff, len);
		emit(ARM_LDR_I(r_skb_hl, r_skb, off), ctx);
		off = offsetof(struct sk_buff, data_len);
		emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
		emit(ARM_SUB_R(r_skb_hl, r_skb_hl, r_scratch), ctx);
	}

	if (ctx->flags & FLAG_NEED_X_RESET)
		emit(ARM_MOV_I(r_X, 0), ctx);

	/* do not leak kernel data to userspace */
	if ((first_inst != BPF_S_RET_K) && !(is_load_to_a(first_inst)))
		emit(ARM_MOV_I(r_A, 0), ctx);

	/* stack space for the BPF_MEM words */
	if (ctx->seen & SEEN_MEM)
		emit(ARM_SUB_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);
}
static void build_epilogue(struct jit_ctx *ctx)
{
	u16 reg_set = saved_regs(ctx);

	if (ctx->seen & SEEN_MEM)
		emit(ARM_ADD_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);

	/* LR is never popped by itself: it is restored directly into PC */
	reg_set &= ~(1 << ARM_LR);

#ifdef CONFIG_FRAME_POINTER
	/* the first instruction of the prologue was: mov ip, sp */
	reg_set &= ~(1 << ARM_IP);
	reg_set |= (1 << ARM_SP);
	emit(ARM_LDM(ARM_SP, reg_set), ctx);
#else
	if (reg_set) {
		if (ctx->seen & SEEN_CALL)
			reg_set |= 1 << ARM_PC;
		emit(ARM_POP(reg_set), ctx);
	}

	if (!(ctx->seen & SEEN_CALL))
		emit(ARM_BX(ARM_LR), ctx);
#endif
}
/*
 * Try to encode x as an ARM imm12 operand: an 8-bit value rotated right
 * by an even amount.  Returns the 12-bit encoding or a negative value.
 */
static int16_t imm8m(u32 x)
{
	u32 rot;

	for (rot = 0; rot < 16; rot++)
		if ((x & ~ror32(0xff, 2 * rot)) == 0)
			return rol32(x, 2 * rot) | (rot << 8);

	return -1;
}
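/*
 * Worked example (added commentary): 0x00ff0000 is 0xff rotated right by
 * 16 bits, i.e. rot == 8 above, so imm8m(0x00ff0000) == (8 << 8) | 0xff
 * == 0x8ff; imm8m(0x00ff00ff) has no such encoding and returns -1, which
 * forces the emit_mov_i_no8m() path below.
 */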
#if __LINUX_ARM_ARCH__ < 7

static u16 imm_offset(u32 k, struct jit_ctx *ctx)
{
	unsigned i = 0, offset;
	u16 imm;

	/* on the "fake" run we just count them (duplicates included) */
	if (ctx->target == NULL) {
		ctx->imm_count++;
		return 0;
	}

	while ((i < ctx->imm_count) && ctx->imms[i]) {
		if (ctx->imms[i] == k)
			break;
		i++;
	}

	if (ctx->imms[i] == 0)
		ctx->imms[i] = k;

	/* constants go just after the epilogue */
	offset = ctx->offsets[ctx->skf->len];
	offset += ctx->prologue_bytes;
	offset += ctx->epilogue_bytes;
	offset += i * 4;

	ctx->target[offset / 4] = k;

	/* PC in ARM mode == address of the instruction + 8 */
	imm = offset - (8 + ctx->idx * 4);

	return imm;
}

#endif /* __LINUX_ARM_ARCH__ */
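/*
 * Illustrative layout (added commentary, pre-ARMv7 only): the image is
 * [prologue][body][epilogue][literal pool], and each constant is loaded
 * PC-relative, roughly "ldr rd, [pc, #imm]" with imm computed above and
 * the pool word holding k.
 */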
/*
 * Move an immediate that's not an imm8m to a core register.
 */
static inline void emit_mov_i_no8m(int rd, u32 val, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 7
	emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx);
#else
	emit(ARM_MOVW(rd, val & 0xffff), ctx);
	if (val > 0xffff)
		emit(ARM_MOVT(rd, val >> 16), ctx);
#endif
}

static inline void emit_mov_i(int rd, u32 val, struct jit_ctx *ctx)
{
	int imm12 = imm8m(val);

	if (imm12 >= 0)
		emit(ARM_MOV_I(rd, imm12), ctx);
	else
		emit_mov_i_no8m(rd, val, ctx);
}
#if __LINUX_ARM_ARCH__ < 6

static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	/* big-endian 32-bit load, one byte at a time (no REV, and no
	 * unaligned-LDR guarantees on ARMv4/v5) */
	_emit(cond, ARM_LDRB_I(ARM_R3, r_addr, 1), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 3), ctx);
	_emit(cond, ARM_LSL_I(ARM_R3, ARM_R3, 16), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R0, r_addr, 2), ctx);
	_emit(cond, ARM_ORR_S(ARM_R3, ARM_R3, ARM_R1, SRTYPE_LSL, 24), ctx);
	_emit(cond, ARM_ORR_R(ARM_R3, ARM_R3, ARM_R2), ctx);
	_emit(cond, ARM_ORR_S(r_res, ARM_R3, ARM_R0, SRTYPE_LSL, 8), ctx);
}

static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 1), ctx);
	_emit(cond, ARM_ORR_S(r_res, ARM_R2, ARM_R1, SRTYPE_LSL, 8), ctx);
}
static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx)
{
	/* r_dst = (r_src << 8) | (r_src >> 8) */
	emit(ARM_LSL_I(ARM_R1, r_src, 8), ctx);
	emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSR, 8), ctx);

	/*
	 * mask out the bits left in r_dst[23:16] by the first shift;
	 * 0x8ff is the imm12 encoding of 0x00ff0000 (0xff ror #16).
	 */
	emit(ARM_BIC_I(r_dst, r_dst, 0x8ff), ctx);
}
#else /* ARMv6+ */

static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDR_I(r_res, r_addr, 0), ctx);
#ifdef __LITTLE_ENDIAN
	_emit(cond, ARM_REV(r_res, r_res), ctx);
#endif
}

static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDRH_I(r_res, r_addr, 0), ctx);
#ifdef __LITTLE_ENDIAN
	_emit(cond, ARM_REV16(r_res, r_res), ctx);
#endif
}

static inline void emit_swap16(u8 r_dst __maybe_unused,
			       u8 r_src __maybe_unused,
			       struct jit_ctx *ctx __maybe_unused)
{
#ifdef __LITTLE_ENDIAN
	emit(ARM_REV16(r_dst, r_src), ctx);
#endif
}

#endif /* __LINUX_ARM_ARCH__ < 6 */
/* Compute the immediate value for a PC-relative branch. */
static inline u32 b_imm(unsigned tgt, struct jit_ctx *ctx)
{
	u32 imm;

	if (ctx->target == NULL)
		return 0;
	/*
	 * BPF allows only forward jumps and the offset of the target is
	 * still the one computed during the first pass.
	 */
	imm = ctx->offsets[tgt] + ctx->prologue_bytes - (ctx->idx * 4 + 8);

	return imm >> 2;
}
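/*
 * Illustrative arithmetic (added commentary): the branch target sits at
 * byte offset offsets[tgt] + prologue_bytes in the image, ARM reads PC
 * as "address of the branch + 8", and the B encoding wants a word
 * offset, hence the subtraction above and the final shift by 2.
 */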
#define OP_IMM3(op, r1, r2, imm_val, ctx)				\
	do {								\
		imm12 = imm8m(imm_val);					\
		if (imm12 < 0) {					\
			emit_mov_i_no8m(r_scratch, imm_val, ctx);	\
			emit(op ## _R((r1), (r2), r_scratch), ctx);	\
		} else {						\
			emit(op ## _I((r1), (r2), imm12), ctx);		\
		}							\
	} while (0)
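/*
 * Usage note (added commentary): OP_IMM3 expands in a scope that must
 * declare a local "imm12" (build_body() below does), e.g.
 * OP_IMM3(ARM_ADD, r_A, r_A, k, ctx) emits either "add r4, r4, #imm"
 * or a constant load into r_scratch followed by "add r4, r4, r0".
 */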
static inline void emit_err_ret(u8 cond, struct jit_ctx *ctx)
{
	if (ctx->ret0_fp_idx >= 0) {
		/* reuse the filter's own "ret 0" if it has one */
		_emit(cond, ARM_B(b_imm(ctx->ret0_fp_idx, ctx)), ctx);
		/* NOP to keep the size constant between passes */
		emit(ARM_MOV_R(ARM_R0, ARM_R0), ctx);
	} else {
		_emit(cond, ARM_MOV_I(ARM_R0, 0), ctx);
		_emit(cond, ARM_B(b_imm(ctx->skf->len, ctx)), ctx);
	}
}
static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 5
	emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);

	if (elf_hwcap & HWCAP_THUMB)
		emit(ARM_BX(tgt_reg), ctx);
	else
		emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
#else
	emit(ARM_BLX_R(tgt_reg), ctx);
#endif
}
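/*
 * Note (added commentary): on ARMv4, which lacks BLX, the sequence above
 * fakes the call: "mov lr, pc" reads PC as the current instruction + 8,
 * i.e. the address just past the following bx/mov, which is exactly the
 * return address the callee needs.
 */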
static inline void emit_udiv(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ == 7
	if (elf_hwcap & HWCAP_IDIVA) {
		emit(ARM_UDIV(rd, rm, rn), ctx);
		return;
	}
#endif
	if (rm != ARM_R0)
		emit(ARM_MOV_R(ARM_R0, rm), ctx);
	if (rn != ARM_R1)
		emit(ARM_MOV_R(ARM_R1, rn), ctx);

	ctx->seen |= SEEN_CALL;
	emit_mov_i(ARM_R3, (u32)jit_udiv, ctx);
	emit_blx_r(ARM_R3, ctx);

	if (rd != ARM_R0)
		emit(ARM_MOV_R(rd, ARM_R0), ctx);
}
static inline void update_on_xread(struct jit_ctx *ctx)
{
	if (!(ctx->seen & SEEN_X))
		ctx->flags |= FLAG_NEED_X_RESET;

	ctx->seen |= SEEN_X;
}
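/*
 * Example (added commentary): a filter whose first instruction is
 * BPF_S_ALU_ADD_X reads X before any write, so update_on_xread() raises
 * FLAG_NEED_X_RESET and build_prologue() emits "mov r5, #0" rather than
 * letting the filter observe a stale register value.
 */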
static int build_body(struct jit_ctx *ctx)
{
	void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
	const struct sk_filter *prog = ctx->skf;
	const struct sock_filter *inst;
	unsigned i, load_order, off, condt;
	int imm12;
	u32 k;

	for (i = 0; i < prog->len; i++) {
		inst = &(prog->insns[i]);
		/* K as an immediate value operand */
		k = inst->k;

		/* compute offsets only in the fake pass */
		if (ctx->target == NULL)
			ctx->offsets[i] = ctx->idx * 4;

		switch (inst->code) {
		case BPF_S_LD_IMM:
			emit_mov_i(r_A, k, ctx);
			break;
		case BPF_S_LD_W_LEN:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			emit(ARM_LDR_I(r_A, r_skb,
				       offsetof(struct sk_buff, len)), ctx);
			break;
		case BPF_S_LD_MEM:
			/* A = scratch[k] */
			ctx->seen |= SEEN_MEM_WORD(k);
			emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_S_LD_W_ABS:
			load_order = 2;
			goto load;
		case BPF_S_LD_H_ABS:
			load_order = 1;
			goto load;
		case BPF_S_LD_B_ABS:
			load_order = 0;
load:
			/* the interpreter will deal with the negative K */
			if ((int)k < 0)
				return -ENOTSUPP;
			emit_mov_i(r_off, k, ctx);
load_common:
			ctx->seen |= SEEN_DATA | SEEN_CALL;

			if (load_order > 0) {
				emit(ARM_SUB_I(r_scratch, r_skb_hl,
					       1 << load_order), ctx);
				emit(ARM_CMP_R(r_scratch, r_off), ctx);
				condt = ARM_COND_HS;
			} else {
				emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
				condt = ARM_COND_HI;
			}

			_emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
			      ctx);

			if (load_order == 0)
				_emit(condt, ARM_LDRB_I(r_A, r_scratch, 0),
				      ctx);
			else if (load_order == 1)
				emit_load_be16(condt, r_A, r_scratch, ctx);
			else if (load_order == 2)
				emit_load_be32(condt, r_A, r_scratch, ctx);

			_emit(condt, ARM_B(b_imm(i + 1, ctx)), ctx);

			/* the slowpath */
			emit_mov_i(ARM_R3, (u32)load_func[load_order], ctx);
			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
			/* the offset is already in R1 */
			emit_blx_r(ARM_R3, ctx);
			/* check the result of skb_copy_bits */
			emit(ARM_CMP_I(ARM_R1, 0), ctx);
			emit_err_ret(ARM_COND_NE, ctx);
			emit(ARM_MOV_R(r_A, ARM_R0), ctx);
			break;
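		/*
		 * Illustrative shape of the fastpath emitted above for
		 * "ldb [k]" (added commentary, not verbatim output):
		 *
		 *   cmp    r8, r1         @ headlen vs. offset
		 *   addhi  r0, r1, r7     @ r0 = skb->data + k
		 *   ldrbhi r4, [r0]       @ A = packet byte
		 *   bhi    <next insn>    @ skip the helper call
		 */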
		case BPF_S_LD_W_IND:
			load_order = 2;
			goto load_ind;
		case BPF_S_LD_H_IND:
			load_order = 1;
			goto load_ind;
		case BPF_S_LD_B_IND:
			load_order = 0;
load_ind:
			OP_IMM3(ARM_ADD, r_off, r_X, k, ctx);
			goto load_common;
		case BPF_S_LDX_IMM:
			ctx->seen |= SEEN_X;
			emit_mov_i(r_X, k, ctx);
			break;
		case BPF_S_LDX_W_LEN:
			ctx->seen |= SEEN_X | SEEN_SKB;
			emit(ARM_LDR_I(r_X, r_skb,
				       offsetof(struct sk_buff, len)), ctx);
			break;
		case BPF_S_LDX_MEM:
			ctx->seen |= SEEN_X | SEEN_MEM_WORD(k);
			emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_S_LDX_B_MSH:
			/* x = ((*(frame + k)) & 0xf) << 2; */
			ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL;
			/* the interpreter should deal with the negative K */
			if ((int)k < 0)
				return -ENOTSUPP;
			/* offset in r1: we might have to take the slow path */
			emit_mov_i(r_off, k, ctx);
			emit(ARM_CMP_R(r_skb_hl, r_off), ctx);

			/* load in r0: common with the slowpath */
			_emit(ARM_COND_HI, ARM_LDRB_R(ARM_R0, r_skb_data,
						      r_off), ctx);
			/*
			 * emit_mov_i() might generate one or two instructions,
			 * the same holds for emit_blx_r()
			 */
			_emit(ARM_COND_HI, ARM_B(b_imm(i + 1, ctx) - 2), ctx);

			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
			/* the offset is already in r_off == r1 */
			emit_mov_i(ARM_R3, (u32)jit_get_skb_b, ctx);
			emit_blx_r(ARM_R3, ctx);
			/* check the return value of skb_copy_bits */
			emit(ARM_CMP_I(ARM_R1, 0), ctx);
			emit_err_ret(ARM_COND_NE, ctx);

			emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx);
			emit(ARM_LSL_I(r_X, r_X, 2), ctx);
			break;
		case BPF_S_ST:
			ctx->seen |= SEEN_MEM_WORD(k);
			emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_S_STX:
			update_on_xread(ctx);
			ctx->seen |= SEEN_MEM_WORD(k);
			emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_S_ALU_ADD_K:
			/* A += K */
			OP_IMM3(ARM_ADD, r_A, r_A, k, ctx);
			break;
		case BPF_S_ALU_ADD_X:
			update_on_xread(ctx);
			emit(ARM_ADD_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_S_ALU_SUB_K:
			/* A -= K */
			OP_IMM3(ARM_SUB, r_A, r_A, k, ctx);
			break;
		case BPF_S_ALU_SUB_X:
			update_on_xread(ctx);
			emit(ARM_SUB_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_S_ALU_MUL_K:
			/* A *= K */
			emit_mov_i(r_scratch, k, ctx);
			emit(ARM_MUL(r_A, r_A, r_scratch), ctx);
			break;
		case BPF_S_ALU_MUL_X:
			update_on_xread(ctx);
			emit(ARM_MUL(r_A, r_A, r_X), ctx);
			break;
		case BPF_S_ALU_DIV_K:
			/* current k == reciprocal_value(userspace k) */
			emit_mov_i(r_scratch, k, ctx);
			/* A = top 32 bits of the product */
			emit(ARM_UMULL(r_scratch, r_A, r_A, r_scratch), ctx);
			break;
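		/*
		 * Illustrative arithmetic (added commentary): for a
		 * userspace K of 2, k == reciprocal_value(2) == 0x80000000,
		 * and with A == 6 the UMULL above keeps the high word of
		 * 6 * 0x80000000, i.e. 3 == A / K.
		 */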
		case BPF_S_ALU_DIV_X:
			update_on_xread(ctx);
			emit(ARM_CMP_I(r_X, 0), ctx);
			emit_err_ret(ARM_COND_EQ, ctx);
			emit_udiv(r_A, r_A, r_X, ctx);
			break;
		case BPF_S_ALU_OR_K:
			/* A |= K */
			OP_IMM3(ARM_ORR, r_A, r_A, k, ctx);
			break;
		case BPF_S_ALU_OR_X:
			update_on_xread(ctx);
			emit(ARM_ORR_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_S_ALU_AND_K:
			/* A &= K */
			OP_IMM3(ARM_AND, r_A, r_A, k, ctx);
			break;
		case BPF_S_ALU_AND_X:
			update_on_xread(ctx);
			emit(ARM_AND_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_S_ALU_LSH_K:
			if (unlikely(k > 31))
				return -1;
			emit(ARM_LSL_I(r_A, r_A, k), ctx);
			break;
		case BPF_S_ALU_LSH_X:
			update_on_xread(ctx);
			emit(ARM_LSL_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_S_ALU_RSH_K:
			if (unlikely(k > 31))
				return -1;
			emit(ARM_LSR_I(r_A, r_A, k), ctx);
			break;
		case BPF_S_ALU_RSH_X:
			update_on_xread(ctx);
			emit(ARM_LSR_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_S_ALU_NEG:
			/* A = -A */
			emit(ARM_RSB_I(r_A, r_A, 0), ctx);
			break;
		case BPF_S_JMP_JA:
			/* pc += K */
			emit(ARM_B(b_imm(i + k + 1, ctx)), ctx);
			break;
		case BPF_S_JMP_JEQ_K:
			/* pc += (A == K) ? pc->jt : pc->jf */
			condt = ARM_COND_EQ;
			goto cmp_imm;
		case BPF_S_JMP_JGT_K:
			/* pc += (A > K) ? pc->jt : pc->jf */
			condt = ARM_COND_HI;
			goto cmp_imm;
		case BPF_S_JMP_JGE_K:
			/* pc += (A >= K) ? pc->jt : pc->jf */
			condt = ARM_COND_HS;
cmp_imm:
			imm12 = imm8m(k);
			if (imm12 < 0) {
				emit_mov_i_no8m(r_scratch, k, ctx);
				emit(ARM_CMP_R(r_A, r_scratch), ctx);
			} else {
				emit(ARM_CMP_I(r_A, imm12), ctx);
			}
cond_jump:
			/* jump to the destination */
			if (inst->jt)
				_emit(condt, ARM_B(b_imm(i + inst->jt + 1,
						   ctx)), ctx);
			if (inst->jf)
				_emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1,
							     ctx)), ctx);
			break;
		case BPF_S_JMP_JEQ_X:
			/* pc += (A == X) ? pc->jt : pc->jf */
			condt = ARM_COND_EQ;
			goto cmp_x;
		case BPF_S_JMP_JGT_X:
			/* pc += (A > X) ? pc->jt : pc->jf */
			condt = ARM_COND_HI;
			goto cmp_x;
		case BPF_S_JMP_JGE_X:
			/* pc += (A >= X) ? pc->jt : pc->jf */
			condt = ARM_COND_HS;
cmp_x:
			update_on_xread(ctx);
			emit(ARM_CMP_R(r_A, r_X), ctx);
			goto cond_jump;
		case BPF_S_JMP_JSET_K:
			/* pc += (A & K) ? pc->jt : pc->jf */
			condt = ARM_COND_NE;
			/* not set iff all zeroes iff Z==1 iff EQ */
			imm12 = imm8m(k);
			if (imm12 < 0) {
				emit_mov_i_no8m(r_scratch, k, ctx);
				emit(ARM_TST_R(r_A, r_scratch), ctx);
			} else {
				emit(ARM_TST_I(r_A, imm12), ctx);
			}
			goto cond_jump;
		case BPF_S_JMP_JSET_X:
			/* pc += (A & X) ? pc->jt : pc->jf */
			update_on_xread(ctx);
			condt = ARM_COND_NE;
			emit(ARM_TST_R(r_A, r_X), ctx);
			goto cond_jump;
		case BPF_S_RET_A:
			emit(ARM_MOV_R(ARM_R0, r_A), ctx);
			goto b_epilogue;
		case BPF_S_RET_K:
			if ((k == 0) && (ctx->ret0_fp_idx < 0))
				ctx->ret0_fp_idx = i;
			emit_mov_i(ARM_R0, k, ctx);
b_epilogue:
			if (i != ctx->skf->len - 1)
				emit(ARM_B(b_imm(prog->len, ctx)), ctx);
			break;
		case BPF_S_MISC_TAX:
			/* X = A */
			ctx->seen |= SEEN_X;
			emit(ARM_MOV_R(r_X, r_A), ctx);
			break;
		case BPF_S_MISC_TXA:
			/* A = X */
			update_on_xread(ctx);
			emit(ARM_MOV_R(r_A, r_X), ctx);
			break;
		case BPF_S_ANC_ALU_XOR_X:
			/* A ^= X */
			update_on_xread(ctx);
			emit(ARM_EOR_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_S_ANC_PROTOCOL:
			/* A = ntohs(skb->protocol) */
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			off = offsetof(struct sk_buff, protocol);
			emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx);
			emit_swap16(r_A, r_scratch, ctx);
			break;
		case BPF_S_ANC_CPU:
			/* r_scratch = current_thread_info() */
			OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx);
			/* A = current_thread_info()->cpu */
			BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4);
			off = offsetof(struct thread_info, cpu);
			emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
			break;
		case BPF_S_ANC_IFINDEX:
			/* A = skb->dev->ifindex */
			ctx->seen |= SEEN_SKB;
			off = offsetof(struct sk_buff, dev);
			emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);

			/* error out on a NULL skb->dev */
			emit(ARM_CMP_I(r_scratch, 0), ctx);
			emit_err_ret(ARM_COND_EQ, ctx);

			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  ifindex) != 4);
			off = offsetof(struct net_device, ifindex);
			emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
			break;
		case BPF_S_ANC_MARK:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			off = offsetof(struct sk_buff, mark);
			emit(ARM_LDR_I(r_A, r_skb, off), ctx);
			break;
		case BPF_S_ANC_RXHASH:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
			off = offsetof(struct sk_buff, rxhash);
			emit(ARM_LDR_I(r_A, r_skb, off), ctx);
			break;
		case BPF_S_ANC_QUEUE:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			BUILD_BUG_ON(offsetof(struct sk_buff,
					      queue_mapping) > 0xff);
			off = offsetof(struct sk_buff, queue_mapping);
			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
			break;
		default:
			return -1;
		}
	}

	/* compute offsets only during the first pass */
	if (ctx->target == NULL)
		ctx->offsets[i] = ctx->idx * 4;

	return 0;
}
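/*
 * Note (added commentary): build_body() runs twice -- a "fake" pass with
 * ctx->target == NULL that sizes the image and records per-instruction
 * offsets, then a real pass that emits into the allocated buffer.  The
 * offsets recorded in pass one feed b_imm() in pass two, which is why
 * every path must emit the same number of instructions on both passes.
 */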
void bpf_jit_compile(struct sk_filter *fp)
{
	struct jit_ctx ctx;
	unsigned tmp_idx;
	unsigned alloc_size;

	if (!bpf_jit_enable)
		return;

	memset(&ctx, 0, sizeof(ctx));
	ctx.skf		= fp;
	ctx.ret0_fp_idx = -1;
	ctx.offsets = kzalloc(4 * (ctx.skf->len + 1), GFP_KERNEL);
	if (ctx.offsets == NULL)
		return;
	/* fake pass to fill in the ctx->seen */
	if (unlikely(build_body(&ctx)))
		goto out;

	tmp_idx = ctx.idx;
	build_prologue(&ctx);
	ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;

#if __LINUX_ARM_ARCH__ < 7
	tmp_idx = ctx.idx;
	build_epilogue(&ctx);
	ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4;

	ctx.idx += ctx.imm_count;
	if (ctx.imm_count) {
		ctx.imms = kzalloc(4 * ctx.imm_count, GFP_KERNEL);
		if (ctx.imms == NULL)
			goto out;
	}
#else
	/* there's nothing after the epilogue on ARMv7 */
	build_epilogue(&ctx);
#endif

	alloc_size = 4 * ctx.idx;
	ctx.target = module_alloc(max(sizeof(struct work_struct),
				      alloc_size));
	if (unlikely(ctx.target == NULL))
		goto out;

	ctx.idx = 0;
	build_prologue(&ctx);
	build_body(&ctx);
	build_epilogue(&ctx);

	flush_icache_range((u32)ctx.target, (u32)(ctx.target + ctx.idx));
#if __LINUX_ARM_ARCH__ < 7
	if (ctx.imm_count)
		kfree(ctx.imms);
#endif

	if (bpf_jit_enable > 1)
		print_hex_dump(KERN_INFO, "BPF JIT code: ",
			       DUMP_PREFIX_ADDRESS, 16, 4, ctx.target,
			       alloc_size, false);

	fp->bpf_func = (void *)ctx.target;
out:
	kfree(ctx.offsets);
}
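/*
 * Note (added commentary): the JIT image is freed via a work item so
 * that module_free() runs in process context; the image memory itself is
 * reused as the work_struct, which is why bpf_jit_compile() allocates at
 * least sizeof(struct work_struct) above.
 */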
static void bpf_jit_free_worker(struct work_struct *work)
{
	module_free(NULL, work);
}

void bpf_jit_free(struct sk_filter *fp)
{
	struct work_struct *work;

	if (fp->bpf_func != sk_run_filter) {
		/* the filter's image doubles as the work_struct */
		work = (struct work_struct *)fp->bpf_func;

		INIT_WORK(work, bpf_jit_free_worker);
		schedule_work(work);
	}
}