// SPDX-License-Identifier: GPL-2.0-only
/*
 * bpf_jit_comp.c: BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>

#include <asm/set_memory.h>
#include <asm/nospec-branch.h>

static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}

#define EMIT(bytes, len) \
	do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)	EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)

#define EMIT1_off32(b1, off) \
	do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)

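/*
 * For example, EMIT3(0x48, 0x89, 0xE5) packs its arguments little-endian
 * into the u32 0x00E58948; emit_code() stores that word and advances the
 * output pointer by three bytes, so the image receives 48 89 E5,
 * i.e. 'mov rbp, rsp'.
 */
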
static bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

static bool is_simm32(s64 value)
{
	return value == (s64)(s32)value;
}

static bool is_uimm32(u64 value)
{
	return value == (u64)(u32)value;
}

/* mov dst, src */
#define EMIT_mov(DST, SRC)								 \
	do {										 \
		if (DST != SRC)								 \
			EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)

static int bpf_size_to_x86_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 4; /* imm32 */
	else
		return 0;
}

/*
 * List of x86 cond jumps opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JL  0x7C
#define X86_JGE 0x7D
#define X86_JLE 0x7E
#define X86_JG  0x7F
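/*
 * Example: EMIT2(X86_JE, off) yields the short form 74 <s8>, while the
 * far form emitted at emit_cond_jmp below is 0F 84 <s32>
 * (X86_JE + 0x10 == 0x84).
 */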

/* Pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_JIT_REG + 1)

/*
 * The following table maps BPF registers to x86-64 registers.
 *
 * x86-64 register R12 is unused, since if used as base address
 * register in load/store instructions, it always needs an
 * extra byte of encoding and is callee saved.
 *
 * Also x86-64 register R9 is unused. x86-64 register R10 is
 * used for blinding (if enabled).
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* RAX */
	[BPF_REG_1] = 7,  /* RDI */
	[BPF_REG_2] = 6,  /* RSI */
	[BPF_REG_3] = 2,  /* RDX */
	[BPF_REG_4] = 1,  /* RCX */
	[BPF_REG_5] = 0,  /* R8 */
	[BPF_REG_6] = 3,  /* RBX callee saved */
	[BPF_REG_7] = 5,  /* R13 callee saved */
	[BPF_REG_8] = 6,  /* R14 callee saved */
	[BPF_REG_9] = 7,  /* R15 callee saved */
	[BPF_REG_FP] = 5, /* RBP readonly */
	[BPF_REG_AX] = 2, /* R10 temp register */
	[AUX_REG] = 3,    /* R11 temp register */
};

/*
 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15
 * which need extra byte of encoding.
 * rax,rcx,...,rbp have simpler encoding
 */
static bool is_ereg(u32 reg)
{
	return (1 << reg) & (BIT(BPF_REG_5) |
			     BIT(AUX_REG) |
			     BIT(BPF_REG_7) |
			     BIT(BPF_REG_8) |
			     BIT(BPF_REG_9) |
			     BIT(BPF_REG_AX));
}

static bool is_axreg(u32 reg)
{
	return reg == BPF_REG_0;
}

/* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
static u8 add_1mod(u8 byte, u32 reg)
{
	if (is_ereg(reg))
		byte |= 1;
	return byte;
}

static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}

/* Encode 'dst_reg' register into x86-64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}

/* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}

static void jit_fill_hole(void *area, unsigned int size)
{
	/* Fill whole space with INT3 instructions */
	memset(area, 0xcc, size);
}

struct jit_context {
	int cleanup_addr; /* Epilogue code offset */
};

/* Maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE	128
#define BPF_INSN_SAFETY		64

#define AUX_STACK_SPACE		40 /* Space for RBX, R13, R14, R15, tailcnt */

#define PROLOGUE_SIZE		37

/*
 * Emit x86-64 prologue code for BPF program and check its size.
 * bpf_tail_call helper will skip it while jumping into another program
 */
static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf)
{
	u8 *prog = *pprog;
	int cnt = 0;

	/* push rbp */
	EMIT1(0x55);

	/* mov rbp,rsp */
	EMIT3(0x48, 0x89, 0xE5);

	/* sub rsp, rounded_stack_depth + AUX_STACK_SPACE */
	EMIT3_off32(0x48, 0x81, 0xEC,
		    round_up(stack_depth, 8) + AUX_STACK_SPACE);

	/* sub rbp, AUX_STACK_SPACE */
	EMIT4(0x48, 0x83, 0xED, AUX_STACK_SPACE);

	/* mov qword ptr [rbp+0],rbx */
	EMIT4(0x48, 0x89, 0x5D, 0);
	/* mov qword ptr [rbp+8],r13 */
	EMIT4(0x4C, 0x89, 0x6D, 8);
	/* mov qword ptr [rbp+16],r14 */
	EMIT4(0x4C, 0x89, 0x75, 16);
	/* mov qword ptr [rbp+24],r15 */
	EMIT4(0x4C, 0x89, 0x7D, 24);

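	/* Programs converted from classic BPF never use tail calls. */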
	if (!ebpf_from_cbpf) {
		/*
		 * Clear the tail call counter (tail_call_cnt): for eBPF tail
		 * calls we need to reset the counter to 0. It's done in two
		 * instructions, resetting RAX register to 0, and moving it
		 * to the counter location.
		 */

		/* xor eax, eax */
		EMIT2(0x31, 0xc0);
		/* mov qword ptr [rbp+32], rax */
		EMIT4(0x48, 0x89, 0x45, 32);

		BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
	}

	*pprog = prog;
}

/*
 * Generate the following code:
 *
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *     goto out;
 *   if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
 *     goto out;
 *   prog = array->ptrs[index];
 *   if (prog == NULL)
 *     goto out;
 *   goto *(prog->bpf_func + prologue_size);
 * out:
 */
static void emit_bpf_tail_call(u8 **pprog)
{
	u8 *prog = *pprog;
	int label1, label2, label3;
	int cnt = 0;

	/*
	 * rdi - pointer to ctx
	 * rsi - pointer to bpf_array
	 * rdx - index in bpf_array
	 */

	/*
	 * if (index >= array->map.max_entries)
	 *	goto out;
	 */
	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
	      offsetof(struct bpf_array, map.max_entries));
#define OFFSET1 (41 + RETPOLINE_RAX_BPF_JIT_SIZE) /* Number of bytes to jump */
	EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
	label1 = cnt;

	/*
	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT2_off32(0x8B, 0x85, 36);              /* mov eax, dword ptr [rbp + 36] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
#define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE)
	EMIT2(X86_JA, OFFSET2);                   /* ja out */
	label2 = cnt;
	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, 36);              /* mov dword ptr [rbp + 36], eax */

	/* prog = array->ptrs[index]; */
	EMIT4_off32(0x48, 0x8B, 0x84, 0xD6,       /* mov rax, [rsi + rdx * 8 + offsetof(...)] */
		    offsetof(struct bpf_array, ptrs));

	/*
	 * if (prog == NULL)
	 *	goto out;
	 */
	EMIT3(0x48, 0x85, 0xC0);		  /* test rax,rax */
#define OFFSET3 (8 + RETPOLINE_RAX_BPF_JIT_SIZE)
	EMIT2(X86_JE, OFFSET3);                   /* je out */
	label3 = cnt;

	/* goto *(prog->bpf_func + prologue_size); */
	EMIT4(0x48, 0x8B, 0x40,                   /* mov rax, qword ptr [rax + 32] */
	      offsetof(struct bpf_prog, bpf_func));
	EMIT4(0x48, 0x83, 0xC0, PROLOGUE_SIZE);   /* add rax, prologue_size */

	/*
	 * Now we're ready to jump into the next BPF program
	 * rdi == ctx (1st arg)
	 * rax == prog->bpf_func + prologue_size
	 */
	RETPOLINE_RAX_BPF_JIT();

	/* out: */
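	/*
	 * Build-time check that the hardcoded OFFSETn constants above match
	 * the actual byte distance from each conditional jump to this label.
	 */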
	BUILD_BUG_ON(cnt - label1 != OFFSET1);
	BUILD_BUG_ON(cnt - label2 != OFFSET2);
	BUILD_BUG_ON(cnt - label3 != OFFSET3);
	*pprog = prog;
}

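/*
 * Note: the indirect jump above lands PROLOGUE_SIZE bytes into the target
 * program, skipping its prologue, so the target runs on the current stack
 * frame and inherits the live tail_call_cnt instead of resetting it.
 */
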
static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
			   u32 dst_reg, const u32 imm32)
{
	u8 *prog = *pprog;
	u8 b1, b2, b3;
	int cnt = 0;

	/*
	 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
	 * (which zero-extends imm32) to save 2 bytes.
	 */
	if (sign_propagate && (s32)imm32 < 0) {
		/* 'mov %rax, imm32' sign extends imm32 */
		b1 = add_1mod(0x48, dst_reg);
		b2 = 0xC7;
		b3 = 0xC0;
		EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
		goto done;
	}

	/*
	 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
	 * to save 3 bytes.
	 */
	if (imm32 == 0) {
		if (is_ereg(dst_reg))
			EMIT1(add_2mod(0x40, dst_reg, dst_reg));
		b2 = 0x31; /* xor */
		b3 = 0xC0;
		EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
		goto done;
	}

	/* mov %eax, imm32 */
	if (is_ereg(dst_reg))
		EMIT1(add_1mod(0x40, dst_reg));
	EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
done:
	*pprog = prog;
}

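/*
 * Byte counts behind the savings above: 'mov rax, imm32' (REX.W + C7 /0)
 * is 7 bytes, 'mov eax, imm32' (B8+rd) is 5 bytes and 'xor eax, eax' is
 * 2 bytes (one byte more each with a REX prefix for extended registers),
 * while the full 'movabsq' in emit_mov_imm64() below costs 10 bytes.
 */
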
static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
			   const u32 imm32_hi, const u32 imm32_lo)
{
	u8 *prog = *pprog;
	int cnt = 0;

	if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
		/*
		 * For emitting plain u32, where the sign bit must not be
		 * propagated, LLVM tends to load imm64 over mov32
		 * directly, so save a couple of bytes by just doing
		 * 'mov %eax, imm32' instead.
		 */
		emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
	} else {
		/* movabsq %rax, imm64 */
		EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
		EMIT(imm32_lo, 4);
		EMIT(imm32_hi, 4);
	}

	*pprog = prog;
}

static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
{
	u8 *prog = *pprog;
	int cnt = 0;

	if (is64) {
		/* mov dst, src */
		EMIT_mov(dst_reg, src_reg);
	} else {
		/* mov32 dst, src */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT1(add_2mod(0x40, dst_reg, src_reg));
		EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
	}

	*pprog = prog;
}

static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
		  int oldproglen, struct jit_context *ctx)
{
	struct bpf_insn *insn = bpf_prog->insnsi;
	int insn_cnt = bpf_prog->len;
	bool seen_exit = false;
	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
	int i, cnt = 0;
	int proglen = 0;
	u8 *prog = temp;

	emit_prologue(&prog, bpf_prog->aux->stack_depth,
		      bpf_prog_was_classic(bpf_prog));

	for (i = 0; i < insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;
		u8 b2 = 0, b3 = 0;
		s64 jmp_offset;
		u8 jmp_cond;
		int ilen;
		u8 *func;

		switch (insn->code) {
			/* ALU */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU64 | BPF_ADD | BPF_X:
		case BPF_ALU64 | BPF_SUB | BPF_X:
		case BPF_ALU64 | BPF_AND | BPF_X:
		case BPF_ALU64 | BPF_OR | BPF_X:
		case BPF_ALU64 | BPF_XOR | BPF_X:
			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b2 = 0x01; break;
			case BPF_SUB: b2 = 0x29; break;
			case BPF_AND: b2 = 0x21; break;
			case BPF_OR: b2 = 0x09; break;
			case BPF_XOR: b2 = 0x31; break;
			}
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
			break;

		case BPF_ALU64 | BPF_MOV | BPF_X:
		case BPF_ALU | BPF_MOV | BPF_X:
			emit_mov_reg(&prog,
				     BPF_CLASS(insn->code) == BPF_ALU64,
				     dst_reg, src_reg);
			break;

			/* neg dst */
		case BPF_ALU | BPF_NEG:
		case BPF_ALU64 | BPF_NEG:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
			break;

		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU64 | BPF_ADD | BPF_K:
		case BPF_ALU64 | BPF_SUB | BPF_K:
		case BPF_ALU64 | BPF_AND | BPF_K:
		case BPF_ALU64 | BPF_OR | BPF_K:
		case BPF_ALU64 | BPF_XOR | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			/*
			 * b3 holds 'normal' opcode, b2 short form only valid
			 * in case dst is eax/rax.
			 */
			switch (BPF_OP(insn->code)) {
			case BPF_ADD:
				b3 = 0xC0;
				b2 = 0x05;
				break;
			case BPF_SUB:
				b3 = 0xE8;
				b2 = 0x2D;
				break;
			case BPF_AND:
				b3 = 0xE0;
				b2 = 0x25;
				break;
			case BPF_OR:
				b3 = 0xC8;
				b2 = 0x0D;
				break;
			case BPF_XOR:
				b3 = 0xF0;
				b2 = 0x35;
				break;
			}

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
			else if (is_axreg(dst_reg))
				EMIT1_off32(b2, imm32);
			else
				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU64 | BPF_MOV | BPF_K:
		case BPF_ALU | BPF_MOV | BPF_K:
			emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
				       dst_reg, imm32);
			break;

		case BPF_LD | BPF_IMM | BPF_DW:
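			/*
			 * A 64-bit immediate load occupies two BPF insns;
			 * the second one carries the upper 32 bits in its
			 * imm field, hence the extra insn++/i++ below.
			 */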
			emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
			insn++;
			i++;
			break;

			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU64 | BPF_MOD | BPF_X:
		case BPF_ALU64 | BPF_DIV | BPF_X:
		case BPF_ALU64 | BPF_MOD | BPF_K:
		case BPF_ALU64 | BPF_DIV | BPF_K:
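			/*
			 * x86 'div' implicitly uses rdx:rax and clobbers
			 * both, so rax and rdx are saved and restored
			 * around the division no matter what dst_reg is.
			 */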
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov r11, src_reg */
				EMIT_mov(AUX_REG, src_reg);
			else
				/* mov r11, imm32 */
				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);

			/* mov rax, dst_reg */
			EMIT_mov(BPF_REG_0, dst_reg);

			/*
			 * xor edx, edx
			 * equivalent to 'xor rdx, rdx', but one byte less
			 */
			EMIT2(0x31, 0xd2);

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				/* div r11 */
				EMIT3(0x49, 0xF7, 0xF3);
			else
				/* div r11d */
				EMIT3(0x41, 0xF7, 0xF3);

			if (BPF_OP(insn->code) == BPF_MOD)
				/* mov r11, rdx */
				EMIT3(0x49, 0x89, 0xD3);
			else
				/* mov r11, rax */
				EMIT3(0x49, 0x89, 0xC3);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU64 | BPF_MUL | BPF_K:
		case BPF_ALU64 | BPF_MUL | BPF_X:
		{
			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;

			if (dst_reg != BPF_REG_0)
				EMIT1(0x50); /* push rax */
			if (dst_reg != BPF_REG_3)
				EMIT1(0x52); /* push rdx */

			/* mov r11, dst_reg */
			EMIT_mov(AUX_REG, dst_reg);

			if (BPF_SRC(insn->code) == BPF_X)
				emit_mov_reg(&prog, is64, BPF_REG_0, src_reg);
			else
				emit_mov_imm32(&prog, is64, BPF_REG_0, imm32);

			if (is64)
				EMIT1(add_1mod(0x48, AUX_REG));
			else if (is_ereg(AUX_REG))
				EMIT1(add_1mod(0x40, AUX_REG));
			/* mul(q) r11 */
			EMIT2(0xF7, add_1reg(0xE0, AUX_REG));

			if (dst_reg != BPF_REG_3)
				EMIT1(0x5A); /* pop rdx */
			if (dst_reg != BPF_REG_0) {
				/* mov dst_reg, rax */
				EMIT_mov(dst_reg, BPF_REG_0);
				EMIT1(0x58); /* pop rax */
			}
			break;
		}
			/* Shifts */
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_ARSH | BPF_K:
		case BPF_ALU64 | BPF_LSH | BPF_K:
		case BPF_ALU64 | BPF_RSH | BPF_K:
		case BPF_ALU64 | BPF_ARSH | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}

			if (imm32 == 1)
				EMIT2(0xD1, add_1reg(b3, dst_reg));
			else
				EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_ARSH | BPF_X:
		case BPF_ALU64 | BPF_LSH | BPF_X:
		case BPF_ALU64 | BPF_RSH | BPF_X:
		case BPF_ALU64 | BPF_ARSH | BPF_X:

			/* Check for bad case when dst_reg == rcx */
			if (dst_reg == BPF_REG_4) {
				/* mov r11, dst_reg */
				EMIT_mov(AUX_REG, dst_reg);
				dst_reg = AUX_REG;
			}

			if (src_reg != BPF_REG_4) { /* common case */
				EMIT1(0x51); /* push rcx */

				/* mov rcx, src_reg */
				EMIT_mov(BPF_REG_4, src_reg);
			}

			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT2(0xD3, add_1reg(b3, dst_reg));

			if (src_reg != BPF_REG_4)
				EMIT1(0x59); /* pop rcx */

			if (insn->dst_reg == BPF_REG_4)
				/* mov dst_reg, r11 */
				EMIT_mov(insn->dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm32) {
			case 16:
				/* Emit 'ror %ax, 8' to swap lower 2 bytes */
				EMIT1(0x66);
				if (is_ereg(dst_reg))
					EMIT1(0x41);
				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);

				/* Emit 'movzwl eax, ax' */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* Emit 'bswap eax' to swap lower 4 bytes */
				if (is_ereg(dst_reg))
					EMIT2(0x41, 0x0F);
				else
					EMIT1(0x0F);
				EMIT1(add_1reg(0xC8, dst_reg));
				break;
			case 64:
				/* Emit 'bswap rax' to swap 8 bytes */
				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
				      add_1reg(0xC8, dst_reg));
				break;
			}
			break;

		case BPF_ALU | BPF_END | BPF_FROM_LE:
			switch (imm32) {
			case 16:
				/*
				 * Emit 'movzwl eax, ax' to zero extend 16-bit
				 * into 64 bit
				 */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* Emit 'mov eax, eax' to clear upper 32-bits */
				if (is_ereg(dst_reg))
					EMIT1(0x45);
				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 64:
				/* nop */
				break;
			}
			break;

			/* ST: *(u8*)(dst_reg + off) = imm */
		case BPF_ST | BPF_MEM | BPF_B:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC6);
			else
				EMIT1(0xC6);
			goto st;
		case BPF_ST | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg))
				EMIT3(0x66, 0x41, 0xC7);
			else
				EMIT2(0x66, 0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC7);
			else
				EMIT1(0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_DW:
			EMIT2(add_1mod(0x48, dst_reg), 0xC7);

st:			if (is_imm8(insn->off))
				EMIT2(add_1reg(0x40, dst_reg), insn->off);
			else
				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);

			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
			break;

			/* STX: *(u8*)(dst_reg + off) = src_reg */
		case BPF_STX | BPF_MEM | BPF_B:
			/* Emit 'mov byte ptr [rax + off], al' */
			if (is_ereg(dst_reg) || is_ereg(src_reg) ||
			    /* We have to add extra byte for x86 SIL, DIL regs */
			    src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
			else
				EMIT1(0x88);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT2(0x66, 0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT1(0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_DW:
			EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
stx:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;

			/* LDX: dst_reg = *(u8*)(src_reg + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
			/* Emit 'movzx rax, byte ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_H:
			/* Emit 'movzx rax, word ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_W:
			/* Emit 'mov eax, dword ptr [rax+0x14]' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
			else
				EMIT1(0x8B);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_DW:
			/* Emit 'mov rax, qword ptr [rax+0x14]' */
			EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
ldx:			/*
			 * If insn->off == 0 we can save one extra byte, but
			 * special case of x86 R13 which always needs an offset
			 * is not worth the hassle
			 */
			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, src_reg, dst_reg),
					    insn->off);
			break;

			/* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
		case BPF_STX | BPF_XADD | BPF_W:
			/* Emit 'lock add dword ptr [rax + off], eax' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
			else
				EMIT2(0xF0, 0x01);
			goto xadd;
		case BPF_STX | BPF_XADD | BPF_DW:
			EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
xadd:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;

			/* call */
		case BPF_JMP | BPF_CALL:
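			/*
			 * imm32 is the helper's offset from __bpf_call_base
			 * (set up by the verifier); re-derive the absolute
			 * address and check that a rel32 call can reach it.
			 */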
			func = (u8 *) __bpf_call_base + imm32;
			jmp_offset = func - (image + addrs[i]);
			if (!imm32 || !is_simm32(jmp_offset)) {
				pr_err("unsupported BPF func %d addr %p image %p\n",
				       imm32, func, image);
				return -EINVAL;
			}
			EMIT1_off32(0xE8, jmp_offset);
			break;

		case BPF_JMP | BPF_TAIL_CALL:
			emit_bpf_tail_call(&prog);
			break;

			/* cond jump */
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_X:
		case BPF_JMP32 | BPF_JEQ | BPF_X:
		case BPF_JMP32 | BPF_JNE | BPF_X:
		case BPF_JMP32 | BPF_JGT | BPF_X:
		case BPF_JMP32 | BPF_JLT | BPF_X:
		case BPF_JMP32 | BPF_JGE | BPF_X:
		case BPF_JMP32 | BPF_JLE | BPF_X:
		case BPF_JMP32 | BPF_JSGT | BPF_X:
		case BPF_JMP32 | BPF_JSLT | BPF_X:
		case BPF_JMP32 | BPF_JSGE | BPF_X:
		case BPF_JMP32 | BPF_JSLE | BPF_X:
			/* cmp dst_reg, src_reg */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP32 | BPF_JSET | BPF_X:
			/* test dst_reg, src_reg */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP32 | BPF_JSET | BPF_K:
			/* test dst_reg, imm32 */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP32 | BPF_JEQ | BPF_K:
		case BPF_JMP32 | BPF_JNE | BPF_K:
		case BPF_JMP32 | BPF_JGT | BPF_K:
		case BPF_JMP32 | BPF_JLT | BPF_K:
		case BPF_JMP32 | BPF_JGE | BPF_K:
		case BPF_JMP32 | BPF_JLE | BPF_K:
		case BPF_JMP32 | BPF_JSGT | BPF_K:
		case BPF_JMP32 | BPF_JSLT | BPF_K:
		case BPF_JMP32 | BPF_JSGE | BPF_K:
		case BPF_JMP32 | BPF_JSLE | BPF_K:
			/* cmp dst_reg, imm8/32 */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);

emit_cond_jmp:		/* Convert BPF opcode to x86 */
			switch (BPF_OP(insn->code)) {
			case BPF_JEQ:
				jmp_cond = X86_JE;
				break;
			case BPF_JSET:
			case BPF_JNE:
				jmp_cond = X86_JNE;
				break;
			case BPF_JGT:
				/* GT is unsigned '>', JA in x86 */
				jmp_cond = X86_JA;
				break;
			case BPF_JLT:
				/* LT is unsigned '<', JB in x86 */
				jmp_cond = X86_JB;
				break;
			case BPF_JGE:
				/* GE is unsigned '>=', JAE in x86 */
				jmp_cond = X86_JAE;
				break;
			case BPF_JLE:
				/* LE is unsigned '<=', JBE in x86 */
				jmp_cond = X86_JBE;
				break;
			case BPF_JSGT:
				/* Signed '>', GT in x86 */
				jmp_cond = X86_JG;
				break;
			case BPF_JSLT:
				/* Signed '<', LT in x86 */
				jmp_cond = X86_JL;
				break;
			case BPF_JSGE:
				/* Signed '>=', GE in x86 */
				jmp_cond = X86_JGE;
				break;
			case BPF_JSLE:
				/* Signed '<=', LE in x86 */
				jmp_cond = X86_JLE;
				break;
			default: /* to silence GCC warning */
				return -EFAULT;
			}
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (is_imm8(jmp_offset)) {
				EMIT2(jmp_cond, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
			} else {
				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}

			break;

		case BPF_JMP | BPF_JA:
			if (insn->off == -1)
				/* -1 jmp instructions will always jump
				 * backwards two bytes. Explicitly handling
				 * this case avoids wasting too many passes
				 * when there are long sequences of replaced
				 * dead code.
				 */
				jmp_offset = -2;
			else
				jmp_offset = addrs[i + insn->off] - addrs[i];

			if (!jmp_offset)
				/* Optimize out nop jumps */
				break;
emit_jmp:
			if (is_imm8(jmp_offset)) {
				EMIT2(0xEB, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT1_off32(0xE9, jmp_offset);
			} else {
				pr_err("jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}
			break;

		case BPF_JMP | BPF_EXIT:
			if (seen_exit) {
				jmp_offset = ctx->cleanup_addr - addrs[i];
				goto emit_jmp;
			}
			seen_exit = true;
			/* Update cleanup_addr */
			ctx->cleanup_addr = proglen;
			/* mov rbx, qword ptr [rbp+0] */
			EMIT4(0x48, 0x8B, 0x5D, 0);
			/* mov r13, qword ptr [rbp+8] */
			EMIT4(0x4C, 0x8B, 0x6D, 8);
			/* mov r14, qword ptr [rbp+16] */
			EMIT4(0x4C, 0x8B, 0x75, 16);
			/* mov r15, qword ptr [rbp+24] */
			EMIT4(0x4C, 0x8B, 0x7D, 24);

			/* add rbp, AUX_STACK_SPACE */
			EMIT4(0x48, 0x83, 0xC5, AUX_STACK_SPACE);
			EMIT1(0xC9); /* leave */
			EMIT1(0xC3); /* ret */
			break;

		default:
			/*
			 * By design x86-64 JIT should support all BPF instructions.
			 * This error will be seen if new instruction was added
			 * to the interpreter, but not to the JIT, or if there is
			 * junk in bpf_prog.
			 */
			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
			return -EINVAL;
		}

		ilen = prog - temp;
		if (ilen > BPF_MAX_INSN_SIZE) {
			pr_err("bpf_jit: fatal insn size error\n");
			return -EFAULT;
		}

		if (image) {
			if (unlikely(proglen + ilen > oldproglen)) {
				pr_err("bpf_jit: fatal error\n");
				return -EFAULT;
			}
			memcpy(image + proglen, temp, ilen);
		}
		proglen += ilen;
		addrs[i] = proglen;
		prog = temp;
	}
	return proglen;
}

struct x64_jit_data {
	struct bpf_binary_header *header;
	int *addrs;
	u8 *image;
	int proglen;
	struct jit_context ctx;
};
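
/*
 * State kept across bpf_int_jit_compile() invocations for programs with
 * bpf-to-bpf calls (prog->is_func): the JIT runs one more time (extra_pass)
 * after all subprograms have been placed, to patch in the final call
 * addresses.
 */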

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *header = NULL;
	struct bpf_prog *tmp, *orig_prog = prog;
	struct x64_jit_data *jit_data;
	int proglen, oldproglen = 0;
	struct jit_context ctx = {};
	bool tmp_blinded = false;
	bool extra_pass = false;
	u8 *image = NULL;
	int *addrs;
	int pass;
	int i;

	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/*
	 * If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}
	addrs = jit_data->addrs;
	if (addrs) {
		ctx = jit_data->ctx;
		oldproglen = jit_data->proglen;
		image = jit_data->image;
		header = jit_data->header;
		extra_pass = true;
		goto skip_init_addrs;
	}
	addrs = kmalloc_array(prog->len, sizeof(*addrs), GFP_KERNEL);
	if (!addrs) {
		prog = orig_prog;
		goto out_addrs;
	}

	/*
	 * Before the first pass, make a rough estimate of addrs[]:
	 * each BPF instruction is translated to less than 64 bytes.
	 */
	for (proglen = 0, i = 0; i < prog->len; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
	ctx.cleanup_addr = proglen;
skip_init_addrs:

	/*
	 * JITed image shrinks with every pass and the loop iterates
	 * until the image stops shrinking. Very large BPF programs
	 * may converge only on the last pass. In such a case, do one
	 * more pass to emit the final image.
	 */
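	/*
	 * Shrinking example: a jump first emitted as E9 <rel32> (5 bytes)
	 * becomes EB <rel8> (2 bytes) once its target lands within s8 range,
	 * which in turn can pull other jump targets into s8 range on the
	 * next pass.
	 */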
	for (pass = 0; pass < 20 || image; pass++) {
		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
		if (proglen <= 0) {
out_image:
			image = NULL;
			if (header)
				bpf_jit_binary_free(header);
			prog = orig_prog;
			goto out_addrs;
		}
		if (image) {
			if (proglen != oldproglen) {
				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
				       proglen, oldproglen);
				goto out_image;
			}
			break;
		}
		if (proglen == oldproglen) {
			header = bpf_jit_binary_alloc(proglen, &image,
						      1, jit_fill_hole);
			if (!header) {
				prog = orig_prog;
				goto out_addrs;
			}
		}
		oldproglen = proglen;
		cond_resched();
	}

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, proglen, pass + 1, image);

	if (image) {
		if (!prog->is_func || extra_pass) {
			bpf_jit_binary_lock_ro(header);
		} else {
			jit_data->addrs = addrs;
			jit_data->ctx = ctx;
			jit_data->proglen = proglen;
			jit_data->image = image;
			jit_data->header = header;
		}
		prog->bpf_func = (void *)image;
		prog->jited = 1;
		prog->jited_len = proglen;
	} else {
		prog = orig_prog;
	}

	if (!image || !prog->is_func || extra_pass) {
		if (image)
			bpf_prog_fill_jited_linfo(prog, addrs);
out_addrs:
		kfree(addrs);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}