He Kuang | bfc077b | 2015-11-16 12:10:12 +0000 | [diff] [blame] | 1 | /* |
| 2 | * bpf-prologue.c |
| 3 | * |
| 4 | * Copyright (C) 2015 He Kuang <hekuang@huawei.com> |
| 5 | * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com> |
| 6 | * Copyright (C) 2015 Huawei Inc. |
| 7 | */ |
| 8 | |
| 9 | #include <bpf/libbpf.h> |
| 10 | #include "perf.h" |
| 11 | #include "debug.h" |
| 12 | #include "bpf-loader.h" |
| 13 | #include "bpf-prologue.h" |
| 14 | #include "probe-finder.h" |
Arnaldo Carvalho de Melo | a43783a | 2017-04-18 10:46:11 -0300 | [diff] [blame] | 15 | #include <errno.h> |
He Kuang | bfc077b | 2015-11-16 12:10:12 +0000 | [diff] [blame] | 16 | #include <dwarf-regs.h> |
| 17 | #include <linux/filter.h> |
| 18 | |
/* Size in bytes of one eBPF register (all slots stored/loaded are 8 bytes). */
#define BPF_REG_SIZE 8

/*
 * Placeholder jump offsets stored in insn->off while generating code.
 * They mark where a jump must land (error / success / user code) before
 * those targets exist; prologue_relocate() rewrites them into real
 * pc-relative offsets once all three targets are known.
 */
#define JMP_TO_ERROR_CODE -1
#define JMP_TO_SUCCESS_CODE -2
#define JMP_TO_USER_CODE -3
| 24 | |
/*
 * Append cursor over an eBPF instruction buffer.  Instructions are
 * written at 'pos' by append_insn(); on overflow 'pos' is set to NULL
 * as a sticky error flag, so later appends and check_pos() keep
 * reporting BPF_LOADER_ERRNO__PROLOGUE2BIG.
 */
struct bpf_insn_pos {
	struct bpf_insn *begin;	/* start of the buffer */
	struct bpf_insn *end;	/* end of usable buffer space */
	struct bpf_insn *pos;	/* next free slot; NULL after overflow */
};
| 30 | |
| 31 | static inline int |
| 32 | pos_get_cnt(struct bpf_insn_pos *pos) |
| 33 | { |
| 34 | return pos->pos - pos->begin; |
| 35 | } |
| 36 | |
| 37 | static int |
| 38 | append_insn(struct bpf_insn new_insn, struct bpf_insn_pos *pos) |
| 39 | { |
| 40 | if (!pos->pos) |
| 41 | return -BPF_LOADER_ERRNO__PROLOGUE2BIG; |
| 42 | |
| 43 | if (pos->pos + 1 >= pos->end) { |
| 44 | pr_err("bpf prologue: prologue too long\n"); |
| 45 | pos->pos = NULL; |
| 46 | return -BPF_LOADER_ERRNO__PROLOGUE2BIG; |
| 47 | } |
| 48 | |
| 49 | *(pos->pos)++ = new_insn; |
| 50 | return 0; |
| 51 | } |
| 52 | |
| 53 | static int |
| 54 | check_pos(struct bpf_insn_pos *pos) |
| 55 | { |
| 56 | if (!pos->pos || pos->pos >= pos->end) |
| 57 | return -BPF_LOADER_ERRNO__PROLOGUE2BIG; |
| 58 | return 0; |
| 59 | } |
| 60 | |
/*
 * Shorthand for append_insn().  Return values of individual ins() calls
 * are deliberately ignored; overflow is detected once via check_pos().
 */
#define ins(i, p) append_insn((i), (p))
| 63 | |
| 64 | /* |
| 65 | * Give a register name (in 'reg'), generate instruction to |
| 66 | * load register into an eBPF register rd: |
| 67 | * 'ldd target_reg, offset(ctx_reg)', where: |
| 68 | * ctx_reg is pre initialized to pointer of 'struct pt_regs'. |
| 69 | */ |
| 70 | static int |
| 71 | gen_ldx_reg_from_ctx(struct bpf_insn_pos *pos, int ctx_reg, |
| 72 | const char *reg, int target_reg) |
| 73 | { |
| 74 | int offset = regs_query_register_offset(reg); |
| 75 | |
| 76 | if (offset < 0) { |
| 77 | pr_err("bpf: prologue: failed to get register %s\n", |
| 78 | reg); |
| 79 | return offset; |
| 80 | } |
| 81 | ins(BPF_LDX_MEM(BPF_DW, target_reg, ctx_reg, offset), pos); |
| 82 | |
| 83 | return check_pos(pos); |
| 84 | } |
| 85 | |
/*
 * Generate a BPF_FUNC_probe_read function call.
 *
 * src_base_addr_reg is a register holding base address,
 * dst_addr_reg is a register holding dest address (on stack),
 * result is:
 *
 *  *[dst_addr_reg] = *([src_base_addr_reg] + offset)
 *
 * Arguments of BPF_FUNC_probe_read:
 *  ARG1: ptr to stack (dest)
 *  ARG2: size (8)
 *  ARG3: unsafe ptr (src)
 *
 * Note: 'offset' is emitted as an s32 immediate; the caller
 * (bpf__gen_prologue) validates the range beforehand.
 */
static int
gen_read_mem(struct bpf_insn_pos *pos,
	     int src_base_addr_reg,
	     int dst_addr_reg,
	     long offset)
{
	/* mov arg3, src_base_addr_reg (skipped if already in ARG3) */
	if (src_base_addr_reg != BPF_REG_ARG3)
		ins(BPF_MOV64_REG(BPF_REG_ARG3, src_base_addr_reg), pos);
	/* add arg3, #offset (skipped for offset 0) */
	if (offset)
		ins(BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, offset), pos);

	/* mov arg2, #reg_size — always re-set: the helper call clobbers it */
	ins(BPF_ALU64_IMM(BPF_MOV, BPF_REG_ARG2, BPF_REG_SIZE), pos);

	/* mov arg1, dst_addr_reg */
	if (dst_addr_reg != BPF_REG_ARG1)
		ins(BPF_MOV64_REG(BPF_REG_ARG1, dst_addr_reg), pos);

	/* Call probe_read */
	ins(BPF_EMIT_CALL(BPF_FUNC_probe_read), pos);
	/*
	 * Error processing: if read fail, goto error code,
	 * will be relocated (JMP_TO_ERROR_CODE is a placeholder
	 * resolved by prologue_relocate()). Target should be the
	 * start of error processing code.
	 */
	ins(BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, JMP_TO_ERROR_CODE),
	    pos);

	return check_pos(pos);
}
| 132 | |
| 133 | /* |
| 134 | * Each arg should be bare register. Fetch and save them into argument |
| 135 | * registers (r3 - r5). |
| 136 | * |
| 137 | * BPF_REG_1 should have been initialized with pointer to |
| 138 | * 'struct pt_regs'. |
| 139 | */ |
| 140 | static int |
| 141 | gen_prologue_fastpath(struct bpf_insn_pos *pos, |
| 142 | struct probe_trace_arg *args, int nargs) |
| 143 | { |
| 144 | int i, err = 0; |
| 145 | |
| 146 | for (i = 0; i < nargs; i++) { |
| 147 | err = gen_ldx_reg_from_ctx(pos, BPF_REG_1, args[i].value, |
| 148 | BPF_PROLOGUE_START_ARG_REG + i); |
| 149 | if (err) |
| 150 | goto errout; |
| 151 | } |
| 152 | |
| 153 | return check_pos(pos); |
| 154 | errout: |
| 155 | return err; |
| 156 | } |
| 157 | |
/*
 * Slow path:
 * At least one argument has the form of 'offset($rx)'.
 *
 * Following code first stores them into stack, then loads all of then
 * to r2 - r5.
 * Before final loading, the final result should be:
 *
 * low address
 * BPF_REG_FP - 24  ARG3
 * BPF_REG_FP - 16  ARG2
 * BPF_REG_FP - 8   ARG1
 * BPF_REG_FP
 * high address
 *
 * For each argument (described as: offn(...off2(off1(reg)))),
 * generates following code:
 *
 *  r7 <- fp
 *  r7 <- r7 - stack_offset  // Ideal code should initialize r7 using
 *                           // fp before generating args. However,
 *                           // eBPF won't regard r7 as stack pointer
 *                           // if it is generated by minus 8 from
 *                           // another stack pointer except fp.
 *                           // This is why we have to set r7
 *                           // to fp for each variable.
 *  r3 <- value of 'reg'-> generated using gen_ldx_reg_from_ctx()
 *  (r7) <- r3       // skip following instructions for bare reg
 *  r3 <- r3 + off1  . // skip if off1 == 0
 *  r2 <- 8          \
 *  r1 <- r7          |-> generated by gen_read_mem()
 *  call probe_read  /
 *  jnei r0, 0, err ./
 *  r3 <- (r7)
 *  r3 <- r3 + off2  . // skip if off2 == 0
 *  r2 <- 8          \ // r2 may be broken by probe_read, so set again
 *  r1 <- r7          |-> generated by gen_read_mem()
 *  call probe_read  /
 *  jnei r0, 0, err ./
 *  ...
 */
static int
gen_prologue_slowpath(struct bpf_insn_pos *pos,
		      struct probe_trace_arg *args, int nargs)
{
	int err, i;

	for (i = 0; i < nargs; i++) {
		struct probe_trace_arg *arg = &args[i];
		const char *reg = arg->value;
		struct probe_trace_arg_ref *ref = NULL;
		/* Each arg owns one 8-byte slot below FP: -8, -16, -24 */
		int stack_offset = (i + 1) * -8;

		pr_debug("prologue: fetch arg %d, base reg is %s\n",
			 i, reg);

		/* value of base register is stored into ARG3 */
		err = gen_ldx_reg_from_ctx(pos, BPF_REG_CTX, reg,
					   BPF_REG_ARG3);
		if (err) {
			pr_err("prologue: failed to get offset of register %s\n",
			       reg);
			goto errout;
		}

		/* Make r7 the stack pointer. */
		ins(BPF_MOV64_REG(BPF_REG_7, BPF_REG_FP), pos);
		/* r7 += -8 */
		ins(BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, stack_offset), pos);
		/*
		 * Store r3 (base register) onto stack
		 * Ensure fp[offset] is set.
		 * fp is the only valid base register when storing
		 * into stack. We are not allowed to use r7 as base
		 * register here.
		 */
		ins(BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG3,
				stack_offset), pos);

		/* Walk the dereference chain: off1(off2(...(reg))) */
		ref = arg->ref;
		while (ref) {
			pr_debug("prologue: arg %d: offset %ld\n",
				 i, ref->offset);
			err = gen_read_mem(pos, BPF_REG_3, BPF_REG_7,
					   ref->offset);
			if (err) {
				pr_err("prologue: failed to generate probe_read function call\n");
				goto errout;
			}

			ref = ref->next;
			/*
			 * Load previous result into ARG3. Use
			 * BPF_REG_FP instead of r7 because verifier
			 * allows FP based addressing only.
			 */
			if (ref)
				ins(BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3,
						BPF_REG_FP, stack_offset), pos);
		}
	}

	/* Final pass: read to registers */
	for (i = 0; i < nargs; i++)
		ins(BPF_LDX_MEM(BPF_DW, BPF_PROLOGUE_START_ARG_REG + i,
				BPF_REG_FP, -BPF_REG_SIZE * (i + 1)), pos);

	/* Placeholder jump, resolved by prologue_relocate(). */
	ins(BPF_JMP_IMM(BPF_JA, BPF_REG_0, 0, JMP_TO_SUCCESS_CODE), pos);

	return check_pos(pos);
errout:
	return err;
}
| 271 | |
| 272 | static int |
| 273 | prologue_relocate(struct bpf_insn_pos *pos, struct bpf_insn *error_code, |
| 274 | struct bpf_insn *success_code, struct bpf_insn *user_code) |
| 275 | { |
| 276 | struct bpf_insn *insn; |
| 277 | |
| 278 | if (check_pos(pos)) |
| 279 | return -BPF_LOADER_ERRNO__PROLOGUE2BIG; |
| 280 | |
| 281 | for (insn = pos->begin; insn < pos->pos; insn++) { |
| 282 | struct bpf_insn *target; |
| 283 | u8 class = BPF_CLASS(insn->code); |
| 284 | u8 opcode; |
| 285 | |
| 286 | if (class != BPF_JMP) |
| 287 | continue; |
| 288 | opcode = BPF_OP(insn->code); |
| 289 | if (opcode == BPF_CALL) |
| 290 | continue; |
| 291 | |
| 292 | switch (insn->off) { |
| 293 | case JMP_TO_ERROR_CODE: |
| 294 | target = error_code; |
| 295 | break; |
| 296 | case JMP_TO_SUCCESS_CODE: |
| 297 | target = success_code; |
| 298 | break; |
| 299 | case JMP_TO_USER_CODE: |
| 300 | target = user_code; |
| 301 | break; |
| 302 | default: |
| 303 | pr_err("bpf prologue: internal error: relocation failed\n"); |
| 304 | return -BPF_LOADER_ERRNO__PROLOGUE; |
| 305 | } |
| 306 | |
| 307 | insn->off = target - (insn + 1); |
| 308 | } |
| 309 | return 0; |
| 310 | } |
| 311 | |
| 312 | int bpf__gen_prologue(struct probe_trace_arg *args, int nargs, |
| 313 | struct bpf_insn *new_prog, size_t *new_cnt, |
| 314 | size_t cnt_space) |
| 315 | { |
| 316 | struct bpf_insn *success_code = NULL; |
| 317 | struct bpf_insn *error_code = NULL; |
| 318 | struct bpf_insn *user_code = NULL; |
| 319 | struct bpf_insn_pos pos; |
| 320 | bool fastpath = true; |
| 321 | int err = 0, i; |
| 322 | |
| 323 | if (!new_prog || !new_cnt) |
| 324 | return -EINVAL; |
| 325 | |
| 326 | if (cnt_space > BPF_MAXINSNS) |
| 327 | cnt_space = BPF_MAXINSNS; |
| 328 | |
| 329 | pos.begin = new_prog; |
| 330 | pos.end = new_prog + cnt_space; |
| 331 | pos.pos = new_prog; |
| 332 | |
| 333 | if (!nargs) { |
| 334 | ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 0), |
| 335 | &pos); |
| 336 | |
| 337 | if (check_pos(&pos)) |
| 338 | goto errout; |
| 339 | |
| 340 | *new_cnt = pos_get_cnt(&pos); |
| 341 | return 0; |
| 342 | } |
| 343 | |
| 344 | if (nargs > BPF_PROLOGUE_MAX_ARGS) { |
| 345 | pr_warning("bpf: prologue: %d arguments are dropped\n", |
| 346 | nargs - BPF_PROLOGUE_MAX_ARGS); |
| 347 | nargs = BPF_PROLOGUE_MAX_ARGS; |
| 348 | } |
| 349 | |
| 350 | /* First pass: validation */ |
| 351 | for (i = 0; i < nargs; i++) { |
| 352 | struct probe_trace_arg_ref *ref = args[i].ref; |
| 353 | |
| 354 | if (args[i].value[0] == '@') { |
| 355 | /* TODO: fetch global variable */ |
| 356 | pr_err("bpf: prologue: global %s%+ld not support\n", |
| 357 | args[i].value, ref ? ref->offset : 0); |
| 358 | return -ENOTSUP; |
| 359 | } |
| 360 | |
| 361 | while (ref) { |
| 362 | /* fastpath is true if all args has ref == NULL */ |
| 363 | fastpath = false; |
| 364 | |
| 365 | /* |
| 366 | * Instruction encodes immediate value using |
| 367 | * s32, ref->offset is long. On systems which |
| 368 | * can't fill long in s32, refuse to process if |
| 369 | * ref->offset too large (or small). |
| 370 | */ |
| 371 | #ifdef __LP64__ |
| 372 | #define OFFSET_MAX ((1LL << 31) - 1) |
| 373 | #define OFFSET_MIN ((1LL << 31) * -1) |
| 374 | if (ref->offset > OFFSET_MAX || |
| 375 | ref->offset < OFFSET_MIN) { |
| 376 | pr_err("bpf: prologue: offset out of bound: %ld\n", |
| 377 | ref->offset); |
| 378 | return -BPF_LOADER_ERRNO__PROLOGUEOOB; |
| 379 | } |
| 380 | #endif |
| 381 | ref = ref->next; |
| 382 | } |
| 383 | } |
| 384 | pr_debug("prologue: pass validation\n"); |
| 385 | |
| 386 | if (fastpath) { |
| 387 | /* If all variables are registers... */ |
| 388 | pr_debug("prologue: fast path\n"); |
| 389 | err = gen_prologue_fastpath(&pos, args, nargs); |
| 390 | if (err) |
| 391 | goto errout; |
| 392 | } else { |
| 393 | pr_debug("prologue: slow path\n"); |
| 394 | |
| 395 | /* Initialization: move ctx to a callee saved register. */ |
| 396 | ins(BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1), &pos); |
| 397 | |
| 398 | err = gen_prologue_slowpath(&pos, args, nargs); |
| 399 | if (err) |
| 400 | goto errout; |
| 401 | /* |
| 402 | * start of ERROR_CODE (only slow pass needs error code) |
| 403 | * mov r2 <- 1 // r2 is error number |
| 404 | * mov r3 <- 0 // r3, r4... should be touched or |
| 405 | * // verifier would complain |
| 406 | * mov r4 <- 0 |
| 407 | * ... |
| 408 | * goto usercode |
| 409 | */ |
| 410 | error_code = pos.pos; |
| 411 | ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 1), |
| 412 | &pos); |
| 413 | |
| 414 | for (i = 0; i < nargs; i++) |
| 415 | ins(BPF_ALU64_IMM(BPF_MOV, |
| 416 | BPF_PROLOGUE_START_ARG_REG + i, |
| 417 | 0), |
| 418 | &pos); |
| 419 | ins(BPF_JMP_IMM(BPF_JA, BPF_REG_0, 0, JMP_TO_USER_CODE), |
| 420 | &pos); |
| 421 | } |
| 422 | |
| 423 | /* |
| 424 | * start of SUCCESS_CODE: |
| 425 | * mov r2 <- 0 |
| 426 | * goto usercode // skip |
| 427 | */ |
| 428 | success_code = pos.pos; |
| 429 | ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 0), &pos); |
| 430 | |
| 431 | /* |
| 432 | * start of USER_CODE: |
| 433 | * Restore ctx to r1 |
| 434 | */ |
| 435 | user_code = pos.pos; |
| 436 | if (!fastpath) { |
| 437 | /* |
| 438 | * Only slow path needs restoring of ctx. In fast path, |
| 439 | * register are loaded directly from r1. |
| 440 | */ |
| 441 | ins(BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX), &pos); |
| 442 | err = prologue_relocate(&pos, error_code, success_code, |
| 443 | user_code); |
| 444 | if (err) |
| 445 | goto errout; |
| 446 | } |
| 447 | |
| 448 | err = check_pos(&pos); |
| 449 | if (err) |
| 450 | goto errout; |
| 451 | |
| 452 | *new_cnt = pos_get_cnt(&pos); |
| 453 | return 0; |
| 454 | errout: |
| 455 | return err; |
| 456 | } |