blob: a1645d7a403371206d61eb2b508d51dfc9c27aef [file] [log] [blame]
/*
 * bpf_jit64.h: BPF JIT compiler for PPC64
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#ifndef _BPF_JIT64_H
#define _BPF_JIT64_H

#include "bpf_jit.h"
16
/*
 * Stack layout:
 * Ensure the top half (up to local_tmp_var) stays consistent
 * with our redzone usage.
 *
 *		[	prev sp		] <-------------
 *		[   nv gpr save area	] 8*8		|
 *		[    tail_call_cnt	] 8		|
 *		[    local_tmp_var	] 8		|
 * fp (r31) -->	[   ebpf stack space	] 512		|
 *		[     frame header	] 32/112	|
 * sp (r1) --->	[    stack pointer	] --------------
 */

/* for gpr non volatile registers BPF_REG_6 to 10, plus skb cache registers */
#define BPF_PPC_STACK_SAVE	(8*8)
/* for bpf JIT code internal usage (tail_call_cnt + local_tmp_var above) */
#define BPF_PPC_STACK_LOCALS	16
/* Ensure this is quadword aligned */
#define BPF_PPC_STACKFRAME	(STACK_FRAME_MIN_SIZE + MAX_BPF_STACK + \
				 BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
Naveen N. Rao156d0e22016-06-22 21:55:07 +053038
#ifndef __ASSEMBLY__

/*
 * BPF register usage: JIT-internal pseudo registers, numbered just past
 * the architected BPF registers so they can index the same b2p[] table.
 */
#define SKB_HLEN_REG	(MAX_BPF_REG + 0)
#define SKB_DATA_REG	(MAX_BPF_REG + 1)
#define TMP_REG_1	(MAX_BPF_REG + 2)
#define TMP_REG_2	(MAX_BPF_REG + 3)
/* BPF to ppc register mappings (values are ppc GPR numbers) */
static const int b2p[] = {
	/* function return value */
	[BPF_REG_0] = 8,
	/* function arguments */
	[BPF_REG_1] = 3,
	[BPF_REG_2] = 4,
	[BPF_REG_3] = 5,
	[BPF_REG_4] = 6,
	[BPF_REG_5] = 7,
	/* non volatile registers */
	[BPF_REG_6] = 27,
	[BPF_REG_7] = 28,
	[BPF_REG_8] = 29,
	[BPF_REG_9] = 30,
	/* frame pointer aka BPF_REG_10 */
	[BPF_REG_FP] = 31,
	/* eBPF jit internal registers */
	[SKB_HLEN_REG] = 25,
	[SKB_DATA_REG] = 26,
	[TMP_REG_1] = 9,
	[TMP_REG_2] = 10
};

/* PPC NVR range -- update this if we ever use NVRs below r24 */
#define BPF_PPC_NVR_MIN		24
73
Naveen N. Rao156d0e22016-06-22 21:55:07 +053074/* Assembly helpers */
75#define DECLARE_LOAD_FUNC(func) u64 func(u64 r3, u64 r4); \
76 u64 func##_negative_offset(u64 r3, u64 r4); \
77 u64 func##_positive_offset(u64 r3, u64 r4);
78
79DECLARE_LOAD_FUNC(sk_load_word);
80DECLARE_LOAD_FUNC(sk_load_half);
81DECLARE_LOAD_FUNC(sk_load_byte);
82
83#define CHOOSE_LOAD_FUNC(imm, func) \
84 (imm < 0 ? \
85 (imm >= SKF_LL_OFF ? func##_negative_offset : func) : \
86 func##_positive_offset)
87
/* Flag bits stored in codegen_context.seen (bits 16-23, see struct below) */
#define SEEN_FUNC	0x1000 /* might call external helpers */
#define SEEN_STACK	0x2000 /* uses BPF stack */
#define SEEN_SKB	0x4000 /* uses sk_buff */
91
/* Per-program state carried through JIT code generation. */
struct codegen_context {
	/*
	 * This is used to track register usage as well
	 * as calls to external helpers.
	 * - register usage is tracked with corresponding
	 *   bits (r3-r10 and r25-r31)
	 * - rest of the bits can be used to track other
	 *   things -- for now, we use bits 16 to 23
	 *   encoded in SEEN_* macros above
	 */
	unsigned int seen;
	/* NOTE(review): presumably the current emit index into the JIT
	 * image -- confirm against the users in bpf_jit_comp64.c */
	unsigned int idx;
};
105
106#endif /* !__ASSEMBLY__ */
107
108#endif