/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef _UAPI__ASM_SIGCONTEXT_H
#define _UAPI__ASM_SIGCONTEXT_H

#ifndef __ASSEMBLY__

#include <linux/types.h>

/*
 * Signal context structure - contains all info to do with the state
 * before the signal handler was invoked.
 */
struct sigcontext {
	__u64 fault_address;
	/* AArch64 registers */
	__u64 regs[31];
	__u64 sp;
	__u64 pc;
	__u64 pstate;
	/* 4K reserved for FP/SIMD state and future expansion */
	__u8 __reserved[4096] __attribute__((__aligned__(16)));
};
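/*
 * For illustration only, a sketch that assumes <signal.h> and the libc's
 * ucontext_t definitions (none of which are declared here): a handler
 * installed with SA_SIGINFO reaches this state through its third argument,
 *
 *	void handler(int sig, siginfo_t *info, void *ucp)
 *	{
 *		ucontext_t *uc = ucp;
 *		__u64 pc = uc->uc_mcontext.pc;
 *		__u64 far = uc->uc_mcontext.fault_address;
 *	}
 *
 * where uc_mcontext mirrors the layout above and fault_address is primarily
 * meaningful for fault-type signals such as SIGSEGV or SIGBUS.
 */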
/*
 * Allocation of __reserved[]:
 * (Note: records do not necessarily occur in the order shown here.)
 *
 *	size		description
 *
 *	0x210		fpsimd_context
 *	 0x10		esr_context
 *	0x8a0		sve_context (vl <= 64) (optional)
 *	 0x20		extra_context (optional)
 *	 0x10		terminator (null _aarch64_ctx)
 *
 *	0x510		(reserved for future allocation)
 *
 * New records that can exceed this space need to be opt-in for userspace, so
 * that an expanded signal frame is not generated unexpectedly. The mechanism
 * for opting in will depend on the extension that generates each new record.
 * The above table documents the maximum set and sizes of records that can be
 * generated when userspace does not opt in for any such extension.
 */
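/*
 * As a worked check of the table above: without any opt-in the records sum
 * to 0x210 + 0x10 + 0x8a0 + 0x20 + 0x10 = 0xaf0 bytes, leaving
 * 0x1000 - 0xaf0 = 0x510 bytes of the 4K __reserved[] area for future
 * allocation.
 */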
/*
 * Header to be used at the beginning of structures extending the user
 * context. Such structures must be placed after the rt_sigframe on the stack
 * and be 16-byte aligned. The last structure must be a dummy one with the
 * magic and size set to 0.
 */
struct _aarch64_ctx {
	__u32 magic;
	__u32 size;
};

#define FPSIMD_MAGIC	0x46508001

struct fpsimd_context {
	struct _aarch64_ctx head;
	__u32 fpsr;
	__u32 fpcr;
	__uint128_t vregs[32];
};
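/*
 * A minimal illustrative sketch of the parsing convention (the helper name
 * and the defensive checks are assumptions of this example, not part of the
 * ABI): records are found by walking __reserved[] from the start, advancing
 * by head.size each time, until the null terminator or the wanted magic.
 */
static inline struct _aarch64_ctx *__sigcontext_find_ctx(struct sigcontext *sc,
							 __u32 magic)
{
	__u8 *base = sc->__reserved;
	__u64 offset = 0;

	while (offset + sizeof(struct _aarch64_ctx) <= sizeof(sc->__reserved)) {
		struct _aarch64_ctx *head =
			(struct _aarch64_ctx *)(base + offset);

		if (!head->magic)
			break;			/* null terminator */
		if (head->magic == magic)
			return head;
		if (!head->size)
			break;			/* malformed record */
		offset += head->size;
	}

	return (struct _aarch64_ctx *)0;
}

/*
 * For example, (struct fpsimd_context *)__sigcontext_find_ctx(sc, FPSIMD_MAGIC)
 * locates the FP/SIMD record; a complete parser would also follow any
 * extra_context record (described below) into its extra space.
 */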
78
Catalin Marinas15af1942013-09-16 15:19:27 +010079/* ESR_EL1 context */
80#define ESR_MAGIC 0x45535201
81
82struct esr_context {
83 struct _aarch64_ctx head;
Mark Salter5e406452014-06-11 21:14:42 +010084 __u64 esr;
Catalin Marinas15af1942013-09-16 15:19:27 +010085};
David Howells4262a722012-10-11 11:05:13 +010086
Dave Martin33f08262017-06-20 18:23:39 +010087/*
88 * extra_context: describes extra space in the signal frame for
89 * additional structures that don't fit in sigcontext.__reserved[].
90 *
91 * Note:
92 *
93 * 1) fpsimd_context, esr_context and extra_context must be placed in
94 * sigcontext.__reserved[] if present. They cannot be placed in the
95 * extra space. Any other record can be placed either in the extra
96 * space or in sigcontext.__reserved[], unless otherwise specified in
97 * this file.
98 *
99 * 2) There must not be more than one extra_context.
100 *
101 * 3) If extra_context is present, it must be followed immediately in
102 * sigcontext.__reserved[] by the terminating null _aarch64_ctx.
103 *
104 * 4) The extra space to which datap points must start at the first
105 * 16-byte aligned address immediately after the terminating null
106 * _aarch64_ctx that follows the extra_context structure in
107 * __reserved[]. The extra space may overrun the end of __reserved[],
108 * as indicated by a sufficiently large value for the size field.
109 *
110 * 5) The extra space must itself be terminated with a null
111 * _aarch64_ctx.
112 */
113#define EXTRA_MAGIC 0x45585401
114
115struct extra_context {
116 struct _aarch64_ctx head;
117 __u64 datap; /* 16-byte aligned pointer to extra space cast to __u64 */
118 __u32 size; /* size in bytes of the extra space */
119 __u32 __reserved[3];
120};
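/*
 * Illustrative sketch only (the helper below is an assumption of this
 * example, not something this header defines): when a walk of __reserved[]
 * reaches an EXTRA_MAGIC record, parsing of the remaining records simply
 * resumes at datap, which per rules 4 and 5 above is 16-byte aligned and is
 * itself terminated by a null _aarch64_ctx.
 */
static inline struct _aarch64_ctx *
__extra_context_datap(const struct extra_context *extra)
{
	return (struct _aarch64_ctx *)(unsigned long)extra->datap;
}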
#define SVE_MAGIC	0x53564501

struct sve_context {
	struct _aarch64_ctx head;
	__u16 vl;
	__u16 __reserved[3];
};

#endif /* !__ASSEMBLY__ */

/*
 * The SVE architecture leaves space for future expansion of the
 * vector length beyond its initial architectural limit of 2048 bits
 * (16 quadwords).
 */
#define SVE_VQ_BYTES	16	/* number of bytes per quadword */

#define SVE_VQ_MIN	1
#define SVE_VQ_MAX	512

#define SVE_VL_MIN	(SVE_VQ_MIN * SVE_VQ_BYTES)
#define SVE_VL_MAX	(SVE_VQ_MAX * SVE_VQ_BYTES)

#define SVE_NUM_ZREGS	32
#define SVE_NUM_PREGS	16

#define sve_vl_valid(vl) \
	((vl) % SVE_VQ_BYTES == 0 && (vl) >= SVE_VL_MIN && (vl) <= SVE_VL_MAX)
#define sve_vq_from_vl(vl)	((vl) / SVE_VQ_BYTES)
#define sve_vl_from_vq(vq)	((vq) * SVE_VQ_BYTES)
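/*
 * Worked example: a 256-bit vector is vl = 32 bytes, for which
 * sve_vl_valid(32) holds and sve_vq_from_vl(32) == 2 quadwords. The headroom
 * reserved here, SVE_VL_MAX == 512 * 16 == 8192 bytes, deliberately exceeds
 * the initial 2048-bit architectural limit noted above.
 */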
/*
 * If the SVE registers are currently live for the thread at signal delivery,
 * sve_context.head.size >=
 *	SVE_SIG_CONTEXT_SIZE(sve_vq_from_vl(sve_context.vl))
 * and the register data may be accessed using the SVE_SIG_*() macros.
 *
 * If sve_context.head.size <
 *	SVE_SIG_CONTEXT_SIZE(sve_vq_from_vl(sve_context.vl)),
 * the SVE registers were not live for the thread and no register data
 * is included: in this case, the SVE_SIG_*() macros should not be
 * used except for this check.
 *
 * The same convention applies when returning from a signal: a caller
 * will need to remove or resize the sve_context block if it wants to
 * make the SVE registers live when they were previously non-live or
 * vice-versa. This may require the caller to allocate fresh
 * memory and/or move other context blocks in the signal frame.
 *
 * Changing the vector length during signal return is not permitted:
 * sve_context.vl must equal the thread's current vector length when
 * doing a sigreturn.
 *
 *
 * Note: for all these macros, the "vq" argument denotes the SVE
 * vector length in quadwords (i.e., units of 128 bits).
 *
 * The correct way to obtain vq is to use sve_vq_from_vl(vl). The
 * result is valid if and only if sve_vl_valid(vl) is true. This is
 * guaranteed for a struct sve_context written by the kernel.
 *
 *
 * Additional macros describe the contents and layout of the payload.
 * For each, SVE_SIG_x_OFFSET(args) is the start offset relative to
 * the start of struct sve_context, and SVE_SIG_x_SIZE(args) is the
 * size in bytes:
 *
 *	x	type				description
 *	-	----				-----------
 *	REGS					the entire SVE context
 *
 *	ZREGS	__uint128_t[SVE_NUM_ZREGS][vq]	all Z-registers
 *	ZREG	__uint128_t[vq]			individual Z-register Zn
 *
 *	PREGS	uint16_t[SVE_NUM_PREGS][vq]	all P-registers
 *	PREG	uint16_t[vq]			individual P-register Pn
 *
 *	FFR	uint16_t[vq]			first-fault status register
 *
 * Additional data might be appended in the future.
 */
#define SVE_SIG_ZREG_SIZE(vq)	((__u32)(vq) * SVE_VQ_BYTES)
#define SVE_SIG_PREG_SIZE(vq)	((__u32)(vq) * (SVE_VQ_BYTES / 8))
#define SVE_SIG_FFR_SIZE(vq)	SVE_SIG_PREG_SIZE(vq)

#define SVE_SIG_REGS_OFFSET					\
	((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1))	\
		/ SVE_VQ_BYTES * SVE_VQ_BYTES)

#define SVE_SIG_ZREGS_OFFSET	SVE_SIG_REGS_OFFSET
#define SVE_SIG_ZREG_OFFSET(vq, n) \
	(SVE_SIG_ZREGS_OFFSET + SVE_SIG_ZREG_SIZE(vq) * (n))
#define SVE_SIG_ZREGS_SIZE(vq) \
	(SVE_SIG_ZREG_OFFSET(vq, SVE_NUM_ZREGS) - SVE_SIG_ZREGS_OFFSET)

#define SVE_SIG_PREGS_OFFSET(vq) \
	(SVE_SIG_ZREGS_OFFSET + SVE_SIG_ZREGS_SIZE(vq))
#define SVE_SIG_PREG_OFFSET(vq, n) \
	(SVE_SIG_PREGS_OFFSET(vq) + SVE_SIG_PREG_SIZE(vq) * (n))
#define SVE_SIG_PREGS_SIZE(vq) \
	(SVE_SIG_PREG_OFFSET(vq, SVE_NUM_PREGS) - SVE_SIG_PREGS_OFFSET(vq))

#define SVE_SIG_FFR_OFFSET(vq) \
	(SVE_SIG_PREGS_OFFSET(vq) + SVE_SIG_PREGS_SIZE(vq))

#define SVE_SIG_REGS_SIZE(vq) \
	(SVE_SIG_FFR_OFFSET(vq) + SVE_SIG_FFR_SIZE(vq) - SVE_SIG_REGS_OFFSET)

#define SVE_SIG_CONTEXT_SIZE(vq) (SVE_SIG_REGS_OFFSET + SVE_SIG_REGS_SIZE(vq))
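#ifndef __ASSEMBLY__
/*
 * A minimal sketch of how the layout macros can be used (the helper name and
 * bounds checks are assumptions of this example, not part of the ABI): given
 * a struct sve_context whose size shows that register data is present,
 * return a pointer to the vq quadwords of Z-register n.
 */
static inline void *__sve_context_zreg(struct sve_context *sve, unsigned int n)
{
	unsigned int vq;

	if (!sve_vl_valid(sve->vl) || n >= SVE_NUM_ZREGS)
		return (void *)0;

	vq = sve_vq_from_vl(sve->vl);
	if (sve->head.size < SVE_SIG_CONTEXT_SIZE(vq))
		return (void *)0;	/* SVE registers not live: no data */

	return (char *)sve + SVE_SIG_ZREG_OFFSET(vq, n);
}
#endif /* !__ASSEMBLY__ */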
#endif /* _UAPI__ASM_SIGCONTEXT_H */