/*
 * Copyright (C) 2009 Matt Fleming <matt@console-pimps.org>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * This is an implementation of a DWARF unwinder. Its main purpose is
 * for generating stacktrace information. Based on the DWARF 3
 * specification from http://www.dwarfstd.org.
 *
 * TODO:
 *	- DWARF64 doesn't work.
 *	- Registers with DWARF_VAL_OFFSET rules aren't handled properly.
 */

/* #define DEBUG */
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/mm.h>
#include <linux/ftrace.h>
#include <asm/dwarf.h>
#include <asm/unwinder.h>
#include <asm/sections.h>
#include <asm/unaligned.h>
#include <asm/stacktrace.h>

/* Reserve enough memory for two stack frames */
#define DWARF_FRAME_MIN_REQ	2
/* ... with 4 registers per frame. */
#define DWARF_REG_MIN_REQ	(DWARF_FRAME_MIN_REQ * 4)
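
/*
 * These minimums size the mempools created in dwarf_unwinder_init().
 * Frames and registers are allocated with GFP_ATOMIC while unwinding,
 * so the pools guarantee a small reserve even when the slab allocator
 * cannot satisfy an atomic allocation.
 */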

static struct kmem_cache *dwarf_frame_cachep;
static mempool_t *dwarf_frame_pool;

static struct kmem_cache *dwarf_reg_cachep;
static mempool_t *dwarf_reg_pool;

static LIST_HEAD(dwarf_cie_list);
static DEFINE_SPINLOCK(dwarf_cie_lock);

static LIST_HEAD(dwarf_fde_list);
static DEFINE_SPINLOCK(dwarf_fde_lock);

static struct dwarf_cie *cached_cie;

/**
 * dwarf_frame_alloc_reg - allocate memory for a DWARF register
 * @frame: the DWARF frame whose register list the new register is added to
 * @reg_num: the register number
 *
 * Allocate space for, and initialise, a dwarf reg from
 * dwarf_reg_pool and insert it onto the (unsorted) linked-list of
 * dwarf registers for @frame.
 *
 * Return the initialised DWARF reg.
 */
static struct dwarf_reg *dwarf_frame_alloc_reg(struct dwarf_frame *frame,
					       unsigned int reg_num)
{
	struct dwarf_reg *reg;

	reg = mempool_alloc(dwarf_reg_pool, GFP_ATOMIC);
	if (!reg) {
		printk(KERN_WARNING "Unable to allocate a DWARF register\n");
		/*
		 * Let's just bomb hard here, we have no way to
		 * gracefully recover.
		 */
		UNWINDER_BUG();
	}

	reg->number = reg_num;
	reg->addr = 0;
	reg->flags = 0;

	list_add(&reg->link, &frame->reg_list);

	return reg;
}

static void dwarf_frame_free_regs(struct dwarf_frame *frame)
{
	struct dwarf_reg *reg, *n;

	list_for_each_entry_safe(reg, n, &frame->reg_list, link) {
		list_del(&reg->link);
		mempool_free(reg, dwarf_reg_pool);
	}
}

/**
 * dwarf_frame_reg - return a DWARF register
 * @frame: the DWARF frame to search in for @reg_num
 * @reg_num: the register number to search for
 *
 * Lookup and return the dwarf reg @reg_num for this frame. Return
 * NULL if @reg_num is an invalid register number.
 */
static struct dwarf_reg *dwarf_frame_reg(struct dwarf_frame *frame,
					 unsigned int reg_num)
{
	struct dwarf_reg *reg;

	list_for_each_entry(reg, &frame->reg_list, link) {
		if (reg->number == reg_num)
			return reg;
	}

	return NULL;
}

/**
 * dwarf_read_addr - read dwarf data
 * @src: source address of data
 * @dst: destination address to store the data to
 *
 * Read 'n' bytes from @src, where 'n' is the size of an address on
 * the native machine. We have to be careful when reading from @src
 * and writing to @dst because they can be arbitrarily aligned.
 * Return 'n', the number of bytes read.
 */
static inline int dwarf_read_addr(unsigned long *src, unsigned long *dst)
{
	u32 val = get_unaligned(src);
	put_unaligned(val, dst);
	return sizeof(unsigned long *);
}

/**
 * dwarf_read_uleb128 - read unsigned LEB128 data
 * @addr: the address where the ULEB128 data is stored
 * @ret: address to store the result
 *
 * Decode an unsigned LEB128 encoded datum. The algorithm is taken
 * from Appendix C of the DWARF 3 spec. For information on the
 * encodings refer to section "7.6 - Variable Length Data". Return
 * the number of bytes read.
 */
static inline unsigned long dwarf_read_uleb128(char *addr, unsigned int *ret)
{
	unsigned int result;
	unsigned char byte;
	int shift, count;

	result = 0;
	shift = 0;
	count = 0;

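	/*
	 * Each encoded byte carries seven payload bits, least-significant
	 * group first, with bit 7 set on every byte except the last. For
	 * example, the byte sequence 0xe5 0x8e 0x26 decodes to 624485.
	 */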
	while (1) {
		byte = __raw_readb(addr);
		addr++;
		count++;

		result |= (byte & 0x7f) << shift;
		shift += 7;

		if (!(byte & 0x80))
			break;
	}

	*ret = result;

	return count;
}

/**
 * dwarf_read_leb128 - read signed LEB128 data
 * @addr: the address of the LEB128 encoded data
 * @ret: address to store the result
 *
 * Decode signed LEB128 data. The algorithm is taken from Appendix
 * C of the DWARF 3 spec. Return the number of bytes read.
 */
static inline unsigned long dwarf_read_leb128(char *addr, int *ret)
{
	unsigned char byte;
	int result, shift;
	int num_bits;
	int count;

	result = 0;
	shift = 0;
	count = 0;

	while (1) {
		byte = __raw_readb(addr);
		addr++;
		result |= (byte & 0x7f) << shift;
		shift += 7;
		count++;

		if (!(byte & 0x80))
			break;
	}

	/* The number of bits in a signed integer. */
	num_bits = 8 * sizeof(result);

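	/*
	 * Sign extend: if the last byte read had its sign bit (0x40) set,
	 * fill the remaining high-order bits of the result with ones.
	 * For example, the single byte 0x7f decodes to -1.
	 */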
	if ((shift < num_bits) && (byte & 0x40))
		result |= (-1 << shift);

	*ret = result;

	return count;
}

/**
 * dwarf_read_encoded_value - return the decoded value at @addr
 * @addr: the address of the encoded value
 * @val: where to write the decoded value
 * @encoding: the encoding with which we can decode @addr
 *
 * GCC emits encoded addresses in the .eh_frame FDE entries. Decode
 * the value at @addr using @encoding. The decoded value is written
 * to @val and the number of bytes read is returned.
 */
static int dwarf_read_encoded_value(char *addr, unsigned long *val,
				    char encoding)
{
	unsigned long decoded_addr = 0;
	int count = 0;

	switch (encoding & 0x70) {
	case DW_EH_PE_absptr:
		break;
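	/*
	 * A pc-relative value is relative to the address at which it is
	 * stored, so seed the result with @addr before the raw datum is
	 * added below.
	 */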
	case DW_EH_PE_pcrel:
		decoded_addr = (unsigned long)addr;
		break;
	default:
		pr_debug("encoding=0x%x\n", (encoding & 0x70));
		UNWINDER_BUG();
	}

	if ((encoding & 0x07) == 0x00)
		encoding |= DW_EH_PE_udata4;

	switch (encoding & 0x0f) {
	case DW_EH_PE_sdata4:
	case DW_EH_PE_udata4:
		count += 4;
		decoded_addr += get_unaligned((u32 *)addr);
		__raw_writel(decoded_addr, val);
		break;
	default:
		pr_debug("encoding=0x%x\n", encoding);
		UNWINDER_BUG();
	}

	return count;
}

/**
 * dwarf_entry_len - return the length of an FDE or CIE
 * @addr: the address of the entry
 * @len: the length of the entry
 *
 * Read the initial_length field of the entry and store the size of
 * the entry in @len. We return the number of bytes read. Return a
 * count of 0 on error.
 */
static inline int dwarf_entry_len(char *addr, unsigned long *len)
{
	u32 initial_len;
	int count;

	initial_len = get_unaligned((u32 *)addr);
	count = 4;

	/*
	 * An initial length field value in the range DW_EXT_LO -
	 * DW_EXT_HI indicates an extension, and should not be
	 * interpreted as a length. The only extension that we currently
	 * understand is the use of DWARF64 addresses.
	 */
	if (initial_len >= DW_EXT_LO && initial_len <= DW_EXT_HI) {
		/*
		 * The 64-bit length field immediately follows the
		 * compulsory 32-bit length field.
		 */
		if (initial_len == DW_EXT_DWARF64) {
			*len = get_unaligned((u64 *)(addr + 4));
			count = 12;
		} else {
			printk(KERN_WARNING "Unknown DWARF extension\n");
			count = 0;
		}
	} else
		*len = initial_len;

	return count;
}

/**
 * dwarf_lookup_cie - locate the cie
 * @cie_ptr: the CIE pointer value to use as the lookup key
 */
static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
{
	struct dwarf_cie *cie;
	unsigned long flags;

	spin_lock_irqsave(&dwarf_cie_lock, flags);

	/*
	 * We've cached the last CIE we looked up because chances are
	 * that the FDE wants this CIE.
	 */
	if (cached_cie && cached_cie->cie_pointer == cie_ptr) {
		cie = cached_cie;
		goto out;
	}

	list_for_each_entry(cie, &dwarf_cie_list, link) {
		if (cie->cie_pointer == cie_ptr) {
			cached_cie = cie;
			break;
		}
	}

	/* Couldn't find the entry in the list. */
	if (&cie->link == &dwarf_cie_list)
		cie = NULL;
out:
	spin_unlock_irqrestore(&dwarf_cie_lock, flags);
	return cie;
}

/**
 * dwarf_lookup_fde - locate the FDE that covers pc
 * @pc: the program counter
 */
struct dwarf_fde *dwarf_lookup_fde(unsigned long pc)
{
	struct dwarf_fde *fde;
	unsigned long flags;

	spin_lock_irqsave(&dwarf_fde_lock, flags);

	list_for_each_entry(fde, &dwarf_fde_list, link) {
		unsigned long start, end;

		start = fde->initial_location;
		end = fde->initial_location + fde->address_range;

		if (pc >= start && pc < end)
			break;
	}

	/* Couldn't find the entry in the list. */
	if (&fde->link == &dwarf_fde_list)
		fde = NULL;

	spin_unlock_irqrestore(&dwarf_fde_lock, flags);

	return fde;
}

/**
 * dwarf_cfa_execute_insns - execute instructions to calculate a CFA
 * @insn_start: address of the first instruction
 * @insn_end: address immediately after the last instruction
 * @cie: the CIE for this function
 * @fde: the FDE for this function
 * @frame: the frame whose CFA the instructions calculate
 * @pc: the program counter of the address we're interested in
 *
 * Execute the Call Frame instruction sequence starting at
 * @insn_start and ending at @insn_end. The instructions describe
 * how to calculate the Canonical Frame Address of a stackframe.
 * Store the results in @frame.
 */
static int dwarf_cfa_execute_insns(unsigned char *insn_start,
				   unsigned char *insn_end,
				   struct dwarf_cie *cie,
				   struct dwarf_fde *fde,
				   struct dwarf_frame *frame,
				   unsigned long pc)
{
	unsigned char insn;
	unsigned char *current_insn;
	unsigned int count, delta, reg, expr_len, offset;
	struct dwarf_reg *regp;

	current_insn = insn_start;

	while (current_insn < insn_end && frame->pc <= pc) {
		insn = __raw_readb(current_insn++);

		/*
		 * Firstly, handle the opcodes that embed their operands
		 * in the instructions.
		 */
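		/*
		 * For these opcodes the top two bits of the instruction
		 * byte select the operation and the low six bits
		 * (extracted by DW_CFA_operand) hold the first operand,
		 * as described in the DWARF 3 spec.
		 */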
		switch (DW_CFA_opcode(insn)) {
		case DW_CFA_advance_loc:
			delta = DW_CFA_operand(insn);
			delta *= cie->code_alignment_factor;
			frame->pc += delta;
			continue;
			/* NOTREACHED */
		case DW_CFA_offset:
			reg = DW_CFA_operand(insn);
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->addr = offset;
			regp->flags |= DWARF_REG_OFFSET;
			continue;
			/* NOTREACHED */
		case DW_CFA_restore:
			reg = DW_CFA_operand(insn);
			continue;
			/* NOTREACHED */
		}

		/*
		 * Secondly, handle the opcodes that don't embed their
		 * operands in the instruction.
		 */
		switch (insn) {
		case DW_CFA_nop:
			continue;
		case DW_CFA_advance_loc1:
			delta = *current_insn++;
			frame->pc += delta * cie->code_alignment_factor;
			break;
		case DW_CFA_advance_loc2:
			delta = get_unaligned((u16 *)current_insn);
			current_insn += 2;
			frame->pc += delta * cie->code_alignment_factor;
			break;
		case DW_CFA_advance_loc4:
			delta = get_unaligned((u32 *)current_insn);
			current_insn += 4;
			frame->pc += delta * cie->code_alignment_factor;
			break;
		case DW_CFA_offset_extended:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			break;
		case DW_CFA_restore_extended:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			break;
		case DW_CFA_undefined:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_UNDEFINED;
			break;
		case DW_CFA_def_cfa:
			count = dwarf_read_uleb128(current_insn,
						   &frame->cfa_register);
			current_insn += count;
			count = dwarf_read_uleb128(current_insn,
						   &frame->cfa_offset);
			current_insn += count;

			frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
			break;
		case DW_CFA_def_cfa_register:
			count = dwarf_read_uleb128(current_insn,
						   &frame->cfa_register);
			current_insn += count;
			frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
			break;
		case DW_CFA_def_cfa_offset:
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			frame->cfa_offset = offset;
			break;
		case DW_CFA_def_cfa_expression:
			count = dwarf_read_uleb128(current_insn, &expr_len);
			current_insn += count;

			frame->cfa_expr = current_insn;
			frame->cfa_expr_len = expr_len;
			current_insn += expr_len;

			frame->flags |= DWARF_FRAME_CFA_REG_EXP;
			break;
		case DW_CFA_offset_extended_sf:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_leb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_REG_OFFSET;
			regp->addr = offset;
			break;
		case DW_CFA_val_offset:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_leb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_VAL_OFFSET;
			regp->addr = offset;
			break;
		case DW_CFA_GNU_args_size:
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			break;
		case DW_CFA_GNU_negative_offset_extended:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;

			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_REG_OFFSET;
			regp->addr = -offset;
			break;
		default:
			pr_debug("unhandled DWARF instruction 0x%x\n", insn);
			UNWINDER_BUG();
			break;
		}
	}

	return 0;
}

/**
 * dwarf_unwind_stack - recursively unwind the stack
 * @pc: address of the function to unwind
 * @prev: struct dwarf_frame of the previous stackframe on the callstack
 *
 * Return a struct dwarf_frame representing the most recent frame
 * on the callstack. Each of the lower (older) stack frames are
 * linked via the "prev" member.
 */
struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
				       struct dwarf_frame *prev)
{
	struct dwarf_frame *frame;
	struct dwarf_cie *cie;
	struct dwarf_fde *fde;
	struct dwarf_reg *reg;
	unsigned long addr;

	/*
	 * If this is the first invocation of this recursive function we
	 * need to read the contents of a physical register to get the
	 * CFA in order to begin the virtual unwinding of the stack.
	 *
	 * NOTE: the return address is guaranteed to be set up by the
	 * time this function makes its first function call.
	 */
	if (!pc || !prev)
		pc = (unsigned long)current_text_addr();

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/*
	 * If our stack has been patched by the function graph tracer
	 * then we might see the address of return_to_handler() where we
	 * expected to find the real return address.
	 */
	if (pc == (unsigned long)&return_to_handler) {
		int index = current->curr_ret_stack;

		/*
		 * We currently have no way of tracking how many
		 * return_to_handler()'s we've seen. If there is more
		 * than one patched return address on our stack,
		 * complain loudly.
		 */
		WARN_ON(index > 0);

		pc = current->ret_stack[index].ret;
	}
#endif

	frame = mempool_alloc(dwarf_frame_pool, GFP_ATOMIC);
	if (!frame) {
		printk(KERN_ERR "Unable to allocate a dwarf frame\n");
		UNWINDER_BUG();
	}

	INIT_LIST_HEAD(&frame->reg_list);
	frame->flags = 0;
	frame->prev = prev;
	frame->return_addr = 0;

	fde = dwarf_lookup_fde(pc);
	if (!fde) {
		/*
		 * This is our normal exit path - the one that stops the
		 * recursion. There are two reasons why we might exit
		 * here,
		 *
		 *	a) pc has no associated DWARF frame info and so
		 *	   we don't know how to unwind this frame. This is
		 *	   usually the case when we're trying to unwind a
		 *	   frame that was called from some assembly code
		 *	   that has no DWARF info, e.g. syscalls.
		 *
		 *	b) the DWARF debug info for pc is bogus. There's
		 *	   really no way to distinguish this case from the
		 *	   case above, which sucks because we could print a
		 *	   warning here.
		 */
		goto bail;
	}

	cie = dwarf_lookup_cie(fde->cie_pointer);

	frame->pc = fde->initial_location;

	/* CIE initial instructions */
	dwarf_cfa_execute_insns(cie->initial_instructions,
				cie->instructions_end, cie, fde,
				frame, pc);

	/* FDE instructions */
	dwarf_cfa_execute_insns(fde->instructions, fde->end, cie,
				fde, frame, pc);

	/* Calculate the CFA */
	switch (frame->flags) {
	case DWARF_FRAME_CFA_REG_OFFSET:
		if (prev) {
			reg = dwarf_frame_reg(prev, frame->cfa_register);
			UNWINDER_BUG_ON(!reg);
			UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);

			addr = prev->cfa + reg->addr;
			frame->cfa = __raw_readl(addr);

		} else {
			/*
			 * Again, this is the first invocation of this
			 * recursive function. We need to physically
			 * read the contents of a register in order to
			 * get the Canonical Frame Address for this
			 * function.
			 */
			frame->cfa = dwarf_read_arch_reg(frame->cfa_register);
		}

		frame->cfa += frame->cfa_offset;
		break;
	default:
		UNWINDER_BUG();
	}

	reg = dwarf_frame_reg(frame, DWARF_ARCH_RA_REG);

	/*
	 * If we haven't seen the return address register or the return
	 * address column is undefined then we must assume that this is
	 * the end of the callstack.
	 */
	if (!reg || reg->flags == DWARF_UNDEFINED)
		goto bail;

	UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);

	addr = frame->cfa + reg->addr;
	frame->return_addr = __raw_readl(addr);

	return frame;

bail:
	dwarf_frame_free_regs(frame);
	mempool_free(frame, dwarf_frame_pool);
	return NULL;
}

static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
			   unsigned char *end)
{
	struct dwarf_cie *cie;
	unsigned long flags;
	int count;

	cie = kzalloc(sizeof(*cie), GFP_KERNEL);
	if (!cie)
		return -ENOMEM;

	cie->length = len;

	/*
	 * Record the offset into the .eh_frame section
	 * for this CIE. It allows this CIE to be
	 * quickly and easily looked up from the
	 * corresponding FDE.
	 */
	cie->cie_pointer = (unsigned long)entry;

	cie->version = *(char *)p++;
	UNWINDER_BUG_ON(cie->version != 1);

	cie->augmentation = p;
	p += strlen(cie->augmentation) + 1;

	count = dwarf_read_uleb128(p, &cie->code_alignment_factor);
	p += count;

	count = dwarf_read_leb128(p, &cie->data_alignment_factor);
	p += count;

	/*
	 * Which column in the rule table contains the
	 * return address?
	 */
	if (cie->version == 1) {
		cie->return_address_reg = __raw_readb(p);
		p++;
	} else {
		count = dwarf_read_uleb128(p, &cie->return_address_reg);
		p += count;
	}

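	/*
	 * A leading 'z' in the augmentation string means a ULEB128
	 * augmentation-data length follows the return address column;
	 * each remaining character then describes one piece of
	 * augmentation data, handled in the loop below.
	 */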
	if (cie->augmentation[0] == 'z') {
		unsigned int length, count;
		cie->flags |= DWARF_CIE_Z_AUGMENTATION;

		count = dwarf_read_uleb128(p, &length);
		p += count;

		UNWINDER_BUG_ON((unsigned char *)p > end);

		cie->initial_instructions = p + length;
		cie->augmentation++;
	}

	while (*cie->augmentation) {
		/*
		 * "L" indicates a byte showing how the
		 * LSDA pointer is encoded. Skip it.
		 */
		if (*cie->augmentation == 'L') {
			p++;
			cie->augmentation++;
		} else if (*cie->augmentation == 'R') {
			/*
			 * "R" indicates a byte showing
			 * how FDE addresses are
			 * encoded.
			 */
			cie->encoding = *(char *)p++;
			cie->augmentation++;
		} else if (*cie->augmentation == 'P') {
			/*
			 * "P" indicates a personality
			 * routine in the CIE
			 * augmentation.
			 */
			UNWINDER_BUG();
		} else if (*cie->augmentation == 'S') {
			UNWINDER_BUG();
		} else {
			/*
			 * Unknown augmentation. Assume
			 * 'z' augmentation.
			 */
			p = cie->initial_instructions;
			UNWINDER_BUG_ON(!p);
			break;
		}
	}

	cie->initial_instructions = p;
	cie->instructions_end = end;

	/* Add to list */
	spin_lock_irqsave(&dwarf_cie_lock, flags);
	list_add_tail(&cie->link, &dwarf_cie_list);
	spin_unlock_irqrestore(&dwarf_cie_lock, flags);

	return 0;
}

static int dwarf_parse_fde(void *entry, u32 entry_type,
			   void *start, unsigned long len,
			   unsigned char *end)
{
	struct dwarf_fde *fde;
	struct dwarf_cie *cie;
	unsigned long flags;
	int count;
	void *p = start;

	fde = kzalloc(sizeof(*fde), GFP_KERNEL);
	if (!fde)
		return -ENOMEM;

	fde->length = len;

	/*
	 * In a .eh_frame section the CIE pointer is the
	 * delta between the address of the CIE pointer
	 * field within the FDE and the start of the CIE,
	 * so work back to the CIE's address here.
	 */
	fde->cie_pointer = (unsigned long)(p - entry_type - 4);

	cie = dwarf_lookup_cie(fde->cie_pointer);
	fde->cie = cie;

	if (cie->encoding)
		count = dwarf_read_encoded_value(p, &fde->initial_location,
						 cie->encoding);
	else
		count = dwarf_read_addr(p, &fde->initial_location);

	p += count;

	if (cie->encoding)
		count = dwarf_read_encoded_value(p, &fde->address_range,
						 cie->encoding & 0x0f);
	else
		count = dwarf_read_addr(p, &fde->address_range);

	p += count;

	if (fde->cie->flags & DWARF_CIE_Z_AUGMENTATION) {
		unsigned int length;
		count = dwarf_read_uleb128(p, &length);
		p += count + length;
	}

	/* Call frame instructions. */
	fde->instructions = p;
	fde->end = end;

	/* Add to list. */
	spin_lock_irqsave(&dwarf_fde_lock, flags);
	list_add_tail(&fde->link, &dwarf_fde_list);
	spin_unlock_irqrestore(&dwarf_fde_lock, flags);

	return 0;
}

static void dwarf_unwinder_dump(struct task_struct *task,
				struct pt_regs *regs,
				unsigned long *sp,
				const struct stacktrace_ops *ops,
				void *data)
{
	struct dwarf_frame *frame, *_frame;
	unsigned long return_addr;

	_frame = NULL;
	return_addr = 0;

	while (1) {
		frame = dwarf_unwind_stack(return_addr, _frame);

		if (_frame) {
			dwarf_frame_free_regs(_frame);
			mempool_free(_frame, dwarf_frame_pool);
		}

		_frame = frame;

		if (!frame || !frame->return_addr)
			break;

		return_addr = frame->return_addr;
		ops->address(data, return_addr, 1);
	}
}

static struct unwinder dwarf_unwinder = {
	.name = "dwarf-unwinder",
	.dump = dwarf_unwinder_dump,
	.rating = 150,
};

static void dwarf_unwinder_cleanup(void)
{
	struct dwarf_cie *cie, *cie_tmp;
	struct dwarf_fde *fde, *fde_tmp;

	/*
	 * Deallocate all the memory allocated for the DWARF unwinder.
	 * Traverse all the FDE/CIE lists and remove and free all the
	 * memory associated with those data structures.
	 *
	 * Use the _safe iterators because each entry is freed while
	 * the lists are still being walked.
	 */
	list_for_each_entry_safe(cie, cie_tmp, &dwarf_cie_list, link)
		kfree(cie);

	list_for_each_entry_safe(fde, fde_tmp, &dwarf_fde_list, link)
		kfree(fde);

	kmem_cache_destroy(dwarf_reg_cachep);
	kmem_cache_destroy(dwarf_frame_cachep);
}

/**
 * dwarf_unwinder_init - initialise the dwarf unwinder
 *
 * Build the data structures describing the .eh_frame section to
 * make it easier to lookup CIE and FDE entries. Because the
 * .eh_frame section is packed as tightly as possible it is not
 * easy to lookup the FDE for a given PC, so we build a list of FDE
 * and CIE entries that make it easier.
 */
static int __init dwarf_unwinder_init(void)
{
	u32 entry_type;
	void *p, *entry;
	int count, err = 0;
	unsigned long len;
	unsigned int c_entries, f_entries;
	unsigned char *end;

	INIT_LIST_HEAD(&dwarf_cie_list);
	INIT_LIST_HEAD(&dwarf_fde_list);

	c_entries = 0;
	f_entries = 0;
	entry = &__start_eh_frame;

	dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
			sizeof(struct dwarf_frame), 0,
			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);

	dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
			sizeof(struct dwarf_reg), 0,
			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);

	dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
					  mempool_alloc_slab,
					  mempool_free_slab,
					  dwarf_frame_cachep);

	dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
					mempool_alloc_slab,
					mempool_free_slab,
					dwarf_reg_cachep);

	while ((char *)entry < __stop_eh_frame) {
		p = entry;

		count = dwarf_entry_len(p, &len);
		if (count == 0) {
			/*
			 * We read a bogus length field value. There is
			 * nothing we can do here apart from disabling
			 * the DWARF unwinder. We can't even skip this
			 * entry and move to the next one because 'len'
			 * tells us where our next entry is.
			 */
			goto out;
		} else
			p += count;

		/* initial length does not include itself */
		end = p + len;

		entry_type = get_unaligned((u32 *)p);
		p += 4;
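
		/*
		 * In .eh_frame the word after the length is zero for a
		 * CIE and a (non-zero) CIE pointer for an FDE, which is
		 * how the two entry types are distinguished below.
		 */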
		if (entry_type == DW_EH_FRAME_CIE) {
			err = dwarf_parse_cie(entry, p, len, end);
			if (err < 0)
				goto out;
			else
				c_entries++;
		} else {
			err = dwarf_parse_fde(entry, entry_type, p, len, end);
			if (err < 0)
				goto out;
			else
				f_entries++;
		}

		entry = (char *)entry + len + 4;
	}

	printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n",
	       c_entries, f_entries);

	err = unwinder_register(&dwarf_unwinder);
	if (err)
		goto out;

	return 0;

out:
	printk(KERN_ERR "Failed to initialise DWARF unwinder: %d\n", err);
	dwarf_unwinder_cleanup();
	return -EINVAL;
}
early_initcall(dwarf_unwinder_init);