/*
 * Copyright (C) 2009 Matt Fleming <matt@console-pimps.org>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * This is an implementation of a DWARF unwinder. Its main purpose is
 * for generating stacktrace information. Based on the DWARF 3
 * specification from http://www.dwarfstd.org.
 *
 * TODO:
 *	- DWARF64 doesn't work.
 *	- Registers with DWARF_VAL_OFFSET rules aren't handled properly.
 */

/* #define DEBUG */
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/mm.h>
#include <linux/elf.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/dwarf.h>
#include <asm/unwinder.h>
#include <asm/sections.h>
#include <asm/unaligned.h>
#include <asm/stacktrace.h>

/* Reserve enough memory for two stack frames */
#define DWARF_FRAME_MIN_REQ	2
/* ... with 4 registers per frame. */
#define DWARF_REG_MIN_REQ	(DWARF_FRAME_MIN_REQ * 4)

static struct kmem_cache *dwarf_frame_cachep;
static mempool_t *dwarf_frame_pool;

static struct kmem_cache *dwarf_reg_cachep;
static mempool_t *dwarf_reg_pool;

static struct rb_root cie_root;
static DEFINE_SPINLOCK(dwarf_cie_lock);

static struct rb_root fde_root;
static DEFINE_SPINLOCK(dwarf_fde_lock);

static struct dwarf_cie *cached_cie;

/**
 * dwarf_frame_alloc_reg - allocate memory for a DWARF register
 * @frame: the DWARF frame whose list of registers we insert on
 * @reg_num: the register number
 *
 * Allocate space for, and initialise, a dwarf reg from
 * dwarf_reg_pool and insert it onto the (unsorted) linked-list of
 * dwarf registers for @frame.
 *
 * Return the initialised DWARF reg.
 */
static struct dwarf_reg *dwarf_frame_alloc_reg(struct dwarf_frame *frame,
					       unsigned int reg_num)
{
	struct dwarf_reg *reg;

	reg = mempool_alloc(dwarf_reg_pool, GFP_ATOMIC);
	if (!reg) {
		printk(KERN_WARNING "Unable to allocate a DWARF register\n");
		/*
		 * Let's just bomb hard here, we have no way to
		 * gracefully recover.
		 */
		UNWINDER_BUG();
	}

	reg->number = reg_num;
	reg->addr = 0;
	reg->flags = 0;

	list_add(&reg->link, &frame->reg_list);

	return reg;
}

static void dwarf_frame_free_regs(struct dwarf_frame *frame)
{
	struct dwarf_reg *reg, *n;

	list_for_each_entry_safe(reg, n, &frame->reg_list, link) {
		list_del(&reg->link);
		mempool_free(reg, dwarf_reg_pool);
	}
}

/**
 * dwarf_frame_reg - return a DWARF register
 * @frame: the DWARF frame to search in for @reg_num
 * @reg_num: the register number to search for
 *
 * Lookup and return the dwarf reg @reg_num for this frame. Return
 * NULL if @reg_num is an invalid register number.
 */
static struct dwarf_reg *dwarf_frame_reg(struct dwarf_frame *frame,
					 unsigned int reg_num)
{
	struct dwarf_reg *reg;

	list_for_each_entry(reg, &frame->reg_list, link) {
		if (reg->number == reg_num)
			return reg;
	}

	return NULL;
}

/**
 * dwarf_read_addr - read dwarf data
 * @src: source address of data
 * @dst: destination address to store the data to
 *
 * Read 'n' bytes from @src, where 'n' is the size of an address on
 * the native machine. We have to be careful when reading from @src
 * and writing to @dst because they can be arbitrarily aligned.
 * Return 'n', the number of bytes read.
 */
static inline int dwarf_read_addr(unsigned long *src, unsigned long *dst)
{
	u32 val = get_unaligned(src);
	put_unaligned(val, dst);
	return sizeof(unsigned long *);
}

/**
 * dwarf_read_uleb128 - read unsigned LEB128 data
 * @addr: the address where the ULEB128 data is stored
 * @ret: address to store the result
 *
 * Decode an unsigned LEB128 encoded datum. The algorithm is taken
 * from Appendix C of the DWARF 3 spec. For information on the
 * encodings refer to section "7.6 - Variable Length Data". Return
 * the number of bytes read.
 */
static inline unsigned long dwarf_read_uleb128(char *addr, unsigned int *ret)
{
	unsigned int result;
	unsigned char byte;
	int shift, count;

	result = 0;
	shift = 0;
	count = 0;

	while (1) {
		byte = __raw_readb(addr);
		addr++;
		count++;

		result |= (byte & 0x7f) << shift;
		shift += 7;

		if (!(byte & 0x80))
			break;
	}

	*ret = result;

	return count;
}
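
/*
 * Illustrative example (taken from the DWARF spec, not extra code in
 * this file): the value 624485 (0x98765) is encoded as the ULEB128
 * byte sequence 0xe5 0x8e 0x26, which the loop above accumulates as
 *
 *	0xe5 & 0x7f = 0x65, shift  0: result  = 0x65
 *	0x8e & 0x7f = 0x0e, shift  7: result |= 0x0e << 7
 *	0x26 & 0x7f = 0x26, shift 14: result |= 0x26 << 14
 *
 * giving *ret = 624485 and a return value of 3, the bytes consumed.
 */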

/**
 * dwarf_read_leb128 - read signed LEB128 data
 * @addr: the address of the LEB128 encoded data
 * @ret: address to store the result
 *
 * Decode signed LEB128 data. The algorithm is taken from Appendix
 * C of the DWARF 3 spec. Return the number of bytes read.
 */
static inline unsigned long dwarf_read_leb128(char *addr, int *ret)
{
	unsigned char byte;
	int result, shift;
	int num_bits;
	int count;

	result = 0;
	shift = 0;
	count = 0;

	while (1) {
		byte = __raw_readb(addr);
		addr++;
		result |= (byte & 0x7f) << shift;
		shift += 7;
		count++;

		if (!(byte & 0x80))
			break;
	}

	/* The number of bits in a signed integer. */
	num_bits = 8 * sizeof(result);

	if ((shift < num_bits) && (byte & 0x40))
		result |= (-1 << shift);

	*ret = result;

	return count;
}
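
/*
 * Illustrative example (from the DWARF spec): -123456 is encoded as
 * the signed LEB128 sequence 0xc0 0xbb 0x78. The three 7-bit groups
 * accumulate to 0x1e1dc0 with shift == 21, and because bit 6 of the
 * final byte is set (0x78 & 0x40), the value is sign-extended with
 * result |= (-1 << 21), yielding *ret = -123456 and a count of 3.
 */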

/**
 * dwarf_read_encoded_value - return the decoded value at @addr
 * @addr: the address of the encoded value
 * @val: where to write the decoded value
 * @encoding: the encoding with which we can decode @addr
 *
 * GCC emits encoded addresses in the .eh_frame FDE entries. Decode
 * the value at @addr using @encoding. The decoded value is written
 * to @val and the number of bytes read is returned.
 */
static int dwarf_read_encoded_value(char *addr, unsigned long *val,
				    char encoding)
{
	unsigned long decoded_addr = 0;
	int count = 0;

	switch (encoding & 0x70) {
	case DW_EH_PE_absptr:
		break;
	case DW_EH_PE_pcrel:
		decoded_addr = (unsigned long)addr;
		break;
	default:
		pr_debug("encoding=0x%x\n", (encoding & 0x70));
		UNWINDER_BUG();
	}

	if ((encoding & 0x07) == 0x00)
		encoding |= DW_EH_PE_udata4;

	switch (encoding & 0x0f) {
	case DW_EH_PE_sdata4:
	case DW_EH_PE_udata4:
		count += 4;
		decoded_addr += get_unaligned((u32 *)addr);
		__raw_writel(decoded_addr, val);
		break;
	default:
		pr_debug("encoding=0x%x\n", encoding);
		UNWINDER_BUG();
	}

	return count;
}
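
/*
 * Illustrative case: GCC commonly emits FDE addresses with the
 * encoding DW_EH_PE_pcrel | DW_EH_PE_sdata4. For that combination
 * the code above seeds decoded_addr with the run-time address of
 * the encoded field itself and then adds the 32-bit value stored
 * there, turning a self-relative offset into an absolute address.
 */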

/**
 * dwarf_entry_len - return the length of an FDE or CIE
 * @addr: the address of the entry
 * @len: the length of the entry
 *
 * Read the initial_length field of the entry and store the size of
 * the entry in @len. We return the number of bytes read. Return a
 * count of 0 on error.
 */
static inline int dwarf_entry_len(char *addr, unsigned long *len)
{
	u32 initial_len;
	int count;

	initial_len = get_unaligned((u32 *)addr);
	count = 4;

	/*
	 * An initial length field value in the range DW_EXT_LO -
	 * DW_EXT_HI indicates an extension, and should not be
	 * interpreted as a length. The only extension that we currently
	 * understand is the use of DWARF64 addresses.
	 */
	if (initial_len >= DW_EXT_LO && initial_len <= DW_EXT_HI) {
		/*
		 * The 64-bit length field immediately follows the
		 * compulsory 32-bit length field.
		 */
		if (initial_len == DW_EXT_DWARF64) {
			*len = get_unaligned((u64 *)(addr + 4));
			count = 12;
		} else {
			printk(KERN_WARNING "Unknown DWARF extension\n");
			count = 0;
		}
	} else
		*len = initial_len;

	return count;
}
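
/*
 * Illustrative layout, assuming a DWARF64 entry: the first four
 * bytes hold the escape value 0xffffffff (DW_EXT_DWARF64) and the
 * real 64-bit length immediately follows,
 *
 *	offset 0: ff ff ff ff              escape value
 *	offset 4: xx xx xx xx xx xx xx xx  64-bit length
 *
 * so dwarf_entry_len() returns 12. A plain 32-bit entry returns 4,
 * with *len taken straight from the first word.
 */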

/**
 * dwarf_lookup_cie - locate the cie
 * @cie_ptr: pointer to help with lookup
 */
static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
{
	struct rb_node **rb_node = &cie_root.rb_node;
	struct dwarf_cie *cie = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dwarf_cie_lock, flags);

	/*
	 * We've cached the last CIE we looked up because chances are
	 * that the FDE wants this CIE.
	 */
	if (cached_cie && cached_cie->cie_pointer == cie_ptr) {
		cie = cached_cie;
		goto out;
	}

	while (*rb_node) {
		struct dwarf_cie *cie_tmp;

		cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node);
		BUG_ON(!cie_tmp);

		if (cie_ptr == cie_tmp->cie_pointer) {
			cie = cie_tmp;
			cached_cie = cie_tmp;
			goto out;
		} else {
			if (cie_ptr < cie_tmp->cie_pointer)
				rb_node = &(*rb_node)->rb_left;
			else
				rb_node = &(*rb_node)->rb_right;
		}
	}

out:
	spin_unlock_irqrestore(&dwarf_cie_lock, flags);
	return cie;
}

/**
 * dwarf_lookup_fde - locate the FDE that covers pc
 * @pc: the program counter
 */
struct dwarf_fde *dwarf_lookup_fde(unsigned long pc)
{
	struct rb_node **rb_node = &fde_root.rb_node;
	struct dwarf_fde *fde = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dwarf_fde_lock, flags);

	while (*rb_node) {
		struct dwarf_fde *fde_tmp;
		unsigned long tmp_start, tmp_end;

		fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node);
		BUG_ON(!fde_tmp);

		tmp_start = fde_tmp->initial_location;
		tmp_end = fde_tmp->initial_location + fde_tmp->address_range;

		if (pc < tmp_start) {
			rb_node = &(*rb_node)->rb_left;
		} else {
			if (pc < tmp_end) {
				fde = fde_tmp;
				goto out;
			} else
				rb_node = &(*rb_node)->rb_right;
		}
	}

out:
	spin_unlock_irqrestore(&dwarf_fde_lock, flags);

	return fde;
}
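
/*
 * Illustrative lookup, with hypothetical addresses: given FDEs
 * covering [0x1000, 0x1040) and [0x1040, 0x10a0) in the tree,
 * dwarf_lookup_fde(0x1058) walks right past the first range
 * (pc >= tmp_end) and returns the second FDE, while a pc below
 * every range descends left, finds nothing and returns NULL.
 */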

/**
 * dwarf_cfa_execute_insns - execute instructions to calculate a CFA
 * @insn_start: address of the first instruction
 * @insn_end: address of the last instruction
 * @cie: the CIE for this function
 * @fde: the FDE for this function
 * @frame: the instructions calculate the CFA for this frame
 * @pc: the program counter of the address we're interested in
 *
 * Execute the Call Frame instruction sequence starting at
 * @insn_start and ending at @insn_end. The instructions describe
 * how to calculate the Canonical Frame Address of a stackframe.
 * Store the results in @frame.
 */
static int dwarf_cfa_execute_insns(unsigned char *insn_start,
				   unsigned char *insn_end,
				   struct dwarf_cie *cie,
				   struct dwarf_fde *fde,
				   struct dwarf_frame *frame,
				   unsigned long pc)
{
	unsigned char insn;
	unsigned char *current_insn;
	unsigned int count, delta, reg, expr_len, offset;
	struct dwarf_reg *regp;

	current_insn = insn_start;

	while (current_insn < insn_end && frame->pc <= pc) {
		insn = __raw_readb(current_insn++);

		/*
		 * Firstly, handle the opcodes that embed their operands
		 * in the instructions.
		 */
		switch (DW_CFA_opcode(insn)) {
		case DW_CFA_advance_loc:
			delta = DW_CFA_operand(insn);
			delta *= cie->code_alignment_factor;
			frame->pc += delta;
			continue;
			/* NOTREACHED */
		case DW_CFA_offset:
			reg = DW_CFA_operand(insn);
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->addr = offset;
			regp->flags |= DWARF_REG_OFFSET;
			continue;
			/* NOTREACHED */
		case DW_CFA_restore:
			reg = DW_CFA_operand(insn);
			continue;
			/* NOTREACHED */
		}

		/*
		 * Secondly, handle the opcodes that don't embed their
		 * operands in the instruction.
		 */
		switch (insn) {
		case DW_CFA_nop:
			continue;
		case DW_CFA_advance_loc1:
			delta = *current_insn++;
			frame->pc += delta * cie->code_alignment_factor;
			break;
		case DW_CFA_advance_loc2:
			delta = get_unaligned((u16 *)current_insn);
			current_insn += 2;
			frame->pc += delta * cie->code_alignment_factor;
			break;
		case DW_CFA_advance_loc4:
			delta = get_unaligned((u32 *)current_insn);
			current_insn += 4;
			frame->pc += delta * cie->code_alignment_factor;
			break;
		case DW_CFA_offset_extended:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			break;
		case DW_CFA_restore_extended:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			break;
		case DW_CFA_undefined:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_UNDEFINED;
			break;
		case DW_CFA_def_cfa:
			count = dwarf_read_uleb128(current_insn,
						   &frame->cfa_register);
			current_insn += count;
			count = dwarf_read_uleb128(current_insn,
						   &frame->cfa_offset);
			current_insn += count;

			frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
			break;
		case DW_CFA_def_cfa_register:
			count = dwarf_read_uleb128(current_insn,
						   &frame->cfa_register);
			current_insn += count;
			frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
			break;
		case DW_CFA_def_cfa_offset:
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			frame->cfa_offset = offset;
			break;
		case DW_CFA_def_cfa_expression:
			count = dwarf_read_uleb128(current_insn, &expr_len);
			current_insn += count;

			frame->cfa_expr = current_insn;
			frame->cfa_expr_len = expr_len;
			current_insn += expr_len;

			frame->flags |= DWARF_FRAME_CFA_REG_EXP;
			break;
		case DW_CFA_offset_extended_sf:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_leb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_REG_OFFSET;
			regp->addr = offset;
			break;
		case DW_CFA_val_offset:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_leb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_VAL_OFFSET;
			regp->addr = offset;
			break;
		case DW_CFA_GNU_args_size:
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			break;
		case DW_CFA_GNU_negative_offset_extended:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;

			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_REG_OFFSET;
			regp->addr = -offset;
			break;
		default:
			pr_debug("unhandled DWARF instruction 0x%x\n", insn);
			UNWINDER_BUG();
			break;
		}
	}

	return 0;
}
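
/*
 * Illustrative (hypothetical) instruction stream for a function that
 * saves its return address and sets up a frame pointer, assuming
 * code_alignment_factor == 2 and data_alignment_factor == -4:
 *
 *	DW_CFA_def_cfa: r15 ofs 4	CFA = r15 + 4
 *	DW_CFA_offset: r17, 1		r17 saved at CFA + 1 * -4
 *	DW_CFA_advance_loc: 2		frame->pc += 2 * 2
 *	DW_CFA_def_cfa_register: r14	CFA now computed from r14
 *
 * Execution stops once frame->pc passes the pc we were asked about,
 * leaving exactly the register rules in force at that address.
 */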

/**
 * dwarf_free_frame - free the memory allocated for @frame
 * @frame: the frame to free
 */
void dwarf_free_frame(struct dwarf_frame *frame)
{
	dwarf_frame_free_regs(frame);
	mempool_free(frame, dwarf_frame_pool);
}

extern void ret_from_irq(void);

/**
 * dwarf_unwind_stack - unwind the stack
 *
 * @pc: address of the function to unwind
 * @prev: struct dwarf_frame of the previous stackframe on the callstack
 *
 * Return a struct dwarf_frame representing the most recent frame
 * on the callstack. Each of the lower (older) stack frames is
 * linked via the "prev" member.
 */
struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
				       struct dwarf_frame *prev)
{
	struct dwarf_frame *frame;
	struct dwarf_cie *cie;
	struct dwarf_fde *fde;
	struct dwarf_reg *reg;
	unsigned long addr;

	/*
	 * If we're starting at the top of the stack we need to get the
	 * contents of a physical register to get the CFA in order to
	 * begin the virtual unwinding of the stack.
	 *
	 * NOTE: the return address is guaranteed to be set up by the
	 * time this function makes its first function call.
	 */
	if (!pc || !prev)
		pc = (unsigned long)current_text_addr();

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/*
	 * If our stack has been patched by the function graph tracer
	 * then we might see the address of return_to_handler() where we
	 * expected to find the real return address.
	 */
	if (pc == (unsigned long)&return_to_handler) {
		int index = current->curr_ret_stack;

		/*
		 * We currently have no way of tracking how many
		 * return_to_handler()'s we've seen. If there is more
		 * than one patched return address on our stack,
		 * complain loudly.
		 */
		WARN_ON(index > 0);

		pc = current->ret_stack[index].ret;
	}
#endif

	frame = mempool_alloc(dwarf_frame_pool, GFP_ATOMIC);
	if (!frame) {
		printk(KERN_ERR "Unable to allocate a dwarf frame\n");
		UNWINDER_BUG();
	}

	INIT_LIST_HEAD(&frame->reg_list);
	frame->flags = 0;
	frame->prev = prev;
	frame->return_addr = 0;

	fde = dwarf_lookup_fde(pc);
	if (!fde) {
		/*
		 * This is our normal exit path. There are two reasons
		 * why we might exit here,
		 *
		 *	a) pc has no associated DWARF frame info and so
		 *	we don't know how to unwind this frame. This is
		 *	usually the case when we're trying to unwind a
		 *	frame that was called from some assembly code
		 *	that has no DWARF info, e.g. syscalls.
		 *
		 *	b) the DWARF info for pc is bogus. There's
		 *	really no way to distinguish this case from the
		 *	case above, which sucks because we could print a
		 *	warning here.
		 */
		goto bail;
	}

	cie = dwarf_lookup_cie(fde->cie_pointer);

	frame->pc = fde->initial_location;

	/* CIE initial instructions */
	dwarf_cfa_execute_insns(cie->initial_instructions,
				cie->instructions_end, cie, fde,
				frame, pc);

	/* FDE instructions */
	dwarf_cfa_execute_insns(fde->instructions, fde->end, cie,
				fde, frame, pc);

	/* Calculate the CFA */
	switch (frame->flags) {
	case DWARF_FRAME_CFA_REG_OFFSET:
		if (prev) {
			reg = dwarf_frame_reg(prev, frame->cfa_register);
			UNWINDER_BUG_ON(!reg);
			UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);

			addr = prev->cfa + reg->addr;
			frame->cfa = __raw_readl(addr);

		} else {
			/*
			 * Again, we're starting from the top of the
			 * stack. We need to physically read
			 * the contents of a register in order to get
			 * the Canonical Frame Address for this
			 * function.
			 */
			frame->cfa = dwarf_read_arch_reg(frame->cfa_register);
		}

		frame->cfa += frame->cfa_offset;
		break;
	default:
		UNWINDER_BUG();
	}

	reg = dwarf_frame_reg(frame, DWARF_ARCH_RA_REG);

	/*
	 * If we haven't seen the return address register or the return
	 * address column is undefined then we must assume that this is
	 * the end of the callstack.
	 */
	if (!reg || reg->flags == DWARF_UNDEFINED)
		goto bail;

	UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);

	addr = frame->cfa + reg->addr;
	frame->return_addr = __raw_readl(addr);

	/*
	 * Ah, the joys of unwinding through interrupts.
	 *
	 * Interrupts are tricky - the DWARF info needs to be _really_
	 * accurate and unfortunately I'm seeing a lot of bogus DWARF
	 * info. For example, I've seen interrupts occur in epilogues
	 * just after the frame pointer (r14) had been restored. The
	 * problem was that the DWARF info claimed that the CFA could be
	 * reached by using the value of the frame pointer before it was
	 * restored.
	 *
	 * So until the compiler can be trusted to produce reliable
	 * DWARF info when it really matters, let's stop unwinding once
	 * we've calculated the function that was interrupted.
	 */
	if (prev && prev->pc == (unsigned long)ret_from_irq)
		frame->return_addr = 0;

	return frame;

bail:
	dwarf_free_frame(frame);
	return NULL;
}
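
/*
 * A minimal usage sketch (dwarf_unwinder_dump() below is the real
 * in-tree consumer): each returned frame must eventually be handed
 * back to dwarf_free_frame(),
 *
 *	frame = dwarf_unwind_stack(0, NULL);
 *	while (frame && frame->return_addr) {
 *		prev = frame;
 *		frame = dwarf_unwind_stack(prev->return_addr, prev);
 *		dwarf_free_frame(prev);
 *	}
 *	if (frame)
 *		dwarf_free_frame(frame);
 */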

static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
			   unsigned char *end, struct module *mod)
{
	struct rb_node **rb_node = &cie_root.rb_node;
	struct rb_node *parent = *rb_node;
	struct dwarf_cie *cie;
	unsigned long flags;
	int count;

	cie = kzalloc(sizeof(*cie), GFP_KERNEL);
	if (!cie)
		return -ENOMEM;

	cie->length = len;

	/*
	 * Record the address of this CIE entry. An FDE's
	 * CIE pointer resolves to this address, which
	 * allows the CIE to be quickly and easily looked
	 * up from the corresponding FDE.
	 */
	cie->cie_pointer = (unsigned long)entry;

	cie->version = *(char *)p++;
	UNWINDER_BUG_ON(cie->version != 1);

	cie->augmentation = p;
	p += strlen(cie->augmentation) + 1;

	count = dwarf_read_uleb128(p, &cie->code_alignment_factor);
	p += count;

	count = dwarf_read_leb128(p, &cie->data_alignment_factor);
	p += count;

	/*
	 * Which column in the rule table contains the
	 * return address?
	 */
	if (cie->version == 1) {
		cie->return_address_reg = __raw_readb(p);
		p++;
	} else {
		count = dwarf_read_uleb128(p, &cie->return_address_reg);
		p += count;
	}

	if (cie->augmentation[0] == 'z') {
		unsigned int length, count;
		cie->flags |= DWARF_CIE_Z_AUGMENTATION;

		count = dwarf_read_uleb128(p, &length);
		p += count;

		UNWINDER_BUG_ON((unsigned char *)p > end);

		cie->initial_instructions = p + length;
		cie->augmentation++;
	}

	while (*cie->augmentation) {
		/*
		 * "L" indicates a byte showing how the
		 * LSDA pointer is encoded. Skip it.
		 */
		if (*cie->augmentation == 'L') {
			p++;
			cie->augmentation++;
		} else if (*cie->augmentation == 'R') {
			/*
			 * "R" indicates a byte showing
			 * how FDE addresses are
			 * encoded.
			 */
			cie->encoding = *(char *)p++;
			cie->augmentation++;
		} else if (*cie->augmentation == 'P') {
			/*
			 * "P" indicates a personality
			 * routine in the CIE
			 * augmentation.
			 */
			UNWINDER_BUG();
		} else if (*cie->augmentation == 'S') {
			UNWINDER_BUG();
		} else {
			/*
			 * Unknown augmentation. Assume
			 * 'z' augmentation.
			 */
			p = cie->initial_instructions;
			UNWINDER_BUG_ON(!p);
			break;
		}
	}

	cie->initial_instructions = p;
	cie->instructions_end = end;

	/* Add to list */
	spin_lock_irqsave(&dwarf_cie_lock, flags);

	while (*rb_node) {
		struct dwarf_cie *cie_tmp;

		cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node);

		parent = *rb_node;

		if (cie->cie_pointer < cie_tmp->cie_pointer)
			rb_node = &parent->rb_left;
		else if (cie->cie_pointer >= cie_tmp->cie_pointer)
			rb_node = &parent->rb_right;
		else
			WARN_ON(1);
	}

	rb_link_node(&cie->node, parent, rb_node);
	rb_insert_color(&cie->node, &cie_root);

#ifdef CONFIG_MODULES
	if (mod != NULL)
		list_add_tail(&cie->link, &mod->arch.cie_list);
#endif

	spin_unlock_irqrestore(&dwarf_cie_lock, flags);

	return 0;
}
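
/*
 * Illustrative augmentation, as commonly produced by GCC for
 * .eh_frame: the string "zR" means a ULEB128 augmentation length
 * follows the return-address column, and a single byte after that
 * describes how FDE addresses are encoded (cie->encoding). The loop
 * above walks the string one flag at a time, and falls back to the
 * recorded 'z' length whenever it meets a flag it does not know.
 */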

static int dwarf_parse_fde(void *entry, u32 entry_type,
			   void *start, unsigned long len,
			   unsigned char *end, struct module *mod)
{
	struct rb_node **rb_node = &fde_root.rb_node;
	struct rb_node *parent = *rb_node;
	struct dwarf_fde *fde;
	struct dwarf_cie *cie;
	unsigned long flags;
	int count;
	void *p = start;

	fde = kzalloc(sizeof(*fde), GFP_KERNEL);
	if (!fde)
		return -ENOMEM;

	fde->length = len;

	/*
	 * In a .eh_frame section the CIE pointer is the
	 * delta between the address of the CIE pointer
	 * field within the FDE and the CIE itself. @p
	 * points just past that (4-byte) field, so step
	 * back over it and the delta to find the CIE.
	 */
	fde->cie_pointer = (unsigned long)(p - entry_type - 4);

	cie = dwarf_lookup_cie(fde->cie_pointer);
	fde->cie = cie;

	if (cie->encoding)
		count = dwarf_read_encoded_value(p, &fde->initial_location,
						 cie->encoding);
	else
		count = dwarf_read_addr(p, &fde->initial_location);

	p += count;

	if (cie->encoding)
		count = dwarf_read_encoded_value(p, &fde->address_range,
						 cie->encoding & 0x0f);
	else
		count = dwarf_read_addr(p, &fde->address_range);

	p += count;

	if (fde->cie->flags & DWARF_CIE_Z_AUGMENTATION) {
		unsigned int length;
		count = dwarf_read_uleb128(p, &length);
		p += count + length;
	}

	/* Call frame instructions. */
	fde->instructions = p;
	fde->end = end;

	/* Add to list. */
	spin_lock_irqsave(&dwarf_fde_lock, flags);

	while (*rb_node) {
		struct dwarf_fde *fde_tmp;
		unsigned long tmp_start, tmp_end;
		unsigned long start, end;

		fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node);

		start = fde->initial_location;
		end = fde->initial_location + fde->address_range;

		tmp_start = fde_tmp->initial_location;
		tmp_end = fde_tmp->initial_location + fde_tmp->address_range;

		parent = *rb_node;

		if (start < tmp_start)
			rb_node = &parent->rb_left;
		else if (start >= tmp_end)
			rb_node = &parent->rb_right;
		else
			WARN_ON(1);
	}

	rb_link_node(&fde->node, parent, rb_node);
	rb_insert_color(&fde->node, &fde_root);

#ifdef CONFIG_MODULES
	if (mod != NULL)
		list_add_tail(&fde->link, &mod->arch.fde_list);
#endif

	spin_unlock_irqrestore(&dwarf_fde_lock, flags);

	return 0;
}

static void dwarf_unwinder_dump(struct task_struct *task,
				struct pt_regs *regs,
				unsigned long *sp,
				const struct stacktrace_ops *ops,
				void *data)
{
	struct dwarf_frame *frame, *_frame;
	unsigned long return_addr;

	_frame = NULL;
	return_addr = 0;

	while (1) {
		frame = dwarf_unwind_stack(return_addr, _frame);

		if (_frame)
			dwarf_free_frame(_frame);

		_frame = frame;

		if (!frame || !frame->return_addr)
			break;

		return_addr = frame->return_addr;
		ops->address(data, return_addr, 1);
	}

	if (frame)
		dwarf_free_frame(frame);
}

static struct unwinder dwarf_unwinder = {
	.name = "dwarf-unwinder",
	.dump = dwarf_unwinder_dump,
	.rating = 150,
};


static void dwarf_unwinder_cleanup(void)
{
	struct rb_node **fde_rb_node = &fde_root.rb_node;
	struct rb_node **cie_rb_node = &cie_root.rb_node;

	/*
	 * Deallocate all the memory allocated for the DWARF unwinder.
	 * Traverse the FDE/CIE trees and remove and free all the
	 * memory associated with those data structures.
	 */
	while (*fde_rb_node) {
		struct dwarf_fde *fde;

		fde = rb_entry(*fde_rb_node, struct dwarf_fde, node);
		rb_erase(*fde_rb_node, &fde_root);
		kfree(fde);
	}

	while (*cie_rb_node) {
		struct dwarf_cie *cie;

		cie = rb_entry(*cie_rb_node, struct dwarf_cie, node);
		rb_erase(*cie_rb_node, &cie_root);
		kfree(cie);
	}

	kmem_cache_destroy(dwarf_reg_cachep);
	kmem_cache_destroy(dwarf_frame_cachep);
}

/**
 * dwarf_parse_section - parse DWARF section
 * @eh_frame_start: start address of the .eh_frame section
 * @eh_frame_end: end address of the .eh_frame section
 * @mod: the kernel module containing the .eh_frame section
 *
 * Parse the information in a .eh_frame section.
 */
static int dwarf_parse_section(char *eh_frame_start, char *eh_frame_end,
			       struct module *mod)
{
	u32 entry_type;
	void *p, *entry;
	int count, err = 0;
	unsigned long len = 0;
	unsigned int c_entries, f_entries;
	unsigned char *end;

	c_entries = 0;
	f_entries = 0;
	entry = eh_frame_start;

	while ((char *)entry < eh_frame_end) {
		p = entry;

		count = dwarf_entry_len(p, &len);
		if (count == 0) {
			/*
			 * We read a bogus length field value. There is
			 * nothing we can do here apart from disabling
			 * the DWARF unwinder. We can't even skip this
			 * entry and move to the next one because 'len'
			 * tells us where our next entry is.
			 */
			err = -EINVAL;
			goto out;
		} else
			p += count;

		/* initial length does not include itself */
		end = p + len;

		entry_type = get_unaligned((u32 *)p);
		p += 4;

		if (entry_type == DW_EH_FRAME_CIE) {
			err = dwarf_parse_cie(entry, p, len, end, mod);
			if (err < 0)
				goto out;
			else
				c_entries++;
		} else {
			err = dwarf_parse_fde(entry, entry_type, p, len,
					      end, mod);
			if (err < 0)
				goto out;
			else
				f_entries++;
		}

		entry = (char *)entry + len + 4;
	}

	printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n",
	       c_entries, f_entries);

	return 0;

out:
	return err;
}
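
/*
 * Illustrative .eh_frame layout, assuming one CIE shared by two
 * FDEs, as consumed by the loop above:
 *
 *	[ length | 0 (DW_EH_FRAME_CIE) | CIE fields ... ]
 *	[ length | CIE pointer         | FDE fields ... ]
 *	[ length | CIE pointer         | FDE fields ... ]
 *
 * Every entry starts with its initial length, so the parser can hop
 * from entry to entry even before interpreting the contents.
 */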

#ifdef CONFIG_MODULES
int module_dwarf_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
			  struct module *me)
{
	unsigned int i;
	int err;
	unsigned long start, end;
	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	start = end = 0;

	for (i = 1; i < hdr->e_shnum; i++) {
		/* Alloc bit cleared means "ignore it." */
		if ((sechdrs[i].sh_flags & SHF_ALLOC)
		    && !strcmp(secstrings+sechdrs[i].sh_name, ".eh_frame")) {
			start = sechdrs[i].sh_addr;
			end = start + sechdrs[i].sh_size;
			break;
		}
	}

	/* Did we find the .eh_frame section? */
	if (i != hdr->e_shnum) {
		INIT_LIST_HEAD(&me->arch.cie_list);
		INIT_LIST_HEAD(&me->arch.fde_list);
		err = dwarf_parse_section((char *)start, (char *)end, me);
		if (err) {
			printk(KERN_WARNING "%s: failed to parse DWARF info\n",
			       me->name);
			return err;
		}
	}

	return 0;
}

/**
 * module_dwarf_cleanup - remove FDE/CIEs associated with @mod
 * @mod: the module that is being unloaded
 *
 * Remove any FDEs and CIEs from the global lists that came from
 * @mod's .eh_frame section because @mod is being unloaded.
 */
void module_dwarf_cleanup(struct module *mod)
{
	struct dwarf_fde *fde, *ftmp;
	struct dwarf_cie *cie, *ctmp;
	unsigned long flags;

	spin_lock_irqsave(&dwarf_cie_lock, flags);

	list_for_each_entry_safe(cie, ctmp, &mod->arch.cie_list, link) {
		list_del(&cie->link);
		rb_erase(&cie->node, &cie_root);
		kfree(cie);
	}

	spin_unlock_irqrestore(&dwarf_cie_lock, flags);

	spin_lock_irqsave(&dwarf_fde_lock, flags);

	list_for_each_entry_safe(fde, ftmp, &mod->arch.fde_list, link) {
		list_del(&fde->link);
		rb_erase(&fde->node, &fde_root);
		kfree(fde);
	}

	spin_unlock_irqrestore(&dwarf_fde_lock, flags);
}
#endif /* CONFIG_MODULES */

/**
 * dwarf_unwinder_init - initialise the dwarf unwinder
 *
 * Build the data structures describing the .eh_frame section to
 * make it easier to look up CIE and FDE entries. Because the
 * .eh_frame section is packed as tightly as possible it is not
 * easy to look up the FDE for a given PC, so we build rb-trees of
 * FDE and CIE entries to speed up the search.
 */
static int __init dwarf_unwinder_init(void)
{
	int err;

	dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
			sizeof(struct dwarf_frame), 0,
			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);

	dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
			sizeof(struct dwarf_reg), 0,
			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);

	dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
					  mempool_alloc_slab,
					  mempool_free_slab,
					  dwarf_frame_cachep);

	dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
					mempool_alloc_slab,
					mempool_free_slab,
					dwarf_reg_cachep);

	err = dwarf_parse_section(__start_eh_frame, __stop_eh_frame, NULL);
	if (err)
		goto out;

	err = unwinder_register(&dwarf_unwinder);
	if (err)
		goto out;

	return 0;

out:
	printk(KERN_ERR "Failed to initialise DWARF unwinder: %d\n", err);
	dwarf_unwinder_cleanup();
	return -EINVAL;
}
early_initcall(dwarf_unwinder_init);