/*
 * Copyright (C) 2009 Matt Fleming <matt@console-pimps.org>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * This is an implementation of a DWARF unwinder. Its main purpose is
 * for generating stacktrace information. Based on the DWARF 3
 * specification from http://www.dwarfstd.org.
 *
 * TODO:
 *	- DWARF64 doesn't work.
 *	- Registers with DWARF_VAL_OFFSET rules aren't handled properly.
 */

/* #define DEBUG */
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/mm.h>
#include <linux/elf.h>
#include <linux/ftrace.h>
#include <linux/slab.h>

#include <asm/dwarf.h>
#include <asm/unwinder.h>
#include <asm/sections.h>
#include <asm/unaligned.h>
#include <asm/stacktrace.h>

/* Reserve enough memory for two stack frames */
#define DWARF_FRAME_MIN_REQ	2
/* ... with 4 registers per frame. */
#define DWARF_REG_MIN_REQ	(DWARF_FRAME_MIN_REQ * 4)
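
/*
 * Frame and register records are carved from mempools rather than
 * allocated directly because the unwinder can run in atomic context
 * (the allocations below use GFP_ATOMIC); the pools guarantee a small
 * reserve so that a backtrace can make progress even when the page
 * allocator cannot be called.
 */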

static struct kmem_cache *dwarf_frame_cachep;
static mempool_t *dwarf_frame_pool;

static struct kmem_cache *dwarf_reg_cachep;
static mempool_t *dwarf_reg_pool;

static struct rb_root cie_root;
static DEFINE_SPINLOCK(dwarf_cie_lock);

static struct rb_root fde_root;
static DEFINE_SPINLOCK(dwarf_fde_lock);

static struct dwarf_cie *cached_cie;

/**
 * dwarf_frame_alloc_reg - allocate memory for a DWARF register
 * @frame: the DWARF frame whose list of registers we insert on
 * @reg_num: the register number
 *
 * Allocate space for, and initialise, a dwarf reg from
 * dwarf_reg_pool and insert it onto the (unsorted) linked-list of
 * dwarf registers for @frame.
 *
 * Return the initialised DWARF reg.
 */
static struct dwarf_reg *dwarf_frame_alloc_reg(struct dwarf_frame *frame,
					       unsigned int reg_num)
{
	struct dwarf_reg *reg;

	reg = mempool_alloc(dwarf_reg_pool, GFP_ATOMIC);
	if (!reg) {
		printk(KERN_WARNING "Unable to allocate a DWARF register\n");
		/*
		 * Let's just bomb hard here, we have no way to
		 * gracefully recover.
		 */
		UNWINDER_BUG();
	}

	reg->number = reg_num;
	reg->addr = 0;
	reg->flags = 0;

	list_add(&reg->link, &frame->reg_list);

	return reg;
}

static void dwarf_frame_free_regs(struct dwarf_frame *frame)
{
	struct dwarf_reg *reg, *n;

	list_for_each_entry_safe(reg, n, &frame->reg_list, link) {
		list_del(&reg->link);
		mempool_free(reg, dwarf_reg_pool);
	}
}

/**
 * dwarf_frame_reg - return a DWARF register
 * @frame: the DWARF frame to search in for @reg_num
 * @reg_num: the register number to search for
 *
 * Lookup and return the dwarf reg @reg_num for this frame. Return
 * NULL if @reg_num is an invalid register number.
 */
static struct dwarf_reg *dwarf_frame_reg(struct dwarf_frame *frame,
					 unsigned int reg_num)
{
	struct dwarf_reg *reg;

	list_for_each_entry(reg, &frame->reg_list, link) {
		if (reg->number == reg_num)
			return reg;
	}

	return NULL;
}

/**
 * dwarf_read_addr - read dwarf data
 * @src: source address of data
 * @dst: destination address to store the data to
 *
 * Read 'n' bytes from @src, where 'n' is the size of an address on
 * the native machine (4 bytes on sh32). We have to be careful when
 * reading from @src and writing to @dst, because they can be
 * arbitrarily aligned. Return 'n', the number of bytes read.
 */
static inline int dwarf_read_addr(unsigned long *src, unsigned long *dst)
{
	u32 val = get_unaligned(src);
	put_unaligned(val, dst);
	return sizeof(unsigned long *);
}

/**
 * dwarf_read_uleb128 - read unsigned LEB128 data
 * @addr: the address where the ULEB128 data is stored
 * @ret: address to store the result
 *
 * Decode an unsigned LEB128 encoded datum. The algorithm is taken
 * from Appendix C of the DWARF 3 spec. For information on the
 * encodings refer to section "7.6 - Variable Length Data". Return
 * the number of bytes read.
 */
static inline unsigned long dwarf_read_uleb128(char *addr, unsigned int *ret)
{
	unsigned int result;
	unsigned char byte;
	int shift, count;

	result = 0;
	shift = 0;
	count = 0;

	while (1) {
		byte = __raw_readb(addr);
		addr++;
		count++;

		result |= (byte & 0x7f) << shift;
		shift += 7;

		if (!(byte & 0x80))
			break;
	}

	*ret = result;

	return count;
}
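
/*
 * Worked example (the sample encoding from section 7.6 of the DWARF 3
 * spec): the byte sequence 0xe5 0x8e 0x26 has the continuation bit set
 * on the first two bytes only, so the low seven bits of each byte are
 * accumulated as
 *
 *	0x65 | (0x0e << 7) | (0x26 << 14) = 624485
 *
 * and dwarf_read_uleb128() returns a count of 3.
 */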

/**
 * dwarf_read_leb128 - read signed LEB128 data
 * @addr: the address of the LEB128 encoded data
 * @ret: address to store the result
 *
 * Decode signed LEB128 data. The algorithm is taken from Appendix
 * C of the DWARF 3 spec. Return the number of bytes read.
 */
static inline unsigned long dwarf_read_leb128(char *addr, int *ret)
{
	unsigned char byte;
	int result, shift;
	int num_bits;
	int count;

	result = 0;
	shift = 0;
	count = 0;

	while (1) {
		byte = __raw_readb(addr);
		addr++;
		result |= (byte & 0x7f) << shift;
		shift += 7;
		count++;

		if (!(byte & 0x80))
			break;
	}

	/* The number of bits in a signed integer. */
	num_bits = 8 * sizeof(result);

	/* Sign-extend if bit 6 of the final byte is set. */
	if ((shift < num_bits) && (byte & 0x40))
		result |= -(1 << shift);

	*ret = result;

	return count;
}
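
/*
 * Worked example (from the signed-encoding table in section 7.6 of the
 * DWARF 3 spec): -127 is encoded as 0x81 0x7f. The two 7-bit groups
 * give 0x01 | (0x7f << 7) = 16257, and because bit 6 of the final byte
 * is set the value is sign-extended from bit 14: 16257 - 16384 = -127.
 */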

/**
 * dwarf_read_encoded_value - return the decoded value at @addr
 * @addr: the address of the encoded value
 * @val: where to write the decoded value
 * @encoding: the encoding with which we can decode @addr
 *
 * GCC emits encoded addresses in the .eh_frame FDE entries. Decode
 * the value at @addr using @encoding. The decoded value is written
 * to @val and the number of bytes read is returned.
 */
static int dwarf_read_encoded_value(char *addr, unsigned long *val,
				    char encoding)
{
	unsigned long decoded_addr = 0;
	int count = 0;

	switch (encoding & 0x70) {
	case DW_EH_PE_absptr:
		break;
	case DW_EH_PE_pcrel:
		decoded_addr = (unsigned long)addr;
		break;
	default:
		pr_debug("encoding=0x%x\n", (encoding & 0x70));
		UNWINDER_BUG();
	}

	if ((encoding & 0x07) == 0x00)
		encoding |= DW_EH_PE_udata4;

	switch (encoding & 0x0f) {
	case DW_EH_PE_sdata4:
	case DW_EH_PE_udata4:
		count += 4;
		decoded_addr += get_unaligned((u32 *)addr);
		__raw_writel(decoded_addr, val);
		break;
	default:
		pr_debug("encoding=0x%x\n", encoding);
		UNWINDER_BUG();
	}

	return count;
}
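
/*
 * The encoding byte is split into two nibbles: the high nibble says how
 * the value is applied (e.g. DW_EH_PE_absptr for an absolute value,
 * DW_EH_PE_pcrel for a value relative to the encoded field itself) and
 * the low nibble gives the storage format (e.g. DW_EH_PE_udata4 or
 * DW_EH_PE_sdata4 for 4-byte data). For example, GCC commonly emits
 * 0x1b (DW_EH_PE_pcrel | DW_EH_PE_sdata4) for FDE addresses on 32-bit
 * targets.
 */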

/**
 * dwarf_entry_len - return the length of an FDE or CIE
 * @addr: the address of the entry
 * @len: the length of the entry
 *
 * Read the initial_length field of the entry and store the size of
 * the entry in @len. Return the number of bytes read, or a count of
 * 0 on error.
 */
static inline int dwarf_entry_len(char *addr, unsigned long *len)
{
	u32 initial_len;
	int count;

	initial_len = get_unaligned((u32 *)addr);
	count = 4;

	/*
	 * An initial length field value in the range DW_EXT_LO -
	 * DW_EXT_HI indicates an extension, and should not be
	 * interpreted as a length. The only extension that we currently
	 * understand is the use of DWARF64 addresses.
	 */
	if (initial_len >= DW_EXT_LO && initial_len <= DW_EXT_HI) {
		/*
		 * The 64-bit length field immediately follows the
		 * compulsory 32-bit length field.
		 */
		if (initial_len == DW_EXT_DWARF64) {
			*len = get_unaligned((u64 *)(addr + 4));
			count = 12;
		} else {
			printk(KERN_WARNING "Unknown DWARF extension\n");
			count = 0;
		}
	} else
		*len = initial_len;

	return count;
}
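
/*
 * Layout of the initial length field, for reference: a plain 32-bit
 * value below DW_EXT_LO is the entry length itself, while the value
 * 0xffffffff (DW_EXT_DWARF64) announces that the real length follows
 * as a 64-bit quantity, e.g.
 *
 *	28 00 00 00				-> length 0x28, count 4
 *	ff ff ff ff | 2c 00 00 00 00 00 00 00	-> length 0x2c, count 12
 */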

/**
 * dwarf_lookup_cie - locate the cie
 * @cie_ptr: pointer into the .eh_frame section identifying the CIE
 */
static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
{
	struct rb_node **rb_node = &cie_root.rb_node;
	struct dwarf_cie *cie = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dwarf_cie_lock, flags);

	/*
	 * We've cached the last CIE we looked up because chances are
	 * that the FDE wants this CIE.
	 */
	if (cached_cie && cached_cie->cie_pointer == cie_ptr) {
		cie = cached_cie;
		goto out;
	}

	while (*rb_node) {
		struct dwarf_cie *cie_tmp;

		cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node);
		BUG_ON(!cie_tmp);

		if (cie_ptr == cie_tmp->cie_pointer) {
			cie = cie_tmp;
			cached_cie = cie_tmp;
			goto out;
		} else {
			if (cie_ptr < cie_tmp->cie_pointer)
				rb_node = &(*rb_node)->rb_left;
			else
				rb_node = &(*rb_node)->rb_right;
		}
	}

out:
	spin_unlock_irqrestore(&dwarf_cie_lock, flags);
	return cie;
}

/**
 * dwarf_lookup_fde - locate the FDE that covers pc
 * @pc: the program counter
 */
struct dwarf_fde *dwarf_lookup_fde(unsigned long pc)
{
	struct rb_node **rb_node = &fde_root.rb_node;
	struct dwarf_fde *fde = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dwarf_fde_lock, flags);

	while (*rb_node) {
		struct dwarf_fde *fde_tmp;
		unsigned long tmp_start, tmp_end;

		fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node);
		BUG_ON(!fde_tmp);

		tmp_start = fde_tmp->initial_location;
		tmp_end = fde_tmp->initial_location + fde_tmp->address_range;

		if (pc < tmp_start) {
			rb_node = &(*rb_node)->rb_left;
		} else {
			if (pc < tmp_end) {
				fde = fde_tmp;
				goto out;
			} else
				rb_node = &(*rb_node)->rb_right;
		}
	}

out:
	spin_unlock_irqrestore(&dwarf_fde_lock, flags);

	return fde;
}
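
/*
 * The FDE tree is keyed on initial_location and the ranges
 * [initial_location, initial_location + address_range) never overlap,
 * so the walk above is an ordinary binary search: descend left when pc
 * is below a node's range, return the node when pc falls inside it,
 * and descend right otherwise.
 */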

/**
 * dwarf_cfa_execute_insns - execute instructions to calculate a CFA
 * @insn_start: address of the first instruction
 * @insn_end: address immediately after the last instruction
 * @cie: the CIE for this function
 * @fde: the FDE for this function
 * @frame: the instructions calculate the CFA for this frame
 * @pc: the program counter of the address we're interested in
 *
 * Execute the Call Frame instruction sequence starting at
 * @insn_start and ending at @insn_end. The instructions describe
 * how to calculate the Canonical Frame Address of a stackframe.
 * Store the results in @frame.
 */
static int dwarf_cfa_execute_insns(unsigned char *insn_start,
				   unsigned char *insn_end,
				   struct dwarf_cie *cie,
				   struct dwarf_fde *fde,
				   struct dwarf_frame *frame,
				   unsigned long pc)
{
	unsigned char insn;
	unsigned char *current_insn;
	unsigned int count, delta, reg, expr_len, offset;
	struct dwarf_reg *regp;

	current_insn = insn_start;

	while (current_insn < insn_end && frame->pc <= pc) {
		insn = __raw_readb(current_insn++);

		/*
		 * Firstly, handle the opcodes that embed their operands
		 * in the instructions.
		 */
		switch (DW_CFA_opcode(insn)) {
		case DW_CFA_advance_loc:
			delta = DW_CFA_operand(insn);
			delta *= cie->code_alignment_factor;
			frame->pc += delta;
			continue;
			/* NOTREACHED */
		case DW_CFA_offset:
			reg = DW_CFA_operand(insn);
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->addr = offset;
			regp->flags |= DWARF_REG_OFFSET;
			continue;
			/* NOTREACHED */
		case DW_CFA_restore:
			reg = DW_CFA_operand(insn);
			continue;
			/* NOTREACHED */
		}

		/*
		 * Secondly, handle the opcodes that don't embed their
		 * operands in the instruction.
		 */
		switch (insn) {
		case DW_CFA_nop:
			continue;
		case DW_CFA_advance_loc1:
			delta = *current_insn++;
			frame->pc += delta * cie->code_alignment_factor;
			break;
		case DW_CFA_advance_loc2:
			delta = get_unaligned((u16 *)current_insn);
			current_insn += 2;
			frame->pc += delta * cie->code_alignment_factor;
			break;
		case DW_CFA_advance_loc4:
			delta = get_unaligned((u32 *)current_insn);
			current_insn += 4;
			frame->pc += delta * cie->code_alignment_factor;
			break;
		case DW_CFA_offset_extended:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			break;
		case DW_CFA_restore_extended:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			break;
		case DW_CFA_undefined:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_UNDEFINED;
			break;
		case DW_CFA_def_cfa:
			count = dwarf_read_uleb128(current_insn,
						   &frame->cfa_register);
			current_insn += count;
			count = dwarf_read_uleb128(current_insn,
						   &frame->cfa_offset);
			current_insn += count;

			frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
			break;
		case DW_CFA_def_cfa_register:
			count = dwarf_read_uleb128(current_insn,
						   &frame->cfa_register);
			current_insn += count;
			frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
			break;
		case DW_CFA_def_cfa_offset:
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			frame->cfa_offset = offset;
			break;
		case DW_CFA_def_cfa_expression:
			count = dwarf_read_uleb128(current_insn, &expr_len);
			current_insn += count;

			frame->cfa_expr = current_insn;
			frame->cfa_expr_len = expr_len;
			current_insn += expr_len;

			frame->flags |= DWARF_FRAME_CFA_REG_EXP;
			break;
		case DW_CFA_offset_extended_sf:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_leb128(current_insn, (int *)&offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_REG_OFFSET;
			regp->addr = offset;
			break;
		case DW_CFA_val_offset:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_leb128(current_insn, (int *)&offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_VAL_OFFSET;
			regp->addr = offset;
			break;
		case DW_CFA_GNU_args_size:
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			break;
		case DW_CFA_GNU_negative_offset_extended:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;

			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_REG_OFFSET;
			regp->addr = -offset;
			break;
		default:
			pr_debug("unhandled DWARF instruction 0x%x\n", insn);
			UNWINDER_BUG();
			break;
		}
	}

	return 0;
}
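
/*
 * Illustrative example (assuming the usual SH convention of r15 as the
 * stack pointer and pr as the return-address column): a typical
 * function prologue might emit
 *
 *	DW_CFA_def_cfa: r15 ofs 0
 *	DW_CFA_advance_loc: 2
 *	DW_CFA_def_cfa_offset: 4
 *	DW_CFA_offset: pr at cfa-4
 *
 * which dwarf_cfa_execute_insns() turns into "CFA = r15 + 4, return
 * address stored at CFA - 4" for any pc past the prologue.
 */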

/**
 * dwarf_free_frame - free the memory allocated for @frame
 * @frame: the frame to free
 */
void dwarf_free_frame(struct dwarf_frame *frame)
{
	dwarf_frame_free_regs(frame);
	mempool_free(frame, dwarf_frame_pool);
}

extern void ret_from_irq(void);

/**
 * dwarf_unwind_stack - unwind the stack
 * @pc: address of the function to unwind
 * @prev: struct dwarf_frame of the previous stackframe on the callstack
 *
 * Return a struct dwarf_frame representing the most recent frame
 * on the callstack. Each of the lower (older) stack frames are
 * linked via the "prev" member.
 */
struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
				       struct dwarf_frame *prev)
{
	struct dwarf_frame *frame;
	struct dwarf_cie *cie;
	struct dwarf_fde *fde;
	struct dwarf_reg *reg;
	unsigned long addr;

	/*
	 * If we're starting at the top of the stack we need to get the
	 * contents of a physical register to get the CFA in order to
	 * begin the virtual unwinding of the stack.
	 *
	 * NOTE: the return address is guaranteed to be setup by the
	 * time this function makes its first function call.
	 */
	if (!pc || !prev)
		pc = (unsigned long)current_text_addr();

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/*
	 * If our stack has been patched by the function graph tracer
	 * then we might see the address of return_to_handler() where we
	 * expected to find the real return address.
	 */
	if (pc == (unsigned long)&return_to_handler) {
		int index = current->curr_ret_stack;

		/*
		 * We currently have no way of tracking how many
		 * return_to_handler()'s we've seen. If there is more
		 * than one patched return address on our stack,
		 * complain loudly.
		 */
		WARN_ON(index > 0);

		pc = current->ret_stack[index].ret;
	}
#endif

	frame = mempool_alloc(dwarf_frame_pool, GFP_ATOMIC);
	if (!frame) {
		printk(KERN_ERR "Unable to allocate a dwarf frame\n");
		UNWINDER_BUG();
	}

	INIT_LIST_HEAD(&frame->reg_list);
	frame->flags = 0;
	frame->prev = prev;
	frame->return_addr = 0;

	fde = dwarf_lookup_fde(pc);
	if (!fde) {
		/*
		 * This is our normal exit path. There are two reasons
		 * why we might exit here,
		 *
		 * a) pc has no associated DWARF frame info and so
		 * we don't know how to unwind this frame. This is
		 * usually the case when we're trying to unwind a
		 * frame that was called from some assembly code
		 * that has no DWARF info, e.g. syscalls.
		 *
		 * b) the DWARF info for pc is bogus. There's
		 * really no way to distinguish this case from the
		 * case above, which sucks because we could print a
		 * warning here.
		 */
		goto bail;
	}

	cie = dwarf_lookup_cie(fde->cie_pointer);

	frame->pc = fde->initial_location;

	/* CIE initial instructions */
	dwarf_cfa_execute_insns(cie->initial_instructions,
				cie->instructions_end, cie, fde,
				frame, pc);

	/* FDE instructions */
	dwarf_cfa_execute_insns(fde->instructions, fde->end, cie,
				fde, frame, pc);

	/* Calculate the CFA */
	switch (frame->flags) {
	case DWARF_FRAME_CFA_REG_OFFSET:
		if (prev) {
			reg = dwarf_frame_reg(prev, frame->cfa_register);
			UNWINDER_BUG_ON(!reg);
			UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);

			addr = prev->cfa + reg->addr;
			frame->cfa = __raw_readl(addr);

		} else {
			/*
			 * Again, we're starting from the top of the
			 * stack. We need to physically read
			 * the contents of a register in order to get
			 * the Canonical Frame Address for this
			 * function.
			 */
			frame->cfa = dwarf_read_arch_reg(frame->cfa_register);
		}

		frame->cfa += frame->cfa_offset;
		break;
	default:
		UNWINDER_BUG();
	}

	reg = dwarf_frame_reg(frame, DWARF_ARCH_RA_REG);

	/*
	 * If we haven't seen the return address register or the return
	 * address column is undefined then we must assume that this is
	 * the end of the callstack.
	 */
	if (!reg || reg->flags == DWARF_UNDEFINED)
		goto bail;

	UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);

	addr = frame->cfa + reg->addr;
	frame->return_addr = __raw_readl(addr);

	/*
	 * Ah, the joys of unwinding through interrupts.
	 *
	 * Interrupts are tricky - the DWARF info needs to be _really_
	 * accurate and unfortunately I'm seeing a lot of bogus DWARF
	 * info. For example, I've seen interrupts occur in epilogues
	 * just after the frame pointer (r14) had been restored. The
	 * problem was that the DWARF info claimed that the CFA could be
	 * reached by using the value of the frame pointer before it was
	 * restored.
	 *
	 * So until the compiler can be trusted to produce reliable
	 * DWARF info when it really matters, let's stop unwinding once
	 * we've calculated the function that was interrupted.
	 */
	if (prev && prev->pc == (unsigned long)ret_from_irq)
		frame->return_addr = 0;

	return frame;

bail:
	dwarf_free_frame(frame);
	return NULL;
}

static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
			   unsigned char *end, struct module *mod)
{
	struct rb_node **rb_node = &cie_root.rb_node;
	struct rb_node *parent = *rb_node;
	struct dwarf_cie *cie;
	unsigned long flags;
	int count;

	cie = kzalloc(sizeof(*cie), GFP_KERNEL);
	if (!cie)
		return -ENOMEM;

	cie->length = len;

	/*
	 * Record the offset into the .eh_frame section
	 * for this CIE. It allows this CIE to be
	 * quickly and easily looked up from the
	 * corresponding FDE.
	 */
	cie->cie_pointer = (unsigned long)entry;

	cie->version = *(char *)p++;
	UNWINDER_BUG_ON(cie->version != 1);

	cie->augmentation = p;
	p += strlen(cie->augmentation) + 1;

	count = dwarf_read_uleb128(p, &cie->code_alignment_factor);
	p += count;

	count = dwarf_read_leb128(p, &cie->data_alignment_factor);
	p += count;

	/*
	 * Which column in the rule table contains the
	 * return address?
	 */
	if (cie->version == 1) {
		cie->return_address_reg = __raw_readb(p);
		p++;
	} else {
		count = dwarf_read_uleb128(p, &cie->return_address_reg);
		p += count;
	}

	if (cie->augmentation[0] == 'z') {
		unsigned int length, count;
		cie->flags |= DWARF_CIE_Z_AUGMENTATION;

		count = dwarf_read_uleb128(p, &length);
		p += count;

		UNWINDER_BUG_ON((unsigned char *)p > end);

		cie->initial_instructions = p + length;
		cie->augmentation++;
	}

	while (*cie->augmentation) {
		/*
		 * "L" indicates a byte showing how the
		 * LSDA pointer is encoded. Skip it.
		 */
		if (*cie->augmentation == 'L') {
			p++;
			cie->augmentation++;
		} else if (*cie->augmentation == 'R') {
			/*
			 * "R" indicates a byte showing
			 * how FDE addresses are
			 * encoded.
			 */
			cie->encoding = *(char *)p++;
			cie->augmentation++;
		} else if (*cie->augmentation == 'P') {
			/*
			 * "P" indicates a personality
			 * routine in the CIE
			 * augmentation.
			 */
			UNWINDER_BUG();
		} else if (*cie->augmentation == 'S') {
			UNWINDER_BUG();
		} else {
			/*
			 * Unknown augmentation. Assume
			 * 'z' augmentation.
			 */
			p = cie->initial_instructions;
			UNWINDER_BUG_ON(!p);
			break;
		}
	}

	cie->initial_instructions = p;
	cie->instructions_end = end;

	/* Add to tree */
	spin_lock_irqsave(&dwarf_cie_lock, flags);

	while (*rb_node) {
		struct dwarf_cie *cie_tmp;

		cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node);

		parent = *rb_node;

		if (cie->cie_pointer < cie_tmp->cie_pointer) {
			rb_node = &parent->rb_left;
		} else {
			/*
			 * Duplicate CIEs should never happen; warn but
			 * keep descending so we make progress.
			 */
			WARN_ON(cie->cie_pointer == cie_tmp->cie_pointer);
			rb_node = &parent->rb_right;
		}
	}

	rb_link_node(&cie->node, parent, rb_node);
	rb_insert_color(&cie->node, &cie_root);

	if (mod != NULL)
		list_add_tail(&cie->link, &mod->arch.cie_list);

	spin_unlock_irqrestore(&dwarf_cie_lock, flags);

	return 0;
}
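
/*
 * For reference, a typical CIE emitted by GCC for this unwinder looks
 * like (readelf --debug-dump=frames style, values illustrative):
 *
 *	00000000 00000014 00000000 CIE
 *	  Version:               1
 *	  Augmentation:          "zR"
 *	  Code alignment factor: 1
 *	  Data alignment factor: -4
 *	  Return address column: 17
 *	  Augmentation data:     1b
 *
 * i.e. a 'z' augmentation with an 'R' byte giving the FDE address
 * encoding, which is exactly the subset dwarf_parse_cie() accepts.
 */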

static int dwarf_parse_fde(void *entry, u32 entry_type,
			   void *start, unsigned long len,
			   unsigned char *end, struct module *mod)
{
	struct rb_node **rb_node = &fde_root.rb_node;
	struct rb_node *parent = *rb_node;
	struct dwarf_fde *fde;
	struct dwarf_cie *cie;
	unsigned long flags;
	int count;
	void *p = start;

	fde = kzalloc(sizeof(*fde), GFP_KERNEL);
	if (!fde)
		return -ENOMEM;

	fde->length = len;

	/*
	 * In a .eh_frame section the CIE pointer is the delta between
	 * the address of the CIE pointer field within the FDE and the
	 * start of the CIE. @start points just past that field, so
	 * subtracting the delta and the field's four bytes gives the
	 * address the CIE was parsed from.
	 */
	fde->cie_pointer = (unsigned long)(p - entry_type - 4);

	cie = dwarf_lookup_cie(fde->cie_pointer);
	fde->cie = cie;

	if (cie->encoding)
		count = dwarf_read_encoded_value(p, &fde->initial_location,
						 cie->encoding);
	else
		count = dwarf_read_addr(p, &fde->initial_location);

	p += count;

	if (cie->encoding)
		count = dwarf_read_encoded_value(p, &fde->address_range,
						 cie->encoding & 0x0f);
	else
		count = dwarf_read_addr(p, &fde->address_range);

	p += count;

	if (fde->cie->flags & DWARF_CIE_Z_AUGMENTATION) {
		unsigned int length;
		count = dwarf_read_uleb128(p, &length);
		p += count + length;
	}

	/* Call frame instructions. */
	fde->instructions = p;
	fde->end = end;

	/* Add to tree. */
	spin_lock_irqsave(&dwarf_fde_lock, flags);

	while (*rb_node) {
		struct dwarf_fde *fde_tmp;
		unsigned long tmp_start, tmp_end;
		unsigned long start, end;

		fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node);

		start = fde->initial_location;
		end = fde->initial_location + fde->address_range;

		tmp_start = fde_tmp->initial_location;
		tmp_end = fde_tmp->initial_location + fde_tmp->address_range;

		parent = *rb_node;

		if (start < tmp_start) {
			rb_node = &parent->rb_left;
		} else {
			/*
			 * Overlapping FDEs should never happen; warn
			 * but keep descending so we make progress.
			 */
			WARN_ON(start < tmp_end);
			rb_node = &parent->rb_right;
		}
	}

	rb_link_node(&fde->node, parent, rb_node);
	rb_insert_color(&fde->node, &fde_root);

	if (mod != NULL)
		list_add_tail(&fde->link, &mod->arch.fde_list);

	spin_unlock_irqrestore(&dwarf_fde_lock, flags);

	return 0;
}
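
/*
 * Note on entry discrimination: in .eh_frame the 4-byte field after the
 * initial length is 0 (DW_EH_FRAME_CIE) for a CIE and a non-zero CIE
 * pointer for an FDE, which is how dwarf_parse_section() below decides
 * whether to call dwarf_parse_cie() or dwarf_parse_fde().
 */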

static void dwarf_unwinder_dump(struct task_struct *task,
				struct pt_regs *regs,
				unsigned long *sp,
				const struct stacktrace_ops *ops,
				void *data)
{
	struct dwarf_frame *frame, *_frame;
	unsigned long return_addr;

	_frame = NULL;
	return_addr = 0;

	while (1) {
		frame = dwarf_unwind_stack(return_addr, _frame);

		if (_frame)
			dwarf_free_frame(_frame);

		_frame = frame;

		if (!frame || !frame->return_addr)
			break;

		return_addr = frame->return_addr;
		ops->address(data, return_addr, 1);
	}

	if (frame)
		dwarf_free_frame(frame);
}

static struct unwinder dwarf_unwinder = {
	.name = "dwarf-unwinder",
	.dump = dwarf_unwinder_dump,
	.rating = 150,
};

static void dwarf_unwinder_cleanup(void)
{
	struct rb_node **fde_rb_node = &fde_root.rb_node;
	struct rb_node **cie_rb_node = &cie_root.rb_node;

	/*
	 * Deallocate all the memory allocated for the DWARF unwinder.
	 * Traverse the FDE/CIE trees and remove and free all the
	 * memory associated with those data structures.
	 */
	while (*fde_rb_node) {
		struct dwarf_fde *fde;

		fde = rb_entry(*fde_rb_node, struct dwarf_fde, node);
		rb_erase(*fde_rb_node, &fde_root);
		kfree(fde);
	}

	while (*cie_rb_node) {
		struct dwarf_cie *cie;

		cie = rb_entry(*cie_rb_node, struct dwarf_cie, node);
		rb_erase(*cie_rb_node, &cie_root);
		kfree(cie);
	}

	kmem_cache_destroy(dwarf_reg_cachep);
	kmem_cache_destroy(dwarf_frame_cachep);
}

/**
 * dwarf_parse_section - parse DWARF section
 * @eh_frame_start: start address of the .eh_frame section
 * @eh_frame_end: end address of the .eh_frame section
 * @mod: the kernel module containing the .eh_frame section
 *
 * Parse the information in a .eh_frame section.
 */
static int dwarf_parse_section(char *eh_frame_start, char *eh_frame_end,
			       struct module *mod)
{
	u32 entry_type;
	void *p, *entry;
	int count, err = 0;
	unsigned long len = 0;
	unsigned int c_entries, f_entries;
	unsigned char *end;

	c_entries = 0;
	f_entries = 0;
	entry = eh_frame_start;

	while ((char *)entry < eh_frame_end) {
		p = entry;

		count = dwarf_entry_len(p, &len);
		if (count == 0) {
			/*
			 * We read a bogus length field value. There is
			 * nothing we can do here apart from disabling
			 * the DWARF unwinder. We can't even skip this
			 * entry and move to the next one because 'len'
			 * tells us where our next entry is.
			 */
			err = -EINVAL;
			goto out;
		} else
			p += count;

		/* initial length does not include itself */
		end = p + len;

		entry_type = get_unaligned((u32 *)p);
		p += 4;

		if (entry_type == DW_EH_FRAME_CIE) {
			err = dwarf_parse_cie(entry, p, len, end, mod);
			if (err < 0)
				goto out;
			else
				c_entries++;
		} else {
			err = dwarf_parse_fde(entry, entry_type, p, len,
					      end, mod);
			if (err < 0)
				goto out;
			else
				f_entries++;
		}

		entry = (char *)entry + len + 4;
	}

	printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n",
	       c_entries, f_entries);

	return 0;

out:
	return err;
}

#ifdef CONFIG_MODULES
int module_dwarf_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
			  struct module *me)
{
	unsigned int i;
	int err;
	unsigned long start, end;
	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	start = end = 0;

	for (i = 1; i < hdr->e_shnum; i++) {
		/* Alloc bit cleared means "ignore it." */
		if ((sechdrs[i].sh_flags & SHF_ALLOC)
		    && !strcmp(secstrings+sechdrs[i].sh_name, ".eh_frame")) {
			start = sechdrs[i].sh_addr;
			end = start + sechdrs[i].sh_size;
			break;
		}
	}

	/* Did we find the .eh_frame section? */
	if (i != hdr->e_shnum) {
		INIT_LIST_HEAD(&me->arch.cie_list);
		INIT_LIST_HEAD(&me->arch.fde_list);
		err = dwarf_parse_section((char *)start, (char *)end, me);
		if (err) {
			printk(KERN_WARNING "%s: failed to parse DWARF info\n",
			       me->name);
			return err;
		}
	}

	return 0;
}

/**
 * module_dwarf_cleanup - remove FDE/CIEs associated with @mod
 * @mod: the module that is being unloaded
 *
 * Remove any FDEs and CIEs from the global lists that came from
 * @mod's .eh_frame section because @mod is being unloaded.
 */
void module_dwarf_cleanup(struct module *mod)
{
	struct dwarf_fde *fde, *ftmp;
	struct dwarf_cie *cie, *ctmp;
	unsigned long flags;

	spin_lock_irqsave(&dwarf_cie_lock, flags);

	list_for_each_entry_safe(cie, ctmp, &mod->arch.cie_list, link) {
		list_del(&cie->link);
		rb_erase(&cie->node, &cie_root);
		kfree(cie);
	}

	spin_unlock_irqrestore(&dwarf_cie_lock, flags);

	spin_lock_irqsave(&dwarf_fde_lock, flags);

	list_for_each_entry_safe(fde, ftmp, &mod->arch.fde_list, link) {
		list_del(&fde->link);
		rb_erase(&fde->node, &fde_root);
		kfree(fde);
	}

	spin_unlock_irqrestore(&dwarf_fde_lock, flags);
}
#endif /* CONFIG_MODULES */

/**
 * dwarf_unwinder_init - initialise the dwarf unwinder
 *
 * Build the data structures describing the .eh_frame section to
 * make it easier to lookup CIE and FDE entries. Because the
 * .eh_frame section is packed as tightly as possible it is not
 * easy to lookup the FDE for a given PC, so we build a tree of FDE
 * and CIE entries that makes it easier.
 */
static int __init dwarf_unwinder_init(void)
{
	int err;

	dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
			sizeof(struct dwarf_frame), 0,
			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);

	dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
			sizeof(struct dwarf_reg), 0,
			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);

	dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
					  mempool_alloc_slab,
					  mempool_free_slab,
					  dwarf_frame_cachep);

	dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
					mempool_alloc_slab,
					mempool_free_slab,
					dwarf_reg_cachep);

	err = dwarf_parse_section(__start_eh_frame, __stop_eh_frame, NULL);
	if (err)
		goto out;

	err = unwinder_register(&dwarf_unwinder);
	if (err)
		goto out;

	return 0;

out:
	printk(KERN_ERR "Failed to initialise DWARF unwinder: %d\n", err);
	dwarf_unwinder_cleanup();
	return -EINVAL;
}
early_initcall(dwarf_unwinder_init);