/*
 * arch/sh/oprofile/init.c
 *
 * Copyright (C) 2003 - 2010  Paul Mundt
 *
 * Based on arch/mips/oprofile/common.c:
 *
 *	Copyright (C) 2004, 2005 Ralf Baechle
 *	Copyright (C) 2005 MIPS Technologies, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/oprofile.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/slab.h>
#include <asm/processor.h>

extern void sh_backtrace(struct pt_regs * const regs, unsigned int depth);

#ifdef CONFIG_HW_PERF_EVENTS
/*
 * This will need to be reworked when multiple PMUs are supported.
 */
static char *sh_pmu_op_name;

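/*
 * Handed back to the generic perf-based oprofile backend, which reports
 * it to userspace as the cpu_type string.
 */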
char *op_name_from_perf_id(void)
{
	return sh_pmu_op_name;
}

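/*
 * Set up the architecture hooks: point oprofile at the SH backtrace
 * helper, bail out if the PMU exposes no counters, build the
 * "<machine>/<pmu>" identifier string, and hand the remaining setup
 * off to the generic perf-based oprofile backend.
 */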
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
	ops->backtrace = sh_backtrace;

	if (perf_num_counters() == 0)
		return -ENODEV;

	sh_pmu_op_name = kasprintf(GFP_KERNEL, "%s/%s",
				   UTS_MACHINE, perf_pmu_name());
	if (unlikely(!sh_pmu_op_name))
		return -ENOMEM;

	return oprofile_perf_init(ops);
}

void oprofile_arch_exit(void)
{
	oprofile_perf_exit();
	kfree(sh_pmu_op_name);
}
#else
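/*
 * Without hardware perf events the perf-backed backend is unavailable;
 * returning -ENODEV lets the oprofile core fall back to timer-based
 * sampling, which can still make use of the backtrace helper set here.
 */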
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
	ops->backtrace = sh_backtrace;
	return -ENODEV;
}
void oprofile_arch_exit(void) {}
#endif /* CONFIG_HW_PERF_EVENTS */