blob: e4dd5d5a111506889b5a69284355721fc23c80f7 [file] [log] [blame]
/*
 * arch/sh/oprofile/init.c
 *
 * Copyright (C) 2003 - 2010 Paul Mundt
 *
 * Based on arch/mips/oprofile/common.c:
 *
 * Copyright (C) 2004, 2005 Ralf Baechle
 * Copyright (C) 2005 MIPS Technologies, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
15#include <linux/kernel.h>
16#include <linux/oprofile.h>
17#include <linux/init.h>
18#include <linux/errno.h>
19#include <linux/smp.h>
Matt Fleming86c8c042010-09-10 20:36:23 +010020#include <linux/perf_event.h>
Paul Mundt2e4f17d2010-10-13 03:46:25 +090021#include <linux/slab.h>
Paul Mundt60a51fb2008-12-16 09:33:53 +090022#include <asm/processor.h>
Paul Mundt60a51fb2008-12-16 09:33:53 +090023
Dave Peverley40a8b422008-12-16 09:35:40 +090024extern void sh_backtrace(struct pt_regs * const regs, unsigned int depth);
25
Paul Mundt7c842472010-10-13 07:43:50 +090026#ifdef CONFIG_HW_PERF_EVENTS
Paul Mundt2e4f17d2010-10-13 03:46:25 +090027/*
28 * This will need to be reworked when multiple PMUs are supported.
29 */
30static char *sh_pmu_op_name;
31
Matt Fleming86c8c042010-09-10 20:36:23 +010032char *op_name_from_perf_id(void)
Paul Mundt60a51fb2008-12-16 09:33:53 +090033{
Paul Mundt2e4f17d2010-10-13 03:46:25 +090034 return sh_pmu_op_name;
Paul Mundt60a51fb2008-12-16 09:33:53 +090035}
36
37int __init oprofile_arch_init(struct oprofile_operations *ops)
38{
Dave Peverley40a8b422008-12-16 09:35:40 +090039 ops->backtrace = sh_backtrace;
40
Paul Mundt2e4f17d2010-10-13 03:46:25 +090041 if (perf_num_counters() == 0)
42 return -ENODEV;
43
44 sh_pmu_op_name = kasprintf(GFP_KERNEL, "%s/%s",
45 UTS_MACHINE, perf_pmu_name());
46 if (unlikely(!sh_pmu_op_name))
47 return -ENOMEM;
48
Matt Fleming86c8c042010-09-10 20:36:23 +010049 return oprofile_perf_init(ops);
Paul Mundt60a51fb2008-12-16 09:33:53 +090050}
51
Vladimir Zapolskiy55205c92011-12-22 16:15:40 +010052void oprofile_arch_exit(void)
Paul Mundt60a51fb2008-12-16 09:33:53 +090053{
Matt Fleming86c8c042010-09-10 20:36:23 +010054 oprofile_perf_exit();
Paul Mundt2e4f17d2010-10-13 03:46:25 +090055 kfree(sh_pmu_op_name);
Paul Mundt60a51fb2008-12-16 09:33:53 +090056}
Matt Fleming86c8c042010-09-10 20:36:23 +010057#else
58int __init oprofile_arch_init(struct oprofile_operations *ops)
59{
Paul Mundt7c842472010-10-13 07:43:50 +090060 ops->backtrace = sh_backtrace;
Matt Fleming86c8c042010-09-10 20:36:23 +010061 return -ENODEV;
62}
/* Nothing was set up in the stub init, so there is nothing to undo. */
void oprofile_arch_exit(void)
{
}
Matt Fleming86c8c042010-09-10 20:36:23 +010064#endif /* CONFIG_HW_PERF_EVENTS */