/*
 * arch/sh/oprofile/init.c
 *
 * Copyright (C) 2003 - 2008 Paul Mundt
 *
 * Based on arch/mips/oprofile/common.c:
 *
 *	Copyright (C) 2004, 2005 Ralf Baechle
 *	Copyright (C) 2005 MIPS Technologies, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/oprofile.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/smp.h>
#include <asm/processor.h>
#include "op_impl.h"

static struct op_sh_model *model;

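/*
 * Per-counter configuration, as exposed to userspace through the
 * oprofilefs files created in op_sh_create_files() below.
 */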
static struct op_counter_config ctr[20];

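/* SH stack unwinder used for oprofile samples; see backtrace.c in this directory. */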
extern void sh_backtrace(struct pt_regs * const regs, unsigned int depth);

static int op_sh_setup(void)
{
	/* Pre-compute the values to stuff in the hardware registers. */
	model->reg_setup(ctr);

	/* Configure the registers on all cpus. */
	on_each_cpu(model->cpu_setup, NULL, 1);

	return 0;
}

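/*
 * Build the oprofilefs tree: one numbered directory per counter, each
 * holding the usual enabled/event/kernel/user controls. Models that
 * need extra or different per-counter files hook in via
 * model->create_files().
 */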
static int op_sh_create_files(struct super_block *sb, struct dentry *root)
{
	int i, ret = 0;

	for (i = 0; i < model->num_counters; i++) {
		struct dentry *dir;
		char buf[4];

		snprintf(buf, sizeof(buf), "%d", i);
		dir = oprofilefs_mkdir(sb, root, buf);

		ret |= oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
		ret |= oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
		ret |= oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
		ret |= oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);

		if (model->create_files)
			ret |= model->create_files(sb, dir);
		else
			ret |= oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);

		/* Dummy entries */
		ret |= oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
	}

	return ret;
}

static int op_sh_start(void)
{
	/* Enable performance monitoring for all counters. */
	on_each_cpu(model->cpu_start, NULL, 1);

	return 0;
}

static void op_sh_stop(void)
{
	/* Disable performance monitoring for all counters. */
	on_each_cpu(model->cpu_stop, NULL, 1);
}

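/*
 * Entry point called from the oprofile core at init time. Returning
 * -ENODEV here is not fatal; the core falls back to timer-based
 * profiling, which still makes use of the backtrace op set below.
 */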
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
	struct op_sh_model *lmodel = NULL;
	int ret;

	/*
	 * Always assign the backtrace op. If the counter initialization
	 * fails, we fall back to the timer which will still make use of
	 * this.
	 */
	ops->backtrace = sh_backtrace;

	/*
	 * XXX
	 *
	 * All of the SH7750/SH-4A counters have been converted to perf,
	 * this infrastructure hook is left for other users until they've
	 * had a chance to convert over, at which point all of this
	 * will be deleted.
	 */

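	/*
	 * No counter models are registered here at present, so lmodel
	 * stays NULL and we always bail out with -ENODEV below.
	 */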
	if (!lmodel)
		return -ENODEV;
	if (!(current_cpu_data.flags & CPU_HAS_PERF_COUNTER))
		return -ENODEV;

	ret = lmodel->init();
	if (unlikely(ret != 0))
		return ret;

	model = lmodel;

	ops->setup = op_sh_setup;
	ops->create_files = op_sh_create_files;
	ops->start = op_sh_start;
	ops->stop = op_sh_stop;
	ops->cpu_type = lmodel->cpu_type;

	printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
	       lmodel->cpu_type);

	return 0;
}

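/* Teardown hook from the oprofile core; model->exit() only runs if a model registered one. */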
void oprofile_arch_exit(void)
{
	if (model && model->exit)
		model->exit();
}