Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | /** |
| 2 | * @file op_model_xscale.c |
| 3 | * XScale Performance Monitor Driver |
| 4 | * |
| 5 | * @remark Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com> |
| 6 | * @remark Copyright 2000-2004 MontaVista Software Inc |
| 7 | * @remark Copyright 2004 Dave Jiang <dave.jiang@intel.com> |
| 8 | * @remark Copyright 2004 Intel Corporation |
| 9 | * @remark Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk> |
| 10 | * @remark Copyright 2004 OProfile Authors |
| 11 | * |
| 12 | * @remark Read the file COPYING |
| 13 | * |
| 14 | * @author Zwane Mwaikambo |
| 15 | */ |
| 16 | |
| 17 | /* #define DEBUG */ |
| 18 | #include <linux/types.h> |
| 19 | #include <linux/errno.h> |
| 20 | #include <linux/sched.h> |
| 21 | #include <linux/oprofile.h> |
| 22 | #include <linux/interrupt.h> |
Russell King | 2326eb98 | 2006-10-15 13:48:37 +0100 | [diff] [blame] | 23 | #include <linux/irq.h> |
| 24 | |
Russell King | 0ba8b9b | 2008-08-10 18:08:10 +0100 | [diff] [blame] | 25 | #include <asm/cputype.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 26 | |
| 27 | #include "op_counter.h" |
| 28 | #include "op_arm_model.h" |
| 29 | |
/* Bits of the PMNC control register (written via write_pmnc()). */
#define	PMU_ENABLE	0x001	/* Enable counters */
#define PMN_RESET	0x002	/* Reset event counters */
#define	CCNT_RESET	0x004	/* Reset clock counter */
#define	PMU_RESET	(CCNT_RESET | PMN_RESET)
#define PMU_CNT64	0x008	/* Make CCNT count every 64th cycle */

/*
 * Per-platform PMU interrupt line.
 * TODO do runtime detection
 */
#ifdef CONFIG_ARCH_IOP32X
#define XSCALE_PMU_IRQ  IRQ_IOP32X_CORE_PMU
#endif
#ifdef CONFIG_ARCH_IOP33X
#define XSCALE_PMU_IRQ  IRQ_IOP33X_CORE_PMU
#endif
#ifdef CONFIG_ARCH_PXA
#define XSCALE_PMU_IRQ  IRQ_PMU
#endif
| 46 | |
/*
 * Different types of events that can be counted by the XScale PMU
 * as used by Oprofile userspace. Here primarily for documentation
 * purposes.
 */

#define EVT_ICACHE_MISS			0x00
#define	EVT_ICACHE_NO_DELIVER		0x01
#define	EVT_DATA_STALL			0x02
#define	EVT_ITLB_MISS			0x03
#define	EVT_DTLB_MISS			0x04
#define	EVT_BRANCH			0x05
#define	EVT_BRANCH_MISS			0x06
#define	EVT_INSTRUCTION			0x07
#define	EVT_DCACHE_FULL_STALL		0x08
#define	EVT_DCACHE_FULL_STALL_CONTIG	0x09
#define	EVT_DCACHE_ACCESS		0x0A
#define	EVT_DCACHE_MISS			0x0B
/* NOTE(review): "DCACE" looks like a typo for "DCACHE"; kept as-is since
 * renaming a public macro could break out-of-file users. */
#define	EVT_DCACE_WRITE_BACK		0x0C
#define	EVT_PC_CHANGED			0x0D
#define	EVT_BCU_REQUEST			0x10
#define	EVT_BCU_FULL			0x11
#define	EVT_BCU_DRAIN			0x12
#define	EVT_BCU_ECC_NO_ELOG		0x14
#define	EVT_BCU_1_BIT_ERR		0x15
#define	EVT_RMW				0x16
/* EVT_CCNT is not hardware defined */
#define EVT_CCNT			0xFE
#define EVT_UNUSED			0xFF
| 76 | |
/* Per-counter bookkeeping shared between setup and the IRQ handler. */
struct pmu_counter {
	volatile unsigned long ovf;	/* pending overflows, bumped in IRQ context */
	unsigned long reset_counter;	/* events per sample; counter reloads to -reset_counter */
};

/* Counter indices: CCNT is the clock counter, PMN0-3 are event counters. */
enum { CCNT, PMN0, PMN1, PMN2, PMN3, MAX_COUNTERS };

static struct pmu_counter results[MAX_COUNTERS];
| 85 | |
/*
 * There are two versions of the PMU in current XScale processors
 * with differing register layouts and number of performance counters.
 * e.g. IOP32x is xsc1 whilst IOP33x is xsc2.
 * We detect which register layout to use in xscale_detect_pmu()
 */
enum { PMU_XSC1, PMU_XSC2 };

/* Static description of one PMU flavour. */
struct pmu_type {
	int id;				/* PMU_XSC1 or PMU_XSC2 */
	char *name;			/* name exported to oprofile userspace */
	int num_counters;		/* counters implemented, including CCNT */
	unsigned int int_enable;	/* working set of enabled interrupt bits */
	unsigned int cnt_ovf[MAX_COUNTERS];	/* per-counter overflow flag bit */
	unsigned int int_mask[MAX_COUNTERS];	/* per-counter interrupt enable bit */
};

static struct pmu_type pmu_parms[] = {
	{
		.id		= PMU_XSC1,
		.name		= "arm/xscale1",
		.num_counters	= 3,
		.int_mask	= { [PMN0] = 0x10, [PMN1] = 0x20,
				    [CCNT] = 0x40 },
		.cnt_ovf	= { [CCNT] = 0x400, [PMN0] = 0x100,
				    [PMN1] = 0x200},
	},
	{
		.id		= PMU_XSC2,
		.name		= "arm/xscale2",
		.num_counters	= 5,
		.int_mask	= { [CCNT] = 0x01, [PMN0] = 0x02,
				    [PMN1] = 0x04, [PMN2] = 0x08,
				    [PMN3] = 0x10 },
		.cnt_ovf	= { [CCNT] = 0x01, [PMN0] = 0x02,
				    [PMN1] = 0x04, [PMN2] = 0x08,
				    [PMN3] = 0x10 },
	},
};

/* Flavour detected at init time by xscale_detect_pmu(). */
static struct pmu_type *pmu;
| 127 | |
/*
 * Write the PMU control register (PMNC).  The two PMU flavours keep it
 * at different CP14 locations and each has its own reserved bits, which
 * are masked to zero before the write.
 */
static void write_pmnc(u32 val)
{
	if (pmu->id == PMU_XSC1) {
		/* upper 4bits and 7, 11 are write-as-0 */
		val &= 0xffff77f;
		__asm__ __volatile__ ("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
	} else {
		/* bits 4-23 are write-as-0, 24-31 are write ignored */
		val &= 0xf;
		__asm__ __volatile__ ("mcr p14, 0, %0, c0, c1, 0" : : "r" (val));
	}
}
| 140 | |
/*
 * Read the PMU control register (PMNC) from the flavour-specific CP14
 * location, masking out bits that are read-unpredictable on xsc2.
 */
static u32 read_pmnc(void)
{
	u32 val;

	if (pmu->id == PMU_XSC1)
		__asm__ __volatile__ ("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
	else {
		__asm__ __volatile__ ("mrc p14, 0, %0, c0, c1, 0" : "=r" (val));
		/* bits 1-2 and 4-23 are read-unpredictable */
		val &= 0xff000009;
	}

	return val;
}
| 155 | |
/*
 * Read one xsc1 counter from CP14 (CCNT at c1, PMN0 at c2, PMN1 at c3).
 * Returns 0 for counter indices xsc1 does not implement (PMN2/PMN3).
 */
static u32 __xsc1_read_counter(int counter)
{
	u32 val = 0;

	switch (counter) {
	case CCNT:
		__asm__ __volatile__ ("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
		break;
	case PMN0:
		__asm__ __volatile__ ("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
		break;
	case PMN1:
		__asm__ __volatile__ ("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
		break;
	}
	return val;
}
| 173 | |
/*
 * Read one xsc2 counter from its CP14 location.
 * Returns 0 for an unrecognised counter index.
 */
static u32 __xsc2_read_counter(int counter)
{
	u32 val = 0;

	switch (counter) {
	case CCNT:
		__asm__ __volatile__ ("mrc p14, 0, %0, c1, c1, 0" : "=r" (val));
		break;
	case PMN0:
		__asm__ __volatile__ ("mrc p14, 0, %0, c0, c2, 0" : "=r" (val));
		break;
	case PMN1:
		__asm__ __volatile__ ("mrc p14, 0, %0, c1, c2, 0" : "=r" (val));
		break;
	case PMN2:
		__asm__ __volatile__ ("mrc p14, 0, %0, c2, c2, 0" : "=r" (val));
		break;
	case PMN3:
		__asm__ __volatile__ ("mrc p14, 0, %0, c3, c2, 0" : "=r" (val));
		break;
	}
	return val;
}
| 197 | |
| 198 | static u32 read_counter(int counter) |
| 199 | { |
| 200 | u32 val; |
| 201 | |
| 202 | if (pmu->id == PMU_XSC1) |
| 203 | val = __xsc1_read_counter(counter); |
| 204 | else |
| 205 | val = __xsc2_read_counter(counter); |
| 206 | |
| 207 | return val; |
| 208 | } |
| 209 | |
/*
 * Write one xsc1 counter via CP14 (CCNT at c1, PMN0 at c2, PMN1 at c3).
 * Writes to counter indices xsc1 does not implement are silently ignored.
 */
static void __xsc1_write_counter(int counter, u32 val)
{
	switch (counter) {
	case CCNT:
		__asm__ __volatile__ ("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
		break;
	case PMN0:
		__asm__ __volatile__ ("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
		break;
	case PMN1:
		__asm__ __volatile__ ("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
		break;
	}
}
| 224 | |
/*
 * Write one xsc2 counter via its CP14 location.
 * Writes to an unrecognised counter index are silently ignored.
 */
static void __xsc2_write_counter(int counter, u32 val)
{
	switch (counter) {
	case CCNT:
		__asm__ __volatile__ ("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
		break;
	case PMN0:
		__asm__ __volatile__ ("mcr p14, 0, %0, c0, c2, 0" : : "r" (val));
		break;
	case PMN1:
		__asm__ __volatile__ ("mcr p14, 0, %0, c1, c2, 0" : : "r" (val));
		break;
	case PMN2:
		__asm__ __volatile__ ("mcr p14, 0, %0, c2, c2, 0" : : "r" (val));
		break;
	case PMN3:
		__asm__ __volatile__ ("mcr p14, 0, %0, c3, c2, 0" : : "r" (val));
		break;
	}
}
| 245 | |
| 246 | static void write_counter(int counter, u32 val) |
| 247 | { |
| 248 | if (pmu->id == PMU_XSC1) |
| 249 | __xsc1_write_counter(counter, val); |
| 250 | else |
| 251 | __xsc2_write_counter(counter, val); |
| 252 | } |
| 253 | |
/*
 * Program the PMU from the user-supplied counter_config[]: mark disabled
 * counters unused, select the event each active counter monitors, and
 * preload each active counter so it overflows (and hence interrupts)
 * after counter_config[i].count events.  Always returns 0.
 */
static int xscale_setup_ctrs(void)
{
	u32 evtsel, pmnc;
	int i;

	/* Flag every counter the user left disabled. */
	for (i = CCNT; i < MAX_COUNTERS; i++) {
		if (counter_config[i].enabled)
			continue;

		counter_config[i].event = EVT_UNUSED;
	}

	switch (pmu->id) {
	case PMU_XSC1:
		/* xsc1 packs both event selectors into PMNC itself. */
		pmnc = (counter_config[PMN1].event << 20) | (counter_config[PMN0].event << 12);
		pr_debug("xscale_setup_ctrs: pmnc: %#08x\n", pmnc);
		write_pmnc(pmnc);
		break;

	case PMU_XSC2:
		/* xsc2 has a separate event-select register (CP14 c8,c1). */
		evtsel = counter_config[PMN0].event | (counter_config[PMN1].event << 8) |
			(counter_config[PMN2].event << 16) | (counter_config[PMN3].event << 24);

		pr_debug("xscale_setup_ctrs: evtsel %#08x\n", evtsel);
		__asm__ __volatile__ ("mcr p14, 0, %0, c8, c1, 0" : : "r" (evtsel));
		break;
	}

	for (i = CCNT; i < MAX_COUNTERS; i++) {
		if (counter_config[i].event == EVT_UNUSED) {
			/* Unused counter: clear its event and mask its interrupt. */
			counter_config[i].event = 0;
			pmu->int_enable &= ~pmu->int_mask[i];
			continue;
		}

		/* Preload to -count so the counter overflows after `count` events. */
		results[i].reset_counter = counter_config[i].count;
		write_counter(i, -(u32)counter_config[i].count);
		pmu->int_enable |= pmu->int_mask[i];
		pr_debug("xscale_setup_ctrs: counter%d %#08x from %#08lx\n", i,
			read_counter(i), counter_config[i].count);
	}

	return 0;
}
| 298 | |
| 299 | static void inline __xsc1_check_ctrs(void) |
| 300 | { |
| 301 | int i; |
| 302 | u32 pmnc = read_pmnc(); |
| 303 | |
| 304 | /* NOTE: there's an A stepping errata that states if an overflow */ |
| 305 | /* bit already exists and another occurs, the previous */ |
| 306 | /* Overflow bit gets cleared. There's no workaround. */ |
| 307 | /* Fixed in B stepping or later */ |
| 308 | |
| 309 | /* Write the value back to clear the overflow flags. Overflow */ |
| 310 | /* flags remain in pmnc for use below */ |
| 311 | write_pmnc(pmnc & ~PMU_ENABLE); |
| 312 | |
| 313 | for (i = CCNT; i <= PMN1; i++) { |
| 314 | if (!(pmu->int_mask[i] & pmu->int_enable)) |
| 315 | continue; |
| 316 | |
| 317 | if (pmnc & pmu->cnt_ovf[i]) |
| 318 | results[i].ovf++; |
| 319 | } |
| 320 | } |
| 321 | |
| 322 | static void inline __xsc2_check_ctrs(void) |
| 323 | { |
| 324 | int i; |
| 325 | u32 flag = 0, pmnc = read_pmnc(); |
| 326 | |
| 327 | pmnc &= ~PMU_ENABLE; |
| 328 | write_pmnc(pmnc); |
| 329 | |
| 330 | /* read overflow flag register */ |
| 331 | __asm__ __volatile__ ("mrc p14, 0, %0, c5, c1, 0" : "=r" (flag)); |
| 332 | |
| 333 | for (i = CCNT; i <= PMN3; i++) { |
| 334 | if (!(pmu->int_mask[i] & pmu->int_enable)) |
| 335 | continue; |
| 336 | |
| 337 | if (flag & pmu->cnt_ovf[i]) |
| 338 | results[i].ovf++; |
| 339 | } |
| 340 | |
| 341 | /* writeback clears overflow bits */ |
| 342 | __asm__ __volatile__ ("mcr p14, 0, %0, c5, c1, 0" : : "r" (flag)); |
| 343 | } |
| 344 | |
/*
 * PMU interrupt handler: determine which counters overflowed, emit one
 * oprofile sample per overflow, reload the counters with their reset
 * value and re-enable the PMU (the check helpers leave it disabled).
 */
static irqreturn_t xscale_pmu_interrupt(int irq, void *arg)
{
	int i;
	u32 pmnc;

	/* Latches overflow state into results[] and disables the PMU. */
	if (pmu->id == PMU_XSC1)
		__xsc1_check_ctrs();
	else
		__xsc2_check_ctrs();

	for (i = CCNT; i < MAX_COUNTERS; i++) {
		if (!results[i].ovf)
			continue;

		/* Reload so the next overflow fires after reset_counter events. */
		write_counter(i, -(u32)results[i].reset_counter);
		oprofile_add_sample(get_irq_regs(), i);
		results[i].ovf--;
	}

	/* Counters were stopped by the check helpers; turn them back on. */
	pmnc = read_pmnc() | PMU_ENABLE;
	write_pmnc(pmnc);

	return IRQ_HANDLED;
}
| 369 | |
| 370 | static void xscale_pmu_stop(void) |
| 371 | { |
| 372 | u32 pmnc = read_pmnc(); |
| 373 | |
| 374 | pmnc &= ~PMU_ENABLE; |
| 375 | write_pmnc(pmnc); |
| 376 | |
| 377 | free_irq(XSCALE_PMU_IRQ, results); |
| 378 | } |
| 379 | |
| 380 | static int xscale_pmu_start(void) |
| 381 | { |
| 382 | int ret; |
| 383 | u32 pmnc = read_pmnc(); |
| 384 | |
Thomas Gleixner | 52e405e | 2006-07-03 02:20:05 +0200 | [diff] [blame] | 385 | ret = request_irq(XSCALE_PMU_IRQ, xscale_pmu_interrupt, IRQF_DISABLED, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 386 | "XScale PMU", (void *)results); |
| 387 | |
| 388 | if (ret < 0) { |
| 389 | printk(KERN_ERR "oprofile: unable to request IRQ%d for XScale PMU\n", |
| 390 | XSCALE_PMU_IRQ); |
| 391 | return ret; |
| 392 | } |
| 393 | |
| 394 | if (pmu->id == PMU_XSC1) |
| 395 | pmnc |= pmu->int_enable; |
| 396 | else { |
| 397 | __asm__ __volatile__ ("mcr p14, 0, %0, c4, c1, 0" : : "r" (pmu->int_enable)); |
| 398 | pmnc &= ~PMU_CNT64; |
| 399 | } |
| 400 | |
| 401 | pmnc |= PMU_ENABLE; |
| 402 | write_pmnc(pmnc); |
| 403 | pr_debug("xscale_pmu_start: pmnc: %#08x mask: %08x\n", pmnc, pmu->int_enable); |
| 404 | return 0; |
| 405 | } |
| 406 | |
| 407 | static int xscale_detect_pmu(void) |
| 408 | { |
| 409 | int ret = 0; |
| 410 | u32 id; |
| 411 | |
| 412 | id = (read_cpuid(CPUID_ID) >> 13) & 0x7; |
| 413 | |
| 414 | switch (id) { |
| 415 | case 1: |
| 416 | pmu = &pmu_parms[PMU_XSC1]; |
| 417 | break; |
| 418 | case 2: |
| 419 | pmu = &pmu_parms[PMU_XSC2]; |
| 420 | break; |
| 421 | default: |
| 422 | ret = -ENODEV; |
| 423 | break; |
| 424 | } |
| 425 | |
| 426 | if (!ret) { |
| 427 | op_xscale_spec.name = pmu->name; |
| 428 | op_xscale_spec.num_counters = pmu->num_counters; |
| 429 | pr_debug("xscale_detect_pmu: detected %s PMU\n", pmu->name); |
| 430 | } |
| 431 | |
| 432 | return ret; |
| 433 | } |
| 434 | |
/* Model callbacks handed to the generic ARM oprofile layer. */
struct op_arm_model_spec op_xscale_spec = {
	.init		= xscale_detect_pmu,
	.setup_ctrs	= xscale_setup_ctrs,
	.start		= xscale_pmu_start,
	.stop		= xscale_pmu_stop,
};
| 441 | |