/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
per-process_perf
DESCRIPTION
Capture the processor performance registers when the process context
switches. The /proc file system is used to control and access the results
of the performance counters.

Each time a process is context switched, the performance counters for
the Snoop Control Unit and the standard ARM counters are set according
to the values stored for that process.

The events to capture per process are set in the /proc/ppPerf/settings
directory.

EXTERNALIZED FUNCTIONS

INITIALIZATION AND SEQUENCING REQUIREMENTS
Detail how to initialize and use this service. The sequencing aspect
is only needed if the order of operations is important.
*/
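
/*
 * Usage sketch (illustrative only, inferred from the handlers in this
 * file): the settings files are created by per_process_perf_init(), the
 * event codes below are placeholders, and <pid> stands for the process
 * of interest.
 *
 *   echo 1     > /proc/ppPerf/settings/enable
 *   echo 0x3   > /proc/ppPerf/settings/event0     (placeholder event code)
 *   echo 0x5   > /proc/ppPerf/settings/event1
 *   echo 1     > /proc/ppPerf/settings/valid      (any value other than -1)
 *   echo <pid> > /proc/ppPerf/settings/setPID
 *   cat /proc/ppPerf/results/<pid>
 */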

/*
INCLUDE FILES FOR MODULE
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sysrq.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/kernel_stat.h>
#include <asm/thread_notify.h>
#include <asm/uaccess.h>
#include "cp15_registers.h"
#include "l2_cp15_registers.h"
#include <asm/perftypes.h>
#include "per-axi.h"
#include "perf.h"

#define DEBUG_SWAPIO
#ifdef DEBUG_SWAPIO
#define MR_SIZE 1024
#define PM_PP_ERR -1
struct mark_data_s {
        long c;
        long cpu;
        unsigned long pid_old;
        unsigned long pid_new;
};

struct mark_data_s markRay[MR_SIZE] __attribute__((aligned(16)));
int mrcnt;

DEFINE_SPINLOCK(_mark_lock);

static inline void MARKPIDS(char a, int opid, int npid)
{
        int cpu = smp_processor_id();
        int idx;

        if (opid == 0)
                return;
        /*
         * Reserve a slot while holding the lock so markers on other cores
         * do not write over the same entry.
         */
        spin_lock(&_mark_lock);
        if (++mrcnt >= MR_SIZE)
                mrcnt = 0;
        idx = mrcnt;
        spin_unlock(&_mark_lock);

        markRay[idx].pid_old = opid;
        markRay[idx].pid_new = npid;
        markRay[idx].cpu = cpu;
        markRay[idx].c = a;
}
static inline void MARK(char a) { MARKPIDS(a, 0xFFFF, 0xFFFF); }
static inline void MARKPID(char a, int pid) { MARKPIDS(a, pid, 0xFFFF); }

#else
#define MARK(a)
#define MARKPID(a, b)
#define MARKPIDS(a, b, c)

#endif /* DEBUG_SWAPIO */

/*
DEFINITIONS AND DECLARATIONS FOR MODULE

This section contains definitions for constants, macros, types, variables
and other items needed by this module.
*/

/*
Constant / Define Declarations
*/

#define PERF_MON_PROCESS_NUM 0x400
#define PERF_MON_PROCESS_MASK (PERF_MON_PROCESS_NUM-1)
#define PP_MAX_PROC_ENTRIES 32

/*
 * The entry is locked and is not to be replaced.
 */
#define PERF_ENTRY_LOCKED (1<<0)
#define PERF_NOT_FIRST_TIME (1<<1)
#define PERF_EXITED (1<<2)
#define PERF_AUTOLOCK (1<<3)

#define IS_LOCKED(p) (p->flags & PERF_ENTRY_LOCKED)

#define PERF_NUM_MONITORS 4

#define L1_EVENTS_0 0
#define L1_EVENTS_1 1
#define L2_EVENTS_0 2
#define L2_EVENTS_1 3

#define PM_CYCLE_OVERFLOW_MASK 0x80000000
#define L2_PM_CYCLE_OVERFLOW_MASK 0x80000000

#define PM_START_ALL() do {\
        if (pm_global) \
                pmStartAll();\
        } while (0)
#define PM_STOP_ALL() do {\
        if (pm_global)\
                pmStopAll();\
        } while (0)
#define PM_RESET_ALL() do {\
        if (pm_global)\
                pmResetAll();\
        } while (0)

/*
 * Accessors for SMP based variables.
 */
#define _SWAPS(p) ((p)->cnts[smp_processor_id()].swaps)
#define _CYCLES(p) ((p)->cnts[smp_processor_id()].cycles)
#define _COUNTS(p, i) ((p)->cnts[smp_processor_id()].counts[i])
#define _L2COUNTS(p, i) ((p)->cnts[smp_processor_id()].l2_counts[i])
#define _L2CYCLES(p) ((p)->cnts[smp_processor_id()].l2_cycles)

/*
Type Declarations
*/

/*
 * Counts are on a per core basis.
 */
struct pm_counters_s {
        unsigned long long cycles;
        unsigned long long l2_cycles;
        unsigned long long counts[PERF_NUM_MONITORS];
        unsigned long long l2_counts[PERF_NUM_MONITORS];
        unsigned long swaps;
};

struct per_process_perf_mon_type {
        struct pm_counters_s cnts[NR_CPUS];
        unsigned long control;
        unsigned long index[PERF_NUM_MONITORS];
        unsigned long l2_index[PERF_NUM_MONITORS];
        unsigned long pid;
        struct proc_dir_entry *proc;
        struct proc_dir_entry *l2_proc;
        unsigned short flags;
        unsigned short running_cpu;
        char *pidName;
        unsigned long lpm0evtyper;
        unsigned long lpm1evtyper;
        unsigned long lpm2evtyper;
        unsigned long l2lpmevtyper;
        unsigned long vlpmevtyper;
        unsigned long l2pmevtyper0;
        unsigned long l2pmevtyper1;
        unsigned long l2pmevtyper2;
        unsigned long l2pmevtyper3;
        unsigned long l2pmevtyper4;
};

unsigned long last_in_pid[NR_CPUS];
unsigned long fake_swap_out[NR_CPUS] = {0};

/*
Local Object Definitions
*/
struct per_process_perf_mon_type perf_mons[PERF_MON_PROCESS_NUM];
struct proc_dir_entry *proc_dir;
struct proc_dir_entry *settings_dir;
struct proc_dir_entry *values_dir;
struct proc_dir_entry *axi_dir;
struct proc_dir_entry *l2_dir;
struct proc_dir_entry *axi_settings_dir;
struct proc_dir_entry *axi_results_dir;
struct proc_dir_entry *l2_results_dir;

unsigned long pp_enabled;
unsigned long pp_settings_valid = -1;
unsigned long pp_auto_lock;
unsigned long pp_set_pid;
signed long pp_clear_pid = -1;
unsigned long per_proc_event[PERF_NUM_MONITORS];
unsigned long l2_per_proc_event[PERF_NUM_MONITORS];
unsigned long dbg_flags;
unsigned long pp_lpm0evtyper;
unsigned long pp_lpm1evtyper;
unsigned long pp_lpm2evtyper;
unsigned long pp_l2lpmevtyper;
unsigned long pp_vlpmevtyper;
unsigned long pm_stop_for_interrupts;
unsigned long pm_global; /* track all, not process based */
unsigned long pm_global_enable;
unsigned long pm_remove_pid;

unsigned long pp_l2pmevtyper0;
unsigned long pp_l2pmevtyper1;
unsigned long pp_l2pmevtyper2;
unsigned long pp_l2pmevtyper3;
unsigned long pp_l2pmevtyper4;

unsigned long pp_proc_entry_index;
char *per_process_proc_names[PP_MAX_PROC_ENTRIES];

unsigned int axi_swaps;
#define MAX_AXI_SWAPS 10
int first_switch = 1;
/*
Forward Declarations
*/

/*
Function Definitions
*/

/*
FUNCTION per_process_find

DESCRIPTION
Find the per process information based on the process id (pid) passed.
This is a simple mask into the static array, so more than one pid can
map to the same entry.

DEPENDENCIES

RETURN VALUE
Pointer to the per process data

SIDE EFFECTS

*/
struct per_process_perf_mon_type *per_process_find(unsigned long pid)
{
        return &perf_mons[pid & PERF_MON_PROCESS_MASK];
}
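
/*
 * Illustrative note (not from the original sources): with
 * PERF_MON_PROCESS_NUM == 0x400 the mask is 0x3FF, so for example pid 5
 * and pid 0x405 share &perf_mons[5]. Callers therefore compare p->pid
 * against the pid they looked up before trusting the entry.
 */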

/*
FUNCTION per_process_get_name

DESCRIPTION
Retrieve the name of the performance counter based on the table and
index passed. We have two different sets of performance counters, so
different tables need to be used.

DEPENDENCIES

RETURN VALUE
Pointer to char string with the name of the event or "BAD"
Never returns NULL or a bad pointer.

SIDE EFFECTS
*/
char *per_process_get_name(unsigned long index)
{
        return pm_find_event_name(index);
}

/*
FUNCTION per_process_results_read

DESCRIPTION
Print out the formatted results for the process id being read. Event names
and counts are printed.

DEPENDENCIES

RETURN VALUE

SIDE EFFECTS
*/
int per_process_results_read(char *page, char **start, off_t off, int count,
        int *eof, void *data)
{
        struct per_process_perf_mon_type *p =
                (struct per_process_perf_mon_type *)data;
        struct pm_counters_s cnts;
        int i, j;

        /*
         * Total across all CPUs
         */
        memset(&cnts, 0, sizeof(cnts));
        for (i = 0; i < num_possible_cpus(); i++) {
                cnts.swaps += p->cnts[i].swaps;
                cnts.cycles += p->cnts[i].cycles;
                for (j = 0; j < PERF_NUM_MONITORS; j++)
                        cnts.counts[j] += p->cnts[i].counts[j];
        }

        /*
         * Display a single result from the totals calculated above.
         * Do we want an option to display individual cores?
         */
        return sprintf(page,
                "pid:%lu one:%s:%llu two:%s:%llu three:%s:%llu "
                "four:%s:%llu cycles:%llu swaps:%lu\n",
                p->pid,
                per_process_get_name(p->index[0]), cnts.counts[0],
                per_process_get_name(p->index[1]), cnts.counts[1],
                per_process_get_name(p->index[2]), cnts.counts[2],
                per_process_get_name(p->index[3]), cnts.counts[3],
                cnts.cycles, cnts.swaps);
}
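
/*
 * Example of what the read handler above produces (illustrative only; the
 * event names and numbers are placeholders, real names come from
 * pm_find_event_name()):
 *
 *   pid:1234 one:EVT_A:42 two:EVT_B:7 three:EVT_C:3 four:EVT_D:11 cycles:987654 swaps:25
 */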

int per_process_l2_results_read(char *page, char **start, off_t off, int count,
        int *eof, void *data)
{
        struct per_process_perf_mon_type *p =
                (struct per_process_perf_mon_type *)data;
        struct pm_counters_s cnts;
        int i, j;

        /*
         * Total across all CPUs
         */
        memset(&cnts, 0, sizeof(cnts));
        for (i = 0; i < num_possible_cpus(); i++) {
                cnts.l2_cycles += p->cnts[i].l2_cycles;
                for (j = 0; j < PERF_NUM_MONITORS; j++)
                        cnts.l2_counts[j] += p->cnts[i].l2_counts[j];
        }

        /*
         * Display a single result from the totals calculated above.
         * Do we want an option to display individual cores?
         */
        return sprintf(page,
                "pid:%lu l2_one:%s:%llu l2_two:%s:%llu l2_three:%s:%llu "
                "l2_four:%s:%llu l2_cycles:%llu\n",
                p->pid,
                per_process_get_name(p->l2_index[0]), cnts.l2_counts[0],
                per_process_get_name(p->l2_index[1]), cnts.l2_counts[1],
                per_process_get_name(p->l2_index[2]), cnts.l2_counts[2],
                per_process_get_name(p->l2_index[3]), cnts.l2_counts[3],
                cnts.l2_cycles);
}

/*
FUNCTION per_process_results_write

DESCRIPTION
Allow some control over the results. If the user forgets to autolock or
wants to unlock the results so they will be deleted, then this is
where it is processed.

For example, to unlock process 23:
echo "unlock" > 23

DEPENDENCIES

RETURN VALUE
Number of characters used (all of them!)

SIDE EFFECTS
*/
int per_process_results_write(struct file *file, const char *buff,
        unsigned long cnt, void *data)
{
        char *newbuf;
        struct per_process_perf_mon_type *p =
                (struct per_process_perf_mon_type *)data;

        if (p == 0)
                return cnt;
        /*
         * Alloc the user data in kernel space and then copy user to kernel.
         */
        newbuf = kmalloc(cnt + 1, GFP_KERNEL);
        if (0 == newbuf)
                return cnt;
        if (copy_from_user(newbuf, buff, cnt) != 0) {
                printk(KERN_INFO "%s copy_from_user failed\n", __func__);
                kfree(newbuf);
                return cnt;
        }
        newbuf[cnt] = '\0';

        if (0 == strcmp("lock", newbuf))
                p->flags |= PERF_ENTRY_LOCKED;
        else if (0 == strcmp("unlock", newbuf))
                p->flags &= ~PERF_ENTRY_LOCKED;
        else if (0 == strcmp("auto", newbuf))
                p->flags |= PERF_AUTOLOCK;
        else if (0 == strcmp("autoun", newbuf))
                p->flags &= ~PERF_AUTOLOCK;

        kfree(newbuf);
        return cnt;
}
| 412 | |
| 413 | /* |
| 414 | FUNCTION perProcessCreateResults |
| 415 | |
| 416 | DESCRIPTION |
| 417 | Create the results /proc file if the system parameters allow it... |
| 418 | DEPENDENCIES |
| 419 | |
| 420 | RETURN VALUE |
| 421 | |
| 422 | SIDE EFFECTS |
| 423 | */ |
| 424 | void per_process_create_results_proc(struct per_process_perf_mon_type *p) |
| 425 | { |
| 426 | |
| 427 | if (0 == p->pidName) |
| 428 | p->pidName = kmalloc(12, GFP_KERNEL); |
| 429 | if (0 == p->pidName) |
| 430 | return; |
| 431 | sprintf(p->pidName, "%ld", p->pid); |
| 432 | |
| 433 | if (0 == p->proc) { |
| 434 | p->proc = create_proc_entry(p->pidName, 0777, values_dir); |
| 435 | if (0 == p->proc) |
| 436 | return; |
| 437 | } else { |
| 438 | p->proc->name = p->pidName; |
| 439 | } |
| 440 | |
| 441 | p->proc->read_proc = per_process_results_read; |
| 442 | p->proc->write_proc = per_process_results_write; |
| 443 | p->proc->data = (void *)p; |
| 444 | } |
| 445 | |
| 446 | void per_process_create_l2_results_proc(struct per_process_perf_mon_type *p) |
| 447 | { |
| 448 | |
| 449 | if (0 == p->pidName) |
| 450 | p->pidName = kmalloc(12, GFP_KERNEL); |
| 451 | if (0 == p->pidName) |
| 452 | return; |
| 453 | sprintf(p->pidName, "%ld", p->pid); |
| 454 | |
| 455 | if (0 == p->l2_proc) { |
| 456 | p->l2_proc = create_proc_entry(p->pidName, 0777, |
| 457 | l2_results_dir); |
| 458 | if (0 == p->l2_proc) |
| 459 | return; |
| 460 | } else { |
| 461 | p->l2_proc->name = p->pidName; |
| 462 | } |
| 463 | |
| 464 | p->l2_proc->read_proc = per_process_l2_results_read; |
| 465 | p->l2_proc->write_proc = per_process_results_write; |
| 466 | p->l2_proc->data = (void *)p; |
| 467 | } |
/*
FUNCTION per_process_swap_out

DESCRIPTION
Store the counters from the process that is about to swap out. We take
the old counts and add them to the current counts in the perf registers.
Before the new process is swapped in, the counters are reset.

DEPENDENCIES

RETURN VALUE

SIDE EFFECTS
*/
typedef void (*vfun)(void *);
void per_process_swap_out(struct per_process_perf_mon_type *data)
{
        int i;
        unsigned long overflow;
#ifdef CONFIG_ARCH_MSM8X60
        unsigned long l2_overflow;
#endif
        struct per_process_perf_mon_type *p = data;

        MARKPIDS('O', p->pid, 0);
        RCP15_PMOVSR(overflow);
#ifdef CONFIG_ARCH_MSM8X60
        RCP15_L2PMOVSR(l2_overflow);
#endif

        if (!pp_enabled)
                return;

        /*
         * The kernel for some reason (2.6.32.9) starts a process context on
         * one core and ends it on another, so the swap in and swap out can
         * happen on different cores. If this happens, we cannot stop the
         * counters and collect the data on the core that started them, and
         * we would read invalid data. So we mark the core running the
         * process as deferred. The next time a process is swapped on the
         * core that the process was running on, the counters will be
         * updated.
         */
        if ((smp_processor_id() != p->running_cpu) && (p->pid != 0)) {
                fake_swap_out[p->running_cpu] = 1;
                return;
        }

        _SWAPS(p)++;
        _CYCLES(p) += pm_get_cycle_count();

        if (overflow & PM_CYCLE_OVERFLOW_MASK)
                _CYCLES(p) += 0xFFFFFFFF;

        for (i = 0; i < PERF_NUM_MONITORS; i++) {
                _COUNTS(p, i) += pm_get_count(i);
                if (overflow & (1 << i))
                        _COUNTS(p, i) += 0xFFFFFFFF;
        }

#ifdef CONFIG_ARCH_MSM8X60
        _L2CYCLES(p) += l2_pm_get_cycle_count();
        if (l2_overflow & L2_PM_CYCLE_OVERFLOW_MASK)
                _L2CYCLES(p) += 0xFFFFFFFF;
        for (i = 0; i < PERF_NUM_MONITORS; i++) {
                _L2COUNTS(p, i) += l2_pm_get_count(i);
                if (l2_overflow & (1 << i))
                        _L2COUNTS(p, i) += 0xFFFFFFFF;
        }
#endif
}

/*
FUNCTION per_process_remove_manual

DESCRIPTION
Remove an entry from the results directory if the flags allow this.
When not enabled or the entry is locked, the values/results will
not be removed.

DEPENDENCIES

RETURN VALUE

SIDE EFFECTS
*/
void per_process_remove_manual(unsigned long pid)
{
        struct per_process_perf_mon_type *p = per_process_find(pid);

        /*
         * Check all of the flags to see if we can remove this one,
         * then mark it as not used.
         */
        if (0 == p)
                return;
        p->pid = (0xFFFFFFFF);

        /*
         * Remove the proc entry.
         */
        if (p->proc)
                remove_proc_entry(p->pidName, values_dir);
        if (p->l2_proc)
                remove_proc_entry(p->pidName, l2_results_dir);
        kfree(p->pidName);

        /*
         * Clear them out...and ensure the pid is invalid
         */
        memset(p, 0, sizeof *p);
        p->pid = 0xFFFFFFFF;
        pm_remove_pid = -1;
}

/*
 * Remove called when a process exits...
 */
void _per_process_remove(unsigned long pid) {}

/*
FUNCTION per_process_initialize

DESCRIPTION
Initialize performance collection information for a new process.

DEPENDENCIES

RETURN VALUE

SIDE EFFECTS
May create a new proc entry
*/
void per_process_initialize(struct per_process_perf_mon_type *p,
        unsigned long pid)
{
        int i;

        /*
         * See if this is the pid we are interested in...
         */
        if (pp_settings_valid == -1)
                return;
        if ((pp_set_pid != pid) && (pp_set_pid != 0))
                return;

        /*
         * Clear out the statistics table then insert this pid.
         * We want to keep the proc entry and the name.
         */
        p->pid = pid;

        /*
         * Create a proc entry for this pid, then get the current event types
         * and store them in the data struct so the process can be tracked
         * when it is switched in.
         */
        if (p->proc == 0) {
                per_process_create_results_proc(p);
#ifdef CONFIG_ARCH_MSM8X60
                per_process_create_l2_results_proc(p);
#endif
        }
        _CYCLES(p) = 0;
        _L2CYCLES(p) = 0;
        _SWAPS(p) = 0;
        /*
         * Set the per process data struct, but not the monitors until
         * later... Init only happens when the user sets the SetPID variable
         * to this pid, so we can load new values.
         */
        for (i = 0; i < PERF_NUM_MONITORS; i++) {
                p->index[i] = per_proc_event[i];
#ifdef CONFIG_ARCH_MSM8X60
                p->l2_index[i] = l2_per_proc_event[i];
#endif
                _COUNTS(p, i) = 0;
                _L2COUNTS(p, i) = 0;
        }
        p->lpm0evtyper = pp_lpm0evtyper;
        p->lpm1evtyper = pp_lpm1evtyper;
        p->lpm2evtyper = pp_lpm2evtyper;
        p->l2lpmevtyper = pp_l2lpmevtyper;
        p->vlpmevtyper = pp_vlpmevtyper;

#ifdef CONFIG_ARCH_MSM8X60
        p->l2pmevtyper0 = pp_l2pmevtyper0;
        p->l2pmevtyper1 = pp_l2pmevtyper1;
        p->l2pmevtyper2 = pp_l2pmevtyper2;
        p->l2pmevtyper3 = pp_l2pmevtyper3;
        p->l2pmevtyper4 = pp_l2pmevtyper4;
#endif

        /*
         * Reset pid and settings value
         */
        pp_set_pid = -1;
        pp_settings_valid = -1;
}

/*
FUNCTION per_process_swap_in

DESCRIPTION
Called when a context switch is about to start this PID.
We check to see if this process has an entry or not and create one
if not locked...

DEPENDENCIES

RETURN VALUE

SIDE EFFECTS
*/
void per_process_swap_in(struct per_process_perf_mon_type *p_new,
        unsigned long pid)
{
        int i;

        MARKPIDS('I', p_new->pid, 0);
        /*
         * If the set proc variable == the current pid then init a new
         * entry...
         */
        if (pp_set_pid == pid)
                per_process_initialize(p_new, pid);

        p_new->running_cpu = smp_processor_id();
        last_in_pid[smp_processor_id()] = pid;

        /*
         * setup the monitors for this process.
         */
        for (i = 0; i < PERF_NUM_MONITORS; i++) {
                pm_set_event(i, p_new->index[i]);
#ifdef CONFIG_ARCH_MSM8X60
                l2_pm_set_event(i, p_new->l2_index[i]);
#endif
        }
        pm_set_local_iu(p_new->lpm0evtyper);
        pm_set_local_xu(p_new->lpm1evtyper);
        pm_set_local_su(p_new->lpm2evtyper);
        pm_set_local_l2(p_new->l2lpmevtyper);

#ifdef CONFIG_ARCH_MSM8X60
        pm_set_local_bu(p_new->l2pmevtyper0);
        pm_set_local_cb(p_new->l2pmevtyper1);
        pm_set_local_mp(p_new->l2pmevtyper2);
        pm_set_local_sp(p_new->l2pmevtyper3);
        pm_set_local_scu(p_new->l2pmevtyper4);
#endif
}

/*
FUNCTION _per_process_switch

DESCRIPTION
Called during context switch. Updates the counts on the process about to
be swapped out and brings in the counters for the process about to be
swapped in.

All of this is dependent on the enabled and lock flags.

DEPENDENCIES

RETURN VALUE

SIDE EFFECTS
*/

DEFINE_SPINLOCK(pm_lock);
void _per_process_switch(unsigned long old_pid, unsigned long new_pid)
{
        struct per_process_perf_mon_type *p_old, *p_new;

        if (pm_global_enable == 0)
                return;

        spin_lock(&pm_lock);

        pm_stop_all();
#ifdef CONFIG_ARCH_MSM8X60
        l2_pm_stop_all();
#endif

        /*
         * We detected that the process was swapped in on one core and out on
         * a different core. This does not allow us to start and stop the
         * counters properly, so we need to defer processing. This checks to
         * see if there is any deferred processing necessary, and does it...
         */
        if (fake_swap_out[smp_processor_id()] != 0) {
                fake_swap_out[smp_processor_id()] = 0;
                p_old = per_process_find(last_in_pid[smp_processor_id()]);
                last_in_pid[smp_processor_id()] = 0;
                if (p_old != 0)
                        per_process_swap_out(p_old);
        }

        /*
         * Clear the data collected so far for this process?
         */
        if (pp_clear_pid != -1) {
                struct per_process_perf_mon_type *p_clear =
                        per_process_find(pp_clear_pid);
                if (p_clear) {
                        memset(p_clear->cnts, 0,
                                sizeof(struct pm_counters_s)*num_possible_cpus());
                        printk(KERN_INFO "Clear per-process stats for PID:%ld\n",
                                pp_clear_pid);
                        pp_clear_pid = -1;
                }
        }
        /*
         * Always collect for 0, it collects for all.
         */
        if (pp_enabled) {
                if (first_switch == 1) {
                        per_process_initialize(&perf_mons[0], 0);
                        first_switch = 0;
                }
                if (pm_global) {
                        per_process_swap_out(&perf_mons[0]);
                        per_process_swap_in(&perf_mons[0], 0);
                } else {
                        p_old = per_process_find(old_pid);
                        p_new = per_process_find(new_pid);

                        /*
                         * Save the old counts to the old data struct. If the
                         * returned ptr is NULL or the process id passed is
                         * not the same as the process id in the data struct
                         * then don't update the data.
                         */
                        if ((p_old) && (p_old->pid == old_pid) &&
                                        (p_old->pid != 0)) {
                                per_process_swap_out(p_old);
                        }

                        /*
                         * Setup the counters for the new process
                         */
                        if (pp_set_pid == new_pid)
                                per_process_initialize(p_new, new_pid);
                        if ((p_new->pid == new_pid) && (new_pid != 0))
                                per_process_swap_in(p_new, new_pid);
                }
                pm_reset_all();
#ifdef CONFIG_ARCH_MSM8X60
                l2_pm_reset_all();
#endif
#ifdef CONFIG_ARCH_QSD8X50
                axi_swaps++;
                if (axi_swaps % pm_axi_info.refresh == 0) {
                        if (pm_axi_info.clear == 1) {
                                pm_axi_clear_cnts();
                                pm_axi_info.clear = 0;
                        }
                        if (pm_axi_info.enable == 0)
                                pm_axi_disable();
                        else
                                pm_axi_update_cnts();
                        axi_swaps = 0;
                }
#endif
        }
        pm_start_all();
#ifdef CONFIG_ARCH_MSM8X60
        l2_pm_start_all();
#endif

        spin_unlock(&pm_lock);
}
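
/*
 * Summary of the switch path above: counters are stopped while holding
 * pm_lock, any deferred swap-out from a cross-core switch is settled,
 * clear/initialize requests made through /proc are honoured, the outgoing
 * entry is accumulated, the incoming entry's events are programmed, and the
 * hardware counters are reset and restarted before the lock is released.
 */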

/*
FUNCTION _perf_mon_interrupt_in

DESCRIPTION
Called when an interrupt is being processed. If the pm_stop_for_interrupts
flag is non zero then we disable the counting of performance monitors.

DEPENDENCIES

RETURN VALUE

SIDE EFFECTS
*/
static int pm_interrupt_nesting_count;
static unsigned long pm_cycle_in, pm_cycle_out;
void _perf_mon_interrupt_in(void)
{
        if (pm_global_enable == 0)
                return;
        if (pm_stop_for_interrupts == 0)
                return;
        pm_interrupt_nesting_count++; /* Atomic */
        pm_stop_all();
        pm_cycle_in = pm_get_cycle_count();
}

/*
FUNCTION _perf_mon_interrupt_out

DESCRIPTION
Re-enable performance monitor counting when the nesting count goes to zero,
provided the counting has been stopped.

DEPENDENCIES

RETURN VALUE

SIDE EFFECTS
*/
void _perf_mon_interrupt_out(void)
{
        if (pm_global_enable == 0)
                return;
        if (pm_stop_for_interrupts == 0)
                return;
        --pm_interrupt_nesting_count; /* Atomic?? */

        if (pm_interrupt_nesting_count <= 0) {
                pm_cycle_out = pm_get_cycle_count();
                if (pm_cycle_in != pm_cycle_out)
                        printk(KERN_INFO "pmIn!=pmOut in:%lx out:%lx\n",
                                pm_cycle_in, pm_cycle_out);
                if (pp_enabled) {
                        pm_start_all();
#ifdef CONFIG_ARCH_MSM8X60
                        l2_pm_start_all();
#endif
                }
                pm_interrupt_nesting_count = 0;
        }
}

void per_process_do_global(unsigned long g)
{
        pm_global = g;

        if (pm_global == 1) {
                pm_stop_all();
#ifdef CONFIG_ARCH_MSM8X60
                l2_pm_stop_all();
#endif
                pm_reset_all();
#ifdef CONFIG_ARCH_MSM8X60
                l2_pm_reset_all();
#endif
                pp_set_pid = 0;
                per_process_swap_in(&perf_mons[0], 0);
                pm_start_all();
#ifdef CONFIG_ARCH_MSM8X60
                l2_pm_start_all();
#endif
        } else {
                pm_stop_all();
#ifdef CONFIG_ARCH_MSM8X60
                l2_pm_stop_all();
#endif
        }
}
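
/*
 * Illustrative use of global mode (a sketch based on the settings entries
 * registered in per_process_perf_init(); the /proc paths are the ones
 * created there):
 *
 *   echo 1 > /proc/ppPerf/settings/globalEnable
 *   echo 1 > /proc/ppPerf/settings/enable
 *   echo 1 > /proc/ppPerf/settings/global
 *   cat /proc/ppPerf/results/0        (global counts accumulate in entry 0)
 */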

/*
FUNCTION per_process_write

DESCRIPTION
Generic routine to handle any of the settings /proc directory writes.

DEPENDENCIES

RETURN VALUE

SIDE EFFECTS
*/
int per_process_write(struct file *file, const char *buff,
        unsigned long cnt, void *data, const char *fmt)
{
        char *newbuf;
        unsigned long *d = (unsigned long *)data;

        /*
         * Alloc the user data in kernel space and then copy user to kernel.
         */
        newbuf = kmalloc(cnt + 1, GFP_KERNEL);
        if (0 == newbuf)
                return PM_PP_ERR;
        if (copy_from_user(newbuf, buff, cnt) != 0) {
                printk(KERN_INFO "%s copy_from_user failed\n", __func__);
                kfree(newbuf);
                return cnt;
        }
        newbuf[cnt] = '\0';
        sscanf(newbuf, fmt, d);
        kfree(newbuf);

        /*
         * If this is a remove command then do it now...
         */
        if (d == &pm_remove_pid)
                per_process_remove_manual(*d);
        if (d == &pm_global)
                per_process_do_global(*d);
        return cnt;
}

int per_process_write_dec(struct file *file, const char *buff,
        unsigned long cnt, void *data)
{
        return per_process_write(file, buff, cnt, data, "%ld");
}

int per_process_write_hex(struct file *file, const char *buff,
        unsigned long cnt, void *data)
{
        return per_process_write(file, buff, cnt, data, "%lx");
}

/*
FUNCTION per_process_read

DESCRIPTION
Generic read handler for the /proc settings directory.

DEPENDENCIES

RETURN VALUE
Number of characters to output.

SIDE EFFECTS
*/
int per_process_read(char *page, char **start, off_t off, int count,
        int *eof, void *data)
{
        unsigned long *d = (unsigned long *)data;
        return sprintf(page, "%lx", *d);
}

int per_process_read_decimal(char *page, char **start, off_t off, int count,
        int *eof, void *data)
{
        unsigned long *d = (unsigned long *)data;
        return sprintf(page, "%ld", *d);
}

/*
FUNCTION per_process_proc_entry

DESCRIPTION
Create a generic entry for the /proc settings directory.

DEPENDENCIES

RETURN VALUE

SIDE EFFECTS
*/
void per_process_proc_entry(char *name, unsigned long *var,
        struct proc_dir_entry *d, int hex)
{
        struct proc_dir_entry *pe;

        pe = create_proc_entry(name, 0777, d);
        if (0 == pe)
                return;
        if (hex) {
                pe->read_proc = per_process_read;
                pe->write_proc = per_process_write_hex;
        } else {
                pe->read_proc = per_process_read_decimal;
                pe->write_proc = per_process_write_dec;
        }
        pe->data = (void *)var;

        if (pp_proc_entry_index >= PP_MAX_PROC_ENTRIES) {
                printk(KERN_INFO "PERF: proc entry overflow, "
                        "memleak on module unload will occur");
                return;
        }
        per_process_proc_names[pp_proc_entry_index++] = name;
}

static int perfmon_notifier(struct notifier_block *self, unsigned long cmd,
        void *v)
{
        static int old_pid = -1;
        struct thread_info *thread = v;
        int current_pid;

        if (cmd != THREAD_NOTIFY_SWITCH)
                return old_pid;

        current_pid = thread->task->pid;
        if (old_pid != -1)
                _per_process_switch(old_pid, current_pid);
        old_pid = current_pid;
        return old_pid;
}

static struct notifier_block perfmon_notifier_block = {
        .notifier_call = perfmon_notifier,
};
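
/*
 * Note: the thread notifier above is the hook into the scheduler path.
 * Each THREAD_NOTIFY_SWITCH event hands the previous and next pid to
 * _per_process_switch(); the very first notification only seeds old_pid,
 * so per-process counting effectively starts on the second context switch
 * after the notifier is registered.
 */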

/*
FUNCTION per_process_perf_init

DESCRIPTION
Initialize the per process performance monitor variables and /proc space.

DEPENDENCIES

RETURN VALUE

SIDE EFFECTS
*/
int per_process_perf_init(void)
{
#ifdef CONFIG_ARCH_MSM8X60
        smp_call_function_single(0, (void *)pm_initialize, (void *)NULL, 1);
        smp_call_function_single(1, (void *)pm_initialize, (void *)NULL, 1);
        l2_pm_initialize();
#else
        pm_initialize();
#endif
        pm_axi_init();
        pm_axi_clear_cnts();
        proc_dir = proc_mkdir("ppPerf", NULL);
        values_dir = proc_mkdir("results", proc_dir);
        settings_dir = proc_mkdir("settings", proc_dir);
        per_process_proc_entry("enable", &pp_enabled, settings_dir, 1);
        per_process_proc_entry("valid", &pp_settings_valid, settings_dir, 1);
        per_process_proc_entry("setPID", &pp_set_pid, settings_dir, 0);
        per_process_proc_entry("clearPID", &pp_clear_pid, settings_dir, 0);
        per_process_proc_entry("event0", &per_proc_event[0], settings_dir, 1);
        per_process_proc_entry("event1", &per_proc_event[1], settings_dir, 1);
        per_process_proc_entry("event2", &per_proc_event[2], settings_dir, 1);
        per_process_proc_entry("event3", &per_proc_event[3], settings_dir, 1);
        per_process_proc_entry("l2_event0", &l2_per_proc_event[0], settings_dir,
                1);
        per_process_proc_entry("l2_event1", &l2_per_proc_event[1], settings_dir,
                1);
        per_process_proc_entry("l2_event2", &l2_per_proc_event[2], settings_dir,
                1);
        per_process_proc_entry("l2_event3", &l2_per_proc_event[3], settings_dir,
                1);
        per_process_proc_entry("debug", &dbg_flags, settings_dir, 1);
        per_process_proc_entry("autolock", &pp_auto_lock, settings_dir, 1);
        per_process_proc_entry("lpm0evtyper", &pp_lpm0evtyper, settings_dir, 1);
        per_process_proc_entry("lpm1evtyper", &pp_lpm1evtyper, settings_dir, 1);
        per_process_proc_entry("lpm2evtyper", &pp_lpm2evtyper, settings_dir, 1);
        per_process_proc_entry("l2lpmevtyper", &pp_l2lpmevtyper, settings_dir,
                1);
        per_process_proc_entry("vlpmevtyper", &pp_vlpmevtyper, settings_dir, 1);
        per_process_proc_entry("l2pmevtyper0", &pp_l2pmevtyper0, settings_dir,
                1);
        per_process_proc_entry("l2pmevtyper1", &pp_l2pmevtyper1, settings_dir,
                1);
        per_process_proc_entry("l2pmevtyper2", &pp_l2pmevtyper2, settings_dir,
                1);
        per_process_proc_entry("l2pmevtyper3", &pp_l2pmevtyper3, settings_dir,
                1);
        per_process_proc_entry("l2pmevtyper4", &pp_l2pmevtyper4, settings_dir,
                1);
        per_process_proc_entry("stopForInterrupts", &pm_stop_for_interrupts,
                settings_dir, 1);
        per_process_proc_entry("global", &pm_global, settings_dir, 1);
        per_process_proc_entry("globalEnable", &pm_global_enable, settings_dir,
                1);
        per_process_proc_entry("removePID", &pm_remove_pid, settings_dir, 0);

        axi_dir = proc_mkdir("axi", proc_dir);
        axi_settings_dir = proc_mkdir("settings", axi_dir);
        axi_results_dir = proc_mkdir("results", axi_dir);
        pm_axi_set_proc_entry("axi_enable", &pm_axi_info.enable,
                axi_settings_dir, 1);
        pm_axi_set_proc_entry("axi_clear", &pm_axi_info.clear, axi_settings_dir,
                0);
        pm_axi_set_proc_entry("axi_valid", &pm_axi_info.valid, axi_settings_dir,
                1);
        pm_axi_set_proc_entry("axi_sel_reg0", &pm_axi_info.sel_reg0,
                axi_settings_dir, 1);
        pm_axi_set_proc_entry("axi_sel_reg1", &pm_axi_info.sel_reg1,
                axi_settings_dir, 1);
        pm_axi_set_proc_entry("axi_ten_sel", &pm_axi_info.ten_sel_reg,
                axi_settings_dir, 1);
        pm_axi_set_proc_entry("axi_refresh", &pm_axi_info.refresh,
                axi_settings_dir, 1);
        pm_axi_get_cnt_proc_entry("axi_cnts", &axi_cnts, axi_results_dir, 0);
        l2_dir = proc_mkdir("l2", proc_dir);
        l2_results_dir = proc_mkdir("results", l2_dir);

        memset(perf_mons, 0, sizeof(perf_mons));
        per_process_create_results_proc(&perf_mons[0]);
        per_process_create_l2_results_proc(&perf_mons[0]);
        thread_register_notifier(&perfmon_notifier_block);
        /*
         * Set the function pointers so the module can be activated.
         */
        pp_interrupt_out_ptr = _perf_mon_interrupt_out;
        pp_interrupt_in_ptr = _perf_mon_interrupt_in;
        pp_process_remove_ptr = _per_process_remove;
        pp_loaded = 1;
        pm_axi_info.refresh = 1;

#ifdef CONFIG_ARCH_MSM8X60
        smp_call_function_single(0, (void *)pm_reset_all, (void *)NULL, 1);
        smp_call_function_single(1, (void *)pm_reset_all, (void *)NULL, 1);
        smp_call_function_single(0, (void *)l2_pm_reset_all, (void *)NULL, 1);
        smp_call_function_single(1, (void *)l2_pm_reset_all, (void *)NULL, 1);
#else
        pm_reset_all();
#endif

        return 0;
}

/*
FUNCTION per_process_perf_exit

DESCRIPTION
Module exit function; clean up and remove proc entries.

DEPENDENCIES

RETURN VALUE

SIDE EFFECTS
No more per process
*/
void per_process_perf_exit(void)
{
        unsigned long i;
        /*
         * Set the function pointers to 0 so the functions will no longer
         * be invoked
         */
        pp_loaded = 0;
        pp_interrupt_out_ptr = 0;
        pp_interrupt_in_ptr = 0;
        pp_process_remove_ptr = 0;
        /*
         * Remove the results
         */
        for (i = 0; i < PERF_MON_PROCESS_NUM; i++)
                per_process_remove_manual(perf_mons[i].pid);
        /*
         * Remove the proc entries in the settings dir
         */
        for (i = 0; i < pp_proc_entry_index; i++)
                remove_proc_entry(per_process_proc_names[i], settings_dir);

        /* remove proc axi files */
        remove_proc_entry("axi_enable", axi_settings_dir);
        remove_proc_entry("axi_valid", axi_settings_dir);
        remove_proc_entry("axi_refresh", axi_settings_dir);
        remove_proc_entry("axi_clear", axi_settings_dir);
        remove_proc_entry("axi_sel_reg0", axi_settings_dir);
        remove_proc_entry("axi_sel_reg1", axi_settings_dir);
        remove_proc_entry("axi_ten_sel", axi_settings_dir);
        remove_proc_entry("axi_cnts", axi_results_dir);
        /*
         * Remove the directories
         */
        remove_proc_entry("results", l2_dir);
        remove_proc_entry("l2", proc_dir);
        remove_proc_entry("results", proc_dir);
        remove_proc_entry("settings", proc_dir);
        remove_proc_entry("results", axi_dir);
        remove_proc_entry("settings", axi_dir);
        remove_proc_entry("axi", proc_dir);
        remove_proc_entry("ppPerf", NULL);
        pm_free_irq();
#ifdef CONFIG_ARCH_MSM8X60
        l2_pm_free_irq();
#endif
        thread_unregister_notifier(&perfmon_notifier_block);
#ifdef CONFIG_ARCH_MSM8X60
        smp_call_function_single(0, (void *)pm_deinitialize, (void *)NULL, 1);
        smp_call_function_single(1, (void *)pm_deinitialize, (void *)NULL, 1);
        l2_pm_deinitialize();
#else
        pm_deinitialize();
#endif
}