/*
 * Cell Broadband Engine OProfile Support
 *
 * (C) Copyright IBM Corporation 2006
 *
 * Author: Maynard Johnson <maynardj@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/* The purpose of this file is to handle SPU event task switching
 * and to record SPU context information into the OProfile
 * event buffer.
 *
 * Additionally, the spu_sync_buffer function is provided as a helper
 * for recording actual SPU program counter samples to the event buffer.
 */
#include <linux/dcookies.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/numa.h>
#include <linux/oprofile.h>
#include <linux/spinlock.h>
#include "pr_util.h"

/* Passed to release_cached_info() to request that the cached_info
 * for every SPU be released.
 */
#define RELEASE_ALL 9999

static DEFINE_SPINLOCK(buffer_lock);
static DEFINE_SPINLOCK(cache_lock);
static int num_spu_nodes;
int spu_prof_num_nodes;

/* Last-seen overlay guard value for each SPU, indexed by the
 * system-wide SPU number (eight SPUs per node).
 */
int last_guard_val[MAX_NUMNODES * 8];

/* Container for caching information about an active SPU task. */
struct cached_info {
        struct vma_to_fileoffset_map *map;
        struct spu *the_spu;    /* needed to access pointer to local_store */
        struct kref cache_ref;
};

/* Cached info for each active SPU, indexed by system-wide SPU number. */
static struct cached_info *spu_info[MAX_NUMNODES * 8];

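/* Invoked via kref_put() when the last reference to a cached_info is
 * dropped; the final reference is released either by
 * release_cached_info() or by SPUFS through the profile-private kref
 * registered in prepare_cached_spu_info().
 */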
static void destroy_cached_info(struct kref *kref)
{
        struct cached_info *info;

        info = container_of(kref, struct cached_info, cache_ref);
        vma_map_free(info->map);
        kfree(info);
        module_put(THIS_MODULE);
}

/* Return the cached_info for the passed SPU number.
 * ATTENTION: Callers are responsible for obtaining the
 * cache_lock if needed prior to invoking this function.
 */
static struct cached_info *get_cached_info(struct spu *the_spu, int spu_num)
{
        struct kref *ref;
        struct cached_info *ret_info;

        if (spu_num >= num_spu_nodes) {
                printk(KERN_ERR "SPU_PROF: "
                       "%s, line %d: Invalid index %d into spu info cache\n",
                       __FUNCTION__, __LINE__, spu_num);
                ret_info = NULL;
                goto out;
        }
        if (!spu_info[spu_num] && the_spu) {
                ref = spu_get_profile_private_kref(the_spu->ctx);
                if (ref) {
                        spu_info[spu_num] = container_of(ref,
                                        struct cached_info, cache_ref);
                        kref_get(&spu_info[spu_num]->cache_ref);
                }
        }

        ret_info = spu_info[spu_num];
out:
        return ret_info;
}


/* Looks for cached info for the passed spu. If not found, the
 * cached info is created for the passed spu.
 * Returns 0 for success; otherwise, a negative errno on error.
 */
static int
prepare_cached_spu_info(struct spu *spu, unsigned long objectId)
{
        unsigned long flags;
        struct vma_to_fileoffset_map *new_map;
        int retval = 0;
        struct cached_info *info;

        /* We won't bother getting cache_lock here since we
         * don't do anything with the cached_info that's returned.
         */
        info = get_cached_info(spu, spu->number);

        if (info) {
                pr_debug("Found cached SPU info.\n");
                goto out;
        }

        /* Create cached_info and set spu_info[spu->number] to point to it.
         * spu->number is a system-wide value, not a per-node value.
         */
        info = kzalloc(sizeof(struct cached_info), GFP_KERNEL);
        if (!info) {
                printk(KERN_ERR "SPU_PROF: "
                       "%s, line %d: create cached_info failed\n",
                       __FUNCTION__, __LINE__);
                retval = -ENOMEM;
                goto err_alloc;
        }
        new_map = create_vma_map(spu, objectId);
        if (!new_map) {
                printk(KERN_ERR "SPU_PROF: "
                       "%s, line %d: create vma_map failed\n",
                       __FUNCTION__, __LINE__);
                retval = -ENOMEM;
                goto err_alloc;
        }

        pr_debug("Created vma_map\n");
        info->map = new_map;
        info->the_spu = spu;
        kref_init(&info->cache_ref);
        spin_lock_irqsave(&cache_lock, flags);
        spu_info[spu->number] = info;
        /* Increment count before passing off ref to SPUFS. */
        kref_get(&info->cache_ref);

        /* We increment the module refcount here since SPUFS is
         * responsible for the final destruction of the cached_info,
         * and it must be able to access the destroy_cached_info()
         * function defined in the OProfile module. We decrement
         * the module refcount in destroy_cached_info.
         */
        try_module_get(THIS_MODULE);
        spu_set_profile_private_kref(spu->ctx, &info->cache_ref,
                                     destroy_cached_info);
        spin_unlock_irqrestore(&cache_lock, flags);
        goto out;

err_alloc:
        kfree(info);
out:
        return retval;
}

/*
 * NOTE: The caller is responsible for locking the
 * cache_lock prior to calling this function.
 */
static int release_cached_info(int spu_index)
{
        int index, end;

        if (spu_index == RELEASE_ALL) {
                end = num_spu_nodes;
                index = 0;
        } else {
                if (spu_index >= num_spu_nodes) {
                        printk(KERN_ERR "SPU_PROF: "
                               "%s, line %d: "
                               "Invalid index %d into spu info cache\n",
                               __FUNCTION__, __LINE__, spu_index);
                        goto out;
                }
                end = spu_index + 1;
                index = spu_index;
        }
        for (; index < end; index++) {
                if (spu_info[index]) {
                        kref_put(&spu_info[index]->cache_ref,
                                 destroy_cached_info);
                        spu_info[index] = NULL;
                }
        }

out:
        return 0;
}

/* The source code for fast_get_dcookie was "borrowed"
 * from drivers/oprofile/buffer_sync.c.
 */

/* Optimisation. We can manage without taking the dcookie sem
 * because we cannot reach this code without at least one
 * dcookie user still being registered (namely, the reader
 * of the event buffer).
 */
static inline unsigned long fast_get_dcookie(struct dentry *dentry,
                                             struct vfsmount *vfsmnt)
{
        unsigned long cookie;

        if (dentry->d_cookie)
                return (unsigned long)dentry;
        get_dcookie(dentry, vfsmnt, &cookie);
        return cookie;
}

/* Look up the dcookie for the task's first VM_EXECUTABLE mapping,
 * which corresponds loosely to "application name". Also, determine
 * the offset for the SPU ELF object. If the computed offset is
 * non-zero, it implies an embedded SPU object; otherwise, it's a
 * separate SPU binary, in which case we retrieve its dcookie.
 * For the embedded case, we must determine if the SPU ELF is embedded
 * in the executable application or another file (i.e., shared lib).
 * If embedded in a shared lib, we must get the dcookie and return
 * that to the caller.
 */
static unsigned long
get_exec_dcookie_and_offset(struct spu *spu, unsigned int *offsetp,
                            unsigned long *spu_bin_dcookie,
                            unsigned long spu_ref)
{
        unsigned long app_cookie = 0;
        unsigned int my_offset = 0;
        struct file *app = NULL;
        struct vm_area_struct *vma;
        struct mm_struct *mm = spu->mm;

        if (!mm)
                goto out;

        down_read(&mm->mmap_sem);

        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (!vma->vm_file)
                        continue;
                if (!(vma->vm_flags & VM_EXECUTABLE))
                        continue;
                app_cookie = fast_get_dcookie(vma->vm_file->f_dentry,
                                              vma->vm_file->f_vfsmnt);
                pr_debug("got dcookie for %s\n",
                         vma->vm_file->f_dentry->d_name.name);
                app = vma->vm_file;
                break;
        }

        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (vma->vm_start > spu_ref || vma->vm_end <= spu_ref)
                        continue;
                my_offset = spu_ref - vma->vm_start;
                if (!vma->vm_file)
                        goto fail_no_image_cookie;

                pr_debug("Found spu ELF at %X(object-id:%lx) for file %s\n",
                         my_offset, spu_ref,
                         vma->vm_file->f_dentry->d_name.name);
                *offsetp = my_offset;
                break;
        }

        /* Guard against spu_ref not falling within any mapping;
         * dereferencing a NULL vma below would oops.
         */
        if (!vma)
                goto fail_no_image_cookie;

        *spu_bin_dcookie = fast_get_dcookie(vma->vm_file->f_dentry,
                                            vma->vm_file->f_vfsmnt);
        pr_debug("got dcookie for %s\n", vma->vm_file->f_dentry->d_name.name);

        up_read(&mm->mmap_sem);

out:
        return app_cookie;

fail_no_image_cookie:
        up_read(&mm->mmap_sem);

        printk(KERN_ERR "SPU_PROF: "
               "%s, line %d: Cannot find dcookie for SPU binary\n",
               __FUNCTION__, __LINE__);
        goto out;
}


/* This function finds or creates cached context information for the
 * passed SPU and records SPU context information into the OProfile
 * event buffer.
 */
static int process_context_switch(struct spu *spu, unsigned long objectId)
{
        unsigned long flags;
        int retval;
        unsigned int offset = 0;
        unsigned long spu_cookie = 0, app_dcookie;

        retval = prepare_cached_spu_info(spu, objectId);
        if (retval)
                goto out;

        /* Get dcookie first because a mutex_lock is taken in that
         * code path, so interrupts must not be disabled.
         */
        app_dcookie = get_exec_dcookie_and_offset(spu, &offset,
                                                  &spu_cookie, objectId);
        if (!app_dcookie || !spu_cookie) {
                retval = -ENOENT;
                goto out;
        }

        /* Record context info in event buffer. A context-switch record
         * consists of the escape and switch codes followed, in order, by
         * the SPU number, the SPU thread's pid and tgid, the application's
         * dcookie, the SPU binary's dcookie, and the offset of the SPU
         * ELF within that binary.
         */
        spin_lock_irqsave(&buffer_lock, flags);
        add_event_entry(ESCAPE_CODE);
        add_event_entry(SPU_CTX_SWITCH_CODE);
        add_event_entry(spu->number);
        add_event_entry(spu->pid);
        add_event_entry(spu->tgid);
        add_event_entry(app_dcookie);
        add_event_entry(spu_cookie);
        add_event_entry(offset);
        spin_unlock_irqrestore(&buffer_lock, flags);
        smp_wmb(); /* ensure spu event buffer updates are written
                    * before entries from other sources can be
                    * intermingled */
out:
        return retval;
}

/*
 * This function is invoked on either a bind_context or unbind_context.
 * If called for an unbind_context, the val arg is 0; otherwise,
 * it is the object-id value for the spu context.
 * The data arg is of type 'struct spu *'.
 */
static int spu_active_notify(struct notifier_block *self, unsigned long val,
                             void *data)
{
        int retval;
        unsigned long flags;
        struct spu *the_spu = data;

        pr_debug("SPU event notification arrived\n");
        if (!val) {
                spin_lock_irqsave(&cache_lock, flags);
                retval = release_cached_info(the_spu->number);
                spin_unlock_irqrestore(&cache_lock, flags);
        } else {
                retval = process_context_switch(the_spu, val);
        }
        return retval;
}

static struct notifier_block spu_active = {
        .notifier_call = spu_active_notify,
};

static int number_of_online_nodes(void)
{
        u32 cpu;
        u32 tmp;
        int nodes = 0;

        /* Since spu->number is a system-wide index, return the highest
         * online node number plus one rather than a simple count.
         */
        for_each_online_cpu(cpu) {
                tmp = cbe_cpu_to_node(cpu) + 1;
                if (tmp > nodes)
                        nodes = tmp;
        }
        return nodes;
}

/* The main purpose of this function is to synchronize
 * OProfile with SPUFS by registering to be notified of
 * SPU task switches.
 *
 * NOTE: When profiling SPUs, we must ensure that only
 * spu_sync_start is invoked and not the generic sync_start
 * in drivers/oprofile/oprof.c. A return value of
 * SKIP_GENERIC_SYNC or SYNC_START_ERROR will
 * accomplish this.
 */
int spu_sync_start(void)
{
        int k;
        int ret = SKIP_GENERIC_SYNC;
        int register_ret;
        unsigned long flags = 0;

        spu_prof_num_nodes = number_of_online_nodes();
        num_spu_nodes = spu_prof_num_nodes * 8;

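        /* Write a header record so the event-buffer reader can identify
         * the data that follows as SPU profiling data and knows how many
         * SPUs (num_spu_nodes) are being profiled.
         */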
        spin_lock_irqsave(&buffer_lock, flags);
        add_event_entry(ESCAPE_CODE);
        add_event_entry(SPU_PROFILING_CODE);
        add_event_entry(num_spu_nodes);
        spin_unlock_irqrestore(&buffer_lock, flags);

        /* Register for SPU events */
        register_ret = spu_switch_event_register(&spu_active);
        if (register_ret) {
                ret = SYNC_START_ERROR;
                goto out;
        }

        for (k = 0; k < (MAX_NUMNODES * 8); k++)
                last_guard_val[k] = 0;
        pr_debug("spu_sync_start -- running.\n");
out:
        return ret;
}

/* Record SPU program counter samples to the oprofile event buffer. */
void spu_sync_buffer(int spu_num, unsigned int *samples,
                     int num_samples)
{
        unsigned long long file_offset;
        unsigned long flags;
        int i;
        struct vma_to_fileoffset_map *map;
        struct spu *the_spu;
        unsigned long long spu_num_ll = spu_num;
        unsigned long long spu_num_shifted = spu_num_ll << 32;
        struct cached_info *c_info;

        /* We need to obtain the cache_lock here because it's
         * possible that after getting the cached_info, the SPU job
         * corresponding to this cached_info may end, thus resulting
         * in the destruction of the cached_info.
         */
        spin_lock_irqsave(&cache_lock, flags);
        c_info = get_cached_info(NULL, spu_num);
        if (!c_info) {
                /* This legitimately happens when the SPU task ends before
                 * all samples are recorded.
                 * No big deal -- so we just drop a few samples.
                 */
                pr_debug("SPU_PROF: No cached SPU context "
                         "for SPU #%d. Dropping samples.\n", spu_num);
                goto out;
        }

        map = c_info->map;
        the_spu = c_info->the_spu;
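        /* buffer_lock nests inside cache_lock here; a plain spin_lock
         * suffices because the spin_lock_irqsave on cache_lock above has
         * already disabled interrupts.
         */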
        spin_lock(&buffer_lock);
        for (i = 0; i < num_samples; i++) {
                unsigned int sample = *(samples + i);
                int grd_val = 0;

                file_offset = 0;
                if (sample == 0)
                        continue;
                file_offset = vma_map_lookup(map, sample, the_spu, &grd_val);

                /* If overlays are used by this SPU application, the guard
                 * value is non-zero, indicating which overlay section is in
                 * use. We need to discard samples taken during the time
                 * period in which an overlay change occurs (i.e., the guard
                 * value changes).
                 */
                if (grd_val && grd_val != last_guard_val[spu_num]) {
                        last_guard_val[spu_num] = grd_val;
                        /* Drop the rest of the samples. */
                        break;
                }

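                /* Pack the SPU number into the upper 32 bits and the file
                 * offset of the sampled PC into the lower 32 bits.
                 */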
                add_event_entry(file_offset | spu_num_shifted);
        }
        spin_unlock(&buffer_lock);
out:
        spin_unlock_irqrestore(&cache_lock, flags);
}


int spu_sync_stop(void)
{
        unsigned long flags = 0;
        int ret = spu_switch_event_unregister(&spu_active);

        if (ret) {
                printk(KERN_ERR "SPU_PROF: "
                       "%s, line %d: spu_switch_event_unregister returned %d\n",
                       __FUNCTION__, __LINE__, ret);
                goto out;
        }

        spin_lock_irqsave(&cache_lock, flags);
        ret = release_cached_info(RELEASE_ALL);
        spin_unlock_irqrestore(&cache_lock, flags);
out:
        pr_debug("spu_sync_stop -- done.\n");
        return ret;
}