/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <xfs.h>

static kmem_zone_t	*ktrace_hdr_zone;
static kmem_zone_t	*ktrace_ent_zone;
static int		ktrace_zentries;

void
ktrace_init(int zentries)
{
	ktrace_zentries = zentries;

	ktrace_hdr_zone = kmem_zone_init(sizeof(ktrace_t),
					"ktrace_hdr");
	ASSERT(ktrace_hdr_zone);

	ktrace_ent_zone = kmem_zone_init(ktrace_zentries
					* sizeof(ktrace_entry_t),
					"ktrace_ent");
	ASSERT(ktrace_ent_zone);
}

void
ktrace_uninit(void)
{
	kmem_cache_destroy(ktrace_hdr_zone);
	kmem_cache_destroy(ktrace_ent_zone);
}
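
/*
 * Setup sketch (illustrative only; 64 is an arbitrary example value):
 * the zones are sized once when the module comes up and destroyed
 * again when it is torn down.
 *
 *	ktrace_init(64);	... from module init ...
 *	...
 *	ktrace_uninit();	... from module exit ...
 */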

/*
 * ktrace_alloc()
 *
 * Allocate a ktrace header and enough buffering for the given
 * number of entries.
 */
ktrace_t *
ktrace_alloc(int nentries, unsigned int __nocast sleep)
{
	ktrace_t	*ktp;
	ktrace_entry_t	*ktep;

	ktp = kmem_zone_alloc(ktrace_hdr_zone, sleep);

	if (ktp == NULL) {
		/*
		 * KM_SLEEP callers don't expect failure.
		 */
		if (sleep & KM_SLEEP)
			panic("ktrace_alloc: NULL memory on KM_SLEEP request!");

		return NULL;
	}

	/*
	 * Buffers with the zone's default entry count (ktrace_zentries)
	 * come from the pre-sized zone; other sizes are allocated directly.
	 */
	if (nentries == ktrace_zentries) {
		ktep = kmem_zone_zalloc(ktrace_ent_zone, sleep);
	} else {
		ktep = kmem_zalloc((nentries * sizeof(*ktep)), sleep);
	}

	if (ktep == NULL) {
		/*
		 * KM_SLEEP callers don't expect failure.
		 */
		if (sleep & KM_SLEEP)
			panic("ktrace_alloc: NULL memory on KM_SLEEP request!");

		kmem_free(ktp, sizeof(*ktp));

		return NULL;
	}

	spinlock_init(&(ktp->kt_lock), "kt_lock");

	ktp->kt_entries  = ktep;
	ktp->kt_nentries = nentries;
	ktp->kt_index    = 0;
	ktp->kt_rollover = 0;
	return ktp;
}
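
/*
 * Allocation sketch (illustrative only; 64 stands in for whatever
 * entry count was passed to ktrace_init()): using the zone's default
 * size lets the entry buffer come from ktrace_ent_zone, and every
 * buffer is paired with a ktrace_free() at teardown.
 *
 *	ktrace_t	*ktp;
 *
 *	ktp = ktrace_alloc(64, KM_SLEEP);
 *	...
 *	ktrace_free(ktp);
 */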

/*
 * ktrace_free()
 *
 * Free up the ktrace header and buffer.  It is up to the caller
 * to ensure that no-one is referencing it.
 */
void
ktrace_free(ktrace_t *ktp)
{
	int	entries_size;

	if (ktp == NULL)
		return;

	spinlock_destroy(&ktp->kt_lock);

	/*
	 * Entry buffers with the zone's default size were allocated
	 * from ktrace_ent_zone and must be freed back to it.
	 */
	if (ktp->kt_nentries == ktrace_zentries) {
		kmem_zone_free(ktrace_ent_zone, ktp->kt_entries);
	} else {
		entries_size = (int)(ktp->kt_nentries * sizeof(ktrace_entry_t));

		kmem_free(ktp->kt_entries, entries_size);
	}

	kmem_zone_free(ktrace_hdr_zone, ktp);
}


/*
 * Enter the given values into the "next" entry in the trace buffer.
 * kt_index is always the index of the next entry to be filled.
 */
void
ktrace_enter(
	ktrace_t	*ktp,
	void		*val0,
	void		*val1,
	void		*val2,
	void		*val3,
	void		*val4,
	void		*val5,
	void		*val6,
	void		*val7,
	void		*val8,
	void		*val9,
	void		*val10,
	void		*val11,
	void		*val12,
	void		*val13,
	void		*val14,
	void		*val15)
{
	static DEFINE_SPINLOCK(wrap_lock);
	unsigned long	flags;
	int		index;
	ktrace_entry_t	*ktep;

	ASSERT(ktp != NULL);

	/*
	 * Grab an entry by pushing the index up to the next one.
	 */
	spin_lock_irqsave(&wrap_lock, flags);
	index = ktp->kt_index;
	if (++ktp->kt_index == ktp->kt_nentries)
		ktp->kt_index = 0;
	spin_unlock_irqrestore(&wrap_lock, flags);

	if (!ktp->kt_rollover && index == ktp->kt_nentries - 1)
		ktp->kt_rollover = 1;

	ASSERT((index >= 0) && (index < ktp->kt_nentries));

	ktep = &(ktp->kt_entries[index]);

	ktep->val[0]  = val0;
	ktep->val[1]  = val1;
	ktep->val[2]  = val2;
	ktep->val[3]  = val3;
	ktep->val[4]  = val4;
	ktep->val[5]  = val5;
	ktep->val[6]  = val6;
	ktep->val[7]  = val7;
	ktep->val[8]  = val8;
	ktep->val[9]  = val9;
	ktep->val[10] = val10;
	ktep->val[11] = val11;
	ktep->val[12] = val12;
	ktep->val[13] = val13;
	ktep->val[14] = val14;
	ktep->val[15] = val15;
}
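
/*
 * Recording sketch (illustrative; the event tag value and the inode
 * pointer ip are hypothetical): every call supplies all sixteen slots,
 * padding the unused tail with NULL.
 *
 *	ktrace_enter(ktp, (void *)1, ip, __return_address, NULL,
 *		     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
 *		     NULL, NULL, NULL, NULL);
 */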

/*
 * Return the number of entries in the trace buffer.
 */
int
ktrace_nentries(
	ktrace_t	*ktp)
{
	if (ktp == NULL) {
		return 0;
	}

	return (ktp->kt_rollover ? ktp->kt_nentries : ktp->kt_index);
}

/*
 * ktrace_first()
 *
 * This is used to find the start of the trace buffer.
 * In conjunction with ktrace_next() it can be used to
 * iterate through the entire trace buffer.  This code does
 * not do any locking because it is assumed that it is called
 * from the debugger.
 *
 * The caller must pass in a pointer to a ktrace_snap
 * structure in which we will keep some state used to
 * iterate through the buffer.  This state must not be touched
 * by any code outside of this module.
 */
ktrace_entry_t *
ktrace_first(ktrace_t *ktp, ktrace_snap_t *ktsp)
{
	ktrace_entry_t	*ktep;
	int		index;
	int		nentries;

	if (ktp->kt_rollover)
		index = ktp->kt_index;
	else
		index = 0;

	ktsp->ks_start = index;
	ktep = &(ktp->kt_entries[index]);

	nentries = ktrace_nentries(ktp);
	index++;
	if (index < nentries) {
		ktsp->ks_index = index;
	} else {
		ktsp->ks_index = 0;
		if (index > nentries)
			ktep = NULL;
	}
	return ktep;
}

/*
 * ktrace_next()
 *
 * This is used to iterate through the entries of the given
 * trace buffer.  The caller must pass in the ktrace_snap_t
 * structure initialized by ktrace_first().  The return value
 * will be either a pointer to the next ktrace_entry or NULL
 * if all of the entries have been traversed.
 */
ktrace_entry_t *
ktrace_next(
	ktrace_t	*ktp,
	ktrace_snap_t	*ktsp)
{
	int		index;
	ktrace_entry_t	*ktep;

	index = ktsp->ks_index;
	if (index == ktsp->ks_start) {
		ktep = NULL;
	} else {
		ktep = &ktp->kt_entries[index];
	}

	index++;
	if (index == ktrace_nentries(ktp)) {
		ktsp->ks_index = 0;
	} else {
		ktsp->ks_index = index;
	}

	return ktep;
}
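
/*
 * Iteration sketch (illustrative; print_entry() is a hypothetical
 * consumer): walk the buffer from oldest to newest entry.  As with
 * the functions themselves, this is only safe when nothing can be
 * calling ktrace_enter() on the buffer at the same time.
 *
 *	ktrace_snap_t	snap;
 *	ktrace_entry_t	*ktep;
 *
 *	for (ktep = ktrace_first(ktp, &snap);
 *	     ktep != NULL;
 *	     ktep = ktrace_next(ktp, &snap))
 *		print_entry(ktep);
 */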

/*
 * ktrace_skip()
 *
 * Skip the next "count" entries and return the entry after that.
 * Return NULL if this causes us to iterate past the beginning again.
 */
ktrace_entry_t *
ktrace_skip(
	ktrace_t	*ktp,
	int		count,
	ktrace_snap_t	*ktsp)
{
	int		index;
	int		new_index;
	int		distance;
	ktrace_entry_t	*ktep;
	int		nentries = ktrace_nentries(ktp);

	/* An empty buffer has nothing left to skip over. */
	if (nentries == 0)
		return NULL;

	index = ktsp->ks_index;
	new_index = index + count;
	while (new_index >= nentries) {
		new_index -= nentries;
	}

	/*
	 * Forward distance, with wraparound, from the current
	 * position to the start of the trace.
	 */
	distance = ktsp->ks_start - index;
	if (distance < 0)
		distance += nentries;

	if (index == ktsp->ks_start) {
		/*
		 * We've iterated around to the start, so we're done.
		 */
		ktep = NULL;
	} else if (distance <= count) {
		/*
		 * Skipping count entries passes (or lands back on)
		 * the start again, so we're done.
		 */
		ktep = NULL;
		ktsp->ks_index = ktsp->ks_start;
	} else {
		ktep = &(ktp->kt_entries[new_index]);
		new_index++;
		if (new_index == nentries) {
			ktsp->ks_index = 0;
		} else {
			ktsp->ks_index = new_index;
		}
	}
	return ktep;
}
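
/*
 * Skip sketch (illustrative; process() is a hypothetical consumer):
 * starting from a snapshot initialized by ktrace_first(), visit only
 * every second entry by skipping one entry between reads.
 *
 *	ktep = ktrace_first(ktp, &snap);
 *	while (ktep != NULL) {
 *		process(ktep);
 *		ktep = ktrace_skip(ktp, 1, &snap);
 *	}
 */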