/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <xfs.h>

static kmem_zone_t	*ktrace_hdr_zone;
static kmem_zone_t	*ktrace_ent_zone;
static int		ktrace_zentries;

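/*
 * Record the default trace-buffer size and create the slab caches used
 * for trace headers and default-sized entry buffers.
 */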
void __init
ktrace_init(int zentries)
{
	ktrace_zentries = zentries;

	ktrace_hdr_zone = kmem_zone_init(sizeof(ktrace_t),
					"ktrace_hdr");
	ASSERT(ktrace_hdr_zone);

	ktrace_ent_zone = kmem_zone_init(ktrace_zentries
					* sizeof(ktrace_entry_t),
					"ktrace_ent");
	ASSERT(ktrace_ent_zone);
}

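/*
 * Tear down the ktrace slab caches.
 */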
void __exit
ktrace_uninit(void)
{
	kmem_zone_destroy(ktrace_hdr_zone);
	kmem_zone_destroy(ktrace_ent_zone);
}

/*
 * ktrace_alloc()
 *
 * Allocate a ktrace header and enough buffering for the given
 * number of entries.
 */
ktrace_t *
ktrace_alloc(int nentries, unsigned int __nocast sleep)
{
	ktrace_t	*ktp;
	ktrace_entry_t	*ktep;

	ktp = (ktrace_t*)kmem_zone_alloc(ktrace_hdr_zone, sleep);

	if (ktp == (ktrace_t*)NULL) {
		/*
		 * KM_SLEEP callers don't expect failure.
		 */
		if (sleep & KM_SLEEP)
			panic("ktrace_alloc: NULL memory on KM_SLEEP request!");

		return NULL;
	}

	/*
	 * Buffers with the default number of entries (ktrace_zentries)
	 * come from their own zone; other sizes are allocated directly.
	 */
	if (nentries == ktrace_zentries) {
		ktep = (ktrace_entry_t*)kmem_zone_zalloc(ktrace_ent_zone,
							 sleep);
	} else {
		ktep = (ktrace_entry_t*)kmem_zalloc((nentries * sizeof(*ktep)),
						    sleep | KM_LARGE);
	}

	if (ktep == NULL) {
		/*
		 * KM_SLEEP callers don't expect failure.
		 */
		if (sleep & KM_SLEEP)
			panic("ktrace_alloc: NULL memory on KM_SLEEP request!");

		kmem_free(ktp, sizeof(*ktp));

		return NULL;
	}

	ktp->kt_entries = ktep;
	ktp->kt_nentries = nentries;
	ktp->kt_index = 0;
	ktp->kt_rollover = 0;
	return ktp;
}


/*
 * ktrace_free()
 *
 * Free up the ktrace header and buffer.  It is up to the caller
 * to ensure that no-one is referencing it.
 */
void
ktrace_free(ktrace_t *ktp)
{
	int	entries_size;

	if (ktp == (ktrace_t *)NULL)
		return;

	/*
	 * Default-sized buffers (such as the vnode trace buffer) came
	 * from the entry zone; anything else was allocated directly.
	 */
	if (ktp->kt_nentries == ktrace_zentries) {
		kmem_zone_free(ktrace_ent_zone, ktp->kt_entries);
	} else {
		entries_size = (int)(ktp->kt_nentries * sizeof(ktrace_entry_t));

		kmem_free(ktp->kt_entries, entries_size);
	}

	kmem_zone_free(ktrace_hdr_zone, ktp);
}
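
/*
 * Minimal allocation/teardown sketch (illustrative only; "my_trace" and
 * the entry count below are hypothetical, not part of this file):
 *
 *	ktrace_t *my_trace = ktrace_alloc(64, KM_SLEEP);
 *	...
 *	ktrace_free(my_trace);
 */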


/*
 * Enter the given values into the "next" entry in the trace buffer.
 * kt_index is always the index of the next entry to be filled.
 */
void
ktrace_enter(
	ktrace_t	*ktp,
	void		*val0,
	void		*val1,
	void		*val2,
	void		*val3,
	void		*val4,
	void		*val5,
	void		*val6,
	void		*val7,
	void		*val8,
	void		*val9,
	void		*val10,
	void		*val11,
	void		*val12,
	void		*val13,
	void		*val14,
	void		*val15)
{
	static DEFINE_SPINLOCK(wrap_lock);
	unsigned long	flags;
	int		index;
	ktrace_entry_t	*ktep;

	ASSERT(ktp != NULL);

	/*
	 * Grab an entry by pushing the index up to the next one.
	 */
	spin_lock_irqsave(&wrap_lock, flags);
	index = ktp->kt_index;
	if (++ktp->kt_index == ktp->kt_nentries)
		ktp->kt_index = 0;
	spin_unlock_irqrestore(&wrap_lock, flags);

	if (!ktp->kt_rollover && index == ktp->kt_nentries - 1)
		ktp->kt_rollover = 1;

	ASSERT((index >= 0) && (index < ktp->kt_nentries));

	ktep = &(ktp->kt_entries[index]);

	ktep->val[0] = val0;
	ktep->val[1] = val1;
	ktep->val[2] = val2;
	ktep->val[3] = val3;
	ktep->val[4] = val4;
	ktep->val[5] = val5;
	ktep->val[6] = val6;
	ktep->val[7] = val7;
	ktep->val[8] = val8;
	ktep->val[9] = val9;
	ktep->val[10] = val10;
	ktep->val[11] = val11;
	ktep->val[12] = val12;
	ktep->val[13] = val13;
	ktep->val[14] = val14;
	ktep->val[15] = val15;
}
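
/*
 * Illustrative sketch only: callers typically wrap ktrace_enter() in a
 * helper or macro that casts its arguments to (void *).  The trace
 * buffer, event id and value below are hypothetical, not part of this
 * file:
 *
 *	ktrace_enter(my_trace,
 *		     (void *)(unsigned long)MY_EVENT_ID, (void *)my_ptr,
 *		     NULL, NULL, NULL, NULL, NULL, NULL,
 *		     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
 */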

/*
 * Return the number of entries in the trace buffer.
 */
int
ktrace_nentries(
	ktrace_t	*ktp)
{
	if (ktp == NULL) {
		return 0;
	}

	return (ktp->kt_rollover ? ktp->kt_nentries : ktp->kt_index);
}

/*
 * ktrace_first()
 *
 * This is used to find the start of the trace buffer.
 * In conjunction with ktrace_next() it can be used to
 * iterate through the entire trace buffer.  This code does
 * not do any locking because it is assumed that it is called
 * from the debugger.
 *
 * The caller must pass in a pointer to a ktrace_snap
 * structure in which we will keep some state used to
 * iterate through the buffer.  This state must not be touched
 * by any code outside of this module.
 */
ktrace_entry_t *
ktrace_first(ktrace_t *ktp, ktrace_snap_t *ktsp)
{
	ktrace_entry_t	*ktep;
	int		index;
	int		nentries;

	if (ktp->kt_rollover)
		index = ktp->kt_index;
	else
		index = 0;

	ktsp->ks_start = index;
	ktep = &(ktp->kt_entries[index]);

	nentries = ktrace_nentries(ktp);
	index++;
	if (index < nentries) {
		ktsp->ks_index = index;
	} else {
		ktsp->ks_index = 0;
		if (index > nentries)
			ktep = NULL;
	}
	return ktep;
}

/*
 * ktrace_next()
 *
 * This is used to iterate through the entries of the given
 * trace buffer.  The caller must pass in the ktrace_snap_t
 * structure initialized by ktrace_first().  The return value
 * will be either a pointer to the next ktrace_entry or NULL
 * if all of the entries have been traversed.
 */
ktrace_entry_t *
ktrace_next(
	ktrace_t	*ktp,
	ktrace_snap_t	*ktsp)
{
	int		index;
	ktrace_entry_t	*ktep;

	index = ktsp->ks_index;
	if (index == ktsp->ks_start) {
		ktep = NULL;
	} else {
		ktep = &ktp->kt_entries[index];
	}

	index++;
	if (index == ktrace_nentries(ktp)) {
		ktsp->ks_index = 0;
	} else {
		ktsp->ks_index = index;
	}

	return ktep;
}
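
/*
 * Typical iteration sketch (illustrative only; "my_trace" and "snap" are
 * hypothetical locals, not part of this file):
 *
 *	ktrace_snap_t	snap;
 *	ktrace_entry_t	*ktep;
 *
 *	for (ktep = ktrace_first(my_trace, &snap); ktep != NULL;
 *	     ktep = ktrace_next(my_trace, &snap)) {
 *		... examine ktep->val[0..15] ...
 *	}
 */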

/*
 * ktrace_skip()
 *
 * Skip the next "count" entries and return the entry after that.
 * Return NULL if this causes us to iterate past the beginning again.
 */
ktrace_entry_t *
ktrace_skip(
	ktrace_t	*ktp,
	int		count,
	ktrace_snap_t	*ktsp)
{
	int		index;
	int		new_index;
	ktrace_entry_t	*ktep;
	int		nentries = ktrace_nentries(ktp);

	index = ktsp->ks_index;
	new_index = index + count;
	while (new_index >= nentries) {
		new_index -= nentries;
	}
	if (index == ktsp->ks_start) {
		/*
		 * We've iterated around to the start, so we're done.
		 */
		ktep = NULL;
	} else if ((new_index < index) && (index < ktsp->ks_index)) {
		/*
		 * We've skipped past the start again, so we're done.
		 */
		ktep = NULL;
		ktsp->ks_index = ktsp->ks_start;
	} else {
		ktep = &(ktp->kt_entries[new_index]);
		new_index++;
		if (new_index == nentries) {
			ktsp->ks_index = 0;
		} else {
			ktsp->ks_index = new_index;
		}
	}
	return ktep;
}
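
/*
 * Illustrative sketch only: after ktrace_first() has initialized "snap",
 * a caller can jump ahead in the buffer (the count below is arbitrary):
 *
 *	ktep = ktrace_skip(my_trace, 10, &snap);
 */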