#ifndef BLKTRACE_H
#define BLKTRACE_H

#include <linux/blkdev.h>
#include <linux/relay.h>

/*
 * Trace categories
 */
enum blktrace_cat {
	BLK_TC_READ	= 1 << 0,	/* reads */
	BLK_TC_WRITE	= 1 << 1,	/* writes */
	BLK_TC_BARRIER	= 1 << 2,	/* barrier */
	BLK_TC_SYNC	= 1 << 3,	/* sync IO */
	BLK_TC_QUEUE	= 1 << 4,	/* queueing/merging */
	BLK_TC_REQUEUE	= 1 << 5,	/* requeueing */
	BLK_TC_ISSUE	= 1 << 6,	/* issue */
	BLK_TC_COMPLETE	= 1 << 7,	/* completions */
	BLK_TC_FS	= 1 << 8,	/* fs requests */
	BLK_TC_PC	= 1 << 9,	/* pc requests */
	BLK_TC_NOTIFY	= 1 << 10,	/* special message */
	BLK_TC_AHEAD	= 1 << 11,	/* readahead */
	BLK_TC_META	= 1 << 12,	/* metadata */

	BLK_TC_END	= 1 << 15,	/* only 16-bits, reminder */
};

#define BLK_TC_SHIFT		(16)
#define BLK_TC_ACT(act)		((act) << BLK_TC_SHIFT)

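/*
 * For illustration: the category bits occupy the upper 16 bits of the 32-bit
 * action word and the basic action index the lower 16, so a queue event on a
 * read is ultimately encoded as
 *
 *	BLK_TC_ACT(BLK_TC_QUEUE | BLK_TC_READ) | __BLK_TA_QUEUE
 *
 * (the read/write category bit is OR'ed in by __blk_add_trace() from the data
 * direction; the rest comes from the BLK_TA_* definitions below).
 */
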
/*
 * Basic trace actions
 */
enum blktrace_act {
	__BLK_TA_QUEUE = 1,		/* queued */
	__BLK_TA_BACKMERGE,		/* back merged to existing rq */
	__BLK_TA_FRONTMERGE,		/* front merge to existing rq */
	__BLK_TA_GETRQ,			/* allocated new request */
	__BLK_TA_SLEEPRQ,		/* sleeping on rq allocation */
	__BLK_TA_REQUEUE,		/* request requeued */
	__BLK_TA_ISSUE,			/* sent to driver */
	__BLK_TA_COMPLETE,		/* completed by driver */
	__BLK_TA_PLUG,			/* queue was plugged */
	__BLK_TA_UNPLUG_IO,		/* queue was unplugged by io */
	__BLK_TA_UNPLUG_TIMER,		/* queue was unplugged by timer */
	__BLK_TA_INSERT,		/* insert request */
	__BLK_TA_SPLIT,			/* bio was split */
	__BLK_TA_BOUNCE,		/* bio was bounced */
	__BLK_TA_REMAP,			/* bio was remapped */
};

/*
 * Notify events.
 */
enum blktrace_notify {
	__BLK_TN_PROCESS = 0,		/* establish pid/name mapping */
	__BLK_TN_TIMESTAMP,		/* include system clock */
	__BLK_TN_MESSAGE,		/* Character string message */
};


/*
 * Trace actions in full. Additionally, read or write is masked
 */
#define BLK_TA_QUEUE		(__BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_BACKMERGE	(__BLK_TA_BACKMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_FRONTMERGE	(__BLK_TA_FRONTMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_GETRQ		(__BLK_TA_GETRQ | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_SLEEPRQ		(__BLK_TA_SLEEPRQ | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_REQUEUE		(__BLK_TA_REQUEUE | BLK_TC_ACT(BLK_TC_REQUEUE))
#define BLK_TA_ISSUE		(__BLK_TA_ISSUE | BLK_TC_ACT(BLK_TC_ISSUE))
#define BLK_TA_COMPLETE		(__BLK_TA_COMPLETE | BLK_TC_ACT(BLK_TC_COMPLETE))
#define BLK_TA_PLUG		(__BLK_TA_PLUG | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_UNPLUG_IO	(__BLK_TA_UNPLUG_IO | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_UNPLUG_TIMER	(__BLK_TA_UNPLUG_TIMER | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_INSERT		(__BLK_TA_INSERT | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_SPLIT		(__BLK_TA_SPLIT)
#define BLK_TA_BOUNCE		(__BLK_TA_BOUNCE)
#define BLK_TA_REMAP		(__BLK_TA_REMAP | BLK_TC_ACT(BLK_TC_QUEUE))

#define BLK_TN_PROCESS		(__BLK_TN_PROCESS | BLK_TC_ACT(BLK_TC_NOTIFY))
#define BLK_TN_TIMESTAMP	(__BLK_TN_TIMESTAMP | BLK_TC_ACT(BLK_TC_NOTIFY))
#define BLK_TN_MESSAGE		(__BLK_TN_MESSAGE | BLK_TC_ACT(BLK_TC_NOTIFY))

#define BLK_IO_TRACE_MAGIC	0x65617400
#define BLK_IO_TRACE_VERSION	0x07

/*
 * The trace itself
 */
struct blk_io_trace {
	u32 magic;		/* MAGIC << 8 | version */
	u32 sequence;		/* event number */
	u64 time;		/* in microseconds */
	u64 sector;		/* disk offset */
	u32 bytes;		/* transfer length */
	u32 action;		/* what happened */
	u32 pid;		/* who did it */
	u32 device;		/* device number */
	u32 cpu;		/* on what cpu did it happen */
	u16 error;		/* completion error */
	u16 pdu_len;		/* length of data after this trace */
};
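
/*
 * Sketch of how a consumer of the trace stream would validate a record
 * (illustrative only; "t" is a pointer to a struct blk_io_trace read from
 * the relay channel, "bad_trace" a hypothetical error label):
 *
 *	if ((t->magic & 0xffffff00) != BLK_IO_TRACE_MAGIC ||
 *	    (t->magic & 0xff) != BLK_IO_TRACE_VERSION)
 *		goto bad_trace;
 *
 * Each record is followed by t->pdu_len bytes of event specific payload.
 */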

/*
 * The remap event
 */
struct blk_io_trace_remap {
	__be32 device;
	__be32 device_from;
	__be64 sector;
};

enum {
	Blktrace_setup = 1,
	Blktrace_running,
	Blktrace_stopped,
};

struct blk_trace {
	int trace_state;
	struct rchan *rchan;
	unsigned long *sequence;
	unsigned char *msg_data;
	u16 act_mask;
	u64 start_lba;
	u64 end_lba;
	u32 pid;
	u32 dev;
	struct dentry *dir;
	struct dentry *dropped_file;
	atomic_t dropped;
};

/*
 * User setup structure passed with BLKTRACESETUP
 */
struct blk_user_trace_setup {
	char name[BDEVNAME_SIZE];	/* output */
	u16 act_mask;			/* input */
	u32 buf_size;			/* input */
	u32 buf_nr;			/* input */
	u64 start_lba;
	u64 end_lba;
	u32 pid;
};
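
/*
 * Typical userspace setup sequence (sketch; BLKTRACESETUP, BLKTRACESTART,
 * BLKTRACESTOP and BLKTRACETEARDOWN are the block device ioctls declared in
 * <linux/fs.h>, and the buffer sizes below are just example values):
 *
 *	struct blk_user_trace_setup buts = {
 *		.act_mask = ~0U,		// trace all categories
 *		.buf_size = 512 * 1024,		// relay sub-buffer size
 *		.buf_nr = 4,			// sub-buffers per CPU
 *	};
 *
 *	ioctl(fd, BLKTRACESETUP, &buts);	// fills in buts.name
 *	ioctl(fd, BLKTRACESTART);
 *	// ... read the per-cpu trace files from debugfs ...
 *	ioctl(fd, BLKTRACESTOP);
 *	ioctl(fd, BLKTRACETEARDOWN);
 */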

#ifdef __KERNEL__
#if defined(CONFIG_BLK_DEV_IO_TRACE)
extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
extern void blk_trace_shutdown(struct request_queue *);
extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *);
extern int do_blk_trace_setup(struct request_queue *q,
			      char *name, dev_t dev, struct blk_user_trace_setup *buts);
extern void __trace_note_message(struct blk_trace *, const char *fmt, ...);

/**
 * blk_add_trace_msg - Add a (simple) message to the blktrace stream
 * @q:		queue the io is for
 * @fmt:	format to print message in
 * args...	Variable argument list for format
 *
 * Description:
 *     Records a (simple) message onto the blktrace stream.
 *
 *     NOTE: BLK_TN_MAX_MSG characters are output at most.
 *     NOTE: Cannot use 'static inline' due to presence of var args...
 *
 **/
#define blk_add_trace_msg(q, fmt, ...)					\
	do {								\
		struct blk_trace *bt = (q)->blk_trace;			\
		if (unlikely(bt))					\
			__trace_note_message(bt, fmt, ##__VA_ARGS__);	\
	} while (0)
#define BLK_TN_MAX_MSG		128
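
/*
 * Example use (illustrative): any code holding the request_queue can drop a
 * free-form note into the trace stream, e.g. from an I/O scheduler:
 *
 *	blk_add_trace_msg(q, "%s: slice expired, %u requests dispatched",
 *			  current->comm, dispatched);
 *
 * ("dispatched" stands in for whatever the caller wants to report; the text
 * appears in blkparse output as a message event.)
 */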

/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @q:		queue the io is for
 * @rq:		the source request
 * @what:	the action
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *
 **/
static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
				    u32 what)
{
	struct blk_trace *bt = q->blk_trace;
	int rw = rq->cmd_flags & 0x03;

	if (likely(!bt))
		return;

	if (blk_pc_request(rq)) {
		what |= BLK_TC_ACT(BLK_TC_PC);
		__blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd);
	} else {
		what |= BLK_TC_ACT(BLK_TC_FS);
		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, rw, what, rq->errors, 0, NULL);
	}
}
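
/*
 * Example callers (illustrative): the block layer core marks request
 * lifecycle events with this helper, e.g.
 *
 *	blk_add_trace_rq(q, rq, BLK_TA_ISSUE);		// sent to driver
 *	blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);	// completed by driver
 */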

/**
 * blk_add_trace_bio - Add a trace for a bio oriented action
 * @q:		queue the io is for
 * @bio:	the source bio
 * @what:	the action
 *
 * Description:
 *     Records an action against a bio. Will log the bio offset + size.
 *
 **/
static inline void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
				     u32 what)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
}
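
/*
 * Example callers (illustrative): bio based paths such as __make_request()
 * record submission and merge events like
 *
 *	blk_add_trace_bio(q, bio, BLK_TA_QUEUE);	// newly queued bio
 *	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);	// merged into an rq
 */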

/**
 * blk_add_trace_generic - Add a trace for a generic action
 * @q:		queue the io is for
 * @bio:	the source bio
 * @rw:		the data direction
 * @what:	the action
 *
 * Description:
 *     Records a simple trace
 *
 **/
static inline void blk_add_trace_generic(struct request_queue *q,
					 struct bio *bio, int rw, u32 what)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	if (bio)
		blk_add_trace_bio(q, bio, what);
	else
		__blk_add_trace(bt, 0, 0, rw, what, 0, 0, NULL);
}
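
/*
 * Example callers (illustrative): events that may or may not have a bio
 * attached go through this helper, e.g.
 *
 *	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);	// request allocated
 *	blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);		// queue plugged
 */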

/**
 * blk_add_trace_pdu_int - Add a trace for a bio with an integer payload
 * @q:		queue the io is for
 * @what:	the action
 * @bio:	the source bio
 * @pdu:	the integer payload
 *
 * Description:
 *     Adds a trace with some integer payload. This might be an unplug
 *     option given as the action, with the depth at unplug time given
 *     as the payload
 *
 **/
static inline void blk_add_trace_pdu_int(struct request_queue *q, u32 what,
					 struct bio *bio, unsigned int pdu)
{
	struct blk_trace *bt = q->blk_trace;
	__be64 rpdu = cpu_to_be64(pdu);

	if (likely(!bt))
		return;

	if (bio)
		__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), sizeof(rpdu), &rpdu);
	else
		__blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
}
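
/*
 * Example caller (sketch): an unplug event records how many requests were
 * pending when the queue was unplugged; "depth" below stands in for that
 * count as computed by the caller:
 *
 *	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL, depth);
 */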

/**
 * blk_add_trace_remap - Add a trace for a remap operation
 * @q:		queue the io is for
 * @bio:	the source bio
 * @dev:	target device
 * @from:	source sector
 * @to:		target sector
 *
 * Description:
 *     A device mapper or raid target sometimes needs to split a bio because
 *     it spans a stripe (or similar). Add a trace for that action.
 *
 **/
static inline void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
				       dev_t dev, sector_t from, sector_t to)
{
	struct blk_trace *bt = q->blk_trace;
	struct blk_io_trace_remap r;

	if (likely(!bt))
		return;

	r.device = cpu_to_be32(dev);
	r.device_from = cpu_to_be32(bio->bi_bdev->bd_dev);
	r.sector = cpu_to_be64(to);

	__blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
}
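
/*
 * Example caller (sketch): a stacking driver such as device mapper notes
 * where it redirected a bio; "target_dev" and "target_sector" stand in for
 * whatever the mapping target computed:
 *
 *	blk_add_trace_remap(bdev_get_queue(bio->bi_bdev), bio,
 *			    target_dev, bio->bi_sector, target_sector);
 */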

extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
			   char __user *arg);
extern int blk_trace_startstop(struct request_queue *q, int start);
extern int blk_trace_remove(struct request_queue *q);

#else /* !CONFIG_BLK_DEV_IO_TRACE */
#define blk_trace_ioctl(bdev, cmd, arg)			(-ENOTTY)
#define blk_trace_shutdown(q)				do { } while (0)
#define blk_add_trace_rq(q, rq, what)			do { } while (0)
#define blk_add_trace_bio(q, rq, what)			do { } while (0)
#define blk_add_trace_generic(q, rq, rw, what)		do { } while (0)
#define blk_add_trace_pdu_int(q, what, bio, pdu)	do { } while (0)
#define blk_add_trace_remap(q, bio, dev, f, t)		do { } while (0)
#define do_blk_trace_setup(q, name, dev, buts)		(-ENOTTY)
#define blk_trace_setup(q, name, dev, arg)		(-ENOTTY)
#define blk_trace_startstop(q, start)			(-ENOTTY)
#define blk_trace_remove(q)				(-ENOTTY)
#define blk_add_trace_msg(q, fmt, ...)			do { } while (0)

#endif /* CONFIG_BLK_DEV_IO_TRACE */
#endif /* __KERNEL__ */
#endif