blob: d08569fd40ebe9f217489bb843383f20bf6fa795 [file] [log] [blame]
Jens Axboeac9b9102011-10-01 15:01:08 -04001/*
2 * Code related to writing an iolog of what a thread is doing, and to
3 * later read that back and replay
4 */
5#include <stdio.h>
6#include <stdlib.h>
7#include <libgen.h>
8#include <assert.h>
Jens Axboebac4af12014-07-03 13:42:28 -06009#include <sys/types.h>
10#include <sys/stat.h>
11#include <unistd.h>
Jens Axboe38a812d2014-07-03 09:10:39 -060012#ifdef CONFIG_ZLIB
13#include <zlib.h>
14#endif
15
Jens Axboeac9b9102011-10-01 15:01:08 -040016#include "flist.h"
17#include "fio.h"
18#include "verify.h"
19#include "trim.h"
Jens Axboe243bfe12014-04-02 15:46:58 -060020#include "filelock.h"
Jens Axboe65f93ca2014-07-03 14:16:25 -060021#include "lib/tp.h"
Jens Axboeac9b9102011-10-01 15:01:08 -040022
/* First-line magic string written to (and expected from) a v2 iolog file */
static const char iolog_ver2[] = "fio version 2 iolog";
24
#ifdef CONFIG_ZLIB

/*
 * One chunk of deflated log samples. Chunks produced by the same
 * compression pass share a 'seq' number; the inflate side uses a seq
 * change to detect the start of a new zlib stream.
 */
struct iolog_compress {
	struct flist_head list;
	void *buf;
	size_t len;
	unsigned int seq;
	int nofree;	/* set when buf/ic are owned by the caller (stack ic) */
};

/* Allocation granularity for compressed output buffers */
#define GZ_CHUNK 131072

/*
 * Allocate an empty GZ_CHUNK-sized chunk tagged with 'seq'.
 * NOTE(review): malloc() results are not checked here — consistent with
 * the rest of this file, but worth confirming policy.
 */
static struct iolog_compress *get_new_chunk(unsigned int seq)
{
	struct iolog_compress *c;

	c = malloc(sizeof(*c));
	INIT_FLIST_HEAD(&c->list);
	c->buf = malloc(GZ_CHUNK);
	c->len = 0;
	c->seq = seq;
	c->nofree = 0;
	return c;
}

/* Release a chunk, unless the caller owns its storage (nofree set) */
static void free_chunk(struct iolog_compress *ic)
{
	if (!ic->nofree) {
		free(ic->buf);
		free(ic);
	}
}

#endif
59
Jens Axboeac9b9102011-10-01 15:01:08 -040060void queue_io_piece(struct thread_data *td, struct io_piece *ipo)
61{
62 flist_add_tail(&ipo->list, &td->io_log_list);
63 td->total_io_size += ipo->len;
64}
65
66void log_io_u(struct thread_data *td, struct io_u *io_u)
67{
68 const char *act[] = { "read", "write", "sync", "datasync",
69 "sync_file_range", "wait", "trim" };
70
71 assert(io_u->ddir <= 6);
72
73 if (!td->o.write_iolog_file)
74 return;
75
76 fprintf(td->iolog_f, "%s %s %llu %lu\n", io_u->file->file_name,
77 act[io_u->ddir], io_u->offset,
78 io_u->buflen);
79}
80
81void log_file(struct thread_data *td, struct fio_file *f,
82 enum file_log_act what)
83{
84 const char *act[] = { "add", "open", "close" };
85
86 assert(what < 3);
87
88 if (!td->o.write_iolog_file)
89 return;
90
91
92 /*
93 * this happens on the pre-open/close done before the job starts
94 */
95 if (!td->iolog_f)
96 return;
97
98 fprintf(td->iolog_f, "%s %s\n", f->file_name, act[what]);
99}
100
101static void iolog_delay(struct thread_data *td, unsigned long delay)
102{
103 unsigned long usec = utime_since_now(&td->last_issue);
Jens Axboe30b18672014-04-11 00:03:32 +0200104 unsigned long this_delay;
Jens Axboeac9b9102011-10-01 15:01:08 -0400105
106 if (delay < usec)
107 return;
108
109 delay -= usec;
110
111 /*
112 * less than 100 usec delay, just regard it as noise
113 */
114 if (delay < 100)
115 return;
116
Jens Axboe30b18672014-04-11 00:03:32 +0200117 while (delay && !td->terminate) {
118 this_delay = delay;
119 if (this_delay > 500000)
120 this_delay = 500000;
121
122 usec_sleep(td, this_delay);
123 delay -= this_delay;
124 }
Jens Axboeac9b9102011-10-01 15:01:08 -0400125}
126
/*
 * Handle io pieces that describe a file action rather than real I/O.
 * Returns 0 if the ipo is normal I/O, 1 if it was a consumed file
 * action, and -1 if the action failed (error recorded via td_verror).
 */
static int ipo_special(struct thread_data *td, struct io_piece *ipo)
{
	struct fio_file *f;
	int ret;

	/*
	 * Not a special ipo
	 */
	if (ipo->ddir != DDIR_INVAL)
		return 0;

	f = td->files[ipo->fileno];

	switch (ipo->file_action) {
	case FIO_LOG_OPEN_FILE:
		ret = td_io_open_file(td, f);
		if (!ret)
			break;
		td_verror(td, ret, "iolog open file");
		return -1;
	case FIO_LOG_CLOSE_FILE:
		td_io_close_file(td, f);
		break;
	case FIO_LOG_UNLINK_FILE:
		/* best-effort: unlink failure is deliberately ignored */
		unlink(f->file_name);
		break;
	default:
		log_err("fio: bad file action %d\n", ipo->file_action);
		break;
	}

	return 1;
}
160
/*
 * Pop the next replay entry off the io log queue and fill in 'io_u'.
 * File actions are executed inline, wait entries sleep here. Returns 0
 * when io_u was populated with real I/O, 1 when the log is exhausted
 * (td->done is then set).
 */
int read_iolog_get(struct thread_data *td, struct io_u *io_u)
{
	struct io_piece *ipo;
	unsigned long elapsed;

	while (!flist_empty(&td->io_log_list)) {
		int ret;

		ipo = flist_entry(td->io_log_list.next, struct io_piece, list);
		flist_del(&ipo->list);
		remove_trim_entry(td, ipo);

		/* file open/close/unlink entries are handled right here */
		ret = ipo_special(td, ipo);
		if (ret < 0) {
			free(ipo);
			break;
		} else if (ret > 0) {
			free(ipo);
			continue;
		}

		io_u->ddir = ipo->ddir;
		if (ipo->ddir != DDIR_WAIT) {
			io_u->offset = ipo->offset;
			io_u->buflen = ipo->len;
			io_u->file = td->files[ipo->fileno];
			get_file(io_u->file);
			dprint(FD_IO, "iolog: get %llu/%lu/%s\n", io_u->offset,
						io_u->buflen, io_u->file->file_name);
			/* per-entry delay pacing for timestamped replay */
			if (ipo->delay)
				iolog_delay(td, ipo->delay);
		} else {
			/* wait entry: delay is an absolute msec-since-start */
			elapsed = mtime_since_genesis();
			if (ipo->delay > elapsed)
				usec_sleep(td, (ipo->delay - elapsed) * 1000);
		}

		free(ipo);

		if (io_u->ddir != DDIR_WAIT)
			return 0;
	}

	td->done = 1;
	return 1;
}
207
208void prune_io_piece_log(struct thread_data *td)
209{
210 struct io_piece *ipo;
211 struct rb_node *n;
212
213 while ((n = rb_first(&td->io_hist_tree)) != NULL) {
214 ipo = rb_entry(n, struct io_piece, rb_node);
215 rb_erase(n, &td->io_hist_tree);
216 remove_trim_entry(td, ipo);
217 td->io_hist_len--;
218 free(ipo);
219 }
220
221 while (!flist_empty(&td->io_hist_list)) {
222 ipo = flist_entry(td->io_hist_list.next, struct io_piece, list);
223 flist_del(&ipo->list);
224 remove_trim_entry(td, ipo);
225 td->io_hist_len--;
226 free(ipo);
227 }
228}
229
/*
 * log a successful write, so we can unwind the log for verify
 */
void log_io_piece(struct thread_data *td, struct io_u *io_u)
{
	struct rb_node **p, *parent;
	struct io_piece *ipo, *__ipo;

	ipo = malloc(sizeof(struct io_piece));
	init_ipo(ipo);
	ipo->file = io_u->file;
	ipo->offset = io_u->offset;
	ipo->len = io_u->buflen;
	ipo->numberio = io_u->numberio;
	ipo->flags = IP_F_IN_FLIGHT;

	io_u->ipo = ipo;

	if (io_u_should_trim(td, io_u)) {
		flist_add_tail(&ipo->trim_list, &td->trim_list);
		td->trim_entries++;
	}

	/*
	 * We don't need to sort the entries, if:
	 *
	 *	Sequential writes, or
	 *	Random writes that lay out the file as it goes along
	 *
	 * For both these cases, just reading back data in the order we
	 * wrote it out is the fastest.
	 *
	 * One exception is if we don't have a random map AND we are doing
	 * verifies, in that case we need to check for duplicate blocks and
	 * drop the old one, which we rely on the rb insert/lookup for
	 * handling.
	 */
	if (((!td->o.verifysort) || !td_random(td) || !td->o.overwrite) &&
	    (file_randommap(td, ipo->file) || td->o.verify == VERIFY_NONE)) {
		INIT_FLIST_HEAD(&ipo->list);
		flist_add_tail(&ipo->list, &td->io_hist_list);
		ipo->flags |= IP_F_ONLIST;
		td->io_hist_len++;
		return;
	}

	RB_CLEAR_NODE(&ipo->rb_node);

	/*
	 * Sort the entry into the verification list
	 */
restart:
	p = &td->io_hist_tree.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;

		__ipo = rb_entry(parent, struct io_piece, rb_node);
		/* keyed by (file, offset) */
		if (ipo->file < __ipo->file)
			p = &(*p)->rb_left;
		else if (ipo->file > __ipo->file)
			p = &(*p)->rb_right;
		else if (ipo->offset < __ipo->offset)
			p = &(*p)->rb_left;
		else if (ipo->offset > __ipo->offset)
			p = &(*p)->rb_right;
		else {
			/*
			 * Same file+offset was rewritten: drop the stale
			 * entry and restart the descent from the root,
			 * since rb_erase may have rebalanced the tree.
			 */
			dprint(FD_IO, "iolog: overlap %llu/%lu, %llu/%lu",
				__ipo->offset, __ipo->len,
				ipo->offset, ipo->len);
			td->io_hist_len--;
			rb_erase(parent, &td->io_hist_tree);
			remove_trim_entry(td, __ipo);
			free(__ipo);
			goto restart;
		}
	}

	rb_link_node(&ipo->rb_node, parent, p);
	rb_insert_color(&ipo->rb_node, &td->io_hist_tree);
	ipo->flags |= IP_F_ONRB;
	td->io_hist_len++;
}
313
Jens Axboe890b6652014-05-06 19:06:51 -0600314void unlog_io_piece(struct thread_data *td, struct io_u *io_u)
315{
316 struct io_piece *ipo = io_u->ipo;
317
318 if (!ipo)
319 return;
320
321 if (ipo->flags & IP_F_ONRB)
322 rb_erase(&ipo->rb_node, &td->io_hist_tree);
323 else if (ipo->flags & IP_F_ONLIST)
324 flist_del(&ipo->list);
325
326 free(ipo);
327 io_u->ipo = NULL;
328 td->io_hist_len--;
329}
330
331void trim_io_piece(struct thread_data *td, struct io_u *io_u)
332{
333 struct io_piece *ipo = io_u->ipo;
334
335 if (!ipo)
336 return;
337
338 ipo->len = io_u->xfer_buflen - io_u->resid;
339}
340
Jens Axboeac9b9102011-10-01 15:01:08 -0400341void write_iolog_close(struct thread_data *td)
342{
343 fflush(td->iolog_f);
344 fclose(td->iolog_f);
345 free(td->iolog_buf);
346 td->iolog_f = NULL;
347 td->iolog_buf = NULL;
348}
349
/*
 * Read version 2 iolog data. It is enhanced to include per-file logging,
 * syncs, etc.
 *
 * Each parsed entry is queued as an io_piece for replay. Returns 0 on
 * success (and sets td->o.td_ddir to match the log contents), 1 if the
 * log contained nothing replayable.
 */
static int read_iolog2(struct thread_data *td, FILE *f)
{
	unsigned long long offset;
	unsigned int bytes;
	int reads, writes, waits, fileno = 0, file_action = 0; /* stupid gcc */
	char *fname, *act;
	char *str, *p;
	enum fio_ddir rw;

	free_release_files(td);

	/*
	 * Read in the read iolog and store it, reuse the infrastructure
	 * for doing verifications.
	 */
	str = malloc(4096);
	fname = malloc(256+16);
	act = malloc(256+16);

	reads = writes = waits = 0;
	while ((p = fgets(str, 4096, f)) != NULL) {
		struct io_piece *ipo;
		int r;

		/*
		 * Two line formats: "<file> <action>" for file events, or
		 * "<file> <action> <offset> <bytes>" for I/O entries.
		 */
		r = sscanf(p, "%256s %256s %llu %u", fname, act, &offset,
									&bytes);
		if (r == 4) {
			/*
			 * Check action first
			 */
			if (!strcmp(act, "wait"))
				rw = DDIR_WAIT;
			else if (!strcmp(act, "read"))
				rw = DDIR_READ;
			else if (!strcmp(act, "write"))
				rw = DDIR_WRITE;
			else if (!strcmp(act, "sync"))
				rw = DDIR_SYNC;
			else if (!strcmp(act, "datasync"))
				rw = DDIR_DATASYNC;
			else if (!strcmp(act, "trim"))
				rw = DDIR_TRIM;
			else {
				log_err("fio: bad iolog file action: %s\n",
									act);
				continue;
			}
			fileno = get_fileno(td, fname);
		} else if (r == 2) {
			rw = DDIR_INVAL;
			if (!strcmp(act, "add")) {
				fileno = add_file(td, fname, 0, 1);
				file_action = FIO_LOG_ADD_FILE;
				continue;
			} else if (!strcmp(act, "open")) {
				fileno = get_fileno(td, fname);
				file_action = FIO_LOG_OPEN_FILE;
			} else if (!strcmp(act, "close")) {
				fileno = get_fileno(td, fname);
				file_action = FIO_LOG_CLOSE_FILE;
			} else {
				log_err("fio: bad iolog file action: %s\n",
									act);
				continue;
			}
		} else {
			log_err("bad iolog2: %s", p);
			continue;
		}

		if (rw == DDIR_READ)
			reads++;
		else if (rw == DDIR_WRITE) {
			/*
			 * Don't add a write for ro mode
			 */
			if (read_only)
				continue;
			writes++;
		} else if (rw == DDIR_WAIT) {
			waits++;
		} else if (rw == DDIR_INVAL) {
		} else if (!ddir_sync(rw)) {
			log_err("bad ddir: %d\n", rw);
			continue;
		}

		/*
		 * Make note of file
		 */
		ipo = malloc(sizeof(*ipo));
		init_ipo(ipo);
		ipo->ddir = rw;
		if (rw == DDIR_WAIT) {
			/* for wait entries, the offset field carries the delay */
			ipo->delay = offset;
		} else {
			ipo->offset = offset;
			ipo->len = bytes;
			/* track the largest block size seen for buffer sizing */
			if (rw != DDIR_INVAL && bytes > td->o.max_bs[rw])
				td->o.max_bs[rw] = bytes;
			ipo->fileno = fileno;
			ipo->file_action = file_action;
			td->o.size += bytes;
		}

		queue_io_piece(td, ipo);
	}

	free(str);
	free(act);
	free(fname);

	if (writes && read_only) {
		log_err("fio: <%s> skips replay of %d writes due to"
			" read-only\n", td->o.name, writes);
		writes = 0;
	}

	if (!reads && !writes && !waits)
		return 1;
	else if (reads && !writes)
		td->o.td_ddir = TD_DDIR_READ;
	else if (!reads && writes)
		td->o.td_ddir = TD_DDIR_WRITE;
	else
		td->o.td_ddir = TD_DDIR_RW;

	return 0;
}
483
/*
 * open iolog, check version, and call appropriate parser
 */
static int init_iolog_read(struct thread_data *td)
{
	char buffer[256], *p;
	FILE *f;
	int ret;

	f = fopen(td->o.read_iolog_file, "r");
	if (!f) {
		perror("fopen read iolog");
		return 1;
	}

	p = fgets(buffer, sizeof(buffer), f);
	if (!p) {
		/*
		 * NOTE(review): fgets() hitting EOF does not set errno, so
		 * the errno reported here may be stale — confirm intent.
		 */
		td_verror(td, errno, "iolog read");
		log_err("fio: unable to read iolog\n");
		fclose(f);
		return 1;
	}

	/*
	 * version 2 of the iolog stores a specific string as the
	 * first line, check for that
	 */
	if (!strncmp(iolog_ver2, buffer, strlen(iolog_ver2)))
		ret = read_iolog2(td, f);
	else {
		log_err("fio: iolog version 1 is no longer supported\n");
		ret = 1;
	}

	fclose(f);
	return ret;
}
521
522/*
523 * Set up a log for storing io patterns.
524 */
525static int init_iolog_write(struct thread_data *td)
526{
527 struct fio_file *ff;
528 FILE *f;
529 unsigned int i;
530
531 f = fopen(td->o.write_iolog_file, "a");
532 if (!f) {
533 perror("fopen write iolog");
534 return 1;
535 }
536
537 /*
538 * That's it for writing, setup a log buffer and we're done.
539 */
540 td->iolog_f = f;
541 td->iolog_buf = malloc(8192);
542 setvbuf(f, td->iolog_buf, _IOFBF, 8192);
543
544 /*
545 * write our version line
546 */
547 if (fprintf(f, "%s\n", iolog_ver2) < 0) {
548 perror("iolog init\n");
549 return 1;
550 }
551
552 /*
553 * add all known files
554 */
555 for_each_file(td, ff, i)
556 log_file(td, ff, FIO_LOG_ADD_FILE);
557
558 return 0;
559}
560
/*
 * Initialize iolog support for this thread: load a replay log (blktrace
 * or fio v2 format) or set up a write iolog, depending on the options.
 * Returns 0 on success; non-zero records EINVAL on the thread.
 */
int init_iolog(struct thread_data *td)
{
	int ret = 0;

	if (td->o.read_iolog_file) {
		int need_swap;

		/*
		 * Check if it's a blktrace file and load that if possible.
		 * Otherwise assume it's a normal log file and load that.
		 */
		if (is_blktrace(td->o.read_iolog_file, &need_swap))
			ret = load_blktrace(td, td->o.read_iolog_file, need_swap);
		else
			ret = init_iolog_read(td);
	} else if (td->o.write_iolog_file)
		ret = init_iolog_write(td);

	if (ret)
		td_verror(td, EINVAL, "failed initializing iolog");

	return ret;
}
584
/*
 * Allocate and initialize an io_log from the given parameters. The
 * result is stored through 'log'. Compression is silently disabled when
 * no thread_data is attached (no thread pool to run the deflate work).
 */
void setup_log(struct io_log **log, struct log_params *p,
	       const char *filename)
{
	struct io_log *l = malloc(sizeof(*l));

	memset(l, 0, sizeof(*l));
	l->nr_samples = 0;
	l->max_samples = 1024;
	l->log_type = p->log_type;
	l->log_offset = p->log_offset;
	l->log_gz = p->log_gz;
	l->log_gz_store = p->log_gz_store;
	l->log = malloc(l->max_samples * log_entry_sz(l));
	l->avg_msec = p->avg_msec;
	l->filename = strdup(filename);
	l->td = p->td;

	/*
	 * Top bit of the sample ddir flags samples that carry an offset
	 * field; flush_samples() keys off the same 0x80000000 bit.
	 */
	if (l->log_offset)
		l->log_ddir_mask = 0x80000000;

	INIT_FLIST_HEAD(&l->chunk_list);

	if (l->log_gz && !p->td)
		l->log_gz = 0;
	else if (l->log_gz) {
		pthread_mutex_init(&l->chunk_lock, NULL);
		p->td->flags |= TD_F_COMPRESS_LOG;
	}

	*log = l;
}
616
#ifdef CONFIG_SETVBUF
/*
 * Give 'f' a large (1MB) full-buffering region to cut down on write
 * syscalls while flushing logs. Caller frees via clear_file_buffer()
 * after closing the stream.
 */
static void *set_file_buffer(FILE *f)
{
	size_t size = 1048576;
	void *buf;

	buf = malloc(size);
	setvbuf(f, buf, _IOFBF, size);
	return buf;
}

static void clear_file_buffer(void *buf)
{
	free(buf);
}
#else
/* setvbuf not available: no-op stand-ins */
static void *set_file_buffer(FILE *f)
{
	return NULL;
}

static void clear_file_buffer(void *buf)
{
}
#endif
642
Jens Axboe633d8252014-07-02 13:36:34 -0600643void free_log(struct io_log *log)
Jens Axboe987c4f42014-07-01 16:29:12 -0600644{
645 free(log->log);
646 free(log->filename);
647 free(log);
648}
649
/*
 * Write 'sample_size' bytes of raw samples to 'f' as comma-separated
 * text. Whether samples carry an offset field is detected from the
 * top bit of the first sample's ddir (set by setup_log()).
 */
static void flush_samples(FILE *f, void *samples, uint64_t sample_size)
{
	struct io_sample *s;
	int log_offset;
	uint64_t i, nr_samples;

	if (!sample_size)
		return;

	s = __get_sample(samples, 0, 0);
	if (s->__ddir & 0x80000000)
		log_offset = 1;
	else
		log_offset = 0;

	/* entry size differs depending on whether offsets are logged */
	nr_samples = sample_size / __log_entry_sz(log_offset);

	for (i = 0; i < nr_samples; i++) {
		s = __get_sample(samples, log_offset, i);

		if (!log_offset) {
			fprintf(f, "%lu, %lu, %u, %u\n",
					(unsigned long) s->time,
					(unsigned long) s->val,
					io_sample_ddir(s), s->bs);
		} else {
			struct io_sample_offset *so = (void *) s;

			fprintf(f, "%lu, %lu, %u, %u, %llu\n",
					(unsigned long) s->time,
					(unsigned long) s->val,
					io_sample_ddir(s), s->bs,
					(unsigned long long) so->offset);
		}
	}
}
686
#ifdef CONFIG_ZLIB
/*
 * Prepare 'stream' for inflation. windowBits of 15 is the zlib max;
 * adding 32 tells zlib to auto-detect a gzip or zlib header (see the
 * zlib manual for inflateInit2). Returns 0 on success, 1 on failure.
 */
static int z_stream_init(z_stream *stream, int gz_hdr)
{
	int wbits = 15;

	stream->zalloc = Z_NULL;
	stream->zfree = Z_NULL;
	stream->opaque = Z_NULL;
	stream->next_in = Z_NULL;

	if (gz_hdr)
		wbits += 32;

	if (inflateInit2(stream, wbits) != Z_OK)
		return 1;

	return 0;
}
705
/*
 * State carried across flush_chunk() calls while a series of compressed
 * chunks is inflated back into one contiguous sample buffer.
 */
struct flush_chunk_iter {
	unsigned int seq;	/* seq of the stream currently being inflated */
	void *buf;		/* inflated output, grown in chunk_sz steps */
	size_t buf_size;	/* current allocation size of buf */
	size_t buf_used;	/* bytes of buf filled so far */
	size_t chunk_sz;	/* growth increment for buf */
};
713
/*
 * End the current inflate stream, flush the accumulated samples as text
 * to 'f', and reset the iterator for the next stream.
 */
static void finish_chunk(z_stream *stream, FILE *f,
			 struct flush_chunk_iter *iter)
{
	int ret;

	ret = inflateEnd(stream);
	if (ret != Z_OK)
		log_err("fio: failed to end log inflation (%d)\n", ret);

	flush_samples(f, iter->buf, iter->buf_used);
	free(iter->buf);
	iter->buf = NULL;
	iter->buf_size = iter->buf_used = 0;
}
728
/*
 * Inflate one compressed chunk into iter->buf, growing the buffer as
 * needed. A change in chunk seq marks a new compression stream: the
 * previous stream is finished (flushed to 'f') and inflate state is
 * re-initialized. Consumes (frees) 'ic'. Always returns 0.
 */
static int flush_chunk(struct iolog_compress *ic, int gz_hdr, FILE *f,
		       z_stream *stream, struct flush_chunk_iter *iter)
{
	if (ic->seq != iter->seq) {
		if (iter->seq)
			finish_chunk(stream, f, iter);

		z_stream_init(stream, gz_hdr);
		iter->seq = ic->seq;
	}

	stream->avail_in = ic->len;
	stream->next_in = ic->buf;

	if (!iter->buf_size) {
		iter->buf_size = iter->chunk_sz;
		iter->buf = malloc(iter->buf_size);
	}

	while (stream->avail_in) {
		size_t this_out = iter->buf_size - iter->buf_used;
		int err;

		stream->avail_out = this_out;
		stream->next_out = iter->buf + iter->buf_used;

		err = inflate(stream, Z_NO_FLUSH);
		if (err < 0) {
			log_err("fio: failed inflating log: %d\n", err);
			break;
		}

		iter->buf_used += this_out - stream->avail_out;

		/* output buffer filled: grow it and keep inflating */
		if (!stream->avail_out) {
			iter->buf_size += iter->chunk_sz;
			iter->buf = realloc(iter->buf, iter->buf_size);
			continue;
		}

		if (err == Z_STREAM_END)
			break;
	}

	free_chunk(ic);
	return 0;
}
776
/*
 * Drain the log's compressed chunk list to 'f'. In store-compressed
 * mode the raw deflated bytes are written out; otherwise chunks are
 * inflated back into samples and written as text.
 */
static void flush_gz_chunks(struct io_log *log, FILE *f)
{
	struct flush_chunk_iter iter = { .chunk_sz = log->log_gz, };
	struct flist_head *node;
	z_stream stream;

	while (!flist_empty(&log->chunk_list)) {
		struct iolog_compress *ic;

		node = log->chunk_list.next;
		ic = flist_entry(node, struct iolog_compress, list);
		flist_del(&ic->list);

		if (log->log_gz_store) {
			fwrite(ic->buf, ic->len, 1, f);
			free_chunk(ic);
		} else
			flush_chunk(ic, log->log_gz_store, f, &stream, &iter);
	}

	/* finish off the last partially-flushed inflate stream, if any */
	if (iter.seq) {
		finish_chunk(&stream, f, &iter);
		free(iter.buf);
	}
}
802
Jens Axboebac4af12014-07-03 13:42:28 -0600803int iolog_file_inflate(const char *file)
804{
805 struct flush_chunk_iter iter = { .chunk_sz = 64 * 1024 * 1024, };
806 struct iolog_compress ic;
807 z_stream stream;
808 struct stat sb;
809 size_t ret;
810 FILE *f;
811
812 if (stat(file, &sb) < 0) {
813 perror("stat");
814 return 1;
815 }
816
817 f = fopen(file, "r");
818 if (!f) {
819 perror("fopen");
820 return 1;
821 }
822
823 ic.buf = malloc(sb.st_size);
824 ic.len = sb.st_size;
825 ic.nofree = 1;
826 ic.seq = 1;
827
828 ret = fread(ic.buf, ic.len, 1, f);
829 if (ret < 0) {
830 perror("fread");
831 fclose(f);
832 return 1;
833 } else if (ret != 1) {
834 log_err("fio: short read on reading log\n");
835 fclose(f);
836 return 1;
837 }
838
839 fclose(f);
840
841 flush_chunk(&ic, 1, stdout, &stream, &iter);
842
843 if (iter.seq) {
844 finish_chunk(&stream, stdout, &iter);
845 free(iter.buf);
846 }
847
848 free(ic.buf);
849 return 0;
850}
851
#else

/* zlib support not compiled in: there are never chunks to flush */
static void flush_gz_chunks(struct io_log *log, FILE *f)
{
}

#endif
859
860void flush_log(struct io_log *log)
861{
862 void *buf;
863 FILE *f;
864
865 f = fopen(log->filename, "w");
866 if (!f) {
867 perror("fopen log");
868 return;
869 }
870
871 buf = set_file_buffer(f);
872
873 flush_gz_chunks(log, f);
874
Jens Axboebac4af12014-07-03 13:42:28 -0600875 flush_samples(f, log->log, log->nr_samples * log_entry_sz(log));
Jens Axboeac9b9102011-10-01 15:01:08 -0400876
877 fclose(f);
Jens Axboe2e802282014-04-02 21:47:27 -0600878 clear_file_buffer(buf);
Jens Axboeac9b9102011-10-01 15:01:08 -0400879}
880
/*
 * Final writeout of one log: compress any pending samples, take the
 * per-file lock, then either ship the log to the GUI client or flush
 * it to disk. The log is freed on success. With 'trylock' set, returns
 * 1 without doing anything if the file lock is contended, so the
 * caller can retry later.
 */
static int finish_log(struct thread_data *td, struct io_log *log, int trylock)
{
	if (td->tp_data)
		iolog_flush(log, 1);

	if (trylock) {
		if (fio_trylock_file(log->filename))
			return 1;
	} else
		fio_lock_file(log->filename);

	if (td->client_type == FIO_CLIENT_TYPE_GUI)
		fio_send_iolog(td, log, log->filename);
	else
		flush_log(log);

	fio_unlock_file(log->filename);
	free_log(log);
	return 0;
}
901
#ifdef CONFIG_ZLIB

/*
 * Work item handed to the thread pool: compress a private copy of
 * 'nr_samples' samples and splice the result onto log's chunk list.
 */
struct iolog_flush_data {
	struct tp_work work;	/* embedded thread-pool work descriptor */
	struct io_log *log;
	void *samples;		/* private copy of the sample memory */
	uint64_t nr_samples;
};
910
/*
 * Thread pool worker: deflate the copied samples into GZ_CHUNK-sized
 * chunks, all tagged with a fresh sequence number, and splice them onto
 * the log's chunk list under the chunk lock. Signals the waiter (or
 * frees the work item) when done. Always returns 0.
 */
static int gz_work(struct tp_work *work)
{
	struct iolog_flush_data *data;
	struct iolog_compress *c;
	struct flist_head list;
	unsigned int seq;
	z_stream stream;
	size_t total = 0;
	int ret;

	INIT_FLIST_HEAD(&list);

	data = container_of(work, struct iolog_flush_data, work);

	stream.zalloc = Z_NULL;
	stream.zfree = Z_NULL;
	stream.opaque = Z_NULL;

	ret = deflateInit(&stream, Z_DEFAULT_COMPRESSION);
	if (ret != Z_OK) {
		log_err("fio: failed to init gz stream\n");
		return 0;
	}

	/* one seq number per compression pass; ties the chunks together */
	seq = ++data->log->chunk_seq;

	stream.next_in = (void *) data->samples;
	stream.avail_in = data->nr_samples * log_entry_sz(data->log);

	do {
		c = get_new_chunk(seq);
		stream.avail_out = GZ_CHUNK;
		stream.next_out = c->buf;
		ret = deflate(&stream, Z_NO_FLUSH);
		if (ret < 0) {
			log_err("fio: deflate log (%d)\n", ret);
			break;
		}

		c->len = GZ_CHUNK - stream.avail_out;
		flist_add_tail(&c->list, &list);
		total += c->len;
	} while (stream.avail_in);

	/* finish the stream, reusing the tail of the last chunk first */
	stream.next_out = c->buf + c->len;
	stream.avail_out = GZ_CHUNK - c->len;

	ret = deflate(&stream, Z_FINISH);
	if (ret == Z_STREAM_END)
		c->len = GZ_CHUNK - stream.avail_out;
	else {
		/* remaining output didn't fit: keep adding chunks */
		do {
			c = get_new_chunk(seq);
			stream.avail_out = GZ_CHUNK;
			stream.next_out = c->buf;
			ret = deflate(&stream, Z_FINISH);
			c->len = GZ_CHUNK - stream.avail_out;
			flist_add_tail(&c->list, &list);
		} while (ret != Z_STREAM_END);
	}

	ret = deflateEnd(&stream);
	if (ret != Z_OK)
		log_err("fio: deflateEnd %d\n", ret);

	free(data->samples);

	if (!flist_empty(&list)) {
		pthread_mutex_lock(&data->log->chunk_lock);
		flist_splice_tail(&list, &data->log->chunk_list);
		pthread_mutex_unlock(&data->log->chunk_lock);
	}

	/* synchronous callers wait on the cv; async ones own nothing more */
	if (work->wait) {
		work->done = 1;
		pthread_cond_signal(&work->cv);
	} else
		free(data);

	return 0;
}
992
/*
 * Queue the log's current samples for compression on the thread pool.
 * The samples are copied, so logging can continue immediately; the
 * log's sample count is reset. With 'wait' set, blocks until the
 * compression work has completed. Returns 0 on success, 1 on OOM.
 */
int iolog_flush(struct io_log *log, int wait)
{
	struct tp_data *tdat = log->td->tp_data;
	struct iolog_flush_data *data;
	size_t sample_size;

	data = malloc(sizeof(*data));
	if (!data)
		return 1;

	data->log = log;

	sample_size = log->nr_samples * log_entry_sz(log);
	data->samples = malloc(sample_size);
	if (!data->samples) {
		free(data);
		return 1;
	}

	memcpy(data->samples, log->log, sample_size);
	data->nr_samples = log->nr_samples;
	data->work.fn = gz_work;
	log->nr_samples = 0;

	if (wait) {
		pthread_mutex_init(&data->work.lock, NULL);
		pthread_cond_init(&data->work.cv, NULL);
		data->work.wait = 1;
	} else
		data->work.wait = 0;

	tp_queue_work(tdat, &data->work);

	if (wait) {
		pthread_mutex_lock(&data->work.lock);
		while (!data->work.done)
			pthread_cond_wait(&data->work.cv, &data->work.lock);
		pthread_mutex_unlock(&data->work.lock);
		free(data);
	}

	return 0;
}
1036
#else

/* zlib support not compiled in: report failure to the caller */
int iolog_flush(struct io_log *log, int wait)
{
	return 1;
}

#endif
1045
Jens Axboe905e3d42014-04-02 20:01:27 -06001046static int write_iops_log(struct thread_data *td, int try)
1047{
Jens Axboe987c4f42014-07-01 16:29:12 -06001048 struct io_log *log = td->iops_log;
Jens Axboe905e3d42014-04-02 20:01:27 -06001049
Jens Axboe987c4f42014-07-01 16:29:12 -06001050 if (!log)
1051 return 0;
1052
1053 return finish_log(td, log, try);
Jens Axboe905e3d42014-04-02 20:01:27 -06001054}
1055
1056static int write_slat_log(struct thread_data *td, int try)
1057{
Jens Axboe987c4f42014-07-01 16:29:12 -06001058 struct io_log *log = td->slat_log;
Jens Axboe905e3d42014-04-02 20:01:27 -06001059
Jens Axboe987c4f42014-07-01 16:29:12 -06001060 if (!log)
1061 return 0;
1062
1063 return finish_log(td, log, try);
Jens Axboe905e3d42014-04-02 20:01:27 -06001064}
1065
1066static int write_clat_log(struct thread_data *td, int try)
1067{
Jens Axboe987c4f42014-07-01 16:29:12 -06001068 struct io_log *log = td->clat_log;
Jens Axboe905e3d42014-04-02 20:01:27 -06001069
Jens Axboe987c4f42014-07-01 16:29:12 -06001070 if (!log)
1071 return 0;
1072
1073 return finish_log(td, log, try);
Jens Axboe905e3d42014-04-02 20:01:27 -06001074}
1075
1076static int write_lat_log(struct thread_data *td, int try)
1077{
Jens Axboe987c4f42014-07-01 16:29:12 -06001078 struct io_log *log = td->lat_log;
Jens Axboe905e3d42014-04-02 20:01:27 -06001079
Jens Axboe987c4f42014-07-01 16:29:12 -06001080 if (!log)
1081 return 0;
1082
1083 return finish_log(td, log, try);
Jens Axboe905e3d42014-04-02 20:01:27 -06001084}
1085
1086static int write_bandw_log(struct thread_data *td, int try)
1087{
Jens Axboe987c4f42014-07-01 16:29:12 -06001088 struct io_log *log = td->bw_log;
Jens Axboe905e3d42014-04-02 20:01:27 -06001089
Jens Axboe987c4f42014-07-01 16:29:12 -06001090 if (!log)
1091 return 0;
1092
1093 return finish_log(td, log, try);
Jens Axboe905e3d42014-04-02 20:01:27 -06001094}
1095
/*
 * Per-log-type completion bits, used by fio_writeout_logs() to track
 * which logs have already been written.
 */
enum {
	BW_LOG_MASK = 1,
	LAT_LOG_MASK = 2,
	SLAT_LOG_MASK = 4,
	CLAT_LOG_MASK = 8,
	IOPS_LOG_MASK = 16,

	ALL_LOG_NR = 5,	/* number of log types above */
};

/* Binds a completion mask bit to the writeout function for that log */
struct log_type {
	unsigned int mask;	/* one of the *_LOG_MASK bits */
	int (*fn)(struct thread_data *, int);	/* second arg: 'try' lock */
};
1110
/* All writable log types, iterated by fio_writeout_logs() */
static struct log_type log_types[] = {
	{
		.mask	= BW_LOG_MASK,
		.fn	= write_bandw_log,
	},
	{
		.mask	= LAT_LOG_MASK,
		.fn	= write_lat_log,
	},
	{
		.mask	= SLAT_LOG_MASK,
		.fn	= write_slat_log,
	},
	{
		.mask	= CLAT_LOG_MASK,
		.fn	= write_clat_log,
	},
	{
		.mask	= IOPS_LOG_MASK,
		.fn	= write_iops_log,
	},
};
1133
/*
 * Write out every configured log for this thread. Each writer is given
 * try-lock semantics while more than one log remains (so a contended
 * file lock doesn't stall the others); the loop keeps retrying, with a
 * short sleep when no progress was made, until all logs are written.
 */
void fio_writeout_logs(struct thread_data *td)
{
	unsigned int log_mask = 0;
	unsigned int log_left = ALL_LOG_NR;
	int old_state, i;

	old_state = td_bump_runstate(td, TD_FINISHING);

	finalize_logs(td);

	while (log_left) {
		int prev_log_left = log_left;

		for (i = 0; i < ALL_LOG_NR && log_left; i++) {
			struct log_type *lt = &log_types[i];
			int ret;

			if (!(log_mask & lt->mask)) {
				/* block (try=0) only for the last log left */
				ret = lt->fn(td, log_left != 1);
				if (!ret) {
					log_left--;
					log_mask |= lt->mask;
				}
			}
		}

		if (prev_log_left == log_left)
			usleep(5000);
	}

	td_restore_runstate(td, old_state);
}