Move thread options into a separate structure
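
Job-level settings that previously lived directly in struct thread_data
are now grouped in a new struct thread_options, embedded in the thread
data as "o". Accesses change from td->iodepth to td->o.iodepth,
td->total_file_size becomes td->o.size, and td_var_offset() in options.c
now computes offsets into struct thread_options rather than
struct thread_data.

A minimal sketch of the new layout (field subset shown for illustration
only, see fio.h for the full definitions):

	struct thread_options {
		char *name;
		char *filename;
		enum td_ddir td_ddir;
		unsigned int iodepth;
		unsigned long long size;
		unsigned int nr_files;
		unsigned int odirect;
		/* ... remaining per-job options ... */
	};

	struct thread_data {
		struct thread_options o;	/* per-job options */
		char verror[128];
		pthread_t thread;
		/* ... runtime state stays here ... */
	};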

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
diff --git a/engines/cpu.c b/engines/cpu.c
index d10669d..c908cab 100644
--- a/engines/cpu.c
+++ b/engines/cpu.c
@@ -10,7 +10,7 @@
 
 static int fio_cpuio_queue(struct thread_data *td, struct io_u fio_unused *io_u)
 {
-	__usec_sleep(td->cpucycle);
+	__usec_sleep(td->o.cpucycle);
 	return FIO_Q_COMPLETED;
 }
 
@@ -19,8 +19,8 @@
 	struct fio_file *f;
 	unsigned int i;
 
-	td->total_file_size = -1;
-	td->io_size = td->total_file_size;
+	td->o.size = -1;
+	td->io_size = td->o.size;
 	td->total_io_size = td->io_size;
 
 	for_each_file(td, f, i) {
@@ -33,22 +33,24 @@
 
 static int fio_cpuio_init(struct thread_data *td)
 {
-	if (!td->cpuload) {
+	struct thread_options *o = &td->o;
+
+	if (!o->cpuload) {
 		td_vmsg(td, EINVAL, "cpu thread needs rate (cpuload=)","cpuio");
 		return 1;
 	}
 
-	if (td->cpuload > 100)
-		td->cpuload = 100;
+	if (o->cpuload > 100)
+		o->cpuload = 100;
 
 	/*
 	 * set thinktime_sleep and thinktime_spin appropriately
 	 */
-	td->thinktime_blocks = 1;
-	td->thinktime_spin = 0;
-	td->thinktime = (td->cpucycle * (100 - td->cpuload)) / td->cpuload;
+	o->thinktime_blocks = 1;
+	o->thinktime_spin = 0;
+	o->thinktime = (o->cpucycle * (100 - o->cpuload)) / o->cpuload;
 
-	td->nr_files = td->open_files = 1;
+	o->nr_files = o->open_files = 1;
 	return 0;
 }
 
diff --git a/engines/libaio.c b/engines/libaio.c
index 78f43ef..895b914 100644
--- a/engines/libaio.c
+++ b/engines/libaio.c
@@ -87,7 +87,7 @@
 {
 	struct libaio_data *ld = td->io_ops->data;
 
-	if (ld->iocbs_nr == (int) td->iodepth)
+	if (ld->iocbs_nr == (int) td->o.iodepth)
 		return FIO_Q_BUSY;
 
 	/*
@@ -192,18 +192,18 @@
 	struct libaio_data *ld = malloc(sizeof(*ld));
 
 	memset(ld, 0, sizeof(*ld));
-	if (io_queue_init(td->iodepth, &ld->aio_ctx)) {
+	if (io_queue_init(td->o.iodepth, &ld->aio_ctx)) {
 		td_verror(td, errno, "io_queue_init");
 		free(ld);
 		return 1;
 	}
 
-	ld->aio_events = malloc(td->iodepth * sizeof(struct io_event));
-	memset(ld->aio_events, 0, td->iodepth * sizeof(struct io_event));
-	ld->iocbs = malloc(td->iodepth * sizeof(struct iocb *));
+	ld->aio_events = malloc(td->o.iodepth * sizeof(struct io_event));
+	memset(ld->aio_events, 0, td->o.iodepth * sizeof(struct io_event));
+	ld->iocbs = malloc(td->o.iodepth * sizeof(struct iocb *));
 	memset(ld->iocbs, 0, sizeof(struct iocb *));
-	ld->io_us = malloc(td->iodepth * sizeof(struct io_u *));
-	memset(ld->io_us, 0, td->iodepth * sizeof(struct io_u *));
+	ld->io_us = malloc(td->o.iodepth * sizeof(struct io_u *));
+	memset(ld->io_us, 0, td->o.iodepth * sizeof(struct io_u *));
 	ld->iocbs_nr = 0;
 
 	td->io_ops->data = ld;
diff --git a/engines/mmap.c b/engines/mmap.c
index 17cf5fc..556e56d 100644
--- a/engines/mmap.c
+++ b/engines/mmap.c
@@ -36,7 +36,7 @@
 	/*
 	 * not really direct, but should drop the pages from the cache
 	 */
-	if (td->odirect && io_u->ddir != DDIR_SYNC) {
+	if (td->o.odirect && io_u->ddir != DDIR_SYNC) {
 		size_t len = (io_u->xfer_buflen + page_size - 1) & ~page_mask;
 		unsigned long long off = real_off & ~page_mask;
 
@@ -66,7 +66,7 @@
 	else if (td_write(td)) {
 		flags = PROT_WRITE;
 
-		if (td->verify != VERIFY_NONE)
+		if (td->o.verify != VERIFY_NONE)
 			flags |= PROT_READ;
 	} else
 		flags = PROT_READ;
diff --git a/engines/net.c b/engines/net.c
index f7e4f2d..2bbbb0e 100644
--- a/engines/net.c
+++ b/engines/net.c
@@ -230,7 +230,7 @@
 	char *sep;
 	int ret;
 
-	if (!td->total_file_size) {
+	if (!td->o.size) {
 		log_err("fio: need size= set\n");
 		return 1;
 	}
@@ -240,11 +240,11 @@
 		return 1;
 	}
 
-	strcpy(buf, td->filename);
+	strcpy(buf, td->o.filename);
 
 	sep = strchr(buf, '/');
 	if (!sep) {
-		log_err("fio: bad network host/port <<%s>>\n", td->filename);
+		log_err("fio: bad network host/port <<%s>>\n", td->o.filename);
 		return 1;
 	}
 
@@ -264,11 +264,11 @@
 	if (ret)
 		return ret;
 
-	td->io_size = td->total_file_size;
+	td->io_size = td->o.size;
 	td->total_io_size = td->io_size;
 
 	for_each_file(td, f, i) {
-		f->file_size = td->total_file_size / td->nr_files;
+		f->file_size = td->o.size / td->o.nr_files;
 		f->real_file_size = f->file_size;
 	}
 
diff --git a/engines/null.c b/engines/null.c
index 6397ec0..acb8cf4 100644
--- a/engines/null.c
+++ b/engines/null.c
@@ -66,16 +66,16 @@
 	struct fio_file *f;
 	unsigned int i;
 
-	if (!td->total_file_size) {
+	if (!td->o.size) {
 		log_err("fio: need size= set\n");
 		return 1;
 	}
 
-	td->io_size = td->total_file_size;
+	td->io_size = td->o.size;
 	td->total_io_size = td->io_size;
 
 	for_each_file(td, f, i) {
-		f->real_file_size = td->total_io_size / td->nr_files;
+		f->real_file_size = td->total_io_size / td->o.nr_files;
 		f->file_size = f->real_file_size;
 	}
 
@@ -107,9 +107,9 @@
 
 	memset(nd, 0, sizeof(*nd));
 
-	if (td->iodepth != 1) {
-		nd->io_us = malloc(td->iodepth * sizeof(struct io_u *));
-		memset(nd->io_us, 0, td->iodepth * sizeof(struct io_u *));
+	if (td->o.iodepth != 1) {
+		nd->io_us = malloc(td->o.iodepth * sizeof(struct io_u *));
+		memset(nd->io_us, 0, td->o.iodepth * sizeof(struct io_u *));
 	} else
 		td->io_ops->flags |= FIO_SYNCIO;
 
diff --git a/engines/posixaio.c b/engines/posixaio.c
index 88dc0e9..4aa7420 100644
--- a/engines/posixaio.c
+++ b/engines/posixaio.c
@@ -181,8 +181,8 @@
 	struct posixaio_data *pd = malloc(sizeof(*pd));
 
 	memset(pd, 0, sizeof(*pd));
-	pd->aio_events = malloc(td->iodepth * sizeof(struct io_u *));
-	memset(pd->aio_events, 0, td->iodepth * sizeof(struct io_u *));
+	pd->aio_events = malloc(td->o.iodepth * sizeof(struct io_u *));
+	memset(pd->aio_events, 0, td->o.iodepth * sizeof(struct io_u *));
 
 	td->io_ops->data = pd;
 	return 0;
diff --git a/engines/sg.c b/engines/sg.c
index 84c2afe..99c5856 100644
--- a/engines/sg.c
+++ b/engines/sg.c
@@ -97,7 +97,7 @@
 			if (!min)
 				break;
 
-			ret = poll(sd->pfds, td->nr_files, -1);
+			ret = poll(sd->pfds, td->o.nr_files, -1);
 			if (ret < 0) {
 				if (!r)
 					r = -errno;
@@ -106,7 +106,7 @@
 			} else if (!ret)
 				continue;
 
-			if (pollin_events(sd->pfds, td->nr_files))
+			if (pollin_events(sd->pfds, td->o.nr_files))
 				break;
 		} while (1);
 
@@ -325,23 +325,23 @@
 
 	sd = malloc(sizeof(*sd));
 	memset(sd, 0, sizeof(*sd));
-	sd->cmds = malloc(td->iodepth * sizeof(struct sgio_cmd));
-	memset(sd->cmds, 0, td->iodepth * sizeof(struct sgio_cmd));
-	sd->events = malloc(td->iodepth * sizeof(struct io_u *));
-	memset(sd->events, 0, td->iodepth * sizeof(struct io_u *));
-	sd->pfds = malloc(sizeof(struct pollfd) * td->nr_files);
-	memset(sd->pfds, 0, sizeof(struct pollfd) * td->nr_files);
-	sd->fd_flags = malloc(sizeof(int) * td->nr_files);
-	memset(sd->fd_flags, 0, sizeof(int) * td->nr_files);
-	sd->sgbuf = malloc(sizeof(struct sg_io_hdr) * td->iodepth);
-	memset(sd->sgbuf, 0, sizeof(struct sg_io_hdr) * td->iodepth);
+	sd->cmds = malloc(td->o.iodepth * sizeof(struct sgio_cmd));
+	memset(sd->cmds, 0, td->o.iodepth * sizeof(struct sgio_cmd));
+	sd->events = malloc(td->o.iodepth * sizeof(struct io_u *));
+	memset(sd->events, 0, td->o.iodepth * sizeof(struct io_u *));
+	sd->pfds = malloc(sizeof(struct pollfd) * td->o.nr_files);
+	memset(sd->pfds, 0, sizeof(struct pollfd) * td->o.nr_files);
+	sd->fd_flags = malloc(sizeof(int) * td->o.nr_files);
+	memset(sd->fd_flags, 0, sizeof(int) * td->o.nr_files);
+	sd->sgbuf = malloc(sizeof(struct sg_io_hdr) * td->o.iodepth);
+	memset(sd->sgbuf, 0, sizeof(struct sg_io_hdr) * td->o.iodepth);
 
 	td->io_ops->data = sd;
 
 	/*
 	 * we want to do it, regardless of whether odirect is set or not
 	 */
-	td->override_sync = 1;
+	td->o.override_sync = 1;
 	return 0;
 }
 
diff --git a/engines/syslet-rw.c b/engines/syslet-rw.c
index 0b2497a..a293e7f 100644
--- a/engines/syslet-rw.c
+++ b/engines/syslet-rw.c
@@ -63,7 +63,7 @@
 		else if (ret < 0)
 			io_u->error = ret;
 
-		assert(sd->nr_events < td->iodepth);
+		assert(sd->nr_events < td->o.iodepth);
 		sd->events[sd->nr_events++] = io_u;
 
 		if (atom == last)
@@ -90,7 +90,7 @@
 			break;
 
 		sd->ring[sd->ahu.user_ring_idx] = NULL;
-		if (++sd->ahu.user_ring_idx == td->iodepth)
+		if (++sd->ahu.user_ring_idx == td->o.iodepth)
 			sd->ahu.user_ring_idx = 0;
 
 		fio_syslet_complete_atom(td, atom);
@@ -303,13 +303,13 @@
 
 	sd = malloc(sizeof(*sd));
 	memset(sd, 0, sizeof(*sd));
-	sd->events = malloc(sizeof(struct io_u *) * td->iodepth);
-	memset(sd->events, 0, sizeof(struct io_u *) * td->iodepth);
+	sd->events = malloc(sizeof(struct io_u *) * td->o.iodepth);
+	memset(sd->events, 0, sizeof(struct io_u *) * td->o.iodepth);
 
 	/*
 	 * This will handily fail for kernels where syslet isn't available
 	 */
-	if (async_head_init(sd, td->iodepth)) {
+	if (async_head_init(sd, td->o.iodepth)) {
 		free(sd->events);
 		free(sd);
 		return 1;
diff --git a/eta.c b/eta.c
index 3c23162..09d7daa 100644
--- a/eta.c
+++ b/eta.c
@@ -108,15 +108,15 @@
 	 * if writing, bytes_total will be twice the size. If mixing,
 	 * assume a 50/50 split and thus bytes_total will be 50% larger.
 	 */
-	if (td->verify) {
+	if (td->o.verify) {
 		if (td_rw(td))
 			bytes_total = bytes_total * 3 / 2;
 		else
 			bytes_total <<= 1;
 	}
 
-	if (td->zone_size && td->zone_skip)
-		bytes_total /= (td->zone_skip / td->zone_size);
+	if (td->o.zone_size && td->o.zone_skip)
+		bytes_total /= (td->o.zone_skip / td->o.zone_size);
 
 	if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING) {
 		double perc;
@@ -128,8 +128,8 @@
 
 		eta_sec = (unsigned long) (elapsed * (1.0 / perc)) - elapsed;
 
-		if (td->timeout && eta_sec > (td->timeout - elapsed))
-			eta_sec = td->timeout - elapsed;
+		if (td->o.timeout && eta_sec > (td->o.timeout - elapsed))
+			eta_sec = td->o.timeout - elapsed;
 	} else if (td->runstate == TD_NOT_CREATED || td->runstate == TD_CREATED
 			|| td->runstate == TD_INITIALIZED) {
 		int t_eta = 0, r_eta = 0;
@@ -138,11 +138,11 @@
 		 * We can only guess - assume it'll run the full timeout
 		 * if given, otherwise assume it'll run at the specified rate.
 		 */
-		if (td->timeout)
-			t_eta = td->timeout + td->start_delay - elapsed;
-		if (td->rate) {
-			r_eta = (bytes_total / 1024) / td->rate;
-			r_eta += td->start_delay - elapsed;
+		if (td->o.timeout)
+			t_eta = td->o.timeout + td->o.start_delay - elapsed;
+		if (td->o.rate) {
+			r_eta = (bytes_total / 1024) / td->o.rate;
+			r_eta += td->o.start_delay - elapsed;
 		}
 
 		if (r_eta && t_eta)
@@ -207,13 +207,13 @@
 	nr_pending = nr_running = t_rate = m_rate = 0;
 	bw_avg_time = ULONG_MAX;
 	for_each_td(td, i) {
-		if (td->bw_avg_time < bw_avg_time)
-			bw_avg_time = td->bw_avg_time;
+		if (td->o.bw_avg_time < bw_avg_time)
+			bw_avg_time = td->o.bw_avg_time;
 		if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING
 		    || td->runstate == TD_FSYNCING) {
 			nr_running++;
-			t_rate += td->rate;
-			m_rate += td->ratemin;
+			t_rate += td->o.rate;
+			m_rate += td->o.ratemin;
 		} else if (td->runstate < TD_RUNNING)
 			nr_pending++;
 
diff --git a/filesetup.c b/filesetup.c
index 5584a36..3b3f868 100644
--- a/filesetup.c
+++ b/filesetup.c
@@ -58,12 +58,12 @@
 		goto err;
 	}
 
-	b = malloc(td->max_bs[DDIR_WRITE]);
-	memset(b, 0, td->max_bs[DDIR_WRITE]);
+	b = malloc(td->o.max_bs[DDIR_WRITE]);
+	memset(b, 0, td->o.max_bs[DDIR_WRITE]);
 
 	left = f->file_size;
 	while (left && !td->terminate) {
-		bs = td->max_bs[DDIR_WRITE];
+		bs = td->o.max_bs[DDIR_WRITE];
 		if (bs > left)
 			bs = left;
 
@@ -84,7 +84,7 @@
 
 	if (td->terminate)
 		unlink(f->file_name);
-	else if (td->create_fsync)
+	else if (td->o.create_fsync)
 		fsync(f->fd);
 
 	free(b);
@@ -104,16 +104,16 @@
 	unsigned long long ret;
 	long r;
 
-	if (upper > td->file_size_high)
-		upper = td->file_size_high;
-	else if (upper < td->file_size_low)
+	if (upper > td->o.file_size_high)
+		upper = td->o.file_size_high;
+	else if (upper < td->o.file_size_low)
 		return 0;
 	else if (!upper)
 		return 0;
 
 	r = os_random_long(&td->file_size_state);
-	ret = td->file_size_low + (unsigned long long) ((double) upper * (r / (RAND_MAX + 1.0)));
-	ret -= (ret % td->rw_min_bs);
+	ret = td->o.file_size_low + (unsigned long long) ((double) upper * (r / (RAND_MAX + 1.0)));
+	ret -= (ret % td->o.rw_min_bs);
 	if (ret > upper)
 		ret = upper;
 	return ret;
@@ -127,11 +127,11 @@
 	unsigned int i, new_files;
 
 	new_files = 0;
-	total_file_size = td->total_file_size;
+	total_file_size = td->o.size;
 	for_each_file(td, f, i) {
 		unsigned long long s;
 
-		f->file_offset = td->start_offset;
+		f->file_offset = td->o.start_offset;
 
 		if (f->filetype != FIO_TYPE_FILE)
 			continue;
@@ -149,7 +149,7 @@
 	/*
 	 * unless specifically asked for overwrite, let normal io extend it
 	 */
-	can_extend = !td->overwrite && !(td->io_ops->flags & FIO_NOEXTEND);
+	can_extend = !td->o.overwrite && !(td->io_ops->flags & FIO_NOEXTEND);
 	if (can_extend)
 		return 0;
 
@@ -170,17 +170,17 @@
 			continue;
 		}
 
-		if (!td->file_size_low)
+		if (!td->o.file_size_low)
 			f->file_size = total_file_size / new_files;
 		else {
 			/*
 			 * If we don't have enough space left for a file
 			 * of the minimum size, bail.
 			 */
-			if (local_file_size < td->file_size_low) {
+			if (local_file_size < td->o.file_size_low) {
 				log_info("fio: limited to %d files\n", i);
-				new_files -= (td->nr_files - i);
-				td->nr_files = i;
+				new_files -= (td->o.nr_files - i);
+				td->o.nr_files = i;
 				break;
 			}
 
@@ -192,7 +192,7 @@
 		create_size += f->file_size;
 		file_there = !file_ok(td, f);
 
-		if (file_there && td_write(td) && !td->overwrite) {
+		if (file_there && td_write(td) && !td->o.overwrite) {
 			unlink(f->file_name);
 			file_there = 0;
 		}
@@ -203,7 +203,7 @@
 	if (!need_create)
 		return 0;
 
-	if (!td->total_file_size && !total_file_size) {
+	if (!td->o.size && !total_file_size) {
 		log_err("Need size for create\n");
 		td_verror(td, EINVAL, "file_size");
 		return 1;
@@ -211,7 +211,7 @@
 
 	temp_stall_ts = 1;
 	log_info("%s: Laying out IO file(s) (%u files / %LuMiB)\n",
-				td->name, new_files, create_size >> 20);
+				td->o.name, new_files, create_size >> 20);
 
 	err = 0;
 	for_each_file(td, f, i) {
@@ -220,7 +220,7 @@
 		 */
 		f->flags &= ~FIO_FILE_UNLINK;
 		if (file_ok(td, f)) {
-			if (td->unlink)
+			if (td->o.unlink)
 				f->flags |= FIO_FILE_UNLINK;
 
 			err = create_file(td, f);
@@ -237,7 +237,7 @@
 {
 	struct stat st;
 
-	if (td->overwrite) {
+	if (td->o.overwrite) {
 		if (fstat(f->fd, &st) == -1) {
 			td_verror(td, errno, "fstat");
 			return 1;
@@ -292,7 +292,7 @@
 		return ret;
 
 	if (f->file_offset > f->real_file_size) {
-		log_err("%s: offset extends end (%Lu > %Lu)\n", td->name, f->file_offset, f->real_file_size);
+		log_err("%s: offset extends end (%Lu > %Lu)\n", td->o.name, f->file_offset, f->real_file_size);
 		return 1;
 	}
 
@@ -303,7 +303,7 @@
 {
 	int ret = 0;
 
-	if (td->odirect)
+	if (td->o.odirect)
 		return 0;
 
 	/*
@@ -336,9 +336,9 @@
 {
 	int flags = 0;
 
-	if (td->odirect)
+	if (td->o.odirect)
 		flags |= OS_O_DIRECT;
-	if (td->sync_io)
+	if (td->o.sync_io)
 		flags |= O_SYNC;
 
 	if (td_write(td) || td_rw(td)) {
@@ -361,17 +361,17 @@
 		int __e = errno;
 
 		td_verror(td, __e, "open");
-		if (__e == EINVAL && td->odirect)
+		if (__e == EINVAL && td->o.odirect)
 			log_err("fio: destination does not support O_DIRECT\n");
 		if (__e == EMFILE)
-			log_err("fio: try reducing/setting openfiles (failed at %u of %u)\n", td->nr_open_files, td->nr_files);
+			log_err("fio: try reducing/setting openfiles (failed at %u of %u)\n", td->nr_open_files, td->o.nr_files);
 		return 1;
 	}
 
 	if (get_file_size(td, f))
 		goto err;
 
-	if (td->invalidate_cache && file_invalidate_cache(td, f))
+	if (td->o.invalidate_cache && file_invalidate_cache(td, f))
 		goto err;
 
 	if (!td_random(td)) {
@@ -403,7 +403,7 @@
 		if (err)
 			break;
 
-		if (td->open_files == td->nr_open_files)
+		if (td->o.open_files == td->nr_open_files)
 			break;
 	}
 
@@ -439,21 +439,21 @@
 	/*
 	 * Recalculate the total file size now that files are set up.
 	 */
-	td->total_file_size = 0;
+	td->o.size = 0;
 	for_each_file(td, f, i)
-		td->total_file_size += f->file_size;
+		td->o.size += f->file_size;
 
-	td->io_size = td->total_file_size;
+	td->io_size = td->o.size;
 	if (td->io_size == 0) {
-		log_err("%s: no io blocks\n", td->name);
+		log_err("%s: no io blocks\n", td->o.name);
 		td_verror(td, EINVAL, "total_file_size");
 		return 1;
 	}
 
-	if (!td->zone_size)
-		td->zone_size = td->io_size;
+	if (!td->o.zone_size)
+		td->o.zone_size = td->io_size;
 
-	td->total_io_size = td->io_size * td->loops;
+	td->total_io_size = td->io_size * td->o.loops;
 
 	for_each_file(td, f, i)
 		td_io_close_file(td, f);
@@ -480,9 +480,9 @@
 			free(f->file_map);
 	}
 
-	td->filename = NULL;
+	td->o.filename = NULL;
 	td->files = NULL;
-	td->nr_files = 0;
+	td->o.nr_files = 0;
 }
 
 static void get_file_type(struct fio_file *f)
@@ -541,7 +541,7 @@
 	if (--f->references)
 		return;
 
-	if (should_fsync(td) && td->fsync_on_close)
+	if (should_fsync(td) && td->o.fsync_on_close)
 		fsync(f->fd);
 
 	if (td->io_ops->close_file)
@@ -580,7 +580,7 @@
 
 		if (S_ISREG(sb.st_mode)) {
 			add_file(td, full_path);
-			td->nr_files++;
+			td->o.nr_files++;
 			continue;
 		}
 
diff --git a/fio.c b/fio.c
index 478ef28..de5c976 100644
--- a/fio.c
+++ b/fio.c
@@ -76,7 +76,7 @@
 			if (td->runstate < TD_RUNNING)
 				kill(td->pid, SIGQUIT);
 			td->terminate = 1;
-			td->start_delay = 0;
+			td->o.start_delay = 0;
 		}
 	}
 }
@@ -110,7 +110,7 @@
 	/*
 	 * No minimum rate set, always ok
 	 */
-	if (!td->ratemin && !td->rate_iops_min)
+	if (!td->o.ratemin && !td->o.rate_iops_min)
 		return 0;
 
 	/*
@@ -133,20 +133,20 @@
 	 */
 	if (td->rate_bytes || td->rate_blocks) {
 		spent = mtime_since(&td->lastrate, now);
-		if (spent < td->ratecycle)
+		if (spent < td->o.ratecycle)
 			return 0;
 
-		if (td->rate) {
+		if (td->o.rate) {
 			/*
 			 * check bandwidth specified rate
 			 */
 			if (bytes < td->rate_bytes) {
-				log_err("%s: min rate %u not met\n", td->name, td->ratemin);
+				log_err("%s: min rate %u not met\n", td->o.name, td->o.ratemin);
 				return 1;
 			} else {
 				rate = (bytes - td->rate_bytes) / spent;
-				if (rate < td->ratemin || bytes < td->rate_bytes) {
-					log_err("%s: min rate %u not met, got %luKiB/sec\n", td->name, td->ratemin, rate);
+				if (rate < td->o.ratemin || bytes < td->rate_bytes) {
+					log_err("%s: min rate %u not met, got %luKiB/sec\n", td->o.name, td->o.ratemin, rate);
 					return 1;
 				}
 			}
@@ -154,13 +154,13 @@
 			/*
 			 * checks iops specified rate
 			 */
-			if (iops < td->rate_iops) {
-				log_err("%s: min iops rate %u not met\n", td->name, td->rate_iops);
+			if (iops < td->o.rate_iops) {
+				log_err("%s: min iops rate %u not met\n", td->o.name, td->o.rate_iops);
 				return 1;
 			} else {
 				rate = (iops - td->rate_blocks) / spent;
-				if (rate < td->rate_iops_min || iops < td->rate_blocks) {
-					log_err("%s: min iops rate %u not met, got %lu\n", td->name, td->rate_iops_min, rate);
+				if (rate < td->o.rate_iops_min || iops < td->rate_blocks) {
+					log_err("%s: min iops rate %u not met, got %lu\n", td->o.name, td->o.rate_iops_min, rate);
 				}
 			}
 		}
@@ -174,9 +174,9 @@
 
 static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
 {
-	if (!td->timeout)
+	if (!td->o.timeout)
 		return 0;
-	if (mtime_since(&td->epoch, t) >= td->timeout * 1000)
+	if (mtime_since(&td->epoch, t) >= td->o.timeout * 1000)
 		return 1;
 
 	return 0;
@@ -376,8 +376,8 @@
 		if (queue_full(td) || ret == FIO_Q_BUSY) {
 			min_events = 1;
 
-			if (td->cur_depth > td->iodepth_low)
-				min_events = td->cur_depth - td->iodepth_low;
+			if (td->cur_depth > td->o.iodepth_low)
+				min_events = td->cur_depth - td->o.iodepth_low;
 		}
 
 		/*
@@ -492,8 +492,8 @@
 			if (queue_full(td) || ret == FIO_Q_BUSY) {
 				min_evts = 1;
 
-				if (td->cur_depth > td->iodepth_low)
-					min_evts = td->cur_depth - td->iodepth_low;
+				if (td->cur_depth > td->o.iodepth_low)
+					min_evts = td->cur_depth - td->o.iodepth_low;
 			}
 
 			fio_gettime(&comp_time, NULL);
@@ -521,17 +521,17 @@
 			break;
 		}
 
-		if (td->thinktime) {
+		if (td->o.thinktime) {
 			unsigned long long b;
 
 			b = td->io_blocks[0] + td->io_blocks[1];
-			if (!(b % td->thinktime_blocks)) {
+			if (!(b % td->o.thinktime_blocks)) {
 				int left;
 
-				if (td->thinktime_spin)
-					__usec_sleep(td->thinktime_spin);
+				if (td->o.thinktime_spin)
+					__usec_sleep(td->o.thinktime_spin);
 
-				left = td->thinktime - td->thinktime_spin;
+				left = td->o.thinktime - td->o.thinktime_spin;
 				if (left)
 					usec_sleep(td, left);
 			}
@@ -545,7 +545,7 @@
 		if (i)
 			ret = io_u_queued_complete(td, i);
 
-		if (should_fsync(td) && td->end_fsync) {
+		if (should_fsync(td) && td->o.end_fsync) {
 			td_set_runstate(td, TD_FSYNCING);
 
 			for_each_file(td, f, i) {
@@ -596,13 +596,13 @@
 	if (td->io_ops->flags & FIO_SYNCIO)
 		max_units = 1;
 	else
-		max_units = td->iodepth;
+		max_units = td->o.iodepth;
 
-	max_bs = max(td->max_bs[DDIR_READ], td->max_bs[DDIR_WRITE]);
+	max_bs = max(td->o.max_bs[DDIR_READ], td->o.max_bs[DDIR_WRITE]);
 	td->orig_buffer_size = max_bs * max_units;
 
-	if (td->mem_type == MEM_SHMHUGE || td->mem_type == MEM_MMAPHUGE)
-		td->orig_buffer_size = (td->orig_buffer_size + td->hugepage_size - 1) & ~(td->hugepage_size - 1);
+	if (td->o.mem_type == MEM_SHMHUGE || td->o.mem_type == MEM_MMAPHUGE)
+		td->orig_buffer_size = (td->orig_buffer_size + td->o.hugepage_size - 1) & ~(td->o.hugepage_size - 1);
 	else
 		td->orig_buffer_size += page_mask;
 
@@ -649,7 +649,7 @@
 	/*
 	 * Set io scheduler.
 	 */
-	ret = fwrite(td->ioscheduler, strlen(td->ioscheduler), 1, f);
+	ret = fwrite(td->o.ioscheduler, strlen(td->o.ioscheduler), 1, f);
 	if (ferror(f) || ret != 1) {
 		td_verror(td, errno, "fwrite");
 		fclose(f);
@@ -668,9 +668,9 @@
 		return 1;
 	}
 
-	sprintf(tmp2, "[%s]", td->ioscheduler);
+	sprintf(tmp2, "[%s]", td->o.ioscheduler);
 	if (!strstr(tmp, tmp2)) {
-		log_err("fio: io scheduler %s not found\n", td->ioscheduler);
+		log_err("fio: io scheduler %s not found\n", td->o.ioscheduler);
 		td_verror(td, EINVAL, "iosched_switch");
 		fclose(f);
 		return 1;
@@ -717,7 +717,7 @@
 	struct thread_data *td = data;
 	int clear_state;
 
-	if (!td->use_thread)
+	if (!td->o.use_thread)
 		setsid();
 
 	td->pid = getpid();
@@ -746,12 +746,12 @@
 		}
 	}
 
-	if (nice(td->nice) == -1) {
+	if (nice(td->o.nice) == -1) {
 		td_verror(td, errno, "nice");
 		goto err_sem;
 	}
 
-	if (td->ioscheduler && switch_ioscheduler(td))
+	if (td->o.ioscheduler && switch_ioscheduler(td))
 		goto err_sem;
 
 	td_set_runstate(td, TD_INITIALIZED);
@@ -764,7 +764,7 @@
 	 */
 	fio_sem_remove(td->mutex);
 
-	if (!td->create_serialize && setup_files(td))
+	if (!td->o.create_serialize && setup_files(td))
 		goto err;
 
 	if (td_io_init(td))
@@ -773,8 +773,8 @@
 	if (open_files(td))
 		goto err;
 
-	if (td->exec_prerun) {
-		if (system(td->exec_prerun) < 0)
+	if (td->o.exec_prerun) {
+		if (system(td->o.exec_prerun) < 0)
 			goto err;
 	}
 
@@ -784,11 +784,11 @@
 
 	runtime[0] = runtime[1] = 0;
 	clear_state = 0;
-	while (td->loops--) {
+	while (td->o.loops--) {
 		fio_gettime(&td->start, NULL);
 		memcpy(&td->ts.stat_sample_time, &td->start, sizeof(td->start));
 
-		if (td->ratemin)
+		if (td->o.ratemin)
 			memcpy(&td->lastrate, &td->ts.stat_sample_time, sizeof(td->lastrate));
 
 		if (clear_state && clear_io_state(td))
@@ -808,7 +808,7 @@
 		if (td->error || td->terminate)
 			break;
 
-		if (td->verify == VERIFY_NONE)
+		if (td->o.verify == VERIFY_NONE)
 			continue;
 
 		if (clear_io_state(td))
@@ -837,11 +837,11 @@
 		finish_log(td, td->ts.slat_log, "slat");
 	if (td->ts.clat_log)
 		finish_log(td, td->ts.clat_log, "clat");
-	if (td->write_iolog_file)
+	if (td->o.write_iolog_file)
 		write_iolog_close(td);
-	if (td->exec_postrun) {
-		if (system(td->exec_postrun) < 0)
-			log_err("fio: postrun %s failed\n", td->exec_postrun);
+	if (td->o.exec_postrun) {
+		if (system(td->o.exec_postrun) < 0)
+			log_err("fio: postrun %s failed\n", td->o.exec_postrun);
 	}
 
 	if (exitall_on_terminate)
@@ -907,7 +907,7 @@
 
 		if (!td->pid || td->runstate == TD_REAPED)
 			continue;
-		if (td->use_thread) {
+		if (td->o.use_thread) {
 			if (td->runstate == TD_EXITED) {
 				td_set_runstate(td, TD_REAPED);
 				goto reaped;
@@ -953,7 +953,7 @@
 		 */
 		continue;
 reaped:
-		if (td->use_thread) {
+		if (td->o.use_thread) {
 			long ret;
 
 			if (pthread_join(td->thread, (void *) &ret))
@@ -961,8 +961,8 @@
 		}
 
 		(*nr_running)--;
-		(*m_rate) -= td->ratemin;
-		(*t_rate) -= td->rate;
+		(*m_rate) -= td->o.ratemin;
+		(*t_rate) -= td->o.rate;
 
 		if (td->error)
 			exit_value++;
@@ -1008,7 +1008,7 @@
 	for_each_td(td, i) {
 		print_status_init(td->thread_number - 1);
 
-		if (!td->create_serialize) {
+		if (!td->o.create_serialize) {
 			init_disk_util(td);
 			continue;
 		}
@@ -1052,14 +1052,14 @@
 				continue;
 			}
 
-			if (td->start_delay) {
+			if (td->o.start_delay) {
 				spent = mtime_since_genesis();
 
-				if (td->start_delay * 1000 > spent)
+				if (td->o.start_delay * 1000 > spent)
 					continue;
 			}
 
-			if (td->stonewall && (nr_started || nr_running))
+			if (td->o.stonewall && (nr_started || nr_running))
 				break;
 
 			/*
@@ -1070,7 +1070,7 @@
 			map[this_jobs++] = td;
 			nr_started++;
 
-			if (td->use_thread) {
+			if (td->o.use_thread) {
 				if (pthread_create(&td->thread, NULL, thread_main, td)) {
 					perror("thread_create");
 					nr_started--;
@@ -1135,8 +1135,8 @@
 			td_set_runstate(td, TD_RUNNING);
 			nr_running++;
 			nr_started--;
-			m_rate += td->ratemin;
-			t_rate += td->rate;
+			m_rate += td->o.ratemin;
+			t_rate += td->o.rate;
 			todo--;
 			fio_sem_up(td->mutex);
 		}
diff --git a/fio.h b/fio.h
index e1b35ea..cfbe378 100644
--- a/fio.h
+++ b/fio.h
@@ -309,39 +309,30 @@
 	unsigned long total_run_time;
 };
 
-/*
- * This describes a single thread/process executing a fio job.
- */
-struct thread_data {
+struct thread_options {
 	int pad;
 	char *description;
 	char *name;
 	char *directory;
 	char *filename;
+	char *opendir;
 	char *ioengine;
-	char verror[128];
-	pthread_t thread;
-	int thread_number;
-	int groupid;
-	struct thread_stat ts;
-	struct fio_file *files;
-	unsigned int files_index;
-	unsigned int nr_files;
-	unsigned int nr_open_files;
-	unsigned int nr_normal_files;
-	union {
-		unsigned int next_file;
-		os_random_state_t next_file_state;
-	};
-	int error;
-	pid_t pid;
-	char *orig_buffer;
-	size_t orig_buffer_size;
-	volatile int terminate;
-	volatile int runstate;
 	enum td_ddir td_ddir;
-	unsigned int ioprio;
-	unsigned int last_was_sync;
+	unsigned int iodepth;
+	unsigned int iodepth_low;
+	unsigned int iodepth_batch;
+
+	unsigned long long size;
+	unsigned long long file_size_low;
+	unsigned long long file_size_high;
+	unsigned long long start_offset;
+
+	unsigned int bs[2];
+	unsigned int min_bs[2];
+	unsigned int max_bs[2];
+
+	unsigned int nr_files;
+	unsigned int open_files;
 
 	unsigned int odirect;
 	unsigned int invalidate_cache;
@@ -361,9 +352,6 @@
 	unsigned int bs_unaligned;
 	unsigned int fsync_on_close;
 
-	unsigned int bs[2];
-	unsigned int min_bs[2];
-	unsigned int max_bs[2];
 	unsigned int hugepage_size;
 	unsigned int rw_min_bs;
 	unsigned int thinktime;
@@ -378,13 +366,9 @@
 	unsigned long long zone_size;
 	unsigned long long zone_skip;
 	enum fio_memtype mem_type;
-	char *mmapfile;
-	int mmapfd;
+
 	unsigned int stonewall;
 	unsigned int numjobs;
-	unsigned int iodepth;
-	unsigned int iodepth_low;
-	unsigned int iodepth_batch;
 	os_cpu_mask_t cpumask;
 	unsigned int iolog;
 	unsigned int read_iolog;
@@ -394,16 +378,65 @@
 	unsigned int nice;
 	unsigned int file_service_type;
 	unsigned int group_reporting;
-	unsigned int open_files;
-	char *opendir;
 
 	char *read_iolog_file;
 	char *write_iolog_file;
+
+	/*
+	 * Pre-run and post-run shell
+	 */
+	char *exec_prerun;
+	char *exec_postrun;
+
+	unsigned int rate;
+	unsigned int ratemin;
+	unsigned int ratecycle;
+	unsigned int rate_iops;
+	unsigned int rate_iops_min;
+
+	char *ioscheduler;
+
+	/*
+	 * CPU "io" cycle burner
+	 */
+	unsigned int cpuload;
+	unsigned int cpucycle;
+};
+
+/*
+ * This describes a single thread/process executing a fio job.
+ */
+struct thread_data {
+	struct thread_options o;
+	char verror[128];
+	pthread_t thread;
+	int thread_number;
+	int groupid;
+	struct thread_stat ts;
+	struct fio_file *files;
+	unsigned int files_index;
+	unsigned int nr_open_files;
+	unsigned int nr_normal_files;
+	union {
+		unsigned int next_file;
+		os_random_state_t next_file_state;
+	};
+	int error;
+	pid_t pid;
+	char *orig_buffer;
+	size_t orig_buffer_size;
+	volatile int terminate;
+	volatile int runstate;
+	unsigned int ioprio;
+	unsigned int last_was_sync;
+
+	char *mmapfile;
+	int mmapfd;
+
 	void *iolog_buf;
 	FILE *iolog_f;
 
 	char *sysfs_root;
-	char *ioscheduler;
 
 	os_random_state_t bsrange_state;
 	os_random_state_t verify_state;
@@ -428,11 +461,6 @@
 	/*
 	 * Rate state
 	 */
-	unsigned int rate;
-	unsigned int ratemin;
-	unsigned int ratecycle;
-	unsigned int rate_iops;
-	unsigned int rate_iops_min;
 	unsigned long rate_usec_cycle;
 	long rate_pending_usleep;
 	unsigned long rate_bytes;
@@ -440,8 +468,6 @@
 	struct timeval lastrate;
 
 	unsigned long long io_size;
-	unsigned long long total_file_size;
-	unsigned long long start_offset;
 	unsigned long long total_io_size;
 
 	unsigned long io_issues[2];
@@ -456,12 +482,6 @@
 	 */
 	os_random_state_t random_state;
 
-	/*
-	 * CPU "io" cycle burner
-	 */
-	unsigned int cpuload;
-	unsigned int cpucycle;
-
 	struct timeval start;	/* start of this loop */
 	struct timeval epoch;	/* time job was started */
 
@@ -473,12 +493,6 @@
 	enum fio_ddir rwmix_ddir;
 
 	/*
-	 * Pre-run and post-run shell
-	 */
-	char *exec_prerun;
-	char *exec_postrun;
-
-	/*
 	 * IO historic logs
 	 */
 	struct list_head io_hist_list;
@@ -501,8 +515,6 @@
 	 * For generating file sizes
 	 */
 	os_random_state_t file_size_state;
-	unsigned long long file_size_low;
-	unsigned long long file_size_high;
 };
 
 /*
@@ -549,13 +561,13 @@
 
 extern struct thread_data *threads;
 
-#define td_read(td)		((td)->td_ddir & TD_DDIR_READ)
-#define td_write(td)		((td)->td_ddir & TD_DDIR_WRITE)
-#define td_rw(td)		(((td)->td_ddir & TD_DDIR_RW) == TD_DDIR_RW)
-#define td_random(td)		((td)->td_ddir & TD_DDIR_RAND)
+#define td_read(td)		((td)->o.td_ddir & TD_DDIR_READ)
+#define td_write(td)		((td)->o.td_ddir & TD_DDIR_WRITE)
+#define td_rw(td)		(((td)->o.td_ddir & TD_DDIR_RW) == TD_DDIR_RW)
+#define td_random(td)		((td)->o.td_ddir & TD_DDIR_RAND)
 
 #define BLOCKS_PER_MAP		(8 * sizeof(long))
-#define TO_MAP_BLOCK(td, f, b)	((b) - ((f)->file_offset / (td)->rw_min_bs))
+#define TO_MAP_BLOCK(td, f, b)	((b) - ((f)->file_offset / (td)->o.rw_min_bs))
 #define RAND_MAP_IDX(td, f, b)	(TO_MAP_BLOCK(td, f, b) / BLOCKS_PER_MAP)
 #define RAND_MAP_BIT(td, f, b)	(TO_MAP_BLOCK(td, f, b) & (BLOCKS_PER_MAP - 1))
 
@@ -565,9 +577,9 @@
 {
 	if (td->last_was_sync)
 		return 0;
-	if (td->odirect)
+	if (td->o.odirect)
 		return 0;
-	if (td_write(td) || td_rw(td) || td->override_sync)
+	if (td_write(td) || td_rw(td) || td->o.override_sync)
 		return 1;
 
 	return 0;
@@ -798,7 +810,7 @@
 #define for_each_td(td, i)	\
 	for ((i) = 0, (td) = &threads[0]; (i) < (int) thread_number; (i)++, (td)++)
 #define for_each_file(td, f, i)	\
-	for ((i) = 0, (f) = &(td)->files[0]; (i) < (td)->nr_files; (i)++, (f)++)
+	for ((i) = 0, (f) = &(td)->files[0]; (i) < (td)->o.nr_files; (i)++, (f)++)
 
 #define fio_assert(td, cond)	do {	\
 	if (!(cond)) {			\
diff --git a/init.c b/init.c
index b044e28..e38d677 100644
--- a/init.c
+++ b/init.c
@@ -137,21 +137,21 @@
 	unsigned long long rate;
 	unsigned int bs;
 
-	if (!td->rate && !td->rate_iops)
+	if (!td->o.rate && !td->o.rate_iops)
 		return 0;
 
 	if (td_rw(td))
-		bs = td->rw_min_bs;
+		bs = td->o.rw_min_bs;
 	else if (td_read(td))
-		bs = td->min_bs[DDIR_READ];
+		bs = td->o.min_bs[DDIR_READ];
 	else
-		bs = td->min_bs[DDIR_WRITE];
+		bs = td->o.min_bs[DDIR_WRITE];
 
-	if (td->rate) {
-		rate = td->rate;
+	if (td->o.rate) {
+		rate = td->o.rate;
 		nr_reads_per_msec = (rate * 1024 * 1000LL) / bs;
 	} else
-		nr_reads_per_msec = td->rate_iops * 1000UL;
+		nr_reads_per_msec = td->o.rate_iops * 1000UL;
 
 	if (!nr_reads_per_msec) {
 		log_err("rate lower than supported\n");
@@ -169,96 +169,98 @@
  */
 static int fixup_options(struct thread_data *td)
 {
-	if (!td->rwmixread && td->rwmixwrite)
-		td->rwmixread = 100 - td->rwmixwrite;
+	struct thread_options *o = &td->o;
 
-	if (td->write_iolog_file && td->read_iolog_file) {
+	if (!o->rwmixread && o->rwmixwrite)
+		o->rwmixread = 100 - o->rwmixwrite;
+
+	if (o->write_iolog_file && o->read_iolog_file) {
 		log_err("fio: read iolog overrides write_iolog\n");
-		free(td->write_iolog_file);
-		td->write_iolog_file = NULL;
+		free(o->write_iolog_file);
+		o->write_iolog_file = NULL;
 	}
 
 	if (td->io_ops->flags & FIO_SYNCIO)
-		td->iodepth = 1;
+		o->iodepth = 1;
 	else {
-		if (!td->iodepth)
-			td->iodepth = td->open_files;
+		if (!o->iodepth)
+			o->iodepth = o->open_files;
 	}
 
 	/*
 	 * only really works for sequential io for now, and with 1 file
 	 */
-	if (td->zone_size && td_random(td) && td->open_files == 1)
-		td->zone_size = 0;
+	if (o->zone_size && td_random(td) && o->open_files == 1)
+		o->zone_size = 0;
 
 	/*
 	 * Reads can do overwrites, we always need to pre-create the file
 	 */
 	if (td_read(td) || td_rw(td))
-		td->overwrite = 1;
+		o->overwrite = 1;
 
-	if (!td->min_bs[DDIR_READ])
-		td->min_bs[DDIR_READ]= td->bs[DDIR_READ];
-	if (!td->max_bs[DDIR_READ])
-		td->max_bs[DDIR_READ] = td->bs[DDIR_READ];
-	if (!td->min_bs[DDIR_WRITE])
-		td->min_bs[DDIR_WRITE]= td->bs[DDIR_WRITE];
-	if (!td->max_bs[DDIR_WRITE])
-		td->max_bs[DDIR_WRITE] = td->bs[DDIR_WRITE];
+	if (!o->min_bs[DDIR_READ])
+		o->min_bs[DDIR_READ]= o->bs[DDIR_READ];
+	if (!o->max_bs[DDIR_READ])
+		o->max_bs[DDIR_READ] = o->bs[DDIR_READ];
+	if (!o->min_bs[DDIR_WRITE])
+		o->min_bs[DDIR_WRITE]= o->bs[DDIR_WRITE];
+	if (!o->max_bs[DDIR_WRITE])
+		o->max_bs[DDIR_WRITE] = o->bs[DDIR_WRITE];
 
-	td->rw_min_bs = min(td->min_bs[DDIR_READ], td->min_bs[DDIR_WRITE]);
+	o->rw_min_bs = min(o->min_bs[DDIR_READ], o->min_bs[DDIR_WRITE]);
 
-	if (!td->file_size_high)
-		td->file_size_high = td->file_size_low;
+	if (!o->file_size_high)
+		o->file_size_high = o->file_size_low;
 
 	if (td_read(td) && !td_rw(td))
-		td->verify = 0;
+		o->verify = 0;
 
-	if (td->norandommap && td->verify != VERIFY_NONE) {
+	if (o->norandommap && o->verify != VERIFY_NONE) {
 		log_err("fio: norandommap given, verify disabled\n");
-		td->verify = VERIFY_NONE;
+		o->verify = VERIFY_NONE;
 	}
-	if (td->bs_unaligned && (td->odirect || td->io_ops->flags & FIO_RAWIO))
+	if (o->bs_unaligned && (o->odirect || td->io_ops->flags & FIO_RAWIO))
 		log_err("fio: bs_unaligned may not work with raw io\n");
 
 	/*
 	 * thinktime_spin must be less than thinktime
 	 */
-	if (td->thinktime_spin > td->thinktime)
-		td->thinktime_spin = td->thinktime;
+	if (o->thinktime_spin > o->thinktime)
+		o->thinktime_spin = o->thinktime;
 
 	/*
 	 * The low water mark cannot be bigger than the iodepth
 	 */
-	if (td->iodepth_low > td->iodepth || !td->iodepth_low) {
+	if (o->iodepth_low > o->iodepth || !o->iodepth_low) {
 		/*
 		 * syslet work around - if the workload is sequential,
 		 * we want to let the queue drain all the way down to
 		 * avoid seeking between async threads
 		 */
 		if (!strcmp(td->io_ops->name, "syslet-rw") && !td_random(td))
-			td->iodepth_low = 1;
+			o->iodepth_low = 1;
 		else
-			td->iodepth_low = td->iodepth;
+			o->iodepth_low = o->iodepth;
 	}
 
 	/*
 	 * If batch number isn't set, default to the same as iodepth
 	 */
-	if (td->iodepth_batch > td->iodepth || !td->iodepth_batch)
-		td->iodepth_batch = td->iodepth;
+	if (o->iodepth_batch > o->iodepth || !o->iodepth_batch)
+		o->iodepth_batch = o->iodepth;
 
-	if (td->nr_files > td->files_index)
-		td->nr_files = td->files_index;
+	if (o->nr_files > td->files_index)
+		o->nr_files = td->files_index;
 
-	if (td->open_files > td->nr_files || !td->open_files)
-		td->open_files = td->nr_files;
+	if (o->open_files > o->nr_files || !o->open_files)
+		o->open_files = o->nr_files;
 
-	if ((td->rate && td->rate_iops) || (td->ratemin && td->rate_iops_min)) {
+	if ((o->rate && o->rate_iops) || (o->ratemin && o->rate_iops_min)) {
 		log_err("fio: rate and rate_iops are mutually exclusive\n");
 		return 1;
 	}
-	if ((td->rate < td->ratemin) || (td->rate_iops < td->rate_iops_min)) {
+	if ((o->rate < o->ratemin) || (o->rate_iops < o->rate_iops_min)) {
 		log_err("fio: minimum rate exceeds rate\n");
 		return 1;
 	}
@@ -343,7 +345,7 @@
 	os_random_seed(seeds[1], &td->verify_state);
 	os_random_seed(seeds[2], &td->rwmix_state);
 
-	if (td->file_service_type == FIO_FSERVICE_RANDOM)
+	if (td->o.file_service_type == FIO_FSERVICE_RANDOM)
 		os_random_seed(seeds[3], &td->next_file_state);
 
 	os_random_seed(seeds[5], &td->file_size_state);
@@ -351,12 +353,12 @@
 	if (!td_random(td))
 		return 0;
 
-	if (td->rand_repeatable)
+	if (td->o.rand_repeatable)
 		seeds[4] = FIO_RANDSEED * td->thread_number;
 
-	if (!td->norandommap) {
+	if (!td->o.norandommap) {
 		for_each_file(td, f, i) {
-			blocks = (f->real_file_size + td->rw_min_bs - 1) / td->rw_min_bs;
+			blocks = (f->real_file_size + td->o.rw_min_bs - 1) / td->o.rw_min_bs;
 			num_maps = (blocks + BLOCKS_PER_MAP-1)/ BLOCKS_PER_MAP;
 			f->file_map = malloc(num_maps * sizeof(long));
 			if (!f->file_map) {
@@ -394,29 +396,29 @@
 	if (td == &def_thread)
 		return 0;
 
-	engine = get_engine_name(td->ioengine);
+	engine = get_engine_name(td->o.ioengine);
 	td->io_ops = load_ioengine(td, engine);
 	if (!td->io_ops) {
 		log_err("fio: failed to load engine %s\n", engine);
 		goto err;
 	}
 
-	if (td->use_thread)
+	if (td->o.use_thread)
 		nr_thread++;
 	else
 		nr_process++;
 
-	if (td->odirect)
+	if (td->o.odirect)
 		td->io_ops->flags |= FIO_RAWIO;
 
 	file_alloced = 0;
-	if (!td->filename && !td->files_index) {
+	if (!td->o.filename && !td->files_index) {
 		file_alloced = 1;
 
-		if (td->nr_files == 1 && exists_and_not_file(jobname))
+		if (td->o.nr_files == 1 && exists_and_not_file(jobname))
 			add_file(td, jobname);
 		else {
-			for (i = 0; i < td->nr_files; i++) {
+			for (i = 0; i < td->o.nr_files; i++) {
 				sprintf(fname, "%s.%d.%d", jobname, td->thread_number, i);
 				add_file(td, fname);
 			}
@@ -427,8 +429,8 @@
 		goto err;
 
 	for_each_file(td, f, i) {
-		if (td->directory && f->filetype == FIO_TYPE_FILE) {
-			sprintf(fname, "%s/%s", td->directory, f->file_name);
+		if (td->o.directory && f->filetype == FIO_TYPE_FILE) {
+			sprintf(fname, "%s/%s", td->o.directory, f->file_name);
 			f->file_name = strdup(fname);
 		}
 	}
@@ -439,7 +441,7 @@
 	td->ts.slat_stat[0].min_val = td->ts.slat_stat[1].min_val = ULONG_MAX;
 	td->ts.bw_stat[0].min_val = td->ts.bw_stat[1].min_val = ULONG_MAX;
 
-	if ((td->stonewall || td->numjobs > 1) && prev_group_jobs) {
+	if ((td->o.stonewall || td->o.numjobs > 1) && prev_group_jobs) {
 		prev_group_jobs = 0;
 		groupid++;
 	}
@@ -453,29 +455,29 @@
 	if (setup_rate(td))
 		goto err;
 
-	if (td->write_lat_log) {
+	if (td->o.write_lat_log) {
 		setup_log(&td->ts.slat_log);
 		setup_log(&td->ts.clat_log);
 	}
-	if (td->write_bw_log)
+	if (td->o.write_bw_log)
 		setup_log(&td->ts.bw_log);
 
-	if (!td->name)
-		td->name = strdup(jobname);
+	if (!td->o.name)
+		td->o.name = strdup(jobname);
 
 	if (!terse_output) {
 		if (!job_add_num) {
 			if (!strcmp(td->io_ops->name, "cpuio"))
-				log_info("%s: ioengine=cpu, cpuload=%u, cpucycle=%u\n", td->name, td->cpuload, td->cpucycle);
+				log_info("%s: ioengine=cpu, cpuload=%u, cpucycle=%u\n", td->o.name, td->o.cpuload, td->o.cpucycle);
 			else {
 				char *c1, *c2, *c3, *c4;
 
-				c1 = to_kmg(td->min_bs[DDIR_READ]);
-				c2 = to_kmg(td->max_bs[DDIR_READ]);
-				c3 = to_kmg(td->min_bs[DDIR_WRITE]);
-				c4 = to_kmg(td->max_bs[DDIR_WRITE]);
+				c1 = to_kmg(td->o.min_bs[DDIR_READ]);
+				c2 = to_kmg(td->o.max_bs[DDIR_READ]);
+				c3 = to_kmg(td->o.min_bs[DDIR_WRITE]);
+				c4 = to_kmg(td->o.max_bs[DDIR_WRITE]);
 
-				log_info("%s: (g=%d): rw=%s, bs=%s-%s/%s-%s, ioengine=%s, iodepth=%u\n", td->name, td->groupid, ddir_str[td->td_ddir], c1, c2, c3, c4, td->io_ops->name, td->iodepth);
+				log_info("%s: (g=%d): rw=%s, bs=%s-%s/%s-%s, ioengine=%s, iodepth=%u\n", td->o.name, td->groupid, ddir_str[td->o.td_ddir], c1, c2, c3, c4, td->io_ops->name, td->o.iodepth);
 
 				free(c1);
 				free(c2);
@@ -490,18 +492,18 @@
 	 * recurse add identical jobs, clear numjobs and stonewall options
 	 * as they don't apply to sub-jobs
 	 */
-	numjobs = td->numjobs;
+	numjobs = td->o.numjobs;
 	while (--numjobs) {
 		struct thread_data *td_new = get_new_job(0, td);
 
 		if (!td_new)
 			goto err;
 
-		td_new->numjobs = 1;
-		td_new->stonewall = 0;
+		td_new->o.numjobs = 1;
+		td_new->o.stonewall = 0;
 
 		if (file_alloced) {
-			td_new->filename = NULL;
+			td_new->o.filename = NULL;
 			td_new->files_index = 0;
 			td_new->files = NULL;
 		}
@@ -512,7 +514,7 @@
 			goto err;
 	}
 
-	if (td->numjobs > 1) {
+	if (td->o.numjobs > 1) {
 		groupid++;
 		prev_group_jobs = 0;
 	}
@@ -586,7 +588,7 @@
 		 * Seperate multiple job files by a stonewall
 		 */
 		if (!global && stonewall) {
-			td->stonewall = stonewall;
+			td->o.stonewall = stonewall;
 			stonewall = 0;
 		}
 
@@ -631,7 +633,7 @@
 {
 	memset(&def_thread, 0, sizeof(def_thread));
 
-	if (fio_getaffinity(getpid(), &def_thread.cpumask) == -1) {
+	if (fio_getaffinity(getpid(), &def_thread.o.cpumask) == -1) {
 		perror("sched_getaffinity");
 		return 1;
 	}
@@ -641,12 +643,12 @@
 	 */
 	fio_fill_default_options(&def_thread);
 
-	def_thread.timeout = def_timeout;
-	def_thread.write_bw_log = write_bw_log;
-	def_thread.write_lat_log = write_lat_log;
+	def_thread.o.timeout = def_timeout;
+	def_thread.o.write_bw_log = write_bw_log;
+	def_thread.o.write_lat_log = write_lat_log;
 
 #ifdef FIO_HAVE_DISK_UTIL
-	def_thread.do_disk_util = 1;
+	def_thread.o.do_disk_util = 1;
 #endif
 
 	return 0;
@@ -754,7 +756,7 @@
 			char *val = optarg;
 
 			if (!strncmp(opt, "name", 4) && td) {
-				ret = add_job(td, td->name ?: "fio", 0);
+				ret = add_job(td, td->o.name ?: "fio", 0);
 				if (ret) {
 					put_job(td);
 					return 0;
@@ -783,7 +785,7 @@
 		if (dont_add_job)
 			put_job(td);
 		else {
-			ret = add_job(td, td->name ?: "fio", 0);
+			ret = add_job(td, td->o.name ?: "fio", 0);
 			if (ret)
 				put_job(td);
 		}
diff --git a/io_u.c b/io_u.c
index c788656..6477513 100644
--- a/io_u.c
+++ b/io_u.c
@@ -39,7 +39,7 @@
  */
 static void mark_random_map(struct thread_data *td, struct io_u *io_u)
 {
-	unsigned int min_bs = td->rw_min_bs;
+	unsigned int min_bs = td->o.rw_min_bs;
 	struct fio_file *f = io_u->file;
 	unsigned long long block;
 	unsigned int blocks;
@@ -79,7 +79,7 @@
 
 	i = f->last_free_lookup;
 	*b = (i * BLOCKS_PER_MAP);
-	while ((*b) * td->rw_min_bs < f->real_file_size) {
+	while ((*b) * td->o.rw_min_bs < f->real_file_size) {
 		if (f->file_map[i] != -1UL) {
 			*b += ffz(f->file_map[i]);
 			f->last_free_lookup = i;
@@ -106,7 +106,7 @@
 	long r;
 
 	if (td_random(td)) {
-		unsigned long long max_blocks = f->file_size / td->min_bs[ddir];
+		unsigned long long max_blocks = f->file_size / td->o.min_bs[ddir];
 		int loops = 5;
 
 		do {
@@ -115,9 +115,9 @@
 				b = 0;
 			else
 				b = ((max_blocks - 1) * r / (unsigned long long) (RAND_MAX+1.0));
-			if (td->norandommap)
+			if (td->o.norandommap)
 				break;
-			rb = b + (f->file_offset / td->min_bs[ddir]);
+			rb = b + (f->file_offset / td->o.min_bs[ddir]);
 			loops--;
 		} while (!random_map_free(td, f, rb) && loops);
 
@@ -128,9 +128,9 @@
 		if (!loops && get_next_free_block(td, f, &b))
 			return 1;
 	} else
-		b = f->last_pos / td->min_bs[ddir];
+		b = f->last_pos / td->o.min_bs[ddir];
 
-	io_u->offset = (b * td->min_bs[ddir]) + f->file_offset;
+	io_u->offset = (b * td->o.min_bs[ddir]) + f->file_offset;
 	if (io_u->offset >= f->real_file_size)
 		return 1;
 
@@ -144,18 +144,18 @@
 	unsigned int buflen;
 	long r;
 
-	if (td->min_bs[ddir] == td->max_bs[ddir])
-		buflen = td->min_bs[ddir];
+	if (td->o.min_bs[ddir] == td->o.max_bs[ddir])
+		buflen = td->o.min_bs[ddir];
 	else {
 		r = os_random_long(&td->bsrange_state);
-		buflen = (unsigned int) (1 + (double) (td->max_bs[ddir] - 1) * r / (RAND_MAX + 1.0));
-		if (!td->bs_unaligned)
-			buflen = (buflen + td->min_bs[ddir] - 1) & ~(td->min_bs[ddir] - 1);
+		buflen = (unsigned int) (1 + (double) (td->o.max_bs[ddir] - 1) * r / (RAND_MAX + 1.0));
+		if (!td->o.bs_unaligned)
+			buflen = (buflen + td->o.min_bs[ddir] - 1) & ~(td->o.min_bs[ddir] - 1);
 	}
 
 	while (buflen + io_u->offset > f->real_file_size) {
-		if (buflen == td->min_bs[ddir]) {
-			if (!td->odirect) {
+		if (buflen == td->o.min_bs[ddir]) {
+			if (!td->o.odirect) {
 				assert(io_u->offset <= f->real_file_size);
 				buflen = f->real_file_size - io_u->offset;
 				return buflen;
@@ -163,7 +163,7 @@
 			return 0;
 		}
 
-		buflen = td->min_bs[ddir];
+		buflen = td->o.min_bs[ddir];
 	}
 
 	return buflen;
@@ -186,13 +186,13 @@
 		/*
 		 * Check if it's time to seed a new data direction.
 		 */
-		if (elapsed >= td->rwmixcycle) {
+		if (elapsed >= td->o.rwmixcycle) {
 			unsigned int v;
 			long r;
 
 			r = os_random_long(&td->rwmix_state);
 			v = 1 + (int) (100.0 * (r / (RAND_MAX + 1.0)));
-			if (v < td->rwmixread)
+			if (v < td->o.rwmixread)
 				td->rwmix_ddir = DDIR_READ;
 			else
 				td->rwmix_ddir = DDIR_WRITE;
@@ -234,14 +234,15 @@
 	/*
 	 * If using an iolog, grab next piece if any available.
 	 */
-	if (td->read_iolog)
+	if (td->o.read_iolog)
 		return read_iolog_get(td, io_u);
 
 	/*
 	 * see if it's time to sync
 	 */
-	if (td->fsync_blocks && !(td->io_issues[DDIR_WRITE] % td->fsync_blocks)
-	    && td->io_issues[DDIR_WRITE] && should_fsync(td)) {
+	if (td->o.fsync_blocks &&
+	   !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks) &&
+	     td->io_issues[DDIR_WRITE] && should_fsync(td)) {
 		io_u->ddir = DDIR_SYNC;
 		return 0;
 	}
@@ -262,13 +263,13 @@
 	/*
 	 * mark entry before potentially trimming io_u
 	 */
-	if (!td->read_iolog && td_random(td) && !td->norandommap)
+	if (!td->o.read_iolog && td_random(td) && !td->o.norandommap)
 		mark_random_map(td, io_u);
 
 	/*
 	 * If using a write iolog, store this entry.
 	 */
-	if (td->write_iolog_file)
+	if (td->o.write_iolog_file)
 		write_iolog_put(td, io_u);
 
 	return 0;
@@ -348,7 +349,7 @@
 	do {
 		long r = os_random_long(&td->next_file_state);
 
-		fno = (unsigned int) ((double) td->nr_files * (r / (RAND_MAX + 1.0)));
+		fno = (unsigned int) ((double) td->o.nr_files * (r / (RAND_MAX + 1.0)));
 		f = &td->files[fno];
 
 		if ((!goodf || (f->flags & goodf)) && !(f->flags & badf))
@@ -369,7 +370,7 @@
 		f = &td->files[td->next_file];
 
 		td->next_file++;
-		if (td->next_file >= td->nr_files)
+		if (td->next_file >= td->o.nr_files)
 			td->next_file = 0;
 
 		if ((!goodf || (f->flags & goodf)) && !(f->flags & badf))
@@ -385,7 +386,7 @@
 {
 	struct fio_file *f;
 
-	assert(td->nr_files <= td->files_index);
+	assert(td->o.nr_files <= td->files_index);
 
 	if (!td->nr_open_files)
 		return NULL;
@@ -394,7 +395,7 @@
 	if (f && (f->flags & FIO_FILE_OPEN) && td->file_service_left--)
 		return f;
 
-	if (td->file_service_type == FIO_FSERVICE_RR)
+	if (td->o.file_service_type == FIO_FSERVICE_RR)
 		f = get_next_file_rr(td, FIO_FILE_OPEN, FIO_FILE_CLOSING);
 	else
 		f = get_next_file_rand(td, FIO_FILE_OPEN, FIO_FILE_CLOSING);
@@ -408,7 +409,7 @@
 {
 	struct fio_file *f;
 
-	if (td->file_service_type == FIO_FSERVICE_RR)
+	if (td->o.file_service_type == FIO_FSERVICE_RR)
 		f = get_next_file_rr(td, 0, FIO_FILE_OPEN);
 	else
 		f = get_next_file_rand(td, 0, FIO_FILE_OPEN);
@@ -487,8 +488,8 @@
 		 * probably not the right place to do this, but see
 		 * if we need to open a new file
 		 */
-		if (td->nr_open_files < td->open_files &&
-		    td->open_files != td->nr_files) {
+		if (td->nr_open_files < td->o.open_files &&
+		    td->o.open_files != td->o.nr_files) {
 			f = find_next_new_file(td);
 
 			if (!f || (ret = td_io_open_file(td, f))) {
@@ -499,9 +500,9 @@
 		}
 	} while (1);
 
-	if (td->zone_bytes >= td->zone_size) {
+	if (td->zone_bytes >= td->o.zone_size) {
 		td->zone_bytes = 0;
-		f->last_pos += td->zone_skip;
+		f->last_pos += td->o.zone_skip;
 	}
 
 	if (io_u->buflen + io_u->offset > f->real_file_size) {
@@ -521,7 +522,7 @@
 
 		f->last_pos = io_u->offset + io_u->buflen;
 
-		if (td->verify != VERIFY_NONE)
+		if (td->o.verify != VERIFY_NONE)
 			populate_verify_io_u(td, io_u);
 	}
 
diff --git a/ioengines.c b/ioengines.c
index b18bc9a..0e5ea62 100644
--- a/ioengines.c
+++ b/ioengines.c
@@ -215,7 +215,7 @@
 		int r;
 
 		td->io_u_queued++;
-		if (td->io_u_queued > td->iodepth_batch) {
+		if (td->io_u_queued > td->o.iodepth_batch) {
 			r = td_io_commit(td);
 			if (r < 0)
 				return r;
diff --git a/log.c b/log.c
index 298716b..dbca3cc 100644
--- a/log.c
+++ b/log.c
@@ -56,7 +56,7 @@
 	 * be laid out with the block scattered as written. it's faster to
 	 * read them in in that order again, so don't sort
 	 */
-	if (!td_random(td) || !td->overwrite) {
+	if (!td_random(td) || !td->o.overwrite) {
 		list_add_tail(&ipo->list, &td->io_hist_list);
 		return;
 	}
@@ -93,7 +93,7 @@
 	FILE *f;
 	int rw, reads, writes;
 
-	f = fopen(td->read_iolog_file, "r");
+	f = fopen(td->o.read_iolog_file, "r");
 	if (!f) {
 		perror("fopen read iolog");
 		return 1;
@@ -126,8 +126,8 @@
 		ipo->offset = offset;
 		ipo->len = bytes;
 		ipo->ddir = (enum fio_ddir) rw;
-		if (bytes > td->max_bs[rw])
-			td->max_bs[rw] = bytes;
+		if (bytes > td->o.max_bs[rw])
+			td->o.max_bs[rw] = bytes;
 		list_add_tail(&ipo->list, &td->io_log_list);
 	}
 
@@ -137,11 +137,11 @@
 	if (!reads && !writes)
 		return 1;
 	else if (reads && !writes)
-		td->td_ddir = TD_DDIR_READ;
+		td->o.td_ddir = TD_DDIR_READ;
 	else if (!reads && writes)
-		td->td_ddir = TD_DDIR_READ;
+		td->o.td_ddir = TD_DDIR_READ;
 	else
-		td->td_ddir = TD_DDIR_RW;
+		td->o.td_ddir = TD_DDIR_RW;
 
 	return 0;
 }
@@ -153,7 +153,7 @@
 {
 	FILE *f;
 
-	f = fopen(td->write_iolog_file, "w+");
+	f = fopen(td->o.write_iolog_file, "w+");
 	if (!f) {
 		perror("fopen write iolog");
 		return 1;
@@ -175,9 +175,9 @@
 	if (td->io_ops->flags & FIO_DISKLESSIO)
 		return 0;
 
-	if (td->read_iolog_file)
+	if (td->o.read_iolog_file)
 		ret = init_iolog_read(td);
-	else if (td->write_iolog_file)
+	else if (td->o.write_iolog_file)
 		ret = init_iolog_write(td);
 
 	return ret;
diff --git a/memory.c b/memory.c
index 9df2321..f108e47 100644
--- a/memory.c
+++ b/memory.c
@@ -59,12 +59,12 @@
  */
 int allocate_io_mem(struct thread_data *td)
 {
-	if (td->mem_type == MEM_MALLOC)
+	if (td->o.mem_type == MEM_MALLOC)
 		td->orig_buffer = malloc(td->orig_buffer_size);
-	else if (td->mem_type == MEM_SHM || td->mem_type == MEM_SHMHUGE) {
+	else if (td->o.mem_type == MEM_SHM || td->o.mem_type == MEM_SHMHUGE) {
 		int flags = IPC_CREAT | SHM_R | SHM_W;
 
-		if (td->mem_type == MEM_SHMHUGE)
+		if (td->o.mem_type == MEM_SHMHUGE)
 			flags |= SHM_HUGETLB;
 
 		td->shm_id = shmget(IPC_PRIVATE, td->orig_buffer_size, flags);
@@ -81,7 +81,8 @@
 			td->orig_buffer = NULL;
 			return 1;
 		}
-	} else if (td->mem_type == MEM_MMAP || td->mem_type == MEM_MMAPHUGE) {
+	} else if (td->o.mem_type == MEM_MMAP ||
+		   td->o.mem_type == MEM_MMAPHUGE) {
 		int flags = MAP_PRIVATE;
 
 		td->mmapfd = 0;
@@ -117,14 +118,15 @@
 
 void free_io_mem(struct thread_data *td)
 {
-	if (td->mem_type == MEM_MALLOC)
+	if (td->o.mem_type == MEM_MALLOC)
 		free(td->orig_buffer);
-	else if (td->mem_type == MEM_SHM || td->mem_type == MEM_SHMHUGE) {
+	else if (td->o.mem_type == MEM_SHM || td->o.mem_type == MEM_SHMHUGE) {
 		struct shmid_ds sbuf;
 
 		shmdt(td->orig_buffer);
 		shmctl(td->shm_id, IPC_RMID, &sbuf);
-	} else if (td->mem_type == MEM_MMAP || td->mem_type == MEM_MMAPHUGE) {
+	} else if (td->o.mem_type == MEM_MMAP ||
+		   td->o.mem_type == MEM_MMAPHUGE) {
 		munmap(td->orig_buffer, td->orig_buffer_size);
 		if (td->mmapfile) {
 			close(td->mmapfd);
@@ -132,7 +134,7 @@
 			free(td->mmapfile);
 		}
 	} else
-		log_err("Bad memory type %u\n", td->mem_type);
+		log_err("Bad memory type %u\n", td->o.mem_type);
 
 	td->orig_buffer = NULL;
 }
diff --git a/options.c b/options.c
index 81c864d..e7356f6 100644
--- a/options.c
+++ b/options.c
@@ -9,7 +9,7 @@
 #include "fio.h"
 #include "parse.h"
 
-#define td_var_offset(var)	((size_t) &((struct thread_data *)0)->var)
+#define td_var_offset(var)	((size_t) &((struct thread_options *)0)->var)
 
 /*
  * Check if mmap/mmaphuge has a :/foo/bar/file at the end. If so, return that.
@@ -31,9 +31,9 @@
 {
 	struct thread_data *td = data;
 
-	if (td->mem_type == MEM_MMAPHUGE || td->mem_type == MEM_MMAP) {
+	if (td->o.mem_type == MEM_MMAPHUGE || td->o.mem_type == MEM_MMAP) {
 		td->mmapfile = get_opt_postfix(mem);
-		if (td->mem_type == MEM_MMAPHUGE && !td->mmapfile) {
+		if (td->o.mem_type == MEM_MMAPHUGE && !td->mmapfile) {
 			log_err("fio: mmaphuge:/path/to/file\n");
 			return 1;
 		}
@@ -90,7 +90,7 @@
 {
 	struct thread_data *td = data;
 
-	fill_cpu_mask(td->cpumask, *val);
+	fill_cpu_mask(td->o.cpumask, *val);
 	return 0;
 }
 
@@ -117,13 +117,13 @@
 	strip_blank_end(str);
 
 	if (!td->files_index)
-		td->nr_files = 0;
+		td->o.nr_files = 0;
 
 	while ((fname = strsep(&str, ":")) != NULL) {
 		if (!strlen(fname))
 			break;
 		add_file(td, fname);
-		td->nr_files++;
+		td->o.nr_files++;
 	}
 
 	free(p);
@@ -135,13 +135,13 @@
 	struct thread_data *td = data;
 	struct stat sb;
 
-	if (lstat(td->directory, &sb) < 0) {
-		log_err("fio: %s is not a directory\n", td->directory);
+	if (lstat(td->o.directory, &sb) < 0) {
+		log_err("fio: %s is not a directory\n", td->o.directory);
 		td_verror(td, errno, "lstat");
 		return 1;
 	}
 	if (!S_ISDIR(sb.st_mode)) {
-		log_err("fio: %s is not a directory\n", td->directory);
+		log_err("fio: %s is not a directory\n", td->o.directory);
 		return 1;
 	}
 
@@ -153,9 +153,9 @@
 	struct thread_data *td = data;
 
 	if (!td->files_index)
-		td->nr_files = 0;
+		td->o.nr_files = 0;
 
-	return add_dir_files(td, td->opendir);
+	return add_dir_files(td, td->o.opendir);
 }
 
 
@@ -307,7 +307,7 @@
 	{
 		.name	= "size",
 		.type	= FIO_OPT_STR_VAL,
-		.off1	= td_var_offset(total_file_size),
+		.off1	= td_var_offset(size),
 		.help	= "Total size of device or files",
 	},
 	{
diff --git a/os-linux.h b/os-linux.h
index d3b2e93..1ed3d3b 100644
--- a/os-linux.h
+++ b/os-linux.h
@@ -32,7 +32,7 @@
 	posix_fadvise((fd), (off_t)(off), (len), (advice))
 
 #define fio_setaffinity(td)		\
-	sched_setaffinity((td)->pid, sizeof((td)->cpumask), &(td)->cpumask)
+	sched_setaffinity((td)->pid, sizeof((td)->o.cpumask), &(td)->o.cpumask)
 #define fio_getaffinity(pid, ptr)	\
 	sched_getaffinity((pid), sizeof(cpu_set_t), (ptr))
 
diff --git a/stat.c b/stat.c
index dda2687..4fe0dbd 100644
--- a/stat.c
+++ b/stat.c
@@ -302,7 +302,7 @@
 		sprintf(foo, "%s", tmp);
 	}
 
-	if (td->ioscheduler && !td->sysfs_root)
+	if (td->o.ioscheduler && !td->sysfs_root)
 		td->sysfs_root = strdup(foo);
 
 	disk_util_add(dev, foo);
@@ -313,7 +313,7 @@
 	struct fio_file *f;
 	unsigned int i;
 
-	if (!td->do_disk_util ||
+	if (!td->o.do_disk_util ||
 	    (td->io_ops->flags & (FIO_DISKLESSIO | FIO_NODISKUTIL)))
 		return;
 
@@ -652,7 +652,7 @@
 	nr_ts = 0;
 	last_ts = -1;
 	for_each_td(td, i) {
-		if (!td->group_reporting) {
+		if (!td->o.group_reporting) {
 			nr_ts++;
 			continue;
 		}
@@ -681,8 +681,8 @@
 	last_ts = -1;
 	idx = 0;
 	for_each_td(td, i) {
-		if (idx && (!td->group_reporting ||
-		    (td->group_reporting && last_ts != td->groupid))) {
+		if (idx && (!td->o.group_reporting ||
+		    (td->o.group_reporting && last_ts != td->groupid))) {
 			idx = 0;
 			j++;
 		}
@@ -698,8 +698,8 @@
 			/*
 			 * These are per-group shared already
 			 */
-			ts->name = td->name;
-			ts->description = td->description;
+			ts->name = td->o.name;
+			ts->description = td->o.description;
 			ts->groupid = td->groupid;
 
 			/*
@@ -877,7 +877,7 @@
 	unsigned long spent = mtime_since(&ts->stat_sample_time[ddir], t);
 	unsigned long rate;
 
-	if (spent < td->bw_avg_time)
+	if (spent < td->o.bw_avg_time)
 		return;
 
 	rate = (td->this_io_bytes[ddir] - ts->stat_io_bytes[ddir]) / spent;
diff --git a/time.c b/time.c
index 80f8ca4..108c0fc 100644
--- a/time.c
+++ b/time.c
@@ -114,15 +114,15 @@
 	unsigned long usec_cycle;
 	unsigned int bs;
 
-	if (!td->rate && !td->rate_iops)
+	if (!td->o.rate && !td->o.rate_iops)
 		return;
 
 	if (td_rw(td))
-		bs = td->rw_min_bs;
+		bs = td->o.rw_min_bs;
 	else if (td_read(td))
-		bs = td->min_bs[DDIR_READ];
+		bs = td->o.min_bs[DDIR_READ];
 	else
-		bs = td->min_bs[DDIR_WRITE];
+		bs = td->o.min_bs[DDIR_WRITE];
 
 	usec_cycle = td->rate_usec_cycle * (bytes / bs);
 
diff --git a/verify.c b/verify.c
index a9441b4..f748065 100644
--- a/verify.c
+++ b/verify.c
@@ -131,7 +131,7 @@
 	p += sizeof(hdr);
 	fill_random_bytes(td, p, io_u->buflen - sizeof(hdr));
 
-	if (td->verify == VERIFY_MD5) {
+	if (td->o.verify == VERIFY_MD5) {
 		fill_md5(&hdr, p, io_u->buflen - sizeof(hdr));
 		hdr.verify_type = VERIFY_MD5;
 	} else {