#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>

struct nullb_cmd {
	struct list_head list;
	struct llist_node ll_list;
	struct call_single_data csd;
	struct request *rq;
	struct bio *bio;
	unsigned int tag;
	struct nullb_queue *nq;
};

struct nullb_queue {
	unsigned long *tag_map;
	wait_queue_head_t wait;
	unsigned int queue_depth;

	struct nullb_cmd *cmds;
};

struct nullb {
	struct list_head list;
	unsigned int index;
	struct request_queue *q;
	struct gendisk *disk;
	struct blk_mq_tag_set tag_set;
	struct hrtimer timer;
	unsigned int queue_depth;
	spinlock_t lock;

	struct nullb_queue *queues;
	unsigned int nr_queues;
};

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static int nullb_indexes;

struct completion_queue {
	struct llist_head list;
	struct hrtimer timer;
};

/*
 * These are per-cpu for now, they will need to be configured by the
 * complete_queues parameter and appropriately mapped.
 */
static DEFINE_PER_CPU(struct completion_queue, completion_queues);

enum {
	NULL_IRQ_NONE		= 0,
	NULL_IRQ_SOFTIRQ	= 1,
	NULL_IRQ_TIMER		= 2,
};

enum {
	NULL_Q_BIO		= 0,
	NULL_Q_RQ		= 1,
	NULL_Q_MQ		= 2,
};

static int submit_queues;
module_param(submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int home_node = NUMA_NO_NODE;
module_param(home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");

static int queue_mode = NULL_Q_MQ;

static int null_param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
	.set	= null_set_queue_mode,
	.get	= param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &queue_mode, S_IRUGO);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

static int gb = 250;
module_param(gb, int, S_IRUGO);
MODULE_PARM_DESC(gb, "Size in GB");

static int bs = 512;
module_param(bs, int, S_IRUGO);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int nr_devices = 2;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static int irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &irqmode, NULL_IRQ_NONE,
					NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
	.set	= null_set_irqmode,
	.get	= param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &irqmode, S_IRUGO);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static int completion_nsec = 10000;
module_param(completion_nsec, int, S_IRUGO);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int hw_queue_depth = 64;
module_param(hw_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool use_per_node_hctx = false;
module_param(use_per_node_hctx, bool, S_IRUGO);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
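/*
 * Example invocation (hypothetical; the exact module name depends on the
 * build, commonly null_blk):
 *
 *	modprobe null_blk queue_mode=2 irqmode=1 completion_nsec=10000 \
 *		 gb=250 bs=512 nr_devices=2 hw_queue_depth=64
 */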

static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}

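/*
 * Find a free tag in the per-queue bitmap without holding a lock:
 * find_first_zero_bit() can race with other allocators, so the candidate
 * bit is claimed with test_and_set_bit_lock() and the search is retried
 * on failure. Returns -1U when the queue is at full depth.
 */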
static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}

static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}

static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	unsigned int tag;

	tag = get_tag(nq);
	if (tag != -1U) {
		cmd = &nq->cmds[tag];
		cmd->tag = tag;
		cmd->nq = nq;
		return cmd;
	}

	return NULL;
}

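/*
 * Allocate a command for the queue. If no tag is free and @can_wait is
 * set, sleep uninterruptibly on the queue's waitqueue until put_tag()
 * wakes us; otherwise return NULL immediately.
 */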
static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;

	do {
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		cmd = __alloc_cmd(nq);
		if (cmd)
			break;

		io_schedule();
	} while (1);

	finish_wait(&nq->wait, &wait);
	return cmd;
}

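/*
 * Complete a command back to the block layer, using whichever completion
 * path matches the configured queue_mode, then release its tag.
 */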
static void end_cmd(struct nullb_cmd *cmd)
{
	switch (queue_mode) {
	case NULL_Q_MQ:
		blk_mq_end_request(cmd->rq, 0);
		return;
	case NULL_Q_RQ:
		INIT_LIST_HEAD(&cmd->rq->queuelist);
		blk_end_request_all(cmd->rq, 0);
		break;
	case NULL_Q_BIO:
		bio_endio(cmd->bio);
		break;
	}

	free_cmd(cmd);
}

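/*
 * Expiry handler for the per-cpu completion timer. It runs on the CPU the
 * timer was pinned to, so smp_processor_id() picks up the right queue:
 * drain the llist (reversing it to restore submission order), end each
 * command, and restart any stopped legacy request_queue along the way.
 */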
static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	struct completion_queue *cq;
	struct llist_node *entry;
	struct nullb_cmd *cmd;

	cq = &per_cpu(completion_queues, smp_processor_id());

	while ((entry = llist_del_all(&cq->list)) != NULL) {
		entry = llist_reverse_order(entry);
		do {
			struct request_queue *q = NULL;

			cmd = container_of(entry, struct nullb_cmd, ll_list);
			entry = entry->next;
			if (cmd->rq)
				q = cmd->rq->q;
			end_cmd(cmd);

			if (q && !q->mq_ops && blk_queue_stopped(q)) {
				spin_lock(q->queue_lock);
				if (blk_queue_stopped(q))
					blk_start_queue(q);
				spin_unlock(q->queue_lock);
			}
		} while (entry);
	}

	return HRTIMER_NORESTART;
}

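/*
 * Queue a command for delayed completion. llist_add() returns true only
 * when the list was empty, so the hrtimer is armed exactly once per batch,
 * completion_nsec from now, pinned to the current CPU.
 */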
static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	struct completion_queue *cq = &per_cpu(completion_queues, get_cpu());

	cmd->ll_list.next = NULL;
	if (llist_add(&cmd->ll_list, &cq->list)) {
		ktime_t kt = ktime_set(0, completion_nsec);

		hrtimer_start(&cq->timer, kt, HRTIMER_MODE_REL_PINNED);
	}

	put_cpu();
}

static void null_softirq_done_fn(struct request *rq)
{
	if (queue_mode == NULL_Q_MQ)
		end_cmd(blk_mq_rq_to_pdu(rq));
	else
		end_cmd(rq->special);
}

static inline void null_handle_cmd(struct nullb_cmd *cmd)
{
	/* Complete IO by inline, softirq or timer */
	switch (irqmode) {
	case NULL_IRQ_SOFTIRQ:
		switch (queue_mode) {
		case NULL_Q_MQ:
			blk_mq_complete_request(cmd->rq);
			break;
		case NULL_Q_RQ:
			blk_complete_request(cmd->rq);
			break;
		case NULL_Q_BIO:
			/*
			 * XXX: no proper submitting cpu information available.
			 */
			end_cmd(cmd);
			break;
		}
		break;
	case NULL_IRQ_NONE:
		end_cmd(cmd);
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
}

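/*
 * Map the submitting CPU to one of the device's queues by dividing the
 * CPU id space evenly across nr_queues.
 */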
static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
	int index = 0;

	if (nullb->nr_queues != 1)
		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

	return &nullb->queues[index];
}

static void null_queue_bio(struct request_queue *q, struct bio *bio)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 1);
	cmd->bio = bio;

	null_handle_cmd(cmd);
}

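/*
 * Legacy request_queue prep: attach a command to the request, or stop the
 * queue and ask for a deferred retry when no tag is available. Stopped
 * queues are restarted from null_cmd_timer_expired() as timed completions
 * finish.
 */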
static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 0);
	if (cmd) {
		cmd->rq = req;
		req->special = cmd;
		return BLKPREP_OK;
	}
	blk_stop_queue(q);

	return BLKPREP_DEFER;
}

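/*
 * Legacy request_fn: drain prepared requests, dropping queue_lock around
 * null_handle_cmd() because the inline completion path
 * (blk_end_request_all()) takes the lock itself.
 */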
static void null_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		struct nullb_cmd *cmd = rq->special;

		spin_unlock_irq(q->queue_lock);
		null_handle_cmd(cmd);
		spin_lock_irq(q->queue_lock);
	}
}

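/*
 * blk-mq queue_rq: the command lives in the request's pdu (sized via
 * cmd_size in the tag_set), so no driver-side tag allocation is needed.
 */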
static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);

	cmd->rq = bd->rq;
	cmd->nq = hctx->driver_data;

	blk_mq_start_request(bd->rq);

	null_handle_cmd(cmd);
	return BLK_MQ_RQ_QUEUE_OK;
}

static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
	BUG_ON(!nullb);
	BUG_ON(!nq);

	init_waitqueue_head(&nq->wait);
	nq->queue_depth = nullb->queue_depth;
}

static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int index)
{
	struct nullb *nullb = data;
	struct nullb_queue *nq = &nullb->queues[index];

	hctx->driver_data = nq;
	null_init_queue(nullb, nq);
	nullb->nr_queues++;

	return 0;
}

static struct blk_mq_ops null_mq_ops = {
	.queue_rq	= null_queue_rq,
	.map_queue	= blk_mq_map_queue,
	.init_hctx	= null_init_hctx,
	.complete	= null_softirq_done_fn,
};

static void cleanup_queue(struct nullb_queue *nq)
{
	kfree(nq->tag_map);
	kfree(nq->cmds);
}

static void cleanup_queues(struct nullb *nullb)
{
	int i;

	for (i = 0; i < nullb->nr_queues; i++)
		cleanup_queue(&nullb->queues[i]);

	kfree(nullb->queues);
}

static void null_del_dev(struct nullb *nullb)
{
	list_del_init(&nullb->list);

	del_gendisk(nullb->disk);
	blk_cleanup_queue(nullb->q);
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
	put_disk(nullb->disk);
	cleanup_queues(nullb);
	kfree(nullb);
}

static int null_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void null_release(struct gendisk *disk, fmode_t mode)
{
}

static const struct block_device_operations null_fops = {
	.owner =	THIS_MODULE,
	.open =		null_open,
	.release =	null_release,
};

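/*
 * Allocate the per-queue command array plus a bitmap with one bit per
 * command, rounded up to a whole number of longs, to serve as the tag map.
 */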
static int setup_commands(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	int i, tag_size;

	nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
	if (!nq->cmds)
		return -ENOMEM;

	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
	nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
	if (!nq->tag_map) {
		kfree(nq->cmds);
		return -ENOMEM;
	}

	for (i = 0; i < nq->queue_depth; i++) {
		cmd = &nq->cmds[i];
		INIT_LIST_HEAD(&cmd->list);
		cmd->ll_list.next = NULL;
		cmd->tag = -1U;
	}

	return 0;
}

static int setup_queues(struct nullb *nullb)
{
	nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
								GFP_KERNEL);
	if (!nullb->queues)
		return -ENOMEM;

	nullb->nr_queues = 0;
	nullb->queue_depth = hw_queue_depth;

	return 0;
}

static int init_driver_queues(struct nullb *nullb)
{
	struct nullb_queue *nq;
	int i, ret = 0;

	for (i = 0; i < submit_queues; i++) {
		nq = &nullb->queues[i];

		null_init_queue(nullb, nq);

		ret = setup_commands(nq);
		if (ret)
			return ret;
		nullb->nr_queues++;
	}
	return 0;
}

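/*
 * Create one nullb device: allocate the queue set, build a request_queue
 * for the selected queue_mode (blk-mq tag set, bio-based, or legacy
 * request_fn), then size and register the gendisk.
 */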
static int null_add_dev(void)
{
	struct gendisk *disk;
	struct nullb *nullb;
	sector_t size;
	int rv;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}

	spin_lock_init(&nullb->lock);

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
		submit_queues = nr_online_nodes;

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (queue_mode == NULL_Q_MQ) {
		nullb->tag_set.ops = &null_mq_ops;
		nullb->tag_set.nr_hw_queues = submit_queues;
		nullb->tag_set.queue_depth = hw_queue_depth;
		nullb->tag_set.numa_node = home_node;
		nullb->tag_set.cmd_size = sizeof(struct nullb_cmd);
		nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
		nullb->tag_set.driver_data = nullb;

		rv = blk_mq_alloc_tag_set(&nullb->tag_set);
		if (rv)
			goto out_cleanup_queues;

		nullb->q = blk_mq_init_queue(&nullb->tag_set);
		if (IS_ERR(nullb->q)) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
	} else if (queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	} else {
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	}

	nullb->q->queuedata = nullb;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	disk = nullb->disk = alloc_disk_node(1, home_node);
	if (!disk) {
		rv = -ENOMEM;
		goto out_cleanup_blk_queue;
	}

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	nullb->index = nullb_indexes++;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, bs);
	blk_queue_physical_block_size(nullb->q, bs);

	size = gb * 1024 * 1024 * 1024ULL;
	sector_div(size, bs);
	set_capacity(disk, size);

	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
	disk->major		= null_major;
	disk->first_minor	= nullb->index;
	disk->fops		= &null_fops;
	disk->private_data	= nullb;
	disk->queue		= nullb->q;
	sprintf(disk->disk_name, "nullb%d", nullb->index);
	add_disk(disk);
	return 0;

out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return rv;
}

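/*
 * Module init: clamp bs and submit_queues to sane values, set up the
 * per-cpu completion queues (arming hrtimers only when irqmode is timer),
 * register the block major, and create nr_devices instances.
 */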
static int __init null_init(void)
{
	unsigned int i;

	if (bs > PAGE_SIZE) {
		pr_warn("null_blk: invalid block size\n");
		pr_warn("null_blk: defaulting block size to %lu\n", PAGE_SIZE);
		bs = PAGE_SIZE;
	}

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
		if (submit_queues < nr_online_nodes) {
			pr_warn("null_blk: submit_queues param is set to %u.\n",
							nr_online_nodes);
			submit_queues = nr_online_nodes;
		}
	} else if (submit_queues > nr_cpu_ids)
		submit_queues = nr_cpu_ids;
	else if (!submit_queues)
		submit_queues = 1;

	mutex_init(&lock);

	/* Initialize a separate list for each CPU for issuing softirqs */
	for_each_possible_cpu(i) {
		struct completion_queue *cq = &per_cpu(completion_queues, i);

		init_llist_head(&cq->list);

		if (irqmode != NULL_IRQ_TIMER)
			continue;

		hrtimer_init(&cq->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cq->timer.function = null_cmd_timer_expired;
	}

	null_major = register_blkdev(0, "nullb");
	if (null_major < 0)
		return null_major;

	for (i = 0; i < nr_devices; i++) {
		if (null_add_dev()) {
			unregister_blkdev(null_major, "nullb");
			return -EINVAL;
		}
	}

	pr_info("null_blk: module loaded\n");
	return 0;
}

static void __exit null_exit(void)
{
	struct nullb *nullb;

	unregister_blkdev(null_major, "nullb");

	mutex_lock(&lock);
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		null_del_dev(nullb);
	}
	mutex_unlock(&lock);
}

module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <jaxboe@fusionio.com>");
MODULE_LICENSE("GPL");