#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>

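/*
 * Per-command state, one per outstanding request. Which of rq/bio is
 * valid depends on the queue_mode the device was created with.
 */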
struct nullb_cmd {
	struct list_head list;
	struct llist_node ll_list;
	struct call_single_data csd;
	struct request *rq;
	struct bio *bio;
	unsigned int tag;
	struct nullb_queue *nq;
};

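/*
 * Per-queue state: a bitmap tracking in-flight tags and the preallocated
 * commands those tags index into.
 */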
struct nullb_queue {
	unsigned long *tag_map;
	wait_queue_head_t wait;
	unsigned int queue_depth;

	struct nullb_cmd *cmds;
};

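/* Per-device state, one per /dev/nullb<index>. */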
struct nullb {
	struct list_head list;
	unsigned int index;
	struct request_queue *q;
	struct gendisk *disk;
	struct blk_mq_tag_set tag_set;
	struct hrtimer timer;
	unsigned int queue_depth;
	spinlock_t lock;

	struct nullb_queue *queues;
	unsigned int nr_queues;
};

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static int nullb_indexes;

struct completion_queue {
	struct llist_head list;
	struct hrtimer timer;
};

/*
 * These are per-cpu for now, they will need to be configured by the
 * complete_queues parameter and appropriately mapped.
 */
static DEFINE_PER_CPU(struct completion_queue, completion_queues);

enum {
	NULL_IRQ_NONE		= 0,
	NULL_IRQ_SOFTIRQ	= 1,
	NULL_IRQ_TIMER		= 2,
};

enum {
	NULL_Q_BIO		= 0,
	NULL_Q_RQ		= 1,
	NULL_Q_MQ		= 2,
};

static int submit_queues;
module_param(submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int home_node = NUMA_NO_NODE;
module_param(home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");

static int queue_mode = NULL_Q_MQ;
module_param(queue_mode, int, S_IRUGO);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

static int gb = 250;
module_param(gb, int, S_IRUGO);
MODULE_PARM_DESC(gb, "Size in GB");

static int bs = 512;
module_param(bs, int, S_IRUGO);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int nr_devices = 2;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static int irqmode = NULL_IRQ_SOFTIRQ;
module_param(irqmode, int, S_IRUGO);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static int completion_nsec = 10000;
module_param(completion_nsec, int, S_IRUGO);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int hw_queue_depth = 64;
module_param(hw_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool use_per_node_hctx = false;
module_param(use_per_node_hctx, bool, S_IRUGO);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

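/*
 * Simple lock-free tag allocator: a tag is a bit in nq->tag_map, claimed
 * with test_and_set_bit_lock() and released with clear_bit_unlock().
 */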
static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}

static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}

static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}

static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	unsigned int tag;

	tag = get_tag(nq);
	if (tag != -1U) {
		cmd = &nq->cmds[tag];
		cmd->tag = tag;
		cmd->nq = nq;
		return cmd;
	}

	return NULL;
}

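/*
 * Allocate a command. With can_wait set, sleep on the queue's waitqueue
 * until a tag is released; otherwise return NULL so the caller can back
 * off (e.g. with BLKPREP_DEFER).
 */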
static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;

	do {
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		cmd = __alloc_cmd(nq);
		if (cmd)
			break;

		io_schedule();
	} while (1);

	finish_wait(&nq->wait, &wait);
	return cmd;
}

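/* Hand a completed command back to the block layer, per queue_mode. */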
static void end_cmd(struct nullb_cmd *cmd)
{
	switch (queue_mode) {
	case NULL_Q_MQ:
		blk_mq_end_io(cmd->rq, 0);
		return;
	case NULL_Q_RQ:
		INIT_LIST_HEAD(&cmd->rq->queuelist);
		blk_end_request_all(cmd->rq, 0);
		break;
	case NULL_Q_BIO:
		bio_endio(cmd->bio, 0);
		break;
	}

	free_cmd(cmd);
}

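/*
 * hrtimer callback for irqmode=2: drain this CPU's completion list.
 * llist_del_all() hands back entries newest-first, so reverse each batch
 * to complete commands in submission order.
 */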
static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	struct completion_queue *cq;
	struct llist_node *entry;
	struct nullb_cmd *cmd;

	cq = &per_cpu(completion_queues, smp_processor_id());

	while ((entry = llist_del_all(&cq->list)) != NULL) {
		entry = llist_reverse_order(entry);
		do {
			cmd = container_of(entry, struct nullb_cmd, ll_list);
			end_cmd(cmd);
			entry = entry->next;
		} while (entry);
	}

	return HRTIMER_NORESTART;
}

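/*
 * Defer completion by queueing the command on this CPU's completion list.
 * llist_add() returns true only if the list was empty, so the hrtimer is
 * armed exactly once per batch.
 */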
static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	struct completion_queue *cq = &per_cpu(completion_queues, get_cpu());

	cmd->ll_list.next = NULL;
	if (llist_add(&cmd->ll_list, &cq->list)) {
		ktime_t kt = ktime_set(0, completion_nsec);

		hrtimer_start(&cq->timer, kt, HRTIMER_MODE_REL);
	}

	put_cpu();
}

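/*
 * Softirq completion: in MQ mode the command lives in the request pdu,
 * otherwise it was stashed in rq->special by null_rq_prep_fn().
 */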
static void null_softirq_done_fn(struct request *rq)
{
	if (queue_mode == NULL_Q_MQ)
		end_cmd(blk_mq_rq_to_pdu(rq));
	else
		end_cmd(rq->special);
}

static inline void null_handle_cmd(struct nullb_cmd *cmd)
{
	/* Complete IO by inline, softirq or timer */
	switch (irqmode) {
	case NULL_IRQ_SOFTIRQ:
		switch (queue_mode) {
		case NULL_Q_MQ:
			blk_mq_complete_request(cmd->rq);
			break;
		case NULL_Q_RQ:
			blk_complete_request(cmd->rq);
			break;
		case NULL_Q_BIO:
			/*
			 * XXX: no proper submitting cpu information available.
			 */
			end_cmd(cmd);
			break;
		}
		break;
	case NULL_IRQ_NONE:
		end_cmd(cmd);
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
}

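/*
 * Map the submitting CPU to a queue by splitting the CPU id space into
 * nr_queues blocks of equal size (rounded up).
 */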
static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
	int index = 0;

	if (nullb->nr_queues != 1)
		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

	return &nullb->queues[index];
}

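/* make_request entry point for the bio-based mode (NULL_Q_BIO). */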
static void null_queue_bio(struct request_queue *q, struct bio *bio)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 1);
	cmd->bio = bio;

	null_handle_cmd(cmd);
}

static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 0);
	if (cmd) {
		cmd->rq = req;
		req->special = cmd;
		return BLKPREP_OK;
	}

	return BLKPREP_DEFER;
}

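/*
 * Request-mode (NULL_Q_RQ) queue runner; ->request_fn is entered with
 * queue_lock held, so drop it while handling each command.
 */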
static void null_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		struct nullb_cmd *cmd = rq->special;

		spin_unlock_irq(q->queue_lock);
		null_handle_cmd(cmd);
		spin_lock_irq(q->queue_lock);
	}
}

static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->rq = rq;
	cmd->nq = hctx->driver_data;

	null_handle_cmd(cmd);
	return BLK_MQ_RQ_QUEUE_OK;
}

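/*
 * Hardware context allocation for use_per_node_hctx: place each hctx on
 * one of the online nodes, spreading the queues as evenly as possible.
 */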
static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_tag_set *set,
					     unsigned int hctx_index)
{
	int b_size = DIV_ROUND_UP(set->nr_hw_queues, nr_online_nodes);
	int tip = (set->nr_hw_queues % nr_online_nodes);
	int node = 0, i, n;

	/*
	 * Split submit queues evenly wrt to the number of nodes. If uneven,
	 * fill the first buckets with one extra, until the rest is filled with
	 * no extra.
	 */
	for (i = 0, n = 1; i < hctx_index; i++, n++) {
		if (n % b_size == 0) {
			n = 0;
			node++;

			tip--;
			if (!tip)
				b_size = set->nr_hw_queues / nr_online_nodes;
		}
	}

	/*
	 * A node might not be online, therefore map the relative node id to the
	 * real node id.
	 */
	for_each_online_node(n) {
		if (!node)
			break;
		node--;
	}

	return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, n);
}

static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
{
	kfree(hctx);
}

static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
	BUG_ON(!nullb);
	BUG_ON(!nq);

	init_waitqueue_head(&nq->wait);
	nq->queue_depth = nullb->queue_depth;
}

static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int index)
{
	struct nullb *nullb = data;
	struct nullb_queue *nq = &nullb->queues[index];

	hctx->driver_data = nq;
	null_init_queue(nullb, nq);
	nullb->nr_queues++;

	return 0;
}

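/*
 * Two ops tables that differ only in hctx allocation: the generic
 * single hw queue helpers, or the per-node variants above.
 */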
static struct blk_mq_ops null_mq_ops = {
	.queue_rq	= null_queue_rq,
	.map_queue	= blk_mq_map_queue,
	.init_hctx	= null_init_hctx,
	.complete	= null_softirq_done_fn,
	.alloc_hctx	= blk_mq_alloc_single_hw_queue,
	.free_hctx	= blk_mq_free_single_hw_queue,
};

static struct blk_mq_ops null_mq_ops_pernode = {
	.queue_rq	= null_queue_rq,
	.map_queue	= blk_mq_map_queue,
	.init_hctx	= null_init_hctx,
	.complete	= null_softirq_done_fn,
	.alloc_hctx	= null_alloc_hctx,
	.free_hctx	= null_free_hctx,
};

static void null_del_dev(struct nullb *nullb)
{
	list_del_init(&nullb->list);

	del_gendisk(nullb->disk);
	blk_cleanup_queue(nullb->q);
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
	put_disk(nullb->disk);
	kfree(nullb);
}

static int null_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void null_release(struct gendisk *disk, fmode_t mode)
{
}

static const struct block_device_operations null_fops = {
	.owner =	THIS_MODULE,
	.open =		null_open,
	.release =	null_release,
};

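/* Preallocate a queue's commands and the tag bitmap that tracks them. */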
static int setup_commands(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	int i, tag_size;

	nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
	if (!nq->cmds)
		return -ENOMEM;

	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
	nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
	if (!nq->tag_map) {
		kfree(nq->cmds);
		return -ENOMEM;
	}

	for (i = 0; i < nq->queue_depth; i++) {
		cmd = &nq->cmds[i];
		INIT_LIST_HEAD(&cmd->list);
		cmd->ll_list.next = NULL;
		cmd->tag = -1U;
	}

	return 0;
}

static void cleanup_queue(struct nullb_queue *nq)
{
	kfree(nq->tag_map);
	kfree(nq->cmds);
}

static void cleanup_queues(struct nullb *nullb)
{
	int i;

	for (i = 0; i < nullb->nr_queues; i++)
		cleanup_queue(&nullb->queues[i]);

	kfree(nullb->queues);
}

static int setup_queues(struct nullb *nullb)
{
	nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
								GFP_KERNEL);
	if (!nullb->queues)
		return -ENOMEM;

	nullb->nr_queues = 0;
	nullb->queue_depth = hw_queue_depth;

	return 0;
}

static int init_driver_queues(struct nullb *nullb)
{
	struct nullb_queue *nq;
	int i, ret = 0;

	for (i = 0; i < submit_queues; i++) {
		nq = &nullb->queues[i];

		null_init_queue(nullb, nq);

		ret = setup_commands(nq);
		if (ret)
			goto err_queue;
		nullb->nr_queues++;
	}

	return 0;
err_queue:
	cleanup_queues(nullb);
	return ret;
}

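/*
 * Create one null block device: set up the queues for the configured
 * queue_mode (blk-mq, bio or legacy request_fn), then allocate and
 * register the disk.
 */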
static int null_add_dev(void)
{
	struct gendisk *disk;
	struct nullb *nullb;
	sector_t size;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
	if (!nullb)
		goto out;

	spin_lock_init(&nullb->lock);

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
		submit_queues = nr_online_nodes;

	if (setup_queues(nullb))
		goto out_free_nullb;

	if (queue_mode == NULL_Q_MQ) {
		if (use_per_node_hctx)
			nullb->tag_set.ops = &null_mq_ops_pernode;
		else
			nullb->tag_set.ops = &null_mq_ops;
		nullb->tag_set.nr_hw_queues = submit_queues;
		nullb->tag_set.queue_depth = hw_queue_depth;
		nullb->tag_set.numa_node = home_node;
		nullb->tag_set.cmd_size = sizeof(struct nullb_cmd);
		nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
		nullb->tag_set.driver_data = nullb;

		if (blk_mq_alloc_tag_set(&nullb->tag_set))
			goto out_cleanup_queues;

		nullb->q = blk_mq_init_queue(&nullb->tag_set);
		if (!nullb->q)
			goto out_cleanup_tags;
	} else if (queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
		if (!nullb->q)
			goto out_cleanup_queues;
		blk_queue_make_request(nullb->q, null_queue_bio);
		init_driver_queues(nullb);
	} else {
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
		if (!nullb->q)
			goto out_cleanup_queues;
		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
		init_driver_queues(nullb);
	}

	nullb->q->queuedata = nullb;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);

	disk = nullb->disk = alloc_disk_node(1, home_node);
	if (!disk)
		goto out_cleanup_blk_queue;

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	nullb->index = nullb_indexes++;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, bs);
	blk_queue_physical_block_size(nullb->q, bs);

	size = gb * 1024 * 1024 * 1024ULL;
	sector_div(size, bs);
	set_capacity(disk, size);

	disk->flags |= GENHD_FL_EXT_DEVT;
	disk->major		= null_major;
	disk->first_minor	= nullb->index;
	disk->fops		= &null_fops;
	disk->private_data	= nullb;
	disk->queue		= nullb->q;
	sprintf(disk->disk_name, "nullb%d", nullb->index);
	add_disk(disk);
	return 0;

out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return -ENOMEM;
}

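/*
 * Module init: validate parameters, set up the per-CPU completion queues
 * and register the requested number of devices.
 */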
static int __init null_init(void)
{
	unsigned int i;

	if (bs > PAGE_SIZE) {
		pr_warn("null_blk: invalid block size\n");
		pr_warn("null_blk: defaulting block size to %lu\n", PAGE_SIZE);
		bs = PAGE_SIZE;
	}

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
		if (submit_queues < nr_online_nodes) {
			pr_warn("null_blk: submit_queues param is set to %u.\n",
							nr_online_nodes);
			submit_queues = nr_online_nodes;
		}
	} else if (submit_queues > nr_cpu_ids)
		submit_queues = nr_cpu_ids;
	else if (!submit_queues)
		submit_queues = 1;

	mutex_init(&lock);

	/* Initialize a separate list for each CPU for issuing softirqs */
	for_each_possible_cpu(i) {
		struct completion_queue *cq = &per_cpu(completion_queues, i);

		init_llist_head(&cq->list);

		if (irqmode != NULL_IRQ_TIMER)
			continue;

		hrtimer_init(&cq->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cq->timer.function = null_cmd_timer_expired;
	}

	null_major = register_blkdev(0, "nullb");
	if (null_major < 0)
		return null_major;

	for (i = 0; i < nr_devices; i++) {
		if (null_add_dev()) {
			/* Tear down any devices already registered */
			while (!list_empty(&nullb_list))
				null_del_dev(list_entry(nullb_list.next,
							struct nullb, list));
			unregister_blkdev(null_major, "nullb");
			return -EINVAL;
		}
	}

	pr_info("null_blk: module loaded\n");
	return 0;
}

static void __exit null_exit(void)
{
	struct nullb *nullb;

	unregister_blkdev(null_major, "nullb");

	mutex_lock(&lock);
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		null_del_dev(nullb);
	}
	mutex_unlock(&lock);
}

module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <jaxboe@fusionio.com>");
MODULE_LICENSE("GPL");