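/*
 * null_blk: a block device driver that completes I/O without touching any
 * backing storage. It is used to test and benchmark the block layer and
 * supports three queue modes (bio based, legacy request based and blk-mq)
 * plus an optional LightNVM front end.
 *
 * Example invocation (assuming the module is built as null_blk; the
 * parameters are the module_param()s declared below):
 *
 *	modprobe null_blk nr_devices=4 queue_mode=2 irqmode=1 hw_queue_depth=64
 */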
#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
#include <linux/lightnvm.h>

struct nullb_cmd {
	struct list_head list;
	struct llist_node ll_list;
	struct call_single_data csd;
	struct request *rq;
	struct bio *bio;
	unsigned int tag;
	struct nullb_queue *nq;
};

struct nullb_queue {
	unsigned long *tag_map;
	wait_queue_head_t wait;
	unsigned int queue_depth;

	struct nullb_cmd *cmds;
};

struct nullb {
	struct list_head list;
	unsigned int index;
	struct request_queue *q;
	struct gendisk *disk;
	struct blk_mq_tag_set tag_set;
	struct hrtimer timer;
	unsigned int queue_depth;
	spinlock_t lock;

	struct nullb_queue *queues;
	unsigned int nr_queues;
	char disk_name[DISK_NAME_LEN];
};

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static int nullb_indexes;

struct completion_queue {
	struct llist_head list;
	struct hrtimer timer;
};

/*
 * These are per-cpu for now, they will need to be configured by the
 * complete_queues parameter and appropriately mapped.
 */
static DEFINE_PER_CPU(struct completion_queue, completion_queues);

enum {
	NULL_IRQ_NONE		= 0,
	NULL_IRQ_SOFTIRQ	= 1,
	NULL_IRQ_TIMER		= 2,
};

enum {
	NULL_Q_BIO		= 0,
	NULL_Q_RQ		= 1,
	NULL_Q_MQ		= 2,
};

static int submit_queues;
module_param(submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int home_node = NUMA_NO_NODE;
module_param(home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");

static int queue_mode = NULL_Q_MQ;

static int null_param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
	.set	= null_set_queue_mode,
	.get	= param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &queue_mode, S_IRUGO);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

static int gb = 250;
module_param(gb, int, S_IRUGO);
MODULE_PARM_DESC(gb, "Size in GB");

static int bs = 512;
module_param(bs, int, S_IRUGO);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int nr_devices = 2;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool use_lightnvm;
module_param(use_lightnvm, bool, S_IRUGO);
MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");

static int irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &irqmode, NULL_IRQ_NONE,
					NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
	.set	= null_set_irqmode,
	.get	= param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &irqmode, S_IRUGO);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static int completion_nsec = 10000;
module_param(completion_nsec, int, S_IRUGO);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int hw_queue_depth = 64;
module_param(hw_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool use_per_node_hctx = false;
module_param(use_per_node_hctx, bool, S_IRUGO);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

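/*
 * Command slots for the bio and legacy request modes are tracked with a
 * per-queue tag bitmap (helpers below); in blk-mq mode the command lives
 * in the request pdu instead and these helpers are not used.
 */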
static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}

static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}

static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}

static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	unsigned int tag;

	tag = get_tag(nq);
	if (tag != -1U) {
		cmd = &nq->cmds[tag];
		cmd->tag = tag;
		cmd->nq = nq;
		return cmd;
	}

	return NULL;
}

static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;

	do {
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		cmd = __alloc_cmd(nq);
		if (cmd)
			break;

		io_schedule();
	} while (1);

	finish_wait(&nq->wait, &wait);
	return cmd;
}

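/*
 * Complete a command according to the active queue_mode; bio and legacy
 * request commands also return their slot to the tag bitmap.
 */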
static void end_cmd(struct nullb_cmd *cmd)
{
	switch (queue_mode) {
	case NULL_Q_MQ:
		blk_mq_end_request(cmd->rq, 0);
		return;
	case NULL_Q_RQ:
		INIT_LIST_HEAD(&cmd->rq->queuelist);
		blk_end_request_all(cmd->rq, 0);
		break;
	case NULL_Q_BIO:
		bio_endio(cmd->bio);
		break;
	}

	free_cmd(cmd);
}

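/*
 * hrtimer callback used for irqmode=2 (timer): drain this CPU's completion
 * list, end each command, and restart any legacy request queue that was
 * stopped while waiting for a free command slot.
 */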
static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	struct completion_queue *cq;
	struct llist_node *entry;
	struct nullb_cmd *cmd;

	cq = &per_cpu(completion_queues, smp_processor_id());

	while ((entry = llist_del_all(&cq->list)) != NULL) {
		entry = llist_reverse_order(entry);
		do {
			struct request_queue *q = NULL;

			cmd = container_of(entry, struct nullb_cmd, ll_list);
			entry = entry->next;
			if (cmd->rq)
				q = cmd->rq->q;
			end_cmd(cmd);

			if (q && !q->mq_ops && blk_queue_stopped(q)) {
				spin_lock(q->queue_lock);
				if (blk_queue_stopped(q))
					blk_start_queue(q);
				spin_unlock(q->queue_lock);
			}
		} while (entry);
	}

	return HRTIMER_NORESTART;
}

static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	struct completion_queue *cq = &per_cpu(completion_queues, get_cpu());

	cmd->ll_list.next = NULL;
	if (llist_add(&cmd->ll_list, &cq->list)) {
		ktime_t kt = ktime_set(0, completion_nsec);

		hrtimer_start(&cq->timer, kt, HRTIMER_MODE_REL_PINNED);
	}

	put_cpu();
}

static void null_softirq_done_fn(struct request *rq)
{
	if (queue_mode == NULL_Q_MQ)
		end_cmd(blk_mq_rq_to_pdu(rq));
	else
		end_cmd(rq->special);
}

static inline void null_handle_cmd(struct nullb_cmd *cmd)
{
	/* Complete IO by inline, softirq or timer */
	switch (irqmode) {
	case NULL_IRQ_SOFTIRQ:
		switch (queue_mode) {
		case NULL_Q_MQ:
			blk_mq_complete_request(cmd->rq, cmd->rq->errors);
			break;
		case NULL_Q_RQ:
			blk_complete_request(cmd->rq);
			break;
		case NULL_Q_BIO:
			/*
			 * XXX: no proper submitting cpu information available.
			 */
			end_cmd(cmd);
			break;
		}
		break;
	case NULL_IRQ_NONE:
		end_cmd(cmd);
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
}

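/*
 * Map the submitting CPU to one of the device's queues; used by the bio and
 * legacy request paths (blk-mq maps requests to hardware queues itself).
 */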
static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
	int index = 0;

	if (nullb->nr_queues != 1)
		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

	return &nullb->queues[index];
}

static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 1);
	cmd->bio = bio;

	null_handle_cmd(cmd);
	return BLK_QC_T_NONE;
}

static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 0);
	if (cmd) {
		cmd->rq = req;
		req->special = cmd;
		return BLKPREP_OK;
	}
	blk_stop_queue(q);

	return BLKPREP_DEFER;
}

static void null_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		struct nullb_cmd *cmd = rq->special;

		spin_unlock_irq(q->queue_lock);
		null_handle_cmd(cmd);
		spin_lock_irq(q->queue_lock);
	}
}

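/*
 * blk-mq .queue_rq handler: the nullb_cmd is carried in the request pdu
 * (tag_set.cmd_size), so no separate command allocation is needed here.
 */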
static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);

	cmd->rq = bd->rq;
	cmd->nq = hctx->driver_data;

	blk_mq_start_request(bd->rq);

	null_handle_cmd(cmd);
	return BLK_MQ_RQ_QUEUE_OK;
}

static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
	BUG_ON(!nullb);
	BUG_ON(!nq);

	init_waitqueue_head(&nq->wait);
	nq->queue_depth = nullb->queue_depth;
}

static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int index)
{
	struct nullb *nullb = data;
	struct nullb_queue *nq = &nullb->queues[index];

	hctx->driver_data = nq;
	null_init_queue(nullb, nq);
	nullb->nr_queues++;

	return 0;
}

static struct blk_mq_ops null_mq_ops = {
	.queue_rq	= null_queue_rq,
	.map_queue	= blk_mq_map_queue,
	.init_hctx	= null_init_hctx,
	.complete	= null_softirq_done_fn,
};

static void cleanup_queue(struct nullb_queue *nq)
{
	kfree(nq->tag_map);
	kfree(nq->cmds);
}

static void cleanup_queues(struct nullb *nullb)
{
	int i;

	for (i = 0; i < nullb->nr_queues; i++)
		cleanup_queue(&nullb->queues[i]);

	kfree(nullb->queues);
}

static void null_del_dev(struct nullb *nullb)
{
	list_del_init(&nullb->list);

	if (use_lightnvm)
		nvm_unregister(nullb->disk->disk_name);
	del_gendisk(nullb->disk);
	blk_cleanup_queue(nullb->q);
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
	put_disk(nullb->disk);
	cleanup_queues(nullb);
	kfree(nullb);
}

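/*
 * LightNVM (open-channel SSD) backend: reports a synthetic geometry derived
 * from the gb and bs parameters and passes LightNVM I/O through the block
 * layer as driver-private requests.
 */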
#ifdef CONFIG_NVM

static void null_lnvm_end_io(struct request *rq, int error)
{
	struct nvm_rq *rqd = rq->end_io_data;
	struct nvm_dev *dev = rqd->dev;

	dev->mt->end_io(rqd, error);

	blk_put_request(rq);
}

static int null_lnvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
{
	struct request *rq;
	struct bio *bio = rqd->bio;

	rq = blk_mq_alloc_request(q, bio_rw(bio), GFP_KERNEL, 0);
	if (IS_ERR(rq))
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_DRV_PRIV;
	rq->__sector = bio->bi_iter.bi_sector;
	rq->ioprio = bio_prio(bio);

	if (bio_has_data(bio))
		rq->nr_phys_segments = bio_phys_segments(q, bio);

	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);

	return 0;
}

static int null_lnvm_id(struct request_queue *q, struct nvm_id *id)
{
	sector_t size = gb * 1024 * 1024 * 1024ULL;
	struct nvm_id_group *grp;

	id->ver_id = 0x1;
	id->vmnt = 0;
	id->cgrps = 1;
	id->cap = 0x3;
	id->dom = 0x1;
	id->ppat = NVM_ADDRMODE_LINEAR;

	do_div(size, bs); /* convert size to pages */
	grp = &id->groups[0];
	grp->mtype = 0;
	grp->fmtype = 1;
	grp->num_ch = 1;
	grp->num_lun = 1;
	grp->num_pln = 1;
	grp->num_blk = size / 256;
	grp->num_pg = 256;
	grp->fpg_sz = bs;
	grp->csecs = bs;
	grp->trdt = 25000;
	grp->trdm = 25000;
	grp->tprt = 500000;
	grp->tprm = 500000;
	grp->tbet = 1500000;
	grp->tbem = 1500000;
	grp->mpos = 0x010101; /* single plane rwe */
	grp->cpar = hw_queue_depth;

	return 0;
}

static void *null_lnvm_create_dma_pool(struct request_queue *q, char *name)
{
	mempool_t *virtmem_pool;

	virtmem_pool = mempool_create_page_pool(64, 0);
	if (!virtmem_pool) {
		pr_err("null_blk: Unable to create virtual memory pool\n");
		return NULL;
	}

	return virtmem_pool;
}

static void null_lnvm_destroy_dma_pool(void *pool)
{
	mempool_destroy(pool);
}

static void *null_lnvm_dev_dma_alloc(struct request_queue *q, void *pool,
				gfp_t mem_flags, dma_addr_t *dma_handler)
{
	return mempool_alloc(pool, mem_flags);
}

static void null_lnvm_dev_dma_free(void *pool, void *entry,
				dma_addr_t dma_handler)
{
	mempool_free(entry, pool);
}

static struct nvm_dev_ops null_lnvm_dev_ops = {
	.identity		= null_lnvm_id,
	.submit_io		= null_lnvm_submit_io,

	.create_dma_pool	= null_lnvm_create_dma_pool,
	.destroy_dma_pool	= null_lnvm_destroy_dma_pool,
	.dev_dma_alloc		= null_lnvm_dev_dma_alloc,
	.dev_dma_free		= null_lnvm_dev_dma_free,

	/* Simulate nvme protocol restriction */
	.max_phys_sect		= 64,
};
#else
static struct nvm_dev_ops null_lnvm_dev_ops;
#endif /* CONFIG_NVM */

static int null_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void null_release(struct gendisk *disk, fmode_t mode)
{
}

static const struct block_device_operations null_fops = {
	.owner =	THIS_MODULE,
	.open =		null_open,
	.release =	null_release,
};

static int setup_commands(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	int i, tag_size;

	nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
	if (!nq->cmds)
		return -ENOMEM;

	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
	nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
	if (!nq->tag_map) {
		kfree(nq->cmds);
		return -ENOMEM;
	}

	for (i = 0; i < nq->queue_depth; i++) {
		cmd = &nq->cmds[i];
		INIT_LIST_HEAD(&cmd->list);
		cmd->ll_list.next = NULL;
		cmd->tag = -1U;
	}

	return 0;
}

static int setup_queues(struct nullb *nullb)
{
	nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
								GFP_KERNEL);
	if (!nullb->queues)
		return -ENOMEM;

	nullb->nr_queues = 0;
	nullb->queue_depth = hw_queue_depth;

	return 0;
}

static int init_driver_queues(struct nullb *nullb)
{
	struct nullb_queue *nq;
	int i, ret = 0;

	for (i = 0; i < submit_queues; i++) {
		nq = &nullb->queues[i];

		null_init_queue(nullb, nq);

		ret = setup_commands(nq);
		if (ret)
			return ret;
		nullb->nr_queues++;
	}
	return 0;
}

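/*
 * Create one nullb instance: allocate the per-queue structures, set up the
 * request queue for the selected queue_mode, and then either register the
 * device with LightNVM or add a gendisk sized by the gb parameter.
 */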
static int null_add_dev(void)
{
	struct gendisk *disk;
	struct nullb *nullb;
	sector_t size;
	int rv;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}

	spin_lock_init(&nullb->lock);

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
		submit_queues = nr_online_nodes;

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (queue_mode == NULL_Q_MQ) {
		nullb->tag_set.ops = &null_mq_ops;
		nullb->tag_set.nr_hw_queues = submit_queues;
		nullb->tag_set.queue_depth = hw_queue_depth;
		nullb->tag_set.numa_node = home_node;
		nullb->tag_set.cmd_size = sizeof(struct nullb_cmd);
		nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
		nullb->tag_set.driver_data = nullb;

		rv = blk_mq_alloc_tag_set(&nullb->tag_set);
		if (rv)
			goto out_cleanup_queues;

		nullb->q = blk_mq_init_queue(&nullb->tag_set);
		if (IS_ERR(nullb->q)) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
	} else if (queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	} else {
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	}

	nullb->q->queuedata = nullb;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	nullb->index = nullb_indexes++;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, bs);
	blk_queue_physical_block_size(nullb->q, bs);

	sprintf(nullb->disk_name, "nullb%d", nullb->index);

	if (use_lightnvm) {
		rv = nvm_register(nullb->q, nullb->disk_name,
				  &null_lnvm_dev_ops);
		if (rv)
			goto out_cleanup_blk_queue;
		goto done;
	}

	disk = nullb->disk = alloc_disk_node(1, home_node);
	if (!disk) {
		rv = -ENOMEM;
		goto out_cleanup_lightnvm;
	}
	size = gb * 1024 * 1024 * 1024ULL;
	set_capacity(disk, size >> 9);

	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
	disk->major		= null_major;
	disk->first_minor	= nullb->index;
	disk->fops		= &null_fops;
	disk->private_data	= nullb;
	disk->queue		= nullb->q;
	strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);

	add_disk(disk);
done:
	return 0;

out_cleanup_lightnvm:
	if (use_lightnvm)
		nvm_unregister(nullb->disk_name);
out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return rv;
}

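/*
 * Module init: sanitize the block size, queue mode and submit_queues
 * parameters, prepare the per-CPU completion queues (and hrtimers for
 * irqmode=2), register the "nullb" block major and create nr_devices
 * devices.
 */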
static int __init null_init(void)
{
	unsigned int i;

	if (bs > PAGE_SIZE) {
		pr_warn("null_blk: invalid block size\n");
		pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
		bs = PAGE_SIZE;
	}

	if (use_lightnvm && queue_mode != NULL_Q_MQ) {
		pr_warn("null_blk: LightNVM only supported for blk-mq\n");
		pr_warn("null_blk: defaults queue mode to blk-mq\n");
		queue_mode = NULL_Q_MQ;
	}

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
		if (submit_queues < nr_online_nodes) {
			pr_warn("null_blk: submit_queues param is set to %u.",
							nr_online_nodes);
			submit_queues = nr_online_nodes;
		}
	} else if (submit_queues > nr_cpu_ids)
		submit_queues = nr_cpu_ids;
	else if (!submit_queues)
		submit_queues = 1;

	mutex_init(&lock);

	/* Initialize a separate list for each CPU for issuing softirqs */
	for_each_possible_cpu(i) {
		struct completion_queue *cq = &per_cpu(completion_queues, i);

		init_llist_head(&cq->list);

		if (irqmode != NULL_IRQ_TIMER)
			continue;

		hrtimer_init(&cq->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cq->timer.function = null_cmd_timer_expired;
	}

	null_major = register_blkdev(0, "nullb");
	if (null_major < 0)
		return null_major;

	for (i = 0; i < nr_devices; i++) {
		if (null_add_dev()) {
			unregister_blkdev(null_major, "nullb");
			return -EINVAL;
		}
	}

	pr_info("null: module loaded\n");
	return 0;
}

static void __exit null_exit(void)
{
	struct nullb *nullb;

	unregister_blkdev(null_major, "nullb");

	mutex_lock(&lock);
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		null_del_dev(nullb);
	}
	mutex_unlock(&lock);
}

module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <jaxboe@fusionio.com>");
MODULE_LICENSE("GPL");