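/*
 * null_blk: a block device driver that completes I/O requests without
 * performing any data transfer, used for testing and benchmarking the
 * block layer. It can run in bio-based, legacy request, or blk-mq mode,
 * optionally register itself as a LightNVM device, and simulate
 * per-command completion latency via an hrtimer.
 */
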
#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
#include <linux/lightnvm.h>

struct nullb_cmd {
	struct list_head list;
	struct llist_node ll_list;
	struct call_single_data csd;
	struct request *rq;
	struct bio *bio;
	unsigned int tag;
	struct nullb_queue *nq;
};

struct nullb_queue {
	unsigned long *tag_map;
	wait_queue_head_t wait;
	unsigned int queue_depth;

	struct nullb_cmd *cmds;
};

struct nullb {
	struct list_head list;
	unsigned int index;
	struct request_queue *q;
	struct gendisk *disk;
	struct blk_mq_tag_set tag_set;
	struct hrtimer timer;
	unsigned int queue_depth;
	spinlock_t lock;

	struct nullb_queue *queues;
	unsigned int nr_queues;
	char disk_name[DISK_NAME_LEN];
};

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static int nullb_indexes;
static struct kmem_cache *ppa_cache;

struct completion_queue {
	struct llist_head list;
	struct hrtimer timer;
};

/*
 * These are per-cpu for now, they will need to be configured by the
 * complete_queues parameter and appropriately mapped.
 */
static DEFINE_PER_CPU(struct completion_queue, completion_queues);

enum {
	NULL_IRQ_NONE		= 0,
	NULL_IRQ_SOFTIRQ	= 1,
	NULL_IRQ_TIMER		= 2,
};

enum {
	NULL_Q_BIO		= 0,
	NULL_Q_RQ		= 1,
	NULL_Q_MQ		= 2,
};

static int submit_queues;
module_param(submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int home_node = NUMA_NO_NODE;
module_param(home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");

static int queue_mode = NULL_Q_MQ;

static int null_param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
	.set	= null_set_queue_mode,
	.get	= param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &queue_mode, S_IRUGO);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio, 1=rq, 2=multiqueue)");

static int gb = 250;
module_param(gb, int, S_IRUGO);
MODULE_PARM_DESC(gb, "Size in GB");

static int bs = 512;
module_param(bs, int, S_IRUGO);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int nr_devices = 2;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool use_lightnvm;
module_param(use_lightnvm, bool, S_IRUGO);
MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");

static int irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &irqmode, NULL_IRQ_NONE,
					NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
	.set	= null_set_irqmode,
	.get	= param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &irqmode, S_IRUGO);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static int completion_nsec = 10000;
module_param(completion_nsec, int, S_IRUGO);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int hw_queue_depth = 64;
module_param(hw_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool use_per_node_hctx;
module_param(use_per_node_hctx, bool, S_IRUGO);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
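
/*
 * Example usage (an illustrative invocation, not part of the original
 * source): create two 250GB blk-mq devices with four submission queues
 * each, completing commands from a timer after ~10us of simulated
 * device latency:
 *
 *	modprobe null_blk queue_mode=2 submit_queues=4 irqmode=2 \
 *		completion_nsec=10000 gb=250 nr_devices=2
 *
 * The resulting disks appear as /dev/nullb0 and /dev/nullb1.
 */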

static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}

static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}
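
/*
 * A note on the tag scheme above (explanatory, assuming the standard
 * semantics of the kernel bitops): find_first_zero_bit() only scans,
 * so two CPUs may race for the same bit; test_and_set_bit_lock() is
 * the atomic arbiter and the loser simply rescans. For example, with
 * tag_map = 0x5 the scan returns tag 1, and if another CPU claims
 * bit 1 first, the loop retries and finds tag 3.
 */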

static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}

static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	unsigned int tag;

	tag = get_tag(nq);
	if (tag != -1U) {
		cmd = &nq->cmds[tag];
		cmd->tag = tag;
		cmd->nq = nq;
		return cmd;
	}

	return NULL;
}

static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;

	do {
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		cmd = __alloc_cmd(nq);
		if (cmd)
			break;

		io_schedule();
	} while (1);

	finish_wait(&nq->wait, &wait);
	return cmd;
}

static void end_cmd(struct nullb_cmd *cmd)
{
	switch (queue_mode) {
	case NULL_Q_MQ:
		blk_mq_end_request(cmd->rq, 0);
		return;
	case NULL_Q_RQ:
		INIT_LIST_HEAD(&cmd->rq->queuelist);
		blk_end_request_all(cmd->rq, 0);
		break;
	case NULL_Q_BIO:
		bio_endio(cmd->bio);
		break;
	}

	free_cmd(cmd);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	struct completion_queue *cq;
	struct llist_node *entry;
	struct nullb_cmd *cmd;

	cq = &per_cpu(completion_queues, smp_processor_id());

	while ((entry = llist_del_all(&cq->list)) != NULL) {
		entry = llist_reverse_order(entry);
		do {
			struct request_queue *q = NULL;

			cmd = container_of(entry, struct nullb_cmd, ll_list);
			entry = entry->next;
			if (cmd->rq)
				q = cmd->rq->q;
			end_cmd(cmd);

			/*
			 * Restart a legacy-mode queue that
			 * null_rq_prep_fn() stopped when it ran out
			 * of commands.
			 */
			if (q && !q->mq_ops && blk_queue_stopped(q)) {
				spin_lock(q->queue_lock);
				if (blk_queue_stopped(q))
					blk_start_queue(q);
				spin_unlock(q->queue_lock);
			}
		} while (entry);
	}

	return HRTIMER_NORESTART;
}

static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	struct completion_queue *cq = &per_cpu(completion_queues, get_cpu());

	cmd->ll_list.next = NULL;
	if (llist_add(&cmd->ll_list, &cq->list)) {
		ktime_t kt = ktime_set(0, completion_nsec);

		hrtimer_start(&cq->timer, kt, HRTIMER_MODE_REL_PINNED);
	}

	put_cpu();
}
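
/*
 * Note: llist_add() returns true only when the list was previously
 * empty, so the hrtimer above is armed once per batch of pending
 * completions rather than once per command.
 */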

static void null_softirq_done_fn(struct request *rq)
{
	if (queue_mode == NULL_Q_MQ)
		end_cmd(blk_mq_rq_to_pdu(rq));
	else
		end_cmd(rq->special);
}

static inline void null_handle_cmd(struct nullb_cmd *cmd)
{
	/* Complete IO by inline, softirq or timer */
	switch (irqmode) {
	case NULL_IRQ_SOFTIRQ:
		switch (queue_mode) {
		case NULL_Q_MQ:
			blk_mq_complete_request(cmd->rq, cmd->rq->errors);
			break;
		case NULL_Q_RQ:
			blk_complete_request(cmd->rq);
			break;
		case NULL_Q_BIO:
			/*
			 * XXX: no proper submitting cpu information available.
			 */
			end_cmd(cmd);
			break;
		}
		break;
	case NULL_IRQ_NONE:
		end_cmd(cmd);
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
}
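
/*
 * To summarize the three completion paths: NULL_IRQ_NONE completes
 * inline in the submission context, NULL_IRQ_SOFTIRQ defers to the
 * block layer's softirq completion, and NULL_IRQ_TIMER parks the
 * command on a per-cpu llist that null_cmd_timer_expired() drains
 * after completion_nsec.
 */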

static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
	int index = 0;

	if (nullb->nr_queues != 1)
		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

	return &nullb->queues[index];
}
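
/*
 * Worked example of the mapping above: with nr_cpu_ids = 8 and
 * nr_queues = 4 the divisor is (8 + 3) / 4 = 2, so CPUs 0-1 hit
 * queue 0, CPUs 2-3 queue 1, and so on.
 */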

static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 1);
	cmd->bio = bio;

	null_handle_cmd(cmd);
	return BLK_QC_T_NONE;
}

static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 0);
	if (cmd) {
		cmd->rq = req;
		req->special = cmd;
		return BLKPREP_OK;
	}
	/* No command available: stop the queue until a completion frees one */
	blk_stop_queue(q);

	return BLKPREP_DEFER;
}

static void null_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		struct nullb_cmd *cmd = rq->special;

		spin_unlock_irq(q->queue_lock);
		null_handle_cmd(cmd);
		spin_lock_irq(q->queue_lock);
	}
}

static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);

	cmd->rq = bd->rq;
	cmd->nq = hctx->driver_data;

	blk_mq_start_request(bd->rq);

	null_handle_cmd(cmd);
	return BLK_MQ_RQ_QUEUE_OK;
}
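
/*
 * Note that in the blk-mq path the nullb_cmd is not drawn from the
 * driver's own tag_map; it lives in the per-request payload that
 * blk-mq allocates (see tag_set.cmd_size in null_add_dev()), which is
 * what blk_mq_rq_to_pdu() returns.
 */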

static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
	BUG_ON(!nullb);
	BUG_ON(!nq);

	init_waitqueue_head(&nq->wait);
	nq->queue_depth = nullb->queue_depth;
}

static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int index)
{
	struct nullb *nullb = data;
	struct nullb_queue *nq = &nullb->queues[index];

	hctx->driver_data = nq;
	null_init_queue(nullb, nq);
	nullb->nr_queues++;

	return 0;
}

static struct blk_mq_ops null_mq_ops = {
	.queue_rq	= null_queue_rq,
	.map_queue	= blk_mq_map_queue,
	.init_hctx	= null_init_hctx,
	.complete	= null_softirq_done_fn,
};

static void cleanup_queue(struct nullb_queue *nq)
{
	kfree(nq->tag_map);
	kfree(nq->cmds);
}

static void cleanup_queues(struct nullb *nullb)
{
	int i;

	for (i = 0; i < nullb->nr_queues; i++)
		cleanup_queue(&nullb->queues[i]);

	kfree(nullb->queues);
}

static void null_del_dev(struct nullb *nullb)
{
	list_del_init(&nullb->list);

	if (use_lightnvm)
		nvm_unregister(nullb->disk_name);
	else
		del_gendisk(nullb->disk);
	blk_cleanup_queue(nullb->q);
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
	if (!use_lightnvm)
		put_disk(nullb->disk);
	cleanup_queues(nullb);
	kfree(nullb);
}

#ifdef CONFIG_NVM

static void null_lnvm_end_io(struct request *rq, int error)
{
	struct nvm_rq *rqd = rq->end_io_data;
	struct nvm_dev *dev = rqd->dev;

	dev->mt->end_io(rqd, error);

	blk_put_request(rq);
}

static int null_lnvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
{
	struct request *rq;
	struct bio *bio = rqd->bio;

	rq = blk_mq_alloc_request(q, bio_rw(bio), GFP_KERNEL, 0);
	if (IS_ERR(rq))
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_DRV_PRIV;
	rq->__sector = bio->bi_iter.bi_sector;
	rq->ioprio = bio_prio(bio);

	if (bio_has_data(bio))
		rq->nr_phys_segments = bio_phys_segments(q, bio);

	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);

	return 0;
}

static int null_lnvm_id(struct request_queue *q, struct nvm_id *id)
{
	sector_t size = gb * 1024 * 1024 * 1024ULL;
	sector_t blksize;
	struct nvm_id_group *grp;

	id->ver_id = 0x1;
	id->vmnt = 0;
	id->cgrps = 1;
	id->cap = 0x3;
	id->dom = 0x1;

	id->ppaf.blk_offset = 0;
	id->ppaf.blk_len = 16;
	id->ppaf.pg_offset = 16;
	id->ppaf.pg_len = 16;
	id->ppaf.sect_offset = 32;
	id->ppaf.sect_len = 8;
	id->ppaf.pln_offset = 40;
	id->ppaf.pln_len = 8;
	id->ppaf.lun_offset = 48;
	id->ppaf.lun_len = 8;
	id->ppaf.ch_offset = 56;
	id->ppaf.ch_len = 8;

	do_div(size, bs); /* convert size to pages */
	do_div(size, 256); /* convert size to blocks (256 pages per block) */
	grp = &id->groups[0];
	grp->mtype = 0;
	grp->fmtype = 0;
	grp->num_ch = 1;
	grp->num_pg = 256;
	blksize = size;
	do_div(size, (1 << 16));
	grp->num_lun = size + 1;
	do_div(blksize, grp->num_lun);
	grp->num_blk = blksize;
	grp->num_pln = 1;

	grp->fpg_sz = bs;
	grp->csecs = bs;
	grp->trdt = 25000;
	grp->trdm = 25000;
	grp->tprt = 500000;
	grp->tprm = 500000;
	grp->tbet = 1500000;
	grp->tbem = 1500000;
	grp->mpos = 0x010101; /* single plane rwe */
	grp->cpar = hw_queue_depth;

	return 0;
}
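
/*
 * Worked example of the geometry computed above, using the defaults
 * gb = 250 and the LightNVM-required bs = 4096: 250GB / 4096 =
 * 65,536,000 pages, / 256 = 256,000 blocks; 256,000 / 65,536 = 3, so
 * num_lun = 4 and num_blk = 256,000 / 4 = 64,000. The reported media
 * is thus 1 channel x 4 LUNs x 64,000 blocks x 256 pages x 4KB pages.
 */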

static void *null_lnvm_create_dma_pool(struct request_queue *q, char *name)
{
	mempool_t *virtmem_pool;

	virtmem_pool = mempool_create_slab_pool(64, ppa_cache);
	if (!virtmem_pool) {
		pr_err("null_blk: Unable to create virtual memory pool\n");
		return NULL;
	}

	return virtmem_pool;
}

static void null_lnvm_destroy_dma_pool(void *pool)
{
	mempool_destroy(pool);
}

static void *null_lnvm_dev_dma_alloc(struct request_queue *q, void *pool,
				gfp_t mem_flags, dma_addr_t *dma_handler)
{
	return mempool_alloc(pool, mem_flags);
}

static void null_lnvm_dev_dma_free(void *pool, void *entry,
							dma_addr_t dma_handler)
{
	mempool_free(entry, pool);
}

static struct nvm_dev_ops null_lnvm_dev_ops = {
	.identity		= null_lnvm_id,
	.submit_io		= null_lnvm_submit_io,

	.create_dma_pool	= null_lnvm_create_dma_pool,
	.destroy_dma_pool	= null_lnvm_destroy_dma_pool,
	.dev_dma_alloc		= null_lnvm_dev_dma_alloc,
	.dev_dma_free		= null_lnvm_dev_dma_free,

	/* Simulate nvme protocol restriction */
	.max_phys_sect		= 64,
};
#else
static struct nvm_dev_ops null_lnvm_dev_ops;
#endif /* CONFIG_NVM */

static int null_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void null_release(struct gendisk *disk, fmode_t mode)
{
}

static const struct block_device_operations null_fops = {
	.owner =	THIS_MODULE,
	.open =		null_open,
	.release =	null_release,
};

static int setup_commands(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	int i, tag_size;

	nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
	if (!nq->cmds)
		return -ENOMEM;

	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
	nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
	if (!nq->tag_map) {
		kfree(nq->cmds);
		return -ENOMEM;
	}

	for (i = 0; i < nq->queue_depth; i++) {
		cmd = &nq->cmds[i];
		INIT_LIST_HEAD(&cmd->list);
		cmd->ll_list.next = NULL;
		cmd->tag = -1U;
	}

	return 0;
}
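
/*
 * Sizing example for the tag_map above: with the default
 * hw_queue_depth = 64 on a 64-bit machine, ALIGN(64, 64) / 64 = 1,
 * so one unsigned long suffices; a depth of 65 would round up to two.
 */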

static int setup_queues(struct nullb *nullb)
{
	nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
								GFP_KERNEL);
	if (!nullb->queues)
		return -ENOMEM;

	nullb->nr_queues = 0;
	nullb->queue_depth = hw_queue_depth;

	return 0;
}

static int init_driver_queues(struct nullb *nullb)
{
	struct nullb_queue *nq;
	int i, ret = 0;

	for (i = 0; i < submit_queues; i++) {
		nq = &nullb->queues[i];

		null_init_queue(nullb, nq);

		ret = setup_commands(nq);
		if (ret)
			return ret;
		nullb->nr_queues++;
	}
	return 0;
}

static int null_add_dev(void)
{
	struct gendisk *disk;
	struct nullb *nullb;
	sector_t size;
	int rv;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}

	spin_lock_init(&nullb->lock);

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
		submit_queues = nr_online_nodes;

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (queue_mode == NULL_Q_MQ) {
		nullb->tag_set.ops = &null_mq_ops;
		nullb->tag_set.nr_hw_queues = submit_queues;
		nullb->tag_set.queue_depth = hw_queue_depth;
		nullb->tag_set.numa_node = home_node;
		nullb->tag_set.cmd_size = sizeof(struct nullb_cmd);
		nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
		nullb->tag_set.driver_data = nullb;

		rv = blk_mq_alloc_tag_set(&nullb->tag_set);
		if (rv)
			goto out_cleanup_queues;

		nullb->q = blk_mq_init_queue(&nullb->tag_set);
		if (IS_ERR(nullb->q)) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
	} else if (queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	} else {
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	}

	nullb->q->queuedata = nullb;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	nullb->index = nullb_indexes++;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, bs);
	blk_queue_physical_block_size(nullb->q, bs);

	sprintf(nullb->disk_name, "nullb%d", nullb->index);

	if (use_lightnvm) {
		rv = nvm_register(nullb->q, nullb->disk_name,
				  &null_lnvm_dev_ops);
		if (rv)
			goto out_cleanup_blk_queue;
		goto done;
	}

	disk = nullb->disk = alloc_disk_node(1, home_node);
	if (!disk) {
		rv = -ENOMEM;
		goto out_cleanup_lightnvm;
	}
	size = gb * 1024 * 1024 * 1024ULL;
	set_capacity(disk, size >> 9); /* bytes to 512-byte sectors */

	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
	disk->major		= null_major;
	disk->first_minor	= nullb->index;
	disk->fops		= &null_fops;
	disk->private_data	= nullb;
	disk->queue		= nullb->q;
	strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);

	add_disk(disk);
done:
	return 0;

out_cleanup_lightnvm:
	if (use_lightnvm)
		nvm_unregister(nullb->disk_name);
out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return rv;
}

static int __init null_init(void)
{
	unsigned int i;

	if (bs > PAGE_SIZE) {
		pr_warn("null_blk: invalid block size\n");
		pr_warn("null_blk: defaulting block size to %lu\n", PAGE_SIZE);
		bs = PAGE_SIZE;
	}

	if (use_lightnvm && bs != 4096) {
		pr_warn("null_blk: LightNVM only supports 4k block size\n");
		pr_warn("null_blk: defaulting block size to 4k\n");
		bs = 4096;
	}

	if (use_lightnvm && queue_mode != NULL_Q_MQ) {
		pr_warn("null_blk: LightNVM only supported for blk-mq\n");
		pr_warn("null_blk: defaulting queue mode to blk-mq\n");
		queue_mode = NULL_Q_MQ;
	}

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
		if (submit_queues < nr_online_nodes) {
			pr_warn("null_blk: submit_queues param is set to %u.\n",
				nr_online_nodes);
			submit_queues = nr_online_nodes;
		}
	} else if (submit_queues > nr_cpu_ids)
		submit_queues = nr_cpu_ids;
	else if (!submit_queues)
		submit_queues = 1;

	mutex_init(&lock);

	/* Initialize a separate list for each CPU for issuing softirqs */
	for_each_possible_cpu(i) {
		struct completion_queue *cq = &per_cpu(completion_queues, i);

		init_llist_head(&cq->list);

		if (irqmode != NULL_IRQ_TIMER)
			continue;

		hrtimer_init(&cq->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cq->timer.function = null_cmd_timer_expired;
	}

	null_major = register_blkdev(0, "nullb");
	if (null_major < 0)
		return null_major;

	if (use_lightnvm) {
		ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64),
								0, 0, NULL);
		if (!ppa_cache) {
			pr_err("null_blk: unable to create ppa cache\n");
			unregister_blkdev(null_major, "nullb");
			return -ENOMEM;
		}
	}

	for (i = 0; i < nr_devices; i++) {
		if (null_add_dev()) {
			unregister_blkdev(null_major, "nullb");
			goto err_ppa;
		}
	}

	pr_info("null_blk: module loaded\n");
	return 0;
err_ppa:
	kmem_cache_destroy(ppa_cache);
	return -EINVAL;
}

static void __exit null_exit(void)
{
	struct nullb *nullb;

	unregister_blkdev(null_major, "nullb");

	mutex_lock(&lock);
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		null_del_dev(nullb);
	}
	mutex_unlock(&lock);

	kmem_cache_destroy(ppa_cache);
}

module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <jaxboe@fusionio.com>");
MODULE_LICENSE("GPL");