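/*
 * null_blk: a block device driver that acknowledges I/O without moving
 * any data, used to exercise and benchmark the block layer in its three
 * queueing modes (bio-based, legacy request, and blk-mq), optionally
 * exposing itself as a LightNVM device.
 */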
#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
#include <linux/lightnvm.h>

struct nullb_cmd {
        struct list_head list;
        struct llist_node ll_list;
        struct call_single_data csd;
        struct request *rq;
        struct bio *bio;
        unsigned int tag;
        struct nullb_queue *nq;
        struct hrtimer timer;
};

struct nullb_queue {
        unsigned long *tag_map;
        wait_queue_head_t wait;
        unsigned int queue_depth;

        struct nullb_cmd *cmds;
};

struct nullb {
        struct list_head list;
        unsigned int index;
        struct request_queue *q;
        struct gendisk *disk;
        struct blk_mq_tag_set tag_set;
        struct hrtimer timer;
        unsigned int queue_depth;
        spinlock_t lock;

        struct nullb_queue *queues;
        unsigned int nr_queues;
        char disk_name[DISK_NAME_LEN];
};

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static int nullb_indexes;
static struct kmem_cache *ppa_cache;

enum {
        NULL_IRQ_NONE           = 0,
        NULL_IRQ_SOFTIRQ        = 1,
        NULL_IRQ_TIMER          = 2,
};

enum {
        NULL_Q_BIO              = 0,
        NULL_Q_RQ               = 1,
        NULL_Q_MQ               = 2,
};

static int submit_queues;
module_param(submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int home_node = NUMA_NO_NODE;
module_param(home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");

static int queue_mode = NULL_Q_MQ;

static int null_param_store_val(const char *str, int *val, int min, int max)
{
        int ret, new_val;

        ret = kstrtoint(str, 10, &new_val);
        if (ret)
                return -EINVAL;

        if (new_val < min || new_val > max)
                return -EINVAL;

        *val = new_val;
        return 0;
}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
        return null_param_store_val(str, &queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
        .set    = null_set_queue_mode,
        .get    = param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &queue_mode, S_IRUGO);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

static int gb = 250;
module_param(gb, int, S_IRUGO);
MODULE_PARM_DESC(gb, "Size in GB");

static int bs = 512;
module_param(bs, int, S_IRUGO);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int nr_devices = 2;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool use_lightnvm;
module_param(use_lightnvm, bool, S_IRUGO);
MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");

static int irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
        return null_param_store_val(str, &irqmode, NULL_IRQ_NONE,
                                        NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
        .set    = null_set_irqmode,
        .get    = param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &irqmode, S_IRUGO);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static int completion_nsec = 10000;
module_param(completion_nsec, int, S_IRUGO);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int hw_queue_depth = 64;
module_param(hw_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool use_per_node_hctx = false;
module_param(use_per_node_hctx, bool, S_IRUGO);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

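/*
 * Tag management: each nullb_queue tracks free command slots in a plain
 * bitmap.  get_tag() is lock-free; it retries the
 * find_first_zero_bit()/test_and_set_bit_lock() pair until it wins a bit
 * or the map is full, and put_tag() releases the bit and wakes any
 * sleeper in alloc_cmd().
 */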
static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
        clear_bit_unlock(tag, nq->tag_map);

        if (waitqueue_active(&nq->wait))
                wake_up(&nq->wait);
}

static unsigned int get_tag(struct nullb_queue *nq)
{
        unsigned int tag;

        do {
                tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
                if (tag >= nq->queue_depth)
                        return -1U;
        } while (test_and_set_bit_lock(tag, nq->tag_map));

        return tag;
}

static void free_cmd(struct nullb_cmd *cmd)
{
        put_tag(cmd->nq, cmd->tag);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);

static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
        struct nullb_cmd *cmd;
        unsigned int tag;

        tag = get_tag(nq);
        if (tag != -1U) {
                cmd = &nq->cmds[tag];
                cmd->tag = tag;
                cmd->nq = nq;
                if (irqmode == NULL_IRQ_TIMER) {
                        hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
                                     HRTIMER_MODE_REL);
                        cmd->timer.function = null_cmd_timer_expired;
                }
                return cmd;
        }

        return NULL;
}

static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
        struct nullb_cmd *cmd;
        DEFINE_WAIT(wait);

        cmd = __alloc_cmd(nq);
        if (cmd || !can_wait)
                return cmd;

        do {
                prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
                cmd = __alloc_cmd(nq);
                if (cmd)
                        break;

                io_schedule();
        } while (1);

        finish_wait(&nq->wait, &wait);
        return cmd;
}

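/*
 * Completion path, dispatched on queue_mode: blk-mq requests are ended
 * directly, the legacy rq path additionally restarts a queue that
 * null_rq_prep_fn() stopped (freeing this tag may unblock it), and the
 * bio path just ends the bio before dropping the tag.
 */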
static void end_cmd(struct nullb_cmd *cmd)
{
        struct request_queue *q = NULL;

        switch (queue_mode) {
        case NULL_Q_MQ:
                blk_mq_end_request(cmd->rq, 0);
                return;
        case NULL_Q_RQ:
                INIT_LIST_HEAD(&cmd->rq->queuelist);
                blk_end_request_all(cmd->rq, 0);
                break;
        case NULL_Q_BIO:
                bio_endio(cmd->bio);
                goto free_cmd;
        }

        if (cmd->rq)
                q = cmd->rq->q;

        /* Restart queue if needed, as we are freeing a tag */
        if (q && !q->mq_ops && blk_queue_stopped(q)) {
                unsigned long flags;

                spin_lock_irqsave(q->queue_lock, flags);
                if (blk_queue_stopped(q))
                        blk_start_queue(q);
                spin_unlock_irqrestore(q->queue_lock, flags);
        }
free_cmd:
        free_cmd(cmd);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
        end_cmd(container_of(timer, struct nullb_cmd, timer));

        return HRTIMER_NORESTART;
}

static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
        ktime_t kt = ktime_set(0, completion_nsec);

        hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}

static void null_softirq_done_fn(struct request *rq)
{
        if (queue_mode == NULL_Q_MQ)
                end_cmd(blk_mq_rq_to_pdu(rq));
        else
                end_cmd(rq->special);
}

static inline void null_handle_cmd(struct nullb_cmd *cmd)
{
        /* Complete IO by inline, softirq or timer */
        switch (irqmode) {
        case NULL_IRQ_SOFTIRQ:
                switch (queue_mode) {
                case NULL_Q_MQ:
                        blk_mq_complete_request(cmd->rq, cmd->rq->errors);
                        break;
                case NULL_Q_RQ:
                        blk_complete_request(cmd->rq);
                        break;
                case NULL_Q_BIO:
                        /*
                         * XXX: no proper submitting cpu information available.
                         */
                        end_cmd(cmd);
                        break;
                }
                break;
        case NULL_IRQ_NONE:
                end_cmd(cmd);
                break;
        case NULL_IRQ_TIMER:
                null_cmd_end_timer(cmd);
                break;
        }
}

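/*
 * Spread submitting CPUs evenly across the queues:
 * index = cpu / ceil(nr_cpu_ids / nr_queues).
 */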
static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
        int index = 0;

        if (nullb->nr_queues != 1)
                index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

        return &nullb->queues[index];
}

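/* make_request entry point for NULL_Q_BIO mode; may sleep for a free tag. */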
static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
{
        struct nullb *nullb = q->queuedata;
        struct nullb_queue *nq = nullb_to_queue(nullb);
        struct nullb_cmd *cmd;

        cmd = alloc_cmd(nq, 1);
        cmd->bio = bio;

        null_handle_cmd(cmd);
        return BLK_QC_T_NONE;
}

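/*
 * Legacy request-mode prep: on tag exhaustion the queue is stopped and
 * BLKPREP_DEFER returned; end_cmd() restarts the queue once a tag frees up.
 */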
static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
        struct nullb *nullb = q->queuedata;
        struct nullb_queue *nq = nullb_to_queue(nullb);
        struct nullb_cmd *cmd;

        cmd = alloc_cmd(nq, 0);
        if (cmd) {
                cmd->rq = req;
                req->special = cmd;
                return BLKPREP_OK;
        }
        blk_stop_queue(q);

        return BLKPREP_DEFER;
}

static void null_request_fn(struct request_queue *q)
{
        struct request *rq;

        while ((rq = blk_fetch_request(q)) != NULL) {
                struct nullb_cmd *cmd = rq->special;

                spin_unlock_irq(q->queue_lock);
                null_handle_cmd(cmd);
                spin_lock_irq(q->queue_lock);
        }
}

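/*
 * blk-mq .queue_rq handler: the nullb_cmd lives in the request's driver
 * pdu (tag_set.cmd_size below), so this path needs no tag bitmap at all.
 */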
static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
                         const struct blk_mq_queue_data *bd)
{
        struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);

        if (irqmode == NULL_IRQ_TIMER) {
                hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
                cmd->timer.function = null_cmd_timer_expired;
        }
        cmd->rq = bd->rq;
        cmd->nq = hctx->driver_data;

        blk_mq_start_request(bd->rq);

        null_handle_cmd(cmd);
        return BLK_MQ_RQ_QUEUE_OK;
}

static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
        BUG_ON(!nullb);
        BUG_ON(!nq);

        init_waitqueue_head(&nq->wait);
        nq->queue_depth = nullb->queue_depth;
}

static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                          unsigned int index)
{
        struct nullb *nullb = data;
        struct nullb_queue *nq = &nullb->queues[index];

        hctx->driver_data = nq;
        null_init_queue(nullb, nq);
        nullb->nr_queues++;

        return 0;
}

static struct blk_mq_ops null_mq_ops = {
        .queue_rq       = null_queue_rq,
        .map_queue      = blk_mq_map_queue,
        .init_hctx      = null_init_hctx,
        .complete       = null_softirq_done_fn,
};

static void cleanup_queue(struct nullb_queue *nq)
{
        kfree(nq->tag_map);
        kfree(nq->cmds);
}

static void cleanup_queues(struct nullb *nullb)
{
        int i;

        for (i = 0; i < nullb->nr_queues; i++)
                cleanup_queue(&nullb->queues[i]);

        kfree(nullb->queues);
}

static void null_del_dev(struct nullb *nullb)
{
        list_del_init(&nullb->list);

        if (use_lightnvm)
                nvm_unregister(nullb->disk_name);
        else
                del_gendisk(nullb->disk);
        blk_cleanup_queue(nullb->q);
        if (queue_mode == NULL_Q_MQ)
                blk_mq_free_tag_set(&nullb->tag_set);
        if (!use_lightnvm)
                put_disk(nullb->disk);
        cleanup_queues(nullb);
        kfree(nullb);
}

#ifdef CONFIG_NVM

static void null_lnvm_end_io(struct request *rq, int error)
{
        struct nvm_rq *rqd = rq->end_io_data;
        struct nvm_dev *dev = rqd->dev;

        dev->mt->end_io(rqd, error);

        blk_put_request(rq);
}

static int null_lnvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
{
        struct request *rq;
        struct bio *bio = rqd->bio;

        rq = blk_mq_alloc_request(q, bio_rw(bio), GFP_KERNEL, 0);
        if (IS_ERR(rq))
                return -ENOMEM;

        rq->cmd_type = REQ_TYPE_DRV_PRIV;
        rq->__sector = bio->bi_iter.bi_sector;
        rq->ioprio = bio_prio(bio);

        if (bio_has_data(bio))
                rq->nr_phys_segments = bio_phys_segments(q, bio);

        rq->__data_len = bio->bi_iter.bi_size;
        rq->bio = rq->biotail = bio;

        rq->end_io_data = rqd;

        blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);

        return 0;
}

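/*
 * Report a synthetic LightNVM geometry: the configured capacity is split
 * into 256-page blocks, spread over at most 1 << 16 blocks per LUN; the
 * timing fields are fixed, NAND-like values in nanoseconds.
 */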
static int null_lnvm_id(struct request_queue *q, struct nvm_id *id)
{
        sector_t size = gb * 1024 * 1024 * 1024ULL;
        sector_t blksize;
        struct nvm_id_group *grp;

        id->ver_id = 0x1;
        id->vmnt = 0;
        id->cgrps = 1;
        id->cap = 0x3;
        id->dom = 0x1;

        id->ppaf.blk_offset = 0;
        id->ppaf.blk_len = 16;
        id->ppaf.pg_offset = 16;
        id->ppaf.pg_len = 16;
        id->ppaf.sect_offset = 32;
        id->ppaf.sect_len = 8;
        id->ppaf.pln_offset = 40;
        id->ppaf.pln_len = 8;
        id->ppaf.lun_offset = 48;
        id->ppaf.lun_len = 8;
        id->ppaf.ch_offset = 56;
        id->ppaf.ch_len = 8;

        do_div(size, bs); /* convert size to pages */
        do_div(size, 256); /* convert size to blocks, 256 pages per block */
        grp = &id->groups[0];
        grp->mtype = 0;
        grp->fmtype = 0;
        grp->num_ch = 1;
        grp->num_pg = 256;
        blksize = size;
        do_div(size, (1 << 16));
        grp->num_lun = size + 1;
        do_div(blksize, grp->num_lun);
        grp->num_blk = blksize;
        grp->num_pln = 1;

        grp->fpg_sz = bs;
        grp->csecs = bs;
        grp->trdt = 25000;
        grp->trdm = 25000;
        grp->tprt = 500000;
        grp->tprm = 500000;
        grp->tbet = 1500000;
        grp->tbem = 1500000;
        grp->mpos = 0x010101; /* single plane rwe */
        grp->cpar = hw_queue_depth;

        return 0;
}

static void *null_lnvm_create_dma_pool(struct request_queue *q, char *name)
{
        mempool_t *virtmem_pool;

        virtmem_pool = mempool_create_slab_pool(64, ppa_cache);
        if (!virtmem_pool) {
                pr_err("null_blk: Unable to create virtual memory pool\n");
                return NULL;
        }

        return virtmem_pool;
}

static void null_lnvm_destroy_dma_pool(void *pool)
{
        mempool_destroy(pool);
}

static void *null_lnvm_dev_dma_alloc(struct request_queue *q, void *pool,
                                gfp_t mem_flags, dma_addr_t *dma_handler)
{
        return mempool_alloc(pool, mem_flags);
}

static void null_lnvm_dev_dma_free(void *pool, void *entry,
                                dma_addr_t dma_handler)
{
        mempool_free(entry, pool);
}

static struct nvm_dev_ops null_lnvm_dev_ops = {
        .identity               = null_lnvm_id,
        .submit_io              = null_lnvm_submit_io,

        .create_dma_pool        = null_lnvm_create_dma_pool,
        .destroy_dma_pool       = null_lnvm_destroy_dma_pool,
        .dev_dma_alloc          = null_lnvm_dev_dma_alloc,
        .dev_dma_free           = null_lnvm_dev_dma_free,

        /* Simulate nvme protocol restriction */
        .max_phys_sect          = 64,
};
#else
static struct nvm_dev_ops null_lnvm_dev_ops;
#endif /* CONFIG_NVM */

static int null_open(struct block_device *bdev, fmode_t mode)
{
        return 0;
}

static void null_release(struct gendisk *disk, fmode_t mode)
{
}

static const struct block_device_operations null_fops = {
        .owner =        THIS_MODULE,
        .open =         null_open,
        .release =      null_release,
};

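/*
 * Allocate the per-queue command array and a tag bitmap rounded up to
 * whole longs, as required by find_first_zero_bit().
 */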
static int setup_commands(struct nullb_queue *nq)
{
        struct nullb_cmd *cmd;
        int i, tag_size;

        nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
        if (!nq->cmds)
                return -ENOMEM;

        tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
        nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
        if (!nq->tag_map) {
                kfree(nq->cmds);
                return -ENOMEM;
        }

        for (i = 0; i < nq->queue_depth; i++) {
                cmd = &nq->cmds[i];
                INIT_LIST_HEAD(&cmd->list);
                cmd->ll_list.next = NULL;
                cmd->tag = -1U;
        }

        return 0;
}

static int setup_queues(struct nullb *nullb)
{
        nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
                                                                GFP_KERNEL);
        if (!nullb->queues)
                return -ENOMEM;

        nullb->nr_queues = 0;
        nullb->queue_depth = hw_queue_depth;

        return 0;
}

static int init_driver_queues(struct nullb *nullb)
{
        struct nullb_queue *nq;
        int i, ret = 0;

        for (i = 0; i < submit_queues; i++) {
                nq = &nullb->queues[i];

                null_init_queue(nullb, nq);

                ret = setup_commands(nq);
                if (ret)
                        return ret;
                nullb->nr_queues++;
        }
        return 0;
}

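/*
 * Create one nullb instance: allocate queues, build a request_queue for
 * the chosen queue_mode, then register either a LightNVM device or a
 * gendisk named nullb<index>.
 */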
static int null_add_dev(void)
{
        struct gendisk *disk;
        struct nullb *nullb;
        sector_t size;
        int rv;

        nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
        if (!nullb) {
                rv = -ENOMEM;
                goto out;
        }

        spin_lock_init(&nullb->lock);

        if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
                submit_queues = nr_online_nodes;

        rv = setup_queues(nullb);
        if (rv)
                goto out_free_nullb;

        if (queue_mode == NULL_Q_MQ) {
                nullb->tag_set.ops = &null_mq_ops;
                nullb->tag_set.nr_hw_queues = submit_queues;
                nullb->tag_set.queue_depth = hw_queue_depth;
                nullb->tag_set.numa_node = home_node;
                nullb->tag_set.cmd_size = sizeof(struct nullb_cmd);
                nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
                nullb->tag_set.driver_data = nullb;

                rv = blk_mq_alloc_tag_set(&nullb->tag_set);
                if (rv)
                        goto out_cleanup_queues;

                nullb->q = blk_mq_init_queue(&nullb->tag_set);
                if (IS_ERR(nullb->q)) {
                        rv = -ENOMEM;
                        goto out_cleanup_tags;
                }
        } else if (queue_mode == NULL_Q_BIO) {
                nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
                if (!nullb->q) {
                        rv = -ENOMEM;
                        goto out_cleanup_queues;
                }
                blk_queue_make_request(nullb->q, null_queue_bio);
                rv = init_driver_queues(nullb);
                if (rv)
                        goto out_cleanup_blk_queue;
        } else {
                nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
                if (!nullb->q) {
                        rv = -ENOMEM;
                        goto out_cleanup_queues;
                }
                blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
                blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
                rv = init_driver_queues(nullb);
                if (rv)
                        goto out_cleanup_blk_queue;
        }

        nullb->q->queuedata = nullb;
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
        queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

        mutex_lock(&lock);
        list_add_tail(&nullb->list, &nullb_list);
        nullb->index = nullb_indexes++;
        mutex_unlock(&lock);

        blk_queue_logical_block_size(nullb->q, bs);
        blk_queue_physical_block_size(nullb->q, bs);

        sprintf(nullb->disk_name, "nullb%d", nullb->index);

        if (use_lightnvm) {
                rv = nvm_register(nullb->q, nullb->disk_name,
                                  &null_lnvm_dev_ops);
                if (rv)
                        goto out_cleanup_blk_queue;
                goto done;
        }

        disk = nullb->disk = alloc_disk_node(1, home_node);
        if (!disk) {
                rv = -ENOMEM;
                goto out_cleanup_lightnvm;
        }
        size = gb * 1024 * 1024 * 1024ULL;
        set_capacity(disk, size >> 9);

        disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
        disk->major             = null_major;
        disk->first_minor       = nullb->index;
        disk->fops              = &null_fops;
        disk->private_data      = nullb;
        disk->queue             = nullb->q;
        strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);

        add_disk(disk);
done:
        return 0;

out_cleanup_lightnvm:
        if (use_lightnvm)
                nvm_unregister(nullb->disk_name);
out_cleanup_blk_queue:
        blk_cleanup_queue(nullb->q);
out_cleanup_tags:
        if (queue_mode == NULL_Q_MQ)
                blk_mq_free_tag_set(&nullb->tag_set);
out_cleanup_queues:
        cleanup_queues(nullb);
out_free_nullb:
        kfree(nullb);
out:
        return rv;
}

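/* Validate and clamp module parameters, then register nr_devices instances. */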
static int __init null_init(void)
{
        unsigned int i;

        if (bs > PAGE_SIZE) {
                pr_warn("null_blk: invalid block size\n");
                pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
                bs = PAGE_SIZE;
        }

        if (use_lightnvm && bs != 4096) {
                pr_warn("null_blk: LightNVM only supports 4k block size\n");
                pr_warn("null_blk: defaults block size to 4k\n");
                bs = 4096;
        }

        if (use_lightnvm && queue_mode != NULL_Q_MQ) {
                pr_warn("null_blk: LightNVM only supported for blk-mq\n");
                pr_warn("null_blk: defaults queue mode to blk-mq\n");
                queue_mode = NULL_Q_MQ;
        }

        if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
                if (submit_queues < nr_online_nodes) {
                        pr_warn("null_blk: submit_queues param is set to %u.\n",
                                                        nr_online_nodes);
                        submit_queues = nr_online_nodes;
                }
        } else if (submit_queues > nr_cpu_ids)
                submit_queues = nr_cpu_ids;
        else if (!submit_queues)
                submit_queues = 1;

        mutex_init(&lock);

        null_major = register_blkdev(0, "nullb");
        if (null_major < 0)
                return null_major;

        if (use_lightnvm) {
                ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64),
                                                                0, 0, NULL);
                if (!ppa_cache) {
                        pr_err("null_blk: unable to create ppa cache\n");
                        return -ENOMEM;
                }
        }

        for (i = 0; i < nr_devices; i++) {
                if (null_add_dev()) {
                        unregister_blkdev(null_major, "nullb");
                        goto err_ppa;
                }
        }

        pr_info("null: module loaded\n");
        return 0;
err_ppa:
        kmem_cache_destroy(ppa_cache);
        return -EINVAL;
}

static void __exit null_exit(void)
{
        struct nullb *nullb;

        unregister_blkdev(null_major, "nullb");

        mutex_lock(&lock);
        while (!list_empty(&nullb_list)) {
                nullb = list_entry(nullb_list.next, struct nullb, list);
                null_del_dev(nullb);
        }
        mutex_unlock(&lock);

        kmem_cache_destroy(ppa_cache);
}

module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <jaxboe@fusionio.com>");
MODULE_LICENSE("GPL");