#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
#include <linux/lightnvm.h>

struct nullb_cmd {
	struct list_head list;
	struct llist_node ll_list;
	struct call_single_data csd;
	struct request *rq;
	struct bio *bio;
	unsigned int tag;
	struct nullb_queue *nq;
	struct hrtimer timer;
};

struct nullb_queue {
	unsigned long *tag_map;
	wait_queue_head_t wait;
	unsigned int queue_depth;

	struct nullb_cmd *cmds;
};

struct nullb {
	struct list_head list;
	unsigned int index;
	struct request_queue *q;
	struct gendisk *disk;
	struct blk_mq_tag_set tag_set;
	struct hrtimer timer;
	unsigned int queue_depth;
	spinlock_t lock;

	struct nullb_queue *queues;
	unsigned int nr_queues;
	char disk_name[DISK_NAME_LEN];
};

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static int nullb_indexes;
static struct kmem_cache *ppa_cache;

enum {
	NULL_IRQ_NONE		= 0,
	NULL_IRQ_SOFTIRQ	= 1,
	NULL_IRQ_TIMER		= 2,
};

enum {
	NULL_Q_BIO		= 0,
	NULL_Q_RQ		= 1,
	NULL_Q_MQ		= 2,
};

static int submit_queues;
module_param(submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int home_node = NUMA_NO_NODE;
module_param(home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");

static int queue_mode = NULL_Q_MQ;

static int null_param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
	.set	= null_set_queue_mode,
	.get	= param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &queue_mode, S_IRUGO);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

static int gb = 250;
module_param(gb, int, S_IRUGO);
MODULE_PARM_DESC(gb, "Size in GB");

static int bs = 512;
module_param(bs, int, S_IRUGO);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int nr_devices = 2;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool use_lightnvm;
module_param(use_lightnvm, bool, S_IRUGO);
MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");

static int irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &irqmode, NULL_IRQ_NONE,
					NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
	.set	= null_set_irqmode,
	.get	= param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &irqmode, S_IRUGO);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static unsigned long completion_nsec = 10000;
module_param(completion_nsec, ulong, S_IRUGO);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int hw_queue_depth = 64;
module_param(hw_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool use_per_node_hctx = false;
module_param(use_per_node_hctx, bool, S_IRUGO);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

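/*
 * Example invocation (the parameter values here are illustrative only):
 * create two multiqueue devices that complete I/O from an hrtimer 10us
 * after submission:
 *
 *	modprobe null_blk queue_mode=2 irqmode=2 completion_nsec=10000 nr_devices=2
 */

/* Tag helpers: a tag is a bit in nq->tag_map and indexes into nq->cmds[]. */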
static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}

static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}

static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);

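/*
 * Allocate a command from the queue's preallocated pool. __alloc_cmd() is
 * the non-blocking fast path; alloc_cmd() may sleep until a tag is freed.
 */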
static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	unsigned int tag;

	tag = get_tag(nq);
	if (tag != -1U) {
		cmd = &nq->cmds[tag];
		cmd->tag = tag;
		cmd->nq = nq;
		if (irqmode == NULL_IRQ_TIMER) {
			hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL);
			cmd->timer.function = null_cmd_timer_expired;
		}
		return cmd;
	}

	return NULL;
}

static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;

	do {
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		cmd = __alloc_cmd(nq);
		if (cmd)
			break;

		io_schedule();
	} while (1);

	finish_wait(&nq->wait, &wait);
	return cmd;
}

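/* Complete a command for whichever queue mode it was submitted under. */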
static void end_cmd(struct nullb_cmd *cmd)
{
	struct request_queue *q = NULL;

	if (cmd->rq)
		q = cmd->rq->q;

	switch (queue_mode) {
	case NULL_Q_MQ:
		blk_mq_end_request(cmd->rq, 0);
		return;
	case NULL_Q_RQ:
		INIT_LIST_HEAD(&cmd->rq->queuelist);
		blk_end_request_all(cmd->rq, 0);
		break;
	case NULL_Q_BIO:
		bio_endio(cmd->bio);
		break;
	}

	free_cmd(cmd);

	/* Restart queue if needed, as we are freeing a tag */
	if (queue_mode == NULL_Q_RQ && blk_queue_stopped(q)) {
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue_async(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	end_cmd(container_of(timer, struct nullb_cmd, timer));

	return HRTIMER_NORESTART;
}

static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	ktime_t kt = ktime_set(0, completion_nsec);

	hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}

static void null_softirq_done_fn(struct request *rq)
{
	if (queue_mode == NULL_Q_MQ)
		end_cmd(blk_mq_rq_to_pdu(rq));
	else
		end_cmd(rq->special);
}

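/*
 * There is no real hardware to drive, so "handling" a command is just a
 * matter of picking the completion path selected by the irqmode parameter.
 */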
static inline void null_handle_cmd(struct nullb_cmd *cmd)
{
	/* Complete IO by inline, softirq or timer */
	switch (irqmode) {
	case NULL_IRQ_SOFTIRQ:
		switch (queue_mode) {
		case NULL_Q_MQ:
			blk_mq_complete_request(cmd->rq, cmd->rq->errors);
			break;
		case NULL_Q_RQ:
			blk_complete_request(cmd->rq);
			break;
		case NULL_Q_BIO:
			/*
			 * XXX: no proper submitting cpu information available.
			 */
			end_cmd(cmd);
			break;
		}
		break;
	case NULL_IRQ_NONE:
		end_cmd(cmd);
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
}

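/* Spread the submitting CPUs evenly over the available queues. */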
static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
	int index = 0;

	if (nullb->nr_queues != 1)
		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

	return &nullb->queues[index];
}

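/* Entry point for the bio-based mode (queue_mode=NULL_Q_BIO). */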
static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 1);
	cmd->bio = bio;

	null_handle_cmd(cmd);
	return BLK_QC_T_NONE;
}

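/* Legacy request_fn mode (queue_mode=NULL_Q_RQ). */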
static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 0);
	if (cmd) {
		cmd->rq = req;
		req->special = cmd;
		return BLKPREP_OK;
	}
	blk_stop_queue(q);

	return BLKPREP_DEFER;
}

static void null_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		struct nullb_cmd *cmd = rq->special;

		spin_unlock_irq(q->queue_lock);
		null_handle_cmd(cmd);
		spin_lock_irq(q->queue_lock);
	}
}

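/* blk-mq mode (queue_mode=NULL_Q_MQ): the command lives in the request pdu. */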
static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);

	if (irqmode == NULL_IRQ_TIMER) {
		hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cmd->timer.function = null_cmd_timer_expired;
	}
	cmd->rq = bd->rq;
	cmd->nq = hctx->driver_data;

	blk_mq_start_request(bd->rq);

	null_handle_cmd(cmd);
	return BLK_MQ_RQ_QUEUE_OK;
}

static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
	BUG_ON(!nullb);
	BUG_ON(!nq);

	init_waitqueue_head(&nq->wait);
	nq->queue_depth = nullb->queue_depth;
}

static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int index)
{
	struct nullb *nullb = data;
	struct nullb_queue *nq = &nullb->queues[index];

	hctx->driver_data = nq;
	null_init_queue(nullb, nq);
	nullb->nr_queues++;

	return 0;
}

static struct blk_mq_ops null_mq_ops = {
	.queue_rq	= null_queue_rq,
	.map_queue	= blk_mq_map_queue,
	.init_hctx	= null_init_hctx,
	.complete	= null_softirq_done_fn,
};

static void cleanup_queue(struct nullb_queue *nq)
{
	kfree(nq->tag_map);
	kfree(nq->cmds);
}

static void cleanup_queues(struct nullb *nullb)
{
	int i;

	for (i = 0; i < nullb->nr_queues; i++)
		cleanup_queue(&nullb->queues[i]);

	kfree(nullb->queues);
}

#ifdef CONFIG_NVM

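/*
 * LightNVM support: when use_lightnvm is set, the device registers with
 * the LightNVM subsystem instead of exposing a gendisk. I/O then arrives
 * as struct nvm_rq and is wrapped in an ordinary request below.
 */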
static void null_lnvm_end_io(struct request *rq, int error)
{
	struct nvm_rq *rqd = rq->end_io_data;

	nvm_end_io(rqd, error);

	blk_put_request(rq);
}

static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct request *rq;
	struct bio *bio = rqd->bio;

	rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0);
	if (IS_ERR(rq))
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_DRV_PRIV;
	rq->__sector = bio->bi_iter.bi_sector;
	rq->ioprio = bio_prio(bio);

	if (bio_has_data(bio))
		rq->nr_phys_segments = bio_phys_segments(q, bio);

	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);

	return 0;
}

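/* Report a synthetic device geometry derived from the gb and bs parameters. */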
static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
{
	sector_t size = gb * 1024 * 1024 * 1024ULL;
	sector_t blksize;
	struct nvm_id_group *grp;

	id->ver_id = 0x1;
	id->vmnt = 0;
	id->cgrps = 1;
	id->cap = 0x2;
	id->dom = 0x1;

	id->ppaf.blk_offset = 0;
	id->ppaf.blk_len = 16;
	id->ppaf.pg_offset = 16;
	id->ppaf.pg_len = 16;
	id->ppaf.sect_offset = 32;
	id->ppaf.sect_len = 8;
	id->ppaf.pln_offset = 40;
	id->ppaf.pln_len = 8;
	id->ppaf.lun_offset = 48;
	id->ppaf.lun_len = 8;
	id->ppaf.ch_offset = 56;
	id->ppaf.ch_len = 8;

	sector_div(size, bs); /* convert size to pages */
	size >>= 8; /* convert size to pgs per blk */
	grp = &id->groups[0];
	grp->mtype = 0;
	grp->fmtype = 0;
	grp->num_ch = 1;
	grp->num_pg = 256;
	blksize = size;
	size >>= 16;
	grp->num_lun = size + 1;
	sector_div(blksize, grp->num_lun);
	grp->num_blk = blksize;
	grp->num_pln = 1;

	grp->fpg_sz = bs;
	grp->csecs = bs;
	grp->trdt = 25000;
	grp->trdm = 25000;
	grp->tprt = 500000;
	grp->tprm = 500000;
	grp->tbet = 1500000;
	grp->tbem = 1500000;
	grp->mpos = 0x010101; /* single plane rwe */
	grp->cpar = hw_queue_depth;

	return 0;
}

static void *null_lnvm_create_dma_pool(struct nvm_dev *dev, char *name)
{
	mempool_t *virtmem_pool;

	virtmem_pool = mempool_create_slab_pool(64, ppa_cache);
	if (!virtmem_pool) {
		pr_err("null_blk: Unable to create virtual memory pool\n");
		return NULL;
	}

	return virtmem_pool;
}

static void null_lnvm_destroy_dma_pool(void *pool)
{
	mempool_destroy(pool);
}

static void *null_lnvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
				gfp_t mem_flags, dma_addr_t *dma_handler)
{
	return mempool_alloc(pool, mem_flags);
}

static void null_lnvm_dev_dma_free(void *pool, void *entry,
				dma_addr_t dma_handler)
{
	mempool_free(entry, pool);
}

static struct nvm_dev_ops null_lnvm_dev_ops = {
	.identity		= null_lnvm_id,
	.submit_io		= null_lnvm_submit_io,

	.create_dma_pool	= null_lnvm_create_dma_pool,
	.destroy_dma_pool	= null_lnvm_destroy_dma_pool,
	.dev_dma_alloc		= null_lnvm_dev_dma_alloc,
	.dev_dma_free		= null_lnvm_dev_dma_free,

	/* Simulate nvme protocol restriction */
	.max_phys_sect		= 64,
};

static int null_nvm_register(struct nullb *nullb)
{
	return nvm_register(nullb->q, nullb->disk_name, &null_lnvm_dev_ops);
}

static void null_nvm_unregister(struct nullb *nullb)
{
	nvm_unregister(nullb->disk_name);
}
#else
static int null_nvm_register(struct nullb *nullb)
{
	return -EINVAL;
}
static void null_nvm_unregister(struct nullb *nullb) {}
#endif /* CONFIG_NVM */

static void null_del_dev(struct nullb *nullb)
{
	list_del_init(&nullb->list);

	if (use_lightnvm)
		null_nvm_unregister(nullb);
	else
		del_gendisk(nullb->disk);
	blk_cleanup_queue(nullb->q);
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
	if (!use_lightnvm)
		put_disk(nullb->disk);
	cleanup_queues(nullb);
	kfree(nullb);
}

static int null_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void null_release(struct gendisk *disk, fmode_t mode)
{
}

static const struct block_device_operations null_fops = {
	.owner =	THIS_MODULE,
	.open =		null_open,
	.release =	null_release,
};

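/* Allocate the per-queue command array and the tag bitmap that tracks it. */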
static int setup_commands(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	int i, tag_size;

	nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
	if (!nq->cmds)
		return -ENOMEM;

	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
	nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
	if (!nq->tag_map) {
		kfree(nq->cmds);
		return -ENOMEM;
	}

	for (i = 0; i < nq->queue_depth; i++) {
		cmd = &nq->cmds[i];
		INIT_LIST_HEAD(&cmd->list);
		cmd->ll_list.next = NULL;
		cmd->tag = -1U;
	}

	return 0;
}

static int setup_queues(struct nullb *nullb)
{
	nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
								GFP_KERNEL);
	if (!nullb->queues)
		return -ENOMEM;

	nullb->nr_queues = 0;
	nullb->queue_depth = hw_queue_depth;

	return 0;
}

static int init_driver_queues(struct nullb *nullb)
{
	struct nullb_queue *nq;
	int i, ret = 0;

	for (i = 0; i < submit_queues; i++) {
		nq = &nullb->queues[i];

		null_init_queue(nullb, nq);

		ret = setup_commands(nq);
		if (ret)
			return ret;
		nullb->nr_queues++;
	}
	return 0;
}

static int null_gendisk_register(struct nullb *nullb)
{
	struct gendisk *disk;
	sector_t size;

	disk = nullb->disk = alloc_disk_node(1, home_node);
	if (!disk)
		return -ENOMEM;
	size = gb * 1024 * 1024 * 1024ULL;
	set_capacity(disk, size >> 9);

	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
	disk->major		= null_major;
	disk->first_minor	= nullb->index;
	disk->fops		= &null_fops;
	disk->private_data	= nullb;
	disk->queue		= nullb->q;
	strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);

	add_disk(disk);
	return 0;
}

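/*
 * Create one nullb instance: allocate the queues, set up the request queue
 * for the chosen queue_mode, then register either a LightNVM device or a
 * gendisk.
 */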
static int null_add_dev(void)
{
	struct nullb *nullb;
	int rv;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}

	spin_lock_init(&nullb->lock);

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
		submit_queues = nr_online_nodes;

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (queue_mode == NULL_Q_MQ) {
		nullb->tag_set.ops = &null_mq_ops;
		nullb->tag_set.nr_hw_queues = submit_queues;
		nullb->tag_set.queue_depth = hw_queue_depth;
		nullb->tag_set.numa_node = home_node;
		nullb->tag_set.cmd_size	= sizeof(struct nullb_cmd);
		nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
		nullb->tag_set.driver_data = nullb;

		rv = blk_mq_alloc_tag_set(&nullb->tag_set);
		if (rv)
			goto out_cleanup_queues;

		nullb->q = blk_mq_init_queue(&nullb->tag_set);
		if (IS_ERR(nullb->q)) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
	} else if (queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	} else {
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	}

	nullb->q->queuedata = nullb;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	mutex_lock(&lock);
	nullb->index = nullb_indexes++;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, bs);
	blk_queue_physical_block_size(nullb->q, bs);

	sprintf(nullb->disk_name, "nullb%d", nullb->index);

	if (use_lightnvm)
		rv = null_nvm_register(nullb);
	else
		rv = null_gendisk_register(nullb);

	if (rv)
		goto out_cleanup_blk_queue;

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	mutex_unlock(&lock);

	return 0;
out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return rv;
}

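/*
 * Module init: validate the parameters, register the block major, then
 * create nr_devices instances.
 */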
static int __init null_init(void)
{
	int ret = 0;
	unsigned int i;
	struct nullb *nullb;

	if (bs > PAGE_SIZE) {
		pr_warn("null_blk: invalid block size\n");
		pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
		bs = PAGE_SIZE;
	}

	if (use_lightnvm && bs != 4096) {
		pr_warn("null_blk: LightNVM only supports 4k block size\n");
		pr_warn("null_blk: defaults block size to 4k\n");
		bs = 4096;
	}

	if (use_lightnvm && queue_mode != NULL_Q_MQ) {
		pr_warn("null_blk: LightNVM only supported for blk-mq\n");
		pr_warn("null_blk: defaults queue mode to blk-mq\n");
		queue_mode = NULL_Q_MQ;
	}

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
		if (submit_queues < nr_online_nodes) {
			pr_warn("null_blk: submit_queues param is set to %u.",
							nr_online_nodes);
			submit_queues = nr_online_nodes;
		}
	} else if (submit_queues > nr_cpu_ids)
		submit_queues = nr_cpu_ids;
	else if (!submit_queues)
		submit_queues = 1;

	mutex_init(&lock);

	null_major = register_blkdev(0, "nullb");
	if (null_major < 0)
		return null_major;

	if (use_lightnvm) {
		ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64),
								0, 0, NULL);
		if (!ppa_cache) {
			pr_err("null_blk: unable to create ppa cache\n");
			ret = -ENOMEM;
			goto err_ppa;
		}
	}

	for (i = 0; i < nr_devices; i++) {
		ret = null_add_dev();
		if (ret)
			goto err_dev;
	}

	pr_info("null: module loaded\n");
	return 0;

err_dev:
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		null_del_dev(nullb);
	}
	kmem_cache_destroy(ppa_cache);
err_ppa:
	unregister_blkdev(null_major, "nullb");
	return ret;
}

static void __exit null_exit(void)
{
	struct nullb *nullb;

	unregister_blkdev(null_major, "nullb");

	mutex_lock(&lock);
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		null_del_dev(nullb);
	}
	mutex_unlock(&lock);

	kmem_cache_destroy(ppa_cache);
}

module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <jaxboe@fusionio.com>");
MODULE_LICENSE("GPL");