/*
 * NVDIMM Block Window Driver
 * Copyright (c) 2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/nd.h>
#include <linux/sizes.h>
#include "nd.h"

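/*
 * Per-namespace driver state.  @sector_size is the logical block size
 * exposed to the block layer (512, or 4096 once lbasize reaches 4K),
 * while @internal_lbasize is the namespace lbasize rounded up to
 * INT_LBASIZE_ALIGNMENT.  Whatever lbasize carries beyond sector_size
 * is per-sector metadata; e.g. a 520-byte lbasize yields 512 data
 * bytes plus 8 metadata bytes per sector.
 */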
struct nd_blk_device {
	struct nd_namespace_blk *nsblk;
	struct nd_blk_region *ndbr;
	size_t disk_size;
	u32 sector_size;
	u32 internal_lbasize;
};

static u32 nd_blk_meta_size(struct nd_blk_device *blk_dev)
{
	return blk_dev->nsblk->lbasize - blk_dev->sector_size;
}

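/*
 * Translate a linear namespace offset into an offset into the backing
 * DIMM.  A BLK namespace can be built from multiple discontiguous DPA
 * ranges, so walk nsblk->res[] until the offset lands inside one of
 * them.  A request that falls outside every range, or that would
 * straddle a range boundary, is flagged by returning SIZE_MAX.
 */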
static resource_size_t to_dev_offset(struct nd_namespace_blk *nsblk,
				resource_size_t ns_offset, unsigned int len)
{
	int i;

	for (i = 0; i < nsblk->num_resources; i++) {
		if (ns_offset < resource_size(nsblk->res[i])) {
			if (ns_offset + len > resource_size(nsblk->res[i])) {
				dev_WARN_ONCE(&nsblk->common.dev, 1,
						"illegal request\n");
				return SIZE_MAX;
			}
			return nsblk->res[i]->start + ns_offset;
		}
		ns_offset -= resource_size(nsblk->res[i]);
	}

	dev_WARN_ONCE(&nsblk->common.dev, 1, "request out of range\n");
	return SIZE_MAX;
}

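/*
 * Read or write the integrity metadata attached to @lba.  On media,
 * the metadata for a logical block sits immediately after its data,
 * i.e. at namespace offset lba * internal_lbasize + sector_size.
 */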
#ifdef CONFIG_BLK_DEV_INTEGRITY
static int nd_blk_rw_integrity(struct nd_blk_device *blk_dev,
				struct bio_integrity_payload *bip, u64 lba,
				int rw)
{
	unsigned int len = nd_blk_meta_size(blk_dev);
	resource_size_t dev_offset, ns_offset;
	struct nd_namespace_blk *nsblk;
	struct nd_blk_region *ndbr;
	int err = 0;

	nsblk = blk_dev->nsblk;
	ndbr = blk_dev->ndbr;
	ns_offset = lba * blk_dev->internal_lbasize + blk_dev->sector_size;
	dev_offset = to_dev_offset(nsblk, ns_offset, len);
	if (dev_offset == SIZE_MAX)
		return -EIO;

	while (len) {
		unsigned int cur_len;
		struct bio_vec bv;
		void *iobuf;

		bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
		/*
		 * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
		 * .bv_offset already adjusted for iter->bi_bvec_done, and we
		 * can use those directly.
		 */

		cur_len = min(len, bv.bv_len);
		iobuf = kmap_atomic(bv.bv_page);
		err = ndbr->do_io(ndbr, dev_offset, iobuf + bv.bv_offset,
				cur_len, rw);
		kunmap_atomic(iobuf);
		if (err)
			return err;

		len -= cur_len;
		dev_offset += cur_len;
		bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len);
	}

	return err;
}

#else /* CONFIG_BLK_DEV_INTEGRITY */
static int nd_blk_rw_integrity(struct nd_blk_device *blk_dev,
				struct bio_integrity_payload *bip, u64 lba,
				int rw)
{
	return 0;
}
#endif

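/*
 * Transfer one bio_vec worth of data.  Without an integrity payload
 * the whole segment (at most PAGE_SIZE) goes down in a single do_io
 * call; with one, the segment is split at sector granularity so each
 * LBA's metadata can be transferred right after its data.
 */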
static int nd_blk_do_bvec(struct nd_blk_device *blk_dev,
		struct bio_integrity_payload *bip, struct page *page,
		unsigned int len, unsigned int off, int rw,
		sector_t sector)
{
	struct nd_blk_region *ndbr = blk_dev->ndbr;
	resource_size_t dev_offset, ns_offset;
	int err = 0;
	void *iobuf;
	u64 lba;

	while (len) {
		unsigned int cur_len;

		/*
		 * If we don't have an integrity payload, we don't have to
		 * split the bvec into sectors, as this would cause unnecessary
		 * Block Window setup/move steps.  The do_io routine is capable
		 * of handling len <= PAGE_SIZE.
		 */
		cur_len = bip ? min(len, blk_dev->sector_size) : len;

		lba = div_u64(sector << SECTOR_SHIFT, blk_dev->sector_size);
		ns_offset = lba * blk_dev->internal_lbasize;
		dev_offset = to_dev_offset(blk_dev->nsblk, ns_offset, cur_len);
		if (dev_offset == SIZE_MAX)
			return -EIO;

		iobuf = kmap_atomic(page);
		err = ndbr->do_io(ndbr, dev_offset, iobuf + off, cur_len, rw);
		kunmap_atomic(iobuf);
		if (err)
			return err;

		if (bip) {
			err = nd_blk_rw_integrity(blk_dev, bip, lba, rw);
			if (err)
				return err;
		}
		len -= cur_len;
		off += cur_len;
		sector += blk_dev->sector_size >> SECTOR_SHIFT;
	}

	return err;
}

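/*
 * make_request entry point: walk the bio segment by segment and let
 * nd_blk_do_bvec() handle offset translation and the optional
 * integrity transfer for each one.
 */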
static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio)
{
	struct bio_integrity_payload *bip;
	struct nd_blk_device *blk_dev;
	struct bvec_iter iter;
	unsigned long start;
	struct bio_vec bvec;
	int err = 0, rw;
	bool do_acct;

	/*
	 * bio_integrity_enabled also checks if the bio already has an
	 * integrity payload attached.  If it does, we *don't* do a
	 * bio_integrity_prep here - the payload has been generated by
	 * another kernel subsystem, and we just pass it through.
	 */
	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
		bio->bi_error = -EIO;
		goto out;
	}

	bip = bio_integrity(bio);
	blk_dev = q->queuedata;
	rw = bio_data_dir(bio);
	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;

		BUG_ON(len > PAGE_SIZE);
		err = nd_blk_do_bvec(blk_dev, bip, bvec.bv_page, len,
					bvec.bv_offset, rw, iter.bi_sector);
		if (err) {
			dev_dbg(&blk_dev->nsblk->common.dev,
					"io error in %s sector %lld, len %d\n",
					(rw == READ) ? "READ" : "WRITE",
					(unsigned long long) iter.bi_sector, len);
			bio->bi_error = err;
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

 out:
	bio_endio(bio);
	return BLK_QC_T_NONE;
}

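/*
 * rw_bytes op for the namespace, used by a BTT stacked on top of it;
 * this path bypasses the block layer and drives do_io directly.
 */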
static int nd_blk_rw_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *iobuf, size_t n, int rw)
{
	struct nd_blk_device *blk_dev = dev_get_drvdata(ndns->claim);
	struct nd_namespace_blk *nsblk = blk_dev->nsblk;
	struct nd_blk_region *ndbr = blk_dev->ndbr;
	resource_size_t dev_offset;

	dev_offset = to_dev_offset(nsblk, offset, n);

	if (unlikely(offset + n > blk_dev->disk_size)) {
		dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
		return -EFAULT;
	}

	if (dev_offset == SIZE_MAX)
		return -EIO;

	return ndbr->do_io(ndbr, dev_offset, iobuf, n, rw);
}

static const struct block_device_operations nd_blk_fops = {
	.owner = THIS_MODULE,
	.revalidate_disk = nvdimm_revalidate_disk,
};

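/* devm actions that unwind queue and disk allocation on unbind */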
static void nd_blk_release_queue(void *q)
{
	blk_cleanup_queue(q);
}

static void nd_blk_release_disk(void *disk)
{
	del_gendisk(disk);
	put_disk(disk);
}

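/*
 * The exported capacity is internal_nlba * sector_size: the metadata
 * slice of each internal LBA is invisible to userspace.  Capacity is
 * held at zero across add_disk() and only raised after blk-integrity
 * setup succeeds, so the disk cannot be used before its metadata
 * handling is in place.
 */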
static int nd_blk_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns, struct nd_blk_device *blk_dev)
{
	resource_size_t available_disk_size;
	struct request_queue *q;
	struct gendisk *disk;
	u64 internal_nlba;

	internal_nlba = div_u64(blk_dev->disk_size, blk_dev->internal_lbasize);
	available_disk_size = internal_nlba * blk_dev->sector_size;

	q = blk_alloc_queue(GFP_KERNEL);
	if (!q)
		return -ENOMEM;
	if (devm_add_action(dev, nd_blk_release_queue, q)) {
		blk_cleanup_queue(q);
		return -ENOMEM;
	}

	blk_queue_make_request(q, nd_blk_make_request);
	blk_queue_max_hw_sectors(q, UINT_MAX);
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
	blk_queue_logical_block_size(q, blk_dev->sector_size);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	q->queuedata = blk_dev;

	disk = alloc_disk(0);
	if (!disk)
		return -ENOMEM;
	if (devm_add_action(dev, nd_blk_release_disk, disk)) {
		put_disk(disk);
		return -ENOMEM;
	}

	disk->driverfs_dev = &ndns->dev;
	disk->first_minor = 0;
	disk->fops = &nd_blk_fops;
	disk->queue = q;
	disk->flags = GENHD_FL_EXT_DEVT;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	set_capacity(disk, 0);
	add_disk(disk);

	if (nd_blk_meta_size(blk_dev)) {
		int rc = nd_integrity_init(disk, nd_blk_meta_size(blk_dev));

		if (rc)
			return rc;
	}

	set_capacity(disk, available_disk_size >> SECTOR_SHIFT);
	revalidate_disk(disk);
	return 0;
}

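/*
 * Probe order: a BTT claim device attaches its BTT directly; otherwise
 * give nd_btt_probe() a chance to claim the namespace (in which case
 * this probe returns -ENXIO and we come back as btt-blk), and only
 * then attach a raw disk.
 */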
static int nd_blk_probe(struct device *dev)
{
	struct nd_namespace_common *ndns;
	struct nd_namespace_blk *nsblk;
	struct nd_blk_device *blk_dev;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	blk_dev = devm_kzalloc(dev, sizeof(*blk_dev), GFP_KERNEL);
	if (!blk_dev)
		return -ENOMEM;

	nsblk = to_nd_namespace_blk(&ndns->dev);
	blk_dev->disk_size = nvdimm_namespace_capacity(ndns);
	blk_dev->ndbr = to_nd_blk_region(dev->parent);
	blk_dev->nsblk = nsblk;
	blk_dev->internal_lbasize = roundup(nsblk->lbasize,
			INT_LBASIZE_ALIGNMENT);
	blk_dev->sector_size = ((nsblk->lbasize >= 4096) ? 4096 : 512);
	dev_set_drvdata(dev, blk_dev);

	ndns->rw_bytes = nd_blk_rw_bytes;
	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);
	else if (nd_btt_probe(dev, ndns, blk_dev) == 0) {
		/* we'll come back as btt-blk */
		return -ENXIO;
	} else
		return nd_blk_attach_disk(dev, ndns, blk_dev);
}

static int nd_blk_remove(struct device *dev)
{
	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(to_nd_btt(dev));
	return 0;
}

static struct nd_device_driver nd_blk_driver = {
	.probe = nd_blk_probe,
	.remove = nd_blk_remove,
	.drv = {
		.name = "nd_blk",
	},
	.type = ND_DRIVER_NAMESPACE_BLK,
};

static int __init nd_blk_init(void)
{
	return nd_driver_register(&nd_blk_driver);
}

static void __exit nd_blk_exit(void)
{
	driver_unregister(&nd_blk_driver.drv);
}

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_BLK);
module_init(nd_blk_init);
module_exit(nd_blk_exit);