/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/pmem.h>
#include <linux/nd.h>
#include "pfn.h"
#include "nd.h"

struct pmem_device {
	/* One contiguous memory region per device */
	phys_addr_t		phys_addr;
	/* when non-zero this device is hosting a 'pfn' instance */
	phys_addr_t		data_offset;
	u64			pfn_flags;
	void __pmem		*virt_addr;
	/* immutable base size of the namespace */
	size_t			size;
	/* trim size when namespace capacity has been section aligned */
	u32			pfn_pad;
	struct badblocks	bb;
};

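/*
 * Clear media errors ("poison") in the given range via the nvdimm bus,
 * drop any now-clean sectors from the badblocks list, and invalidate
 * stale cachelines over the range so subsequent reads see the cleared
 * media state.
 */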
static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
		unsigned int len)
{
	struct device *dev = pmem->bb.dev;
	sector_t sector;
	long cleared;

	sector = (offset - pmem->data_offset) / 512;
	cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);

	if (cleared > 0 && cleared / 512) {
		dev_dbg(dev, "%s: %llx clear %ld sector%s\n",
				__func__, (unsigned long long) sector,
				cleared / 512, cleared / 512 > 1 ? "s" : "");
		badblocks_clear(&pmem->bb, sector, cleared / 512);
	}
	invalidate_pmem(pmem->virt_addr + offset, len);
}

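/*
 * Copy one bio_vec worth of data between @page and the pmem mapping.
 * Reads that intersect known-bad pmem fail with -EIO; writes follow the
 * write / clear-poison / re-write sequence described in the comment in
 * the write path below.
 */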
static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
			unsigned int len, unsigned int off, int rw,
			sector_t sector)
{
	int rc = 0;
	bool bad_pmem = false;
	void *mem = kmap_atomic(page);
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void __pmem *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		bad_pmem = true;

	if (rw == READ) {
		if (unlikely(bad_pmem))
			rc = -EIO;
		else {
			rc = memcpy_from_pmem(mem + off, pmem_addr, len);
			flush_dcache_page(page);
		}
	} else {
		/*
		 * Note that we write the data both before and after
		 * clearing poison.  The write before clear poison
		 * handles situations where the latest written data is
		 * preserved and the clear poison operation simply marks
		 * the address range as valid without changing the data.
		 * In this case application software can assume that an
		 * interrupted write will either return the new good
		 * data or an error.
		 *
		 * However, if pmem_clear_poison() leaves the data in an
		 * indeterminate state we need to perform the write
		 * after clear poison.
		 */
		flush_dcache_page(page);
		memcpy_to_pmem(pmem_addr, mem + off, len);
		if (unlikely(bad_pmem)) {
			pmem_clear_poison(pmem, pmem_off, len);
			memcpy_to_pmem(pmem_addr, mem + off, len);
		}
	}

	kunmap_atomic(mem);
	return rc;
}

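/*
 * bio submission entry point: complete each segment synchronously via
 * pmem_do_bvec(), then flush writes to persistence with wmb_pmem()
 * before signalling completion.
 */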
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
	int rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct pmem_device *pmem = q->queuedata;

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset, bio_data_dir(bio),
				iter.bi_sector);
		if (rc) {
			bio->bi_error = rc;
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	if (bio_data_dir(bio))
		wmb_pmem();

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

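/* synchronous single-page read/write, used by the ->rw_page() fast path */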
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, int rw)
{
	struct pmem_device *pmem = bdev->bd_queue->queuedata;
	int rc;

	rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, rw, sector);
	if (rw & WRITE)
		wmb_pmem();

	/*
	 * The ->rw_page interface is subtle and tricky.  The core
	 * retries on any error, so we can only invoke page_endio() in
	 * the successful completion case.  Otherwise, we'll see crashes
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, rw & WRITE, 0);

	return rc;
}

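/*
 * DAX entry point: translate a sector into a kernel virtual address and
 * a pfn_t, and return how many bytes past that offset are known good.
 */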
static long pmem_direct_access(struct block_device *bdev, sector_t sector,
		      void __pmem **kaddr, pfn_t *pfn, long size)
{
	struct pmem_device *pmem = bdev->bd_queue->queuedata;
	resource_size_t offset = sector * 512 + pmem->data_offset;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, size)))
		return -EIO;
	*kaddr = pmem->virt_addr + offset;
	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit known good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return size;
	return pmem->size - pmem->pfn_pad - offset;
}

static const struct block_device_operations pmem_fops = {
	.owner = THIS_MODULE,
	.rw_page = pmem_rw_page,
	.direct_access = pmem_direct_access,
	.revalidate_disk = nvdimm_revalidate_disk,
};

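/* devm actions to unwind the request queue and gendisk on device release */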
static void pmem_release_queue(void *q)
{
	blk_cleanup_queue(q);
}

static void pmem_release_disk(void *disk)
{
	del_gendisk(disk);
	put_disk(disk);
}

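/*
 * Map the namespace (directly, or via a pfn info block when struct page
 * backing is requested), then allocate and register the request queue
 * and gendisk.  Resources are devm-managed, so error paths can return
 * directly once the corresponding devm action is registered.
 */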
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct vmem_altmap __altmap, *altmap = NULL;
	struct resource *res = &nsio->res;
	struct nd_pfn *nd_pfn = NULL;
	int nid = dev_to_node(dev);
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	struct resource pfn_res;
	struct request_queue *q;
	struct gendisk *disk;
	void *addr;

	/* while nsio_rw_bytes is active, parse a pfn info block if present */
	if (is_nd_pfn(dev)) {
		nd_pfn = to_nd_pfn(dev);
		altmap = nvdimm_setup_pfn(nd_pfn, &pfn_res, &__altmap);
		if (IS_ERR(altmap))
			return PTR_ERR(altmap);
	}

	/* we're attaching a block device, disable raw namespace access */
	devm_nsio_disable(dev, nsio);

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return -ENOMEM;

	dev_set_drvdata(dev, pmem);
	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	if (!arch_has_wmb_pmem())
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
	if (!q)
		return -ENOMEM;

	pmem->pfn_flags = PFN_DEV;
	if (is_nd_pfn(dev)) {
		addr = devm_memremap_pages(dev, &pfn_res, &q->q_usage_counter,
				altmap);
		pfn_sb = nd_pfn->pfn_sb;
		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
		pmem->pfn_pad = resource_size(res) - resource_size(&pfn_res);
		pmem->pfn_flags |= PFN_MAP;
		res = &pfn_res; /* for badblocks populate */
		res->start += pmem->data_offset;
	} else if (pmem_should_map_pages(dev)) {
		addr = devm_memremap_pages(dev, &nsio->res,
				&q->q_usage_counter, NULL);
		pmem->pfn_flags |= PFN_MAP;
	} else
		addr = devm_memremap(dev, pmem->phys_addr,
				pmem->size, ARCH_MEMREMAP_PMEM);

	/*
	 * At release time the queue must be dead before
	 * devm_memremap_pages is unwound
	 */
	if (devm_add_action(dev, pmem_release_queue, q)) {
		blk_cleanup_queue(q);
		return -ENOMEM;
	}

	if (IS_ERR(addr))
		return PTR_ERR(addr);
	pmem->virt_addr = (void __pmem *) addr;

	blk_queue_make_request(q, pmem_make_request);
	blk_queue_physical_block_size(q, PAGE_SIZE);
	blk_queue_max_hw_sectors(q, UINT_MAX);
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	q->queuedata = pmem;

	disk = alloc_disk_node(0, nid);
	if (!disk)
		return -ENOMEM;
	if (devm_add_action(dev, pmem_release_disk, disk)) {
		put_disk(disk);
		return -ENOMEM;
	}

	disk->fops = &pmem_fops;
	disk->queue = q;
	disk->flags = GENHD_FL_EXT_DEVT;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	disk->driverfs_dev = dev;
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ 512);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(to_nd_region(dev->parent), &pmem->bb, res);
	disk->bb = &pmem->bb;
	add_disk(disk);
	revalidate_disk(disk);

	return 0;
}

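/*
 * Decide which personality this namespace comes up as: btt, pfn/dax, or
 * a raw pmem disk when no info block is found.
 */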
static int nd_pmem_probe(struct device *dev)
{
	struct nd_namespace_common *ndns;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
		return -ENXIO;

	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);

	if (is_nd_pfn(dev))
		return pmem_attach_disk(dev, ndns);

	/* if we find a valid info-block we'll come back as that personality */
	if (nd_btt_probe(dev, ndns) == 0 || nd_pfn_probe(dev, ndns) == 0
			|| nd_dax_probe(dev, ndns) == 0)
		return -ENXIO;

	/* ...otherwise we're just a raw pmem device */
	return pmem_attach_disk(dev, ndns);
}

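/* only the btt personality needs explicit teardown; the rest is devm-managed */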
static int nd_pmem_remove(struct device *dev)
{
	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(to_nd_btt(dev));
	return 0;
}

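/*
 * NVDIMM_REVALIDATE_POISON: recompute the resource span this personality
 * actually uses (adjusting for the pfn data offset, start_pad, and
 * end_trunc) and repopulate the badblocks list from the region's poison
 * list.
 */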
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct pmem_device *pmem = dev_get_drvdata(dev);
	resource_size_t offset = 0, end_trunc = 0;
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct resource res;

	if (event != NVDIMM_REVALIDATE_POISON)
		return;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		ndns = nd_btt->ndns;
	} else if (is_nd_pfn(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);
		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

		ndns = nd_pfn->ndns;
		offset = pmem->data_offset + __le32_to_cpu(pfn_sb->start_pad);
		end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
	} else
		ndns = to_ndns(dev);

	nsio = to_nd_namespace_io(&ndns->dev);
	res.start = nsio->res.start + offset;
	res.end = nsio->res.end - end_trunc;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &res);
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

static int __init pmem_init(void)
{
	return nd_driver_register(&nd_pmem_driver);
}
module_init(pmem_init);

static void pmem_exit(void)
{
	driver_unregister(&nd_pmem_driver.drv);
}
module_exit(pmem_exit);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");