/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/pmem.h>
#include <linux/nd.h>
#include "pmem.h"
#include "pfn.h"
#include "nd.h"

static struct device *to_dev(struct pmem_device *pmem)
{
	/*
	 * nvdimm bus services need a 'dev' parameter, and we record the device
	 * at init in bb.dev.
	 */
	return pmem->bb.dev;
}

static struct nd_region *to_region(struct pmem_device *pmem)
{
	return to_nd_region(to_dev(pmem)->parent);
}

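/*
 * Ask the nvdimm bus to clear poison over the given range and, for any
 * whole sectors that were successfully cleared, drop the corresponding
 * entries from the badblocks list.  The cached copy of the range is
 * invalidated so subsequent reads see the cleared media contents.
 * Returns -EIO if less than the requested length was cleared.
 */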
static int pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
		unsigned int len)
{
	struct device *dev = to_dev(pmem);
	sector_t sector;
	long cleared;
	int rc = 0;

	sector = (offset - pmem->data_offset) / 512;

	cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
	if (cleared < len)
		rc = -EIO;
	if (cleared > 0 && cleared / 512) {
		cleared /= 512;
		dev_dbg(dev, "%s: %#llx clear %ld sector%s\n", __func__,
				(unsigned long long) sector, cleared,
				cleared > 1 ? "s" : "");
		badblocks_clear(&pmem->bb, sector, cleared);
	}

	invalidate_pmem(pmem->virt_addr + offset, len);

	return rc;
}

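/* copy a page (or part of one) into persistent memory via the pmem API */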
static void write_pmem(void *pmem_addr, struct page *page,
		unsigned int off, unsigned int len)
{
	void *mem = kmap_atomic(page);

	memcpy_to_pmem(pmem_addr, mem + off, len);
	kunmap_atomic(mem);
}

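/*
 * Copy from persistent memory into a page; returns -EIO if
 * memcpy_from_pmem() reports a failure, e.g. when the copy trips over a
 * media error.
 */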
static int read_pmem(struct page *page, unsigned int off,
		void *pmem_addr, unsigned int len)
{
	int rc;
	void *mem = kmap_atomic(page);

	rc = memcpy_from_pmem(mem + off, pmem_addr, len);
	kunmap_atomic(mem);
	if (rc)
		return -EIO;
	return 0;
}

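/*
 * Perform one segment of I/O directly against the pmem mapping.  Reads
 * from known-bad ranges fail with -EIO without touching the media;
 * writes always go through, and a write that covers a known-bad range
 * additionally attempts to clear the poison (see the comment in the
 * write path below).
 */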
static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
			unsigned int len, unsigned int off, bool is_write,
			sector_t sector)
{
	int rc = 0;
	bool bad_pmem = false;
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		bad_pmem = true;

	if (!is_write) {
		if (unlikely(bad_pmem))
			rc = -EIO;
		else {
			rc = read_pmem(page, off, pmem_addr, len);
			flush_dcache_page(page);
		}
	} else {
		/*
		 * Note that we write the data both before and after
		 * clearing poison.  The write before clear poison
		 * handles situations where the latest written data is
		 * preserved and the clear poison operation simply marks
		 * the address range as valid without changing the data.
		 * In this case application software can assume that an
		 * interrupted write will either return the new good
		 * data or an error.
		 *
		 * However, if pmem_clear_poison() leaves the data in an
		 * indeterminate state we need to perform the write
		 * after clear poison.
		 */
		flush_dcache_page(page);
		write_pmem(pmem_addr, page, off, len);
		if (unlikely(bad_pmem)) {
			rc = pmem_clear_poison(pmem, pmem_off, len);
			write_pmem(pmem_addr, page, off, len);
		}
	}

	return rc;
}

/* account for REQ_FLUSH rename, replace with REQ_PREFLUSH after v4.8-rc1 */
#ifndef REQ_FLUSH
#define REQ_FLUSH REQ_PREFLUSH
#endif

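/*
 * bio submission entry point: honor REQ_FLUSH/REQ_PREFLUSH and REQ_FUA by
 * flushing the nvdimm region, and complete each segment synchronously via
 * pmem_do_bvec().
 */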
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
	int rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct pmem_device *pmem = q->queuedata;
	struct nd_region *nd_region = to_region(pmem);

	if (bio->bi_opf & REQ_FLUSH)
		nvdimm_flush(nd_region);

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset, op_is_write(bio_op(bio)),
				iter.bi_sector);
		if (rc) {
			bio->bi_error = rc;
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	if (bio->bi_opf & REQ_FUA)
		nvdimm_flush(nd_region);

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

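/* ->rw_page(): synchronously read or write a single page */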
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, bool is_write)
{
	struct pmem_device *pmem = bdev->bd_queue->queuedata;
	int rc;

	rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, is_write, sector);

	/*
	 * The ->rw_page interface is subtle and tricky.  The core
	 * retries on any error, so we can only invoke page_endio() in
	 * the successful completion case.  Otherwise, we'll see crashes
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, is_write, 0);

	return rc;
}

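/*
 * DAX entry point: translate a sector to a kernel virtual address and pfn
 * within the pmem mapping, refusing ranges that intersect known poison,
 * and report how much of the mapping past that point is known good.
 */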
/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
__weak long pmem_direct_access(struct block_device *bdev, sector_t sector,
		      void **kaddr, pfn_t *pfn, long size)
{
	struct pmem_device *pmem = bdev->bd_queue->queuedata;
	resource_size_t offset = sector * 512 + pmem->data_offset;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, size)))
		return -EIO;
	*kaddr = pmem->virt_addr + offset;
	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit known good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return size;
	return pmem->size - pmem->pfn_pad - offset;
}

static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		pmem_rw_page,
	.direct_access =	pmem_direct_access,
	.revalidate_disk =	nvdimm_revalidate_disk,
};

static void pmem_release_queue(void *q)
{
	blk_cleanup_queue(q);
}

static void pmem_release_disk(void *disk)
{
	del_gendisk(disk);
	put_disk(disk);
}

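/*
 * Set up the block device for a pmem namespace: map the namespace (with
 * struct page backing when a pfn info block is present or page mapping is
 * requested), configure the request queue and gendisk, and populate the
 * badblocks list from the region's poison records.  All resources are
 * devm-managed so teardown happens automatically on driver unbind.
 */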
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct vmem_altmap __altmap, *altmap = NULL;
	struct resource *res = &nsio->res;
	struct nd_pfn *nd_pfn = NULL;
	int nid = dev_to_node(dev);
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	struct resource pfn_res;
	struct request_queue *q;
	struct gendisk *disk;
	void *addr;

	/* while nsio_rw_bytes is active, parse a pfn info block if present */
	if (is_nd_pfn(dev)) {
		nd_pfn = to_nd_pfn(dev);
		altmap = nvdimm_setup_pfn(nd_pfn, &pfn_res, &__altmap);
		if (IS_ERR(altmap))
			return PTR_ERR(altmap);
	}

	/* we're attaching a block device, disable raw namespace access */
	devm_nsio_disable(dev, nsio);

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return -ENOMEM;

	dev_set_drvdata(dev, pmem);
	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	if (nvdimm_has_flush(nd_region) < 0)
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(&ndns->dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
	if (!q)
		return -ENOMEM;

	pmem->pfn_flags = PFN_DEV;
	if (is_nd_pfn(dev)) {
		addr = devm_memremap_pages(dev, &pfn_res, &q->q_usage_counter,
				altmap);
		pfn_sb = nd_pfn->pfn_sb;
		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
		pmem->pfn_pad = resource_size(res) - resource_size(&pfn_res);
		pmem->pfn_flags |= PFN_MAP;
		res = &pfn_res; /* for badblocks populate */
		res->start += pmem->data_offset;
	} else if (pmem_should_map_pages(dev)) {
		addr = devm_memremap_pages(dev, &nsio->res,
				&q->q_usage_counter, NULL);
		pmem->pfn_flags |= PFN_MAP;
	} else
		addr = devm_memremap(dev, pmem->phys_addr,
				pmem->size, ARCH_MEMREMAP_PMEM);

	/*
	 * At release time the queue must be dead before
	 * devm_memremap_pages is unwound
	 */
	if (devm_add_action_or_reset(dev, pmem_release_queue, q))
		return -ENOMEM;

	if (IS_ERR(addr))
		return PTR_ERR(addr);
	pmem->virt_addr = addr;

	blk_queue_write_cache(q, true, true);
	blk_queue_make_request(q, pmem_make_request);
	blk_queue_physical_block_size(q, PAGE_SIZE);
	blk_queue_max_hw_sectors(q, UINT_MAX);
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
	q->queuedata = pmem;

	disk = alloc_disk_node(0, nid);
	if (!disk)
		return -ENOMEM;

	disk->fops = &pmem_fops;
	disk->queue = q;
	disk->flags = GENHD_FL_EXT_DEVT;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ 512);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, res);
	disk->bb = &pmem->bb;
	device_add_disk(dev, disk);

	if (devm_add_action_or_reset(dev, pmem_release_disk, disk))
		return -ENOMEM;

	revalidate_disk(disk);

	return 0;
}

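/*
 * Probe entry point: hand the namespace to the btt or pfn/dax personality
 * when one is configured or a valid info block is found, otherwise attach
 * as a raw pmem disk.
 */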
static int nd_pmem_probe(struct device *dev)
{
	struct nd_namespace_common *ndns;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
		return -ENXIO;

	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);

	if (is_nd_pfn(dev))
		return pmem_attach_disk(dev, ndns);

	/* if we find a valid info-block we'll come back as that personality */
	if (nd_btt_probe(dev, ndns) == 0 || nd_pfn_probe(dev, ndns) == 0
			|| nd_dax_probe(dev, ndns) == 0)
		return -ENXIO;

	/* ...otherwise we're just a raw pmem device */
	return pmem_attach_disk(dev, ndns);
}

static int nd_pmem_remove(struct device *dev)
{
	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(to_nd_btt(dev));
	nvdimm_flush(to_nd_region(dev->parent));

	return 0;
}

static void nd_pmem_shutdown(struct device *dev)
{
	nvdimm_flush(to_nd_region(dev->parent));
}

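/*
 * On a poison-revalidation event, re-scan the region's poison records and
 * refresh this device's badblocks list, accounting for any btt/pfn offsets.
 */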
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);
	struct nd_region *nd_region = to_region(pmem);
	resource_size_t offset = 0, end_trunc = 0;
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct resource res;

	if (event != NVDIMM_REVALIDATE_POISON)
		return;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		ndns = nd_btt->ndns;
	} else if (is_nd_pfn(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);
		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

		ndns = nd_pfn->ndns;
		offset = pmem->data_offset + __le32_to_cpu(pfn_sb->start_pad);
		end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
	} else
		ndns = to_ndns(dev);

	nsio = to_nd_namespace_io(&ndns->dev);
	res.start = nsio->res.start + offset;
	res.end = nsio->res.end - end_trunc;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &res);
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.shutdown = nd_pmem_shutdown,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

static int __init pmem_init(void)
{
	return nd_driver_register(&nd_pmem_driver);
}
module_init(pmem_init);

static void pmem_exit(void)
{
	driver_unregister(&nd_pmem_driver.drv);
}
module_exit(pmem_exit);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");