/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/pmem.h>
#include <linux/nd.h>
#include "pfn.h"
#include "nd.h"

struct pmem_device {
	struct request_queue *pmem_queue;
	struct gendisk *pmem_disk;
	struct nd_namespace_common *ndns;

	/* One contiguous memory region per device */
	phys_addr_t phys_addr;
	/* when non-zero this device is hosting a 'pfn' instance */
	phys_addr_t data_offset;
	u64 pfn_flags;
	void __pmem *virt_addr;
	size_t size;
	struct badblocks bb;
};

static int pmem_major;

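/*
 * Consult the badblocks list for an I/O spanning 'len' bytes starting
 * at 'sector'.  Lengths arrive in bytes and are converted to 512-byte
 * sectors to match the granularity of the badblocks tracking.
 */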
static bool is_bad_pmem(struct badblocks *bb, sector_t sector, unsigned int len)
{
	if (bb->count) {
		sector_t first_bad;
		int num_bad;

		return !!badblocks_check(bb, sector, len / 512, &first_bad,
				&num_bad);
	}

	return false;
}

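/*
 * Copy one bio_vec worth of data between a page and the persistent
 * memory range.  Reads that intersect a known bad block fail with
 * -EIO before touching poisoned media.  Writes go through
 * memcpy_to_pmem() and rely on the caller to issue wmb_pmem() for
 * durability.  All paths funnel through the single kunmap_atomic()
 * exit so the atomic kmap is never leaked.
 */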
static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
			unsigned int len, unsigned int off, int rw,
			sector_t sector)
{
	int rc = 0;
	void *mem = kmap_atomic(page);
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void __pmem *pmem_addr = pmem->virt_addr + pmem_off;

	if (rw == READ) {
		if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
			rc = -EIO;
		else {
			memcpy_from_pmem(mem + off, pmem_addr, len);
			flush_dcache_page(page);
		}
	} else {
		flush_dcache_page(page);
		memcpy_to_pmem(pmem_addr, mem + off, len);
	}

	kunmap_atomic(mem);
	return rc;
}

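/*
 * bio-based I/O entry point.  pmem completes synchronously, segment by
 * segment, so there is no queueing: the first failing segment sets
 * bio->bi_error and stops the loop, and the result is reported via
 * bio_endio().  A single wmb_pmem() after the loop makes all of the
 * bio's writes durable at once.
 */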
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
	int rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct block_device *bdev = bio->bi_bdev;
	struct pmem_device *pmem = bdev->bd_disk->private_data;

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset, bio_data_dir(bio),
				iter.bi_sector);
		if (rc) {
			bio->bi_error = rc;
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	if (bio_data_dir(bio))
		wmb_pmem();

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

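/*
 * Synchronous single-page entry point for the ->rw_page() interface.
 * Errors are returned to the block core rather than completed here;
 * see the comment before page_endio() below.
 */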
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, int rw)
{
	struct pmem_device *pmem = bdev->bd_disk->private_data;
	int rc;

	rc = pmem_do_bvec(pmem, page, PAGE_CACHE_SIZE, 0, rw, sector);
	if (rw & WRITE)
		wmb_pmem();

	/*
	 * The ->rw_page interface is subtle and tricky.  The core
	 * retries on any error, so we can only invoke page_endio() in
	 * the successful completion case.  Otherwise, we'll see crashes
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, rw & WRITE, 0);

	return rc;
}

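/*
 * DAX entry point: translate a sector into a kernel virtual address
 * and a pfn_t within the device's one contiguous range.  The return
 * value is the number of bytes usable at *kaddr, so callers can
 * operate on more than a single page when the remaining capacity
 * allows it.
 */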
static long pmem_direct_access(struct block_device *bdev, sector_t sector,
		      void __pmem **kaddr, pfn_t *pfn)
{
	struct pmem_device *pmem = bdev->bd_disk->private_data;
	resource_size_t offset = sector * 512 + pmem->data_offset;

	*kaddr = pmem->virt_addr + offset;
	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	return pmem->size - offset;
}

static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		pmem_rw_page,
	.direct_access =	pmem_direct_access,
	.revalidate_disk =	nvdimm_revalidate_disk,
};

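/*
 * Reserve and map the namespace's physical range.  When struct page
 * coverage is required (pmem_should_map_pages()), the range is mapped
 * via devm_memremap_pages() and the pfn_flags gain PFN_MAP so DAX
 * consumers can get struct pages from the returned pfn_t; otherwise a
 * plain devm_memremap() with ARCH_MEMREMAP_PMEM suffices and the pfns
 * remain PFN_DEV-only.
 */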
static struct pmem_device *pmem_alloc(struct device *dev,
		struct resource *res, int id)
{
	struct pmem_device *pmem;
	struct request_queue *q;

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return ERR_PTR(-ENOMEM);

	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	if (!arch_has_wmb_pmem())
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (!devm_request_mem_region(dev, pmem->phys_addr, pmem->size,
			dev_name(dev))) {
		dev_warn(dev, "could not reserve region [0x%pa:0x%zx]\n",
				&pmem->phys_addr, pmem->size);
		return ERR_PTR(-EBUSY);
	}

	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
	if (!q)
		return ERR_PTR(-ENOMEM);

	pmem->pfn_flags = PFN_DEV;
	if (pmem_should_map_pages(dev)) {
		pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, res,
				&q->q_usage_counter, NULL);
		pmem->pfn_flags |= PFN_MAP;
	} else
		pmem->virt_addr = (void __pmem *) devm_memremap(dev,
				pmem->phys_addr, pmem->size,
				ARCH_MEMREMAP_PMEM);

	if (IS_ERR(pmem->virt_addr)) {
		blk_cleanup_queue(q);
		return (void __force *) pmem->virt_addr;
	}

	pmem->pmem_queue = q;
	return pmem;
}

static void pmem_detach_disk(struct pmem_device *pmem)
{
	if (!pmem->pmem_disk)
		return;

	del_gendisk(pmem->pmem_disk);
	put_disk(pmem->pmem_disk);
	blk_cleanup_queue(pmem->pmem_queue);
}

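/*
 * Wire up the gendisk on top of the already-allocated request_queue.
 * The queue advertises PAGE_SIZE as the physical block size, and the
 * usable capacity excludes any pfn metadata reserved before
 * 'data_offset'.  The badblocks list is re-initialized here so that
 * the poison ranges are expressed relative to the data offset.
 */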
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns, struct pmem_device *pmem)
{
	int nid = dev_to_node(dev);
	struct gendisk *disk;

	blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
	blk_queue_physical_block_size(pmem->pmem_queue, PAGE_SIZE);
	blk_queue_max_hw_sectors(pmem->pmem_queue, UINT_MAX);
	blk_queue_bounce_limit(pmem->pmem_queue, BLK_BOUNCE_ANY);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, pmem->pmem_queue);

	disk = alloc_disk_node(0, nid);
	if (!disk) {
		blk_cleanup_queue(pmem->pmem_queue);
		return -ENOMEM;
	}

	disk->major = pmem_major;
	disk->first_minor = 0;
	disk->fops = &pmem_fops;
	disk->private_data = pmem;
	disk->queue = pmem->pmem_queue;
	disk->flags = GENHD_FL_EXT_DEVT;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	disk->driverfs_dev = dev;
	set_capacity(disk, (pmem->size - pmem->data_offset) / 512);
	pmem->pmem_disk = disk;
	devm_exit_badblocks(dev, &pmem->bb);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_namespace_add_poison(ndns, &pmem->bb, pmem->data_offset);

	disk->bb = &pmem->bb;
	add_disk(disk);
	revalidate_disk(disk);

	return 0;
}

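/*
 * ->rw_bytes() callback used by stacked personalities (BTT metadata,
 * pfn info blocks).  Reads round the request out to whole 512-byte
 * sectors before the badblocks check, since media errors are tracked
 * at sector granularity.
 */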
static int pmem_rw_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *buf, size_t size, int rw)
{
	struct pmem_device *pmem = dev_get_drvdata(ndns->claim);

	if (unlikely(offset + size > pmem->size)) {
		dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
		return -EFAULT;
	}

	if (rw == READ) {
		unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);

		if (unlikely(is_bad_pmem(&pmem->bb, offset / 512, sz_align)))
			return -EIO;
		memcpy_from_pmem(buf, pmem->virt_addr + offset, size);
	} else {
		memcpy_to_pmem(pmem->virt_addr + offset, buf, size);
		wmb_pmem();
	}

	return 0;
}

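/*
 * Initialize (or validate) the pfn info block that reserves namespace
 * capacity for struct page metadata.  The layout keeps the first SZ_8K
 * in reserve (the info block itself is written at offset SZ_4K below)
 * and, in PFN_MODE_PMEM, budgets 64 bytes of memmap per 4K page; see
 * the comment below about debug configs that inflate struct page.
 */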
static int nd_pfn_init(struct nd_pfn *nd_pfn)
{
	struct nd_pfn_sb *pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL);
	struct pmem_device *pmem = dev_get_drvdata(&nd_pfn->dev);
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	struct nd_region *nd_region;
	unsigned long npfns;
	phys_addr_t offset;
	u64 checksum;
	int rc;

	if (!pfn_sb)
		return -ENOMEM;

	nd_pfn->pfn_sb = pfn_sb;
	rc = nd_pfn_validate(nd_pfn);
	if (rc == -ENODEV)
		/* no info block, do init */;
	else
		return rc;

	nd_region = to_nd_region(nd_pfn->dev.parent);
	if (nd_region->ro) {
		dev_info(&nd_pfn->dev,
				"%s is read-only, unable to init metadata\n",
				dev_name(&nd_region->dev));
		goto err;
	}

	memset(pfn_sb, 0, sizeof(*pfn_sb));
	npfns = (pmem->size - SZ_8K) / SZ_4K;
	/*
	 * Note, we use 64 here for the standard size of struct page,
	 * debugging options may cause it to be larger in which case the
	 * implementation will limit the pfns advertised through
	 * ->direct_access() to those that are included in the memmap.
	 */
	if (nd_pfn->mode == PFN_MODE_PMEM)
		offset = ALIGN(SZ_8K + 64 * npfns, nd_pfn->align);
	else if (nd_pfn->mode == PFN_MODE_RAM)
		offset = ALIGN(SZ_8K, nd_pfn->align);
	else
		goto err;

	npfns = (pmem->size - offset) / SZ_4K;
	pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
	pfn_sb->dataoff = cpu_to_le64(offset);
	pfn_sb->npfns = cpu_to_le64(npfns);
	memcpy(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN);
	memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
	memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
	pfn_sb->version_major = cpu_to_le16(1);
	checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
	pfn_sb->checksum = cpu_to_le64(checksum);

	rc = nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb));
	if (rc)
		goto err;

	return 0;
 err:
	nd_pfn->pfn_sb = NULL;
	kfree(pfn_sb);
	return -ENXIO;
}

static int nvdimm_namespace_detach_pfn(struct nd_namespace_common *ndns)
{
	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
	struct pmem_device *pmem;

	/* free pmem disk */
	pmem = dev_get_drvdata(&nd_pfn->dev);
	pmem_detach_disk(pmem);

	/* release nd_pfn resources */
	kfree(nd_pfn->pfn_sb);
	nd_pfn->pfn_sb = NULL;

	return 0;
}

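/*
 * Attach the pmem device in pfn-mode.  In PFN_MODE_PMEM the memmap is
 * carved out of the namespace itself via a vmem_altmap: the pfns
 * between the 8K reserve and 'dataoff' are handed to
 * devm_memremap_pages() as allocation space for struct pages, so the
 * flat memremap established at pmem_alloc() time is torn down and
 * replaced with a page-backed mapping.
 */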
static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
	struct device *dev = &nd_pfn->dev;
	struct nd_region *nd_region;
	struct vmem_altmap *altmap;
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	struct request_queue *q;
	phys_addr_t offset;
	int rc;
	struct vmem_altmap __altmap = {
		.base_pfn = __phys_to_pfn(nsio->res.start),
		.reserve = __phys_to_pfn(SZ_8K),
	};

	if (!nd_pfn->uuid || !nd_pfn->ndns)
		return -ENODEV;

	nd_region = to_nd_region(dev->parent);
	rc = nd_pfn_init(nd_pfn);
	if (rc)
		return rc;

	pfn_sb = nd_pfn->pfn_sb;
	offset = le64_to_cpu(pfn_sb->dataoff);
	nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
	if (nd_pfn->mode == PFN_MODE_RAM) {
		if (offset < SZ_8K)
			return -EINVAL;
		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
		altmap = NULL;
	} else if (nd_pfn->mode == PFN_MODE_PMEM) {
		nd_pfn->npfns = (resource_size(&nsio->res) - offset)
			/ PAGE_SIZE;
		if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
			dev_info(&nd_pfn->dev,
					"number of pfns truncated from %lld to %ld\n",
					le64_to_cpu(nd_pfn->pfn_sb->npfns),
					nd_pfn->npfns);
		altmap = &__altmap;
		altmap->free = __phys_to_pfn(offset - SZ_8K);
		altmap->alloc = 0;
	} else {
		rc = -ENXIO;
		goto err;
	}

	/* establish pfn range for lookup, and switch to direct map */
	pmem = dev_get_drvdata(dev);
	q = pmem->pmem_queue;
	devm_memunmap(dev, (void __force *) pmem->virt_addr);
	pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, &nsio->res,
			&q->q_usage_counter, altmap);
	pmem->pfn_flags |= PFN_MAP;
	if (IS_ERR(pmem->virt_addr)) {
		rc = PTR_ERR(pmem->virt_addr);
		goto err;
	}

	/* attach pmem disk in "pfn-mode" */
	pmem->data_offset = offset;
	rc = pmem_attach_disk(dev, ndns, pmem);
	if (rc)
		goto err;

	return rc;
 err:
	nvdimm_namespace_detach_pfn(ndns);
	return rc;
}

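/*
 * Driver probe: raw pmem namespaces attach a disk directly, while
 * namespaces claimed by a btt or pfn instance attach via that
 * personality.  nd_btt_probe()/nd_pfn_probe() returning 0 means a
 * claim was just registered, so this probe bails with -ENXIO and the
 * device comes back as btt-pmem or pfn-pmem.
 */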
static int nd_pmem_probe(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct pmem_device *pmem;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	nsio = to_nd_namespace_io(&ndns->dev);
	pmem = pmem_alloc(dev, &nsio->res, nd_region->id);
	if (IS_ERR(pmem))
		return PTR_ERR(pmem);

	pmem->ndns = ndns;
	dev_set_drvdata(dev, pmem);
	ndns->rw_bytes = pmem_rw_bytes;
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_namespace_add_poison(ndns, &pmem->bb, 0);

	if (is_nd_btt(dev)) {
		/* btt allocates its own request_queue */
		blk_cleanup_queue(pmem->pmem_queue);
		pmem->pmem_queue = NULL;
		return nvdimm_namespace_attach_btt(ndns);
	}

	if (is_nd_pfn(dev))
		return nvdimm_namespace_attach_pfn(ndns);

	if (nd_btt_probe(ndns, pmem) == 0 || nd_pfn_probe(ndns, pmem) == 0) {
		/*
		 * We'll come back as either btt-pmem, or pfn-pmem, so
		 * drop the queue allocation for now.
		 */
		blk_cleanup_queue(pmem->pmem_queue);
		return -ENXIO;
	}

	return pmem_attach_disk(dev, ndns, pmem);
}

static int nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(pmem->ndns);
	else if (is_nd_pfn(dev))
		nvdimm_namespace_detach_pfn(pmem->ndns);
	else
		pmem_detach_disk(pmem);

	return 0;
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

static int __init pmem_init(void)
{
	int error;

	pmem_major = register_blkdev(0, "pmem");
	if (pmem_major < 0)
		return pmem_major;

	error = nd_driver_register(&nd_pmem_driver);
	if (error) {
		unregister_blkdev(pmem_major, "pmem");
		return error;
	}

	return 0;
}
module_init(pmem_init);

static void pmem_exit(void)
{
	driver_unregister(&nd_pmem_driver.drv);
	unregister_blkdev(pmem_major, "pmem");
}
module_exit(pmem_exit);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");