/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/pmem.h>
#include <linux/nd.h>
#include "pfn.h"
#include "nd.h"

struct pmem_device {
	struct request_queue	*pmem_queue;
	struct gendisk		*pmem_disk;
	struct nd_namespace_common *ndns;

	/* One contiguous memory region per device */
	phys_addr_t		phys_addr;
	/* when non-zero this device is hosting a 'pfn' instance */
	phys_addr_t		data_offset;
	u64			pfn_flags;
	void __pmem		*virt_addr;
	size_t			size;
	struct badblocks	bb;
};

static int pmem_major;

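/*
 * Check whether the I/O range intersects the namespace's known-poison
 * list.  @len is in bytes while badblocks_check() operates on 512-byte
 * sectors, hence the len / 512 conversion.
 */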
static bool is_bad_pmem(struct badblocks *bb, sector_t sector, unsigned int len)
{
	if (bb->count) {
		sector_t first_bad;
		int num_bad;

		return !!badblocks_check(bb, sector, len / 512, &first_bad,
				&num_bad);
	}

	return false;
}

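/*
 * Request a media scrub of @len bytes at @offset and, for each whole
 * sector the bus reports as cleared, drop the matching entries from the
 * badblocks list.  The trailing invalidate_pmem() flushes any cached
 * (potentially poisoned) lines so later reads hit the scrubbed media.
 */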
static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
		unsigned int len)
{
	struct device *dev = disk_to_dev(pmem->pmem_disk);
	sector_t sector;
	long cleared;

	sector = (offset - pmem->data_offset) / 512;
	cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);

	if (cleared > 0 && cleared / 512) {
		dev_dbg(dev, "%s: %llx clear %ld sector%s\n",
				__func__, (unsigned long long) sector,
				cleared / 512, cleared / 512 > 1 ? "s" : "");
		badblocks_clear(&pmem->bb, sector, cleared / 512);
	}
	invalidate_pmem(pmem->virt_addr + offset, len);
}

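/*
 * Service a single segment of a bio.  Reads from known-poisoned sectors
 * fail with -EIO without touching the media; writes covering poisoned
 * sectors are followed by a poison-clear and then re-issued, presumably
 * because the first copy cannot be trusted to have reached the poisoned
 * sectors intact.
 */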
static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
			unsigned int len, unsigned int off, int rw,
			sector_t sector)
{
	int rc = 0;
	bool bad_pmem = false;
	void *mem = kmap_atomic(page);
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void __pmem *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		bad_pmem = true;

	if (rw == READ) {
		if (unlikely(bad_pmem))
			rc = -EIO;
		else {
			memcpy_from_pmem(mem + off, pmem_addr, len);
			flush_dcache_page(page);
		}
	} else {
		flush_dcache_page(page);
		memcpy_to_pmem(pmem_addr, mem + off, len);
		if (unlikely(bad_pmem)) {
			pmem_clear_poison(pmem, pmem_off, len);
			memcpy_to_pmem(pmem_addr, mem + off, len);
		}
	}

	kunmap_atomic(mem);
	return rc;
}

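/*
 * Bio-based request entry point: each segment is serviced synchronously
 * via pmem_do_bvec() and the first error short-circuits the loop.  A
 * single wmb_pmem() after the loop makes all of a write bio's data
 * durable before bio_endio() signals completion.
 */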
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
	int rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct block_device *bdev = bio->bi_bdev;
	struct pmem_device *pmem = bdev->bd_disk->private_data;

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset, bio_data_dir(bio),
				iter.bi_sector);
		if (rc) {
			bio->bi_error = rc;
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	if (bio_data_dir(bio))
		wmb_pmem();

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

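/*
 * ->rw_page() fast path for single-page I/O (e.g. swap).  Writes are
 * flushed to the persistence domain with wmb_pmem() before completion is
 * reported.
 */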
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, int rw)
{
	struct pmem_device *pmem = bdev->bd_disk->private_data;
	int rc;

	rc = pmem_do_bvec(pmem, page, PAGE_CACHE_SIZE, 0, rw, sector);
	if (rw & WRITE)
		wmb_pmem();

	/*
	 * The ->rw_page interface is subtle and tricky.  The core
	 * retries on any error, so we can only invoke page_endio() in
	 * the successful completion case.  Otherwise, we'll see crashes
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, rw & WRITE, 0);

	return rc;
}

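/*
 * DAX entry point: translate a sector into a kernel virtual address and a
 * pfn_t within the device's one contiguous mapping.  The return value,
 * the number of bytes remaining past @offset, bounds how far the caller
 * may use the translation.
 */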
static long pmem_direct_access(struct block_device *bdev, sector_t sector,
		      void __pmem **kaddr, pfn_t *pfn)
{
	struct pmem_device *pmem = bdev->bd_disk->private_data;
	resource_size_t offset = sector * 512 + pmem->data_offset;

	*kaddr = pmem->virt_addr + offset;
	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	return pmem->size - offset;
}

static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		pmem_rw_page,
	.direct_access =	pmem_direct_access,
	.revalidate_disk =	nvdimm_revalidate_disk,
};

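/*
 * Allocate the pmem_device and map the namespace.  When the region can
 * host a memmap (pmem_should_map_pages()), the range is mapped via
 * devm_memremap_pages() and flagged PFN_MAP so DAX callers get
 * page-backed pfns; otherwise a plain devm_memremap() is used and
 * pfn_flags stays PFN_DEV only.
 */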
static struct pmem_device *pmem_alloc(struct device *dev,
		struct resource *res, int id)
{
	struct pmem_device *pmem;
	struct request_queue *q;

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return ERR_PTR(-ENOMEM);

	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	if (!arch_has_wmb_pmem())
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (!devm_request_mem_region(dev, pmem->phys_addr, pmem->size,
			dev_name(dev))) {
		dev_warn(dev, "could not reserve region [0x%pa:0x%zx]\n",
				&pmem->phys_addr, pmem->size);
		return ERR_PTR(-EBUSY);
	}

	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
	if (!q)
		return ERR_PTR(-ENOMEM);

	pmem->pfn_flags = PFN_DEV;
	if (pmem_should_map_pages(dev)) {
		pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, res,
				&q->q_usage_counter, NULL);
		pmem->pfn_flags |= PFN_MAP;
	} else
		pmem->virt_addr = (void __pmem *) devm_memremap(dev,
				pmem->phys_addr, pmem->size,
				ARCH_MEMREMAP_PMEM);

	if (IS_ERR(pmem->virt_addr)) {
		blk_cleanup_queue(q);
		return (void __force *) pmem->virt_addr;
	}

	pmem->pmem_queue = q;
	return pmem;
}

static void pmem_detach_disk(struct pmem_device *pmem)
{
	if (!pmem->pmem_disk)
		return;

	del_gendisk(pmem->pmem_disk);
	put_disk(pmem->pmem_disk);
	blk_cleanup_queue(pmem->pmem_queue);
}

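/*
 * Publish the pmem_device as a gendisk: bio-based queue, PAGE_SIZE
 * physical blocks, no bounce buffering (the "disk" is ordinary memory),
 * and a badblocks list (re)initialized from the namespace's poison
 * ranges relative to data_offset.
 */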
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns, struct pmem_device *pmem)
{
	int nid = dev_to_node(dev);
	struct gendisk *disk;

	blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
	blk_queue_physical_block_size(pmem->pmem_queue, PAGE_SIZE);
	blk_queue_max_hw_sectors(pmem->pmem_queue, UINT_MAX);
	blk_queue_bounce_limit(pmem->pmem_queue, BLK_BOUNCE_ANY);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, pmem->pmem_queue);

	disk = alloc_disk_node(0, nid);
	if (!disk) {
		blk_cleanup_queue(pmem->pmem_queue);
		return -ENOMEM;
	}

	disk->major		= pmem_major;
	disk->first_minor	= 0;
	disk->fops		= &pmem_fops;
	disk->private_data	= pmem;
	disk->queue		= pmem->pmem_queue;
	disk->flags		= GENHD_FL_EXT_DEVT;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	disk->driverfs_dev = dev;
	set_capacity(disk, (pmem->size - pmem->data_offset) / 512);
	pmem->pmem_disk = disk;
	devm_exit_badblocks(dev, &pmem->bb);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_namespace_add_poison(ndns, &pmem->bb, pmem->data_offset);

	disk->bb = &pmem->bb;
	add_disk(disk);
	revalidate_disk(disk);

	return 0;
}

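/*
 * ->rw_bytes() callback used for stacked-device metadata (e.g. the BTT
 * info block).  The poison check widens the byte range to 512-byte
 * alignment because badblocks granularity is sectors, not bytes.
 */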
static int pmem_rw_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *buf, size_t size, int rw)
{
	struct pmem_device *pmem = dev_get_drvdata(ndns->claim);

	if (unlikely(offset + size > pmem->size)) {
		dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
		return -EFAULT;
	}

	if (rw == READ) {
		unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);

		if (unlikely(is_bad_pmem(&pmem->bb, offset / 512, sz_align)))
			return -EIO;
		memcpy_from_pmem(buf, pmem->virt_addr + offset, size);
	} else {
		memcpy_to_pmem(pmem->virt_addr + offset, buf, size);
		wmb_pmem();
	}

	return 0;
}

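/*
 * Validate an existing pfn info block, or write a fresh one when
 * nd_pfn_validate() reports -ENODEV (no info block present).  The layout
 * reserves 8K at the base of the namespace; PFN_MODE_PMEM additionally
 * sets aside 64 bytes per 4K page so the memmap itself can live in pmem
 * (see the in-line note about struct page sizing).
 */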
static int nd_pfn_init(struct nd_pfn *nd_pfn)
{
	struct nd_pfn_sb *pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL);
	struct pmem_device *pmem = dev_get_drvdata(&nd_pfn->dev);
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	struct nd_region *nd_region;
	unsigned long npfns;
	phys_addr_t offset;
	u64 checksum;
	int rc;

	if (!pfn_sb)
		return -ENOMEM;

	nd_pfn->pfn_sb = pfn_sb;
	rc = nd_pfn_validate(nd_pfn);
	if (rc == -ENODEV)
		/* no info block, do init */;
	else
		return rc;

	nd_region = to_nd_region(nd_pfn->dev.parent);
	if (nd_region->ro) {
		dev_info(&nd_pfn->dev,
				"%s is read-only, unable to init metadata\n",
				dev_name(&nd_region->dev));
		goto err;
	}

	memset(pfn_sb, 0, sizeof(*pfn_sb));
	npfns = (pmem->size - SZ_8K) / SZ_4K;
	/*
	 * Note, we use 64 here for the standard size of struct page,
	 * debugging options may cause it to be larger in which case the
	 * implementation will limit the pfns advertised through
	 * ->direct_access() to those that are included in the memmap.
	 */
	if (nd_pfn->mode == PFN_MODE_PMEM)
		offset = ALIGN(SZ_8K + 64 * npfns, nd_pfn->align);
	else if (nd_pfn->mode == PFN_MODE_RAM)
		offset = ALIGN(SZ_8K, nd_pfn->align);
	else
		goto err;

	npfns = (pmem->size - offset) / SZ_4K;
	pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
	pfn_sb->dataoff = cpu_to_le64(offset);
	pfn_sb->npfns = cpu_to_le64(npfns);
	memcpy(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN);
	memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
	memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
	pfn_sb->version_major = cpu_to_le16(1);
	checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
	pfn_sb->checksum = cpu_to_le64(checksum);

	rc = nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb));
	if (rc)
		goto err;

	return 0;
 err:
	nd_pfn->pfn_sb = NULL;
	kfree(pfn_sb);
	return -ENXIO;
}

static int nvdimm_namespace_detach_pfn(struct nd_namespace_common *ndns)
{
	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
	struct pmem_device *pmem;

	/* free pmem disk */
	pmem = dev_get_drvdata(&nd_pfn->dev);
	pmem_detach_disk(pmem);

	/* release nd_pfn resources */
	kfree(nd_pfn->pfn_sb);
	nd_pfn->pfn_sb = NULL;

	return 0;
}

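/*
 * Bring the namespace up in pfn mode: read back the info block, then swap
 * the linear devm_memremap() mapping for devm_memremap_pages() so the
 * range gets struct pages.  In PFN_MODE_PMEM the vmem_altmap steers the
 * memmap allocation into the namespace's reserved area, keeping the
 * first 8K back for the info block.
 */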
static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
	struct device *dev = &nd_pfn->dev;
	struct nd_region *nd_region;
	struct vmem_altmap *altmap;
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	struct request_queue *q;
	phys_addr_t offset;
	int rc;
	struct vmem_altmap __altmap = {
		.base_pfn = __phys_to_pfn(nsio->res.start),
		.reserve = __phys_to_pfn(SZ_8K),
	};

	if (!nd_pfn->uuid || !nd_pfn->ndns)
		return -ENODEV;

	nd_region = to_nd_region(dev->parent);
	rc = nd_pfn_init(nd_pfn);
	if (rc)
		return rc;

	pfn_sb = nd_pfn->pfn_sb;
	offset = le64_to_cpu(pfn_sb->dataoff);
	nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
	if (nd_pfn->mode == PFN_MODE_RAM) {
		if (offset < SZ_8K)
			return -EINVAL;
		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
		altmap = NULL;
	} else if (nd_pfn->mode == PFN_MODE_PMEM) {
		nd_pfn->npfns = (resource_size(&nsio->res) - offset)
			/ PAGE_SIZE;
		if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
			dev_info(&nd_pfn->dev,
					"number of pfns truncated from %lld to %ld\n",
					le64_to_cpu(nd_pfn->pfn_sb->npfns),
					nd_pfn->npfns);
		altmap = &__altmap;
		altmap->free = __phys_to_pfn(offset - SZ_8K);
		altmap->alloc = 0;
	} else {
		rc = -ENXIO;
		goto err;
	}

	/* establish pfn range for lookup, and switch to direct map */
	pmem = dev_get_drvdata(dev);
	q = pmem->pmem_queue;
	devm_memunmap(dev, (void __force *) pmem->virt_addr);
	pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, &nsio->res,
			&q->q_usage_counter, altmap);
	pmem->pfn_flags |= PFN_MAP;
	if (IS_ERR(pmem->virt_addr)) {
		rc = PTR_ERR(pmem->virt_addr);
		goto err;
	}

	/* attach pmem disk in "pfn-mode" */
	pmem->data_offset = offset;
	rc = pmem_attach_disk(dev, ndns, pmem);
	if (rc)
		goto err;

	return rc;
 err:
	nvdimm_namespace_detach_pfn(ndns);
	return rc;
}

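/*
 * Driver probe: resolve the namespace, allocate and map the device, then
 * dispatch on personality.  A BTT instance brings its own request_queue,
 * so the one from pmem_alloc() is dropped first; if BTT or pfn metadata
 * is detected instead, probe returns -ENXIO and the device re-probes as
 * btt-pmem or pfn-pmem respectively.
 */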
static int nd_pmem_probe(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct pmem_device *pmem;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	nsio = to_nd_namespace_io(&ndns->dev);
	pmem = pmem_alloc(dev, &nsio->res, nd_region->id);
	if (IS_ERR(pmem))
		return PTR_ERR(pmem);

	pmem->ndns = ndns;
	dev_set_drvdata(dev, pmem);
	ndns->rw_bytes = pmem_rw_bytes;
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_namespace_add_poison(ndns, &pmem->bb, 0);

	if (is_nd_btt(dev)) {
		/* btt allocates its own request_queue */
		blk_cleanup_queue(pmem->pmem_queue);
		pmem->pmem_queue = NULL;
		return nvdimm_namespace_attach_btt(ndns);
	}

	if (is_nd_pfn(dev))
		return nvdimm_namespace_attach_pfn(ndns);

	if (nd_btt_probe(ndns, pmem) == 0 || nd_pfn_probe(ndns, pmem) == 0) {
		/*
		 * We'll come back as either btt-pmem, or pfn-pmem, so
		 * drop the queue allocation for now.
		 */
		blk_cleanup_queue(pmem->pmem_queue);
		return -ENXIO;
	}

	return pmem_attach_disk(dev, ndns, pmem);
}

static int nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(pmem->ndns);
	else if (is_nd_pfn(dev))
		nvdimm_namespace_detach_pfn(pmem->ndns);
	else
		pmem_detach_disk(pmem);

	return 0;
}

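/*
 * Bus notification hook: on NVDIMM_REVALIDATE_POISON, refresh the
 * badblocks list from the namespace's current poison ranges.  BTT
 * consumes the namespace from offset 0, while raw/pfn pmem must account
 * for data_offset.
 */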
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);
	struct nd_namespace_common *ndns = pmem->ndns;

	if (event != NVDIMM_REVALIDATE_POISON)
		return;

	if (is_nd_btt(dev))
		nvdimm_namespace_add_poison(ndns, &pmem->bb, 0);
	else
		nvdimm_namespace_add_poison(ndns, &pmem->bb, pmem->data_offset);
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

static int __init pmem_init(void)
{
	int error;

	pmem_major = register_blkdev(0, "pmem");
	if (pmem_major < 0)
		return pmem_major;

	error = nd_driver_register(&nd_pmem_driver);
	if (error) {
		unregister_blkdev(pmem_major, "pmem");
		return error;
	}

	return 0;
}
module_init(pmem_init);

static void pmem_exit(void)
{
	driver_unregister(&nd_pmem_driver.drv);
	unregister_blkdev(pmem_major, "pmem");
}
module_exit(pmem_exit);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");