/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/pmem.h>
#include <linux/nd.h>
#include "pfn.h"
#include "nd.h"

struct pmem_device {
	struct request_queue	*pmem_queue;
	struct gendisk		*pmem_disk;
	struct nd_namespace_common *ndns;

	/* One contiguous memory region per device */
	phys_addr_t		phys_addr;
	/* when non-zero this device is hosting a 'pfn' instance */
	phys_addr_t		data_offset;
	u64			pfn_flags;
	void __pmem		*virt_addr;
	/* immutable base size of the namespace */
	size_t			size;
	/* trim size when namespace capacity has been section aligned */
	u32			pfn_pad;
	struct badblocks	bb;
};

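/*
 * Check whether any part of an I/O range intersects the badblocks list.
 * @sector is a 512-byte sector and @len is in bytes, so @len is
 * converted to sectors for badblocks_check(); the first_bad / num_bad
 * results are discarded, only the boolean answer matters here.
 */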
static bool is_bad_pmem(struct badblocks *bb, sector_t sector, unsigned int len)
{
	if (bb->count) {
		sector_t first_bad;
		int num_bad;

		return !!badblocks_check(bb, sector, len / 512, &first_bad,
				&num_bad);
	}

	return false;
}

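/*
 * Ask the bus to clear poison in the given media range, drop any
 * now-clean sectors from the badblocks list, and invalidate the stale
 * cached view of the mapping.  @offset is relative to the virtual
 * mapping, so data_offset is subtracted to recover the disk-relative
 * sector for badblocks accounting.
 */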
static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
		unsigned int len)
{
	struct device *dev = disk_to_dev(pmem->pmem_disk);
	sector_t sector;
	long cleared;

	sector = (offset - pmem->data_offset) / 512;
	cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);

	if (cleared > 0 && cleared / 512) {
		dev_dbg(dev, "%s: %llx clear %ld sector%s\n",
				__func__, (unsigned long long) sector,
				cleared / 512, cleared / 512 > 1 ? "s" : "");
		badblocks_clear(&pmem->bb, sector, cleared / 512);
	}
	invalidate_pmem(pmem->virt_addr + offset, len);
}

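/*
 * Copy one segment between @page and the persistent memory mapping.
 * Reads of known-poisoned ranges fail with -EIO instead of consuming
 * the poison; writes to a poisoned range are performed, the poison is
 * cleared, and the data is written again in case the clear operation
 * left the range indeterminate (see the comment in the write path).
 * Callers issue wmb_pmem() once after all writes to make them durable.
 */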
static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
			unsigned int len, unsigned int off, int rw,
			sector_t sector)
{
	int rc = 0;
	bool bad_pmem = false;
	void *mem = kmap_atomic(page);
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void __pmem *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		bad_pmem = true;

	if (rw == READ) {
		if (unlikely(bad_pmem))
			rc = -EIO;
		else {
			rc = memcpy_from_pmem(mem + off, pmem_addr, len);
			flush_dcache_page(page);
		}
	} else {
		/*
		 * Note that we write the data both before and after
		 * clearing poison.  The write before clear poison
		 * handles situations where the latest written data is
		 * preserved and the clear poison operation simply marks
		 * the address range as valid without changing the data.
		 * In this case application software can assume that an
		 * interrupted write will either return the new good
		 * data or an error.
		 *
		 * However, if pmem_clear_poison() leaves the data in an
		 * indeterminate state we need to perform the write
		 * after clear poison.
		 */
		flush_dcache_page(page);
		memcpy_to_pmem(pmem_addr, mem + off, len);
		if (unlikely(bad_pmem)) {
			pmem_clear_poison(pmem, pmem_off, len);
			memcpy_to_pmem(pmem_addr, mem + off, len);
		}
	}

	kunmap_atomic(mem);
	return rc;
}

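/*
 * bio handler for the whole device.  Segments are processed
 * synchronously; the first failing segment sets bio->bi_error and
 * aborts the walk.  A single wmb_pmem() after the loop flushes every
 * write in the bio at once.
 */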
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
	int rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct block_device *bdev = bio->bi_bdev;
	struct pmem_device *pmem = bdev->bd_disk->private_data;

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset, bio_data_dir(bio),
				iter.bi_sector);
		if (rc) {
			bio->bi_error = rc;
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	if (bio_data_dir(bio))
		wmb_pmem();

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

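/*
 * ->rw_page() path: a single-page I/O with no bio to batch behind, so
 * the write barrier is issued here rather than deferred.
 */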
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		struct page *page, int rw)
{
	struct pmem_device *pmem = bdev->bd_disk->private_data;
	int rc;

	rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, rw, sector);
	if (rw & WRITE)
		wmb_pmem();

	/*
	 * The ->rw_page interface is subtle and tricky.  The core
	 * retries on any error, so we can only invoke page_endio() in
	 * the successful completion case.  Otherwise, we'll see crashes
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, rw & WRITE, 0);

	return rc;
}

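/*
 * DAX entry point: translate a sector into a kernel virtual address
 * and a pfn_t for direct, unbuffered access to the media.  The return
 * value is the number of bytes addressable from @sector to the end of
 * the device, net of any pfn_pad trim.
 */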
static long pmem_direct_access(struct block_device *bdev, sector_t sector,
		void __pmem **kaddr, pfn_t *pfn)
{
	struct pmem_device *pmem = bdev->bd_disk->private_data;
	resource_size_t offset = sector * 512 + pmem->data_offset;

	*kaddr = pmem->virt_addr + offset;
	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	return pmem->size - pmem->pfn_pad - offset;
}

static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		pmem_rw_page,
	.direct_access =	pmem_direct_access,
	.revalidate_disk =	nvdimm_revalidate_disk,
};

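/*
 * Allocate the pmem_device and map the namespace.  Namespaces
 * configured for page-backed access are mapped with
 * devm_memremap_pages() and gain struct page coverage (PFN_MAP);
 * otherwise the range is mapped with devm_memremap() and only offers
 * PFN_DEV semantics.
 */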
static struct pmem_device *pmem_alloc(struct device *dev,
		struct resource *res, int id)
{
	struct pmem_device *pmem;
	struct request_queue *q;

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return ERR_PTR(-ENOMEM);

	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	if (!arch_has_wmb_pmem())
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (!devm_request_mem_region(dev, pmem->phys_addr, pmem->size,
			dev_name(dev))) {
		dev_warn(dev, "could not reserve region [0x%pa:0x%zx]\n",
				&pmem->phys_addr, pmem->size);
		return ERR_PTR(-EBUSY);
	}

	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
	if (!q)
		return ERR_PTR(-ENOMEM);

	pmem->pfn_flags = PFN_DEV;
	if (pmem_should_map_pages(dev)) {
		pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, res,
				&q->q_usage_counter, NULL);
		pmem->pfn_flags |= PFN_MAP;
	} else
		pmem->virt_addr = (void __pmem *) devm_memremap(dev,
				pmem->phys_addr, pmem->size,
				ARCH_MEMREMAP_PMEM);

	if (IS_ERR(pmem->virt_addr)) {
		blk_cleanup_queue(q);
		return (void __force *) pmem->virt_addr;
	}

	pmem->pmem_queue = q;
	return pmem;
}

static void pmem_detach_disk(struct pmem_device *pmem)
{
	if (!pmem->pmem_disk)
		return;

	del_gendisk(pmem->pmem_disk);
	put_disk(pmem->pmem_disk);
	blk_cleanup_queue(pmem->pmem_queue);
}

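/*
 * Publish the namespace as a gendisk.  The badblocks list is
 * re-initialized here because a 'pfn' instance shifts the LBA space by
 * data_offset and trims it by pfn_pad, so the list must be repopulated
 * against the adjusted resource.
 */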
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns, struct pmem_device *pmem)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	int nid = dev_to_node(dev);
	struct resource bb_res;
	struct gendisk *disk;

	blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
	blk_queue_physical_block_size(pmem->pmem_queue, PAGE_SIZE);
	blk_queue_max_hw_sectors(pmem->pmem_queue, UINT_MAX);
	blk_queue_bounce_limit(pmem->pmem_queue, BLK_BOUNCE_ANY);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, pmem->pmem_queue);

	disk = alloc_disk_node(0, nid);
	if (!disk) {
		blk_cleanup_queue(pmem->pmem_queue);
		return -ENOMEM;
	}

	disk->fops = &pmem_fops;
	disk->private_data = pmem;
	disk->queue = pmem->pmem_queue;
	disk->flags = GENHD_FL_EXT_DEVT;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	disk->driverfs_dev = dev;
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ 512);
	pmem->pmem_disk = disk;
	devm_exit_badblocks(dev, &pmem->bb);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	bb_res.start = nsio->res.start + pmem->data_offset;
	bb_res.end = nsio->res.end;
	if (is_nd_pfn(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);
		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

		bb_res.start += __le32_to_cpu(pfn_sb->start_pad);
		bb_res.end -= __le32_to_cpu(pfn_sb->end_trunc);
	}
	nvdimm_badblocks_populate(to_nd_region(dev->parent), &pmem->bb,
			&bb_res);
	disk->bb = &pmem->bb;
	add_disk(disk);
	revalidate_disk(disk);

	return 0;
}

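/*
 * ->rw_bytes() backend for byte-granularity metadata I/O by claiming
 * personalities such as the BTT.  Reads are checked against the
 * badblocks list after rounding the range out to whole 512-byte
 * sectors, since media errors are tracked at sector granularity.
 */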
static int pmem_rw_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *buf, size_t size, int rw)
{
	struct pmem_device *pmem = dev_get_drvdata(ndns->claim);

	if (unlikely(offset + size > pmem->size)) {
		dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
		return -EFAULT;
	}

	if (rw == READ) {
		unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);

		if (unlikely(is_bad_pmem(&pmem->bb, offset / 512, sz_align)))
			return -EIO;
		return memcpy_from_pmem(buf, pmem->virt_addr + offset, size);
	} else {
		memcpy_to_pmem(pmem->virt_addr + offset, buf, size);
		wmb_pmem();
	}

	return 0;
}

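/*
 * First-time initialization of a 'pfn' instance: trim the namespace to
 * section boundaries, reserve SZ_8K at the base plus, in PFN_MODE_PMEM,
 * a memmap array of 64 bytes per 4K page, and persist the resulting
 * geometry in an info block at offset SZ_4K.  As an illustrative
 * example, a 16GiB namespace holds ~4M pages, so its memmap consumes
 * roughly 256MiB and dataoff lands just past that, rounded up to
 * nd_pfn->align.
 */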
static int nd_pfn_init(struct nd_pfn *nd_pfn)
{
	struct nd_pfn_sb *pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL);
	struct pmem_device *pmem = dev_get_drvdata(&nd_pfn->dev);
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	u32 start_pad = 0, end_trunc = 0;
	resource_size_t start, size;
	struct nd_namespace_io *nsio;
	struct nd_region *nd_region;
	unsigned long npfns;
	phys_addr_t offset;
	u64 checksum;
	int rc;

	if (!pfn_sb)
		return -ENOMEM;

	nd_pfn->pfn_sb = pfn_sb;
	rc = nd_pfn_validate(nd_pfn);
	if (rc == -ENODEV)
		/* no info block, do init */;
	else
		return rc;

	nd_region = to_nd_region(nd_pfn->dev.parent);
	if (nd_region->ro) {
		dev_info(&nd_pfn->dev,
				"%s is read-only, unable to init metadata\n",
				dev_name(&nd_region->dev));
		goto err;
	}

	memset(pfn_sb, 0, sizeof(*pfn_sb));

	/*
	 * Check if pmem collides with 'System RAM' when section aligned and
	 * trim it accordingly
	 */
	nsio = to_nd_namespace_io(&ndns->dev);
	start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start);
	size = resource_size(&nsio->res);
	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
				IORES_DESC_NONE) == REGION_MIXED) {
		start = nsio->res.start;
		start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
	}

	start = nsio->res.start;
	size = PHYS_SECTION_ALIGN_UP(start + size) - start;
	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
				IORES_DESC_NONE) == REGION_MIXED) {
		size = resource_size(&nsio->res);
		end_trunc = start + size - PHYS_SECTION_ALIGN_DOWN(start + size);
	}

	if (start_pad + end_trunc)
		dev_info(&nd_pfn->dev, "%s section collision, truncate %d bytes\n",
				dev_name(&ndns->dev), start_pad + end_trunc);

	/*
	 * Note, we use 64 here for the standard size of struct page,
	 * debugging options may cause it to be larger in which case the
	 * implementation will limit the pfns advertised through
	 * ->direct_access() to those that are included in the memmap.
	 */
	start += start_pad;
	npfns = (pmem->size - start_pad - end_trunc - SZ_8K) / SZ_4K;
	if (nd_pfn->mode == PFN_MODE_PMEM) {
		unsigned long memmap_size;

		/*
		 * vmemmap_populate_hugepages() allocates the memmap array in
		 * PMD_SIZE chunks.
		 */
		memmap_size = ALIGN(64 * npfns, PMD_SIZE);
		offset = ALIGN(start + SZ_8K + memmap_size, nd_pfn->align)
			- start;
	} else if (nd_pfn->mode == PFN_MODE_RAM)
		offset = ALIGN(start + SZ_8K, nd_pfn->align) - start;
	else
		goto err;

	if (offset + start_pad + end_trunc >= pmem->size) {
		dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
				dev_name(&ndns->dev));
		goto err;
	}

	npfns = (pmem->size - offset - start_pad - end_trunc) / SZ_4K;
	pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
	pfn_sb->dataoff = cpu_to_le64(offset);
	pfn_sb->npfns = cpu_to_le64(npfns);
	memcpy(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN);
	memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
	memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
	pfn_sb->version_major = cpu_to_le16(1);
	pfn_sb->version_minor = cpu_to_le16(1);
	pfn_sb->start_pad = cpu_to_le32(start_pad);
	pfn_sb->end_trunc = cpu_to_le32(end_trunc);
	checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
	pfn_sb->checksum = cpu_to_le64(checksum);

	rc = nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb));
	if (rc)
		goto err;

	return 0;
 err:
	nd_pfn->pfn_sb = NULL;
	kfree(pfn_sb);
	return -ENXIO;
}

static int nvdimm_namespace_detach_pfn(struct nd_namespace_common *ndns)
{
	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
	struct pmem_device *pmem;

	/* free pmem disk */
	pmem = dev_get_drvdata(&nd_pfn->dev);
	pmem_detach_disk(pmem);

	/* release nd_pfn resources */
	kfree(nd_pfn->pfn_sb);
	nd_pfn->pfn_sb = NULL;

	return 0;
}

/*
 * We hotplug memory at section granularity, pad the reserved area from
 * the previous section base to the namespace base address.
 */
static unsigned long init_altmap_base(resource_size_t base)
{
	unsigned long base_pfn = PHYS_PFN(base);

	return PFN_SECTION_ALIGN_DOWN(base_pfn);
}

static unsigned long init_altmap_reserve(resource_size_t base)
{
	unsigned long reserve = PHYS_PFN(SZ_8K);
	unsigned long base_pfn = PHYS_PFN(base);

	reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
	return reserve;
}

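/*
 * Re-establish the mapping of an existing 'pfn' instance from the
 * geometry recorded in its info block.  In PFN_MODE_PMEM the altmap
 * directs devm_memremap_pages() to carve the struct page array out of
 * the device itself: the reserve spans from the section base through
 * the SZ_8K base reservation (e.g., with 4K pages, a namespace that
 * starts 2MB into a section reserves 512 pfns of pad plus 2 pfns for
 * the 8K), and the span from there up to data_offset is free for
 * memmap allocation.
 */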
static int __nvdimm_namespace_attach_pfn(struct nd_pfn *nd_pfn)
{
	int rc;
	struct resource res;
	struct request_queue *q;
	struct pmem_device *pmem;
	struct vmem_altmap *altmap;
	struct device *dev = &nd_pfn->dev;
	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
	u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	resource_size_t base = nsio->res.start + start_pad;
	struct vmem_altmap __altmap = {
		.base_pfn = init_altmap_base(base),
		.reserve = init_altmap_reserve(base),
	};

	pmem = dev_get_drvdata(dev);
	pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
	pmem->pfn_pad = start_pad + end_trunc;
	nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
	if (nd_pfn->mode == PFN_MODE_RAM) {
		if (pmem->data_offset < SZ_8K)
			return -EINVAL;
		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
		altmap = NULL;
	} else if (nd_pfn->mode == PFN_MODE_PMEM) {
		nd_pfn->npfns = (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ PAGE_SIZE;
		if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
			dev_info(&nd_pfn->dev,
					"number of pfns truncated from %lld to %ld\n",
					le64_to_cpu(nd_pfn->pfn_sb->npfns),
					nd_pfn->npfns);
		altmap = &__altmap;
		altmap->free = PHYS_PFN(pmem->data_offset - SZ_8K);
		altmap->alloc = 0;
	} else {
		rc = -ENXIO;
		goto err;
	}

	/* establish pfn range for lookup, and switch to direct map */
	q = pmem->pmem_queue;
	memcpy(&res, &nsio->res, sizeof(res));
	res.start += start_pad;
	res.end -= end_trunc;
	devm_memunmap(dev, (void __force *) pmem->virt_addr);
	pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, &res,
			&q->q_usage_counter, altmap);
	pmem->pfn_flags |= PFN_MAP;
	if (IS_ERR(pmem->virt_addr)) {
		rc = PTR_ERR(pmem->virt_addr);
		goto err;
	}

	/* attach pmem disk in "pfn-mode" */
	rc = pmem_attach_disk(dev, ndns, pmem);
	if (rc)
		goto err;

	return rc;
 err:
	nvdimm_namespace_detach_pfn(ndns);
	return rc;
}

static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
{
	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
	int rc;

	if (!nd_pfn->uuid || !nd_pfn->ndns)
		return -ENODEV;

	rc = nd_pfn_init(nd_pfn);
	if (rc)
		return rc;
	/* we need a valid pfn_sb before we can init a vmem_altmap */
	return __nvdimm_namespace_attach_pfn(nd_pfn);
}

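/*
 * Probe resolves which personality owns the namespace: an already
 * configured btt or pfn device attaches directly; otherwise
 * nd_btt_probe() / nd_pfn_probe() may claim it, in which case -ENXIO
 * is returned and the device re-probes under the new personality.
 * Failing all of that, the namespace attaches as a raw pmem disk.
 */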
static int nd_pmem_probe(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct pmem_device *pmem;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	nsio = to_nd_namespace_io(&ndns->dev);
	pmem = pmem_alloc(dev, &nsio->res, nd_region->id);
	if (IS_ERR(pmem))
		return PTR_ERR(pmem);

	pmem->ndns = ndns;
	dev_set_drvdata(dev, pmem);
	ndns->rw_bytes = pmem_rw_bytes;
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &nsio->res);

	if (is_nd_btt(dev)) {
		/* btt allocates its own request_queue */
		blk_cleanup_queue(pmem->pmem_queue);
		pmem->pmem_queue = NULL;
		return nvdimm_namespace_attach_btt(ndns);
	}

	if (is_nd_pfn(dev))
		return nvdimm_namespace_attach_pfn(ndns);

	if (nd_btt_probe(ndns, pmem) == 0 || nd_pfn_probe(ndns, pmem) == 0) {
		/*
		 * We'll come back as either btt-pmem, or pfn-pmem, so
		 * drop the queue allocation for now.
		 */
		blk_cleanup_queue(pmem->pmem_queue);
		return -ENXIO;
	}

	return pmem_attach_disk(dev, ndns, pmem);
}

static int nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(pmem->ndns);
	else if (is_nd_pfn(dev))
		nvdimm_namespace_detach_pfn(pmem->ndns);
	else
		pmem_detach_disk(pmem);

	return 0;
}

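/*
 * Bus notification that the poison list may have changed, e.g. after
 * an address range scrub completes: rebuild the badblocks list against
 * the same adjusted resource used at attach time.
 */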
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);
	struct nd_namespace_common *ndns = pmem->ndns;
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct resource res = {
		.start = nsio->res.start + pmem->data_offset,
		.end = nsio->res.end,
	};

	if (event != NVDIMM_REVALIDATE_POISON)
		return;

	if (is_nd_pfn(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);
		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

		res.start += __le32_to_cpu(pfn_sb->start_pad);
		res.end -= __le32_to_cpu(pfn_sb->end_trunc);
	}

	nvdimm_badblocks_populate(nd_region, &pmem->bb, &res);
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

static int __init pmem_init(void)
{
	return nd_driver_register(&nd_pmem_driver);
}
module_init(pmem_init);

static void pmem_exit(void)
{
	driver_unregister(&nd_pmem_driver.drv);
}
module_exit(pmem_exit);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");