/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/pmem.h>
#include <linux/nd.h>
#include "pfn.h"
#include "nd.h"

struct pmem_device {
	struct request_queue	*pmem_queue;
	struct gendisk		*pmem_disk;
	struct nd_namespace_common *ndns;

	/* One contiguous memory region per device */
	phys_addr_t		phys_addr;
	/* when non-zero this device is hosting a 'pfn' instance */
	phys_addr_t		data_offset;
	u64			pfn_flags;
	void __pmem		*virt_addr;
	/* immutable base size of the namespace */
	size_t			size;
	/* trim size when namespace capacity has been section aligned */
	u32			pfn_pad;
	struct badblocks	bb;
};

static int pmem_major;

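/*
 * Check whether any part of an I/O range intersects this device's
 * badblocks list.  @len is in bytes; badblocks_check() operates on
 * 512-byte sectors.
 */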
static bool is_bad_pmem(struct badblocks *bb, sector_t sector, unsigned int len)
{
	if (bb->count) {
		sector_t first_bad;
		int num_bad;

		return !!badblocks_check(bb, sector, len / 512, &first_bad,
				&num_bad);
	}

	return false;
}

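/*
 * Perform one bio_vec worth of I/O against the persistent memory range.
 * Reads of known-bad media fail with -EIO rather than returning poisoned
 * data.  Writes land via memcpy_to_pmem() and only become durable once
 * the caller issues wmb_pmem().
 */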
static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
			unsigned int len, unsigned int off, int rw,
			sector_t sector)
{
	int rc = 0;
	void *mem = kmap_atomic(page);
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void __pmem *pmem_addr = pmem->virt_addr + pmem_off;

	if (rw == READ) {
		if (unlikely(is_bad_pmem(&pmem->bb, sector, len))) {
			/* fail the read, but still drop the atomic kmap */
			rc = -EIO;
		} else {
			memcpy_from_pmem(mem + off, pmem_addr, len);
			flush_dcache_page(page);
		}
	} else {
		flush_dcache_page(page);
		memcpy_to_pmem(pmem_addr, mem + off, len);
	}

	kunmap_atomic(mem);
	return rc;
}

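/*
 * The pmem driver bypasses the request queue and completes each bio
 * synchronously in make_request context: every segment is copied
 * to/from the persistent memory mapping, then writes are flushed with
 * a single wmb_pmem() before completing the bio.
 */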
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
	int rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct block_device *bdev = bio->bi_bdev;
	struct pmem_device *pmem = bdev->bd_disk->private_data;

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset, bio_data_dir(bio),
				iter.bi_sector);
		if (rc) {
			bio->bi_error = rc;
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	if (bio_data_dir(bio))
		wmb_pmem();

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, int rw)
{
	struct pmem_device *pmem = bdev->bd_disk->private_data;
	int rc;

	rc = pmem_do_bvec(pmem, page, PAGE_CACHE_SIZE, 0, rw, sector);
	if (rw & WRITE)
		wmb_pmem();

	/*
	 * The ->rw_page interface is subtle and tricky.  The core
	 * retries on any error, so we can only invoke page_endio() in
	 * the successful completion case.  Otherwise, we'll see crashes
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, rw & WRITE, 0);

	return rc;
}

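/*
 * DAX entry point: translate a sector to a kernel virtual address and
 * a pfn_t within the device, and return how many bytes remain valid
 * past @sector (section-alignment padding excluded).
 */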
static long pmem_direct_access(struct block_device *bdev, sector_t sector,
		      void __pmem **kaddr, pfn_t *pfn)
{
	struct pmem_device *pmem = bdev->bd_disk->private_data;
	resource_size_t offset = sector * 512 + pmem->data_offset;

	*kaddr = pmem->virt_addr + offset;
	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	return pmem->size - pmem->pfn_pad - offset;
}

static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		pmem_rw_page,
	.direct_access =	pmem_direct_access,
	.revalidate_disk =	nvdimm_revalidate_disk,
};

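/*
 * Allocate the pmem_device and map the namespace.  When struct page
 * backing is required the region is mapped via devm_memremap_pages()
 * and PFN_MAP is set in pfn_flags; otherwise a plain devm_memremap()
 * with the arch's pmem caching mode suffices.
 */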
static struct pmem_device *pmem_alloc(struct device *dev,
		struct resource *res, int id)
{
	struct pmem_device *pmem;
	struct request_queue *q;

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return ERR_PTR(-ENOMEM);

	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	if (!arch_has_wmb_pmem())
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (!devm_request_mem_region(dev, pmem->phys_addr, pmem->size,
			dev_name(dev))) {
		dev_warn(dev, "could not reserve region [0x%pa:0x%zx]\n",
				&pmem->phys_addr, pmem->size);
		return ERR_PTR(-EBUSY);
	}

	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
	if (!q)
		return ERR_PTR(-ENOMEM);

	pmem->pfn_flags = PFN_DEV;
	if (pmem_should_map_pages(dev)) {
		pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, res,
				&q->q_usage_counter, NULL);
		pmem->pfn_flags |= PFN_MAP;
	} else
		pmem->virt_addr = (void __pmem *) devm_memremap(dev,
				pmem->phys_addr, pmem->size,
				ARCH_MEMREMAP_PMEM);

	if (IS_ERR(pmem->virt_addr)) {
		blk_cleanup_queue(q);
		return (void __force *) pmem->virt_addr;
	}

	pmem->pmem_queue = q;
	return pmem;
}

static void pmem_detach_disk(struct pmem_device *pmem)
{
	if (!pmem->pmem_disk)
		return;

	del_gendisk(pmem->pmem_disk);
	put_disk(pmem->pmem_disk);
	blk_cleanup_queue(pmem->pmem_queue);
}

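/*
 * Register the gendisk for this namespace.  Capacity excludes both the
 * info-block / memmap reservation (data_offset) and any section
 * alignment padding (pfn_pad).  Badblocks are re-initialized relative
 * to data_offset so sector numbers match the user-visible device.
 */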
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns, struct pmem_device *pmem)
{
	int nid = dev_to_node(dev);
	struct gendisk *disk;

	blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
	blk_queue_physical_block_size(pmem->pmem_queue, PAGE_SIZE);
	blk_queue_max_hw_sectors(pmem->pmem_queue, UINT_MAX);
	blk_queue_bounce_limit(pmem->pmem_queue, BLK_BOUNCE_ANY);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, pmem->pmem_queue);

	disk = alloc_disk_node(0, nid);
	if (!disk) {
		blk_cleanup_queue(pmem->pmem_queue);
		return -ENOMEM;
	}

	disk->major = pmem_major;
	disk->first_minor = 0;
	disk->fops = &pmem_fops;
	disk->private_data = pmem;
	disk->queue = pmem->pmem_queue;
	disk->flags = GENHD_FL_EXT_DEVT;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	disk->driverfs_dev = dev;
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ 512);
	pmem->pmem_disk = disk;
	devm_exit_badblocks(dev, &pmem->bb);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_namespace_add_poison(ndns, &pmem->bb, pmem->data_offset);

	disk->bb = &pmem->bb;
	add_disk(disk);
	revalidate_disk(disk);

	return 0;
}

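/*
 * ->rw_bytes() callback for stacked consumers of the namespace (e.g.
 * the BTT).  Reads check the badblocks list at 512-byte granularity;
 * writes are flushed immediately with wmb_pmem().
 */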
static int pmem_rw_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *buf, size_t size, int rw)
{
	struct pmem_device *pmem = dev_get_drvdata(ndns->claim);

	if (unlikely(offset + size > pmem->size)) {
		dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
		return -EFAULT;
	}

	if (rw == READ) {
		unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);

		if (unlikely(is_bad_pmem(&pmem->bb, offset / 512, sz_align)))
			return -EIO;
		memcpy_from_pmem(buf, pmem->virt_addr + offset, size);
	} else {
		memcpy_to_pmem(pmem->virt_addr + offset, buf, size);
		wmb_pmem();
	}

	return 0;
}

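/*
 * Establish (or validate) the on-media info block that reserves
 * capacity for a memmap.  If no valid pfn superblock exists yet,
 * compute the data offset for the requested mode (PFN_MODE_PMEM
 * reserves 64 bytes of struct page per 4K page, PFN_MODE_RAM only the
 * 8K info-block reservation), trim the range so section-aligned
 * hotplug does not collide with 'System RAM', and write out a
 * checksummed nd_pfn_sb.
 */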
static int nd_pfn_init(struct nd_pfn *nd_pfn)
{
	struct nd_pfn_sb *pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL);
	struct pmem_device *pmem = dev_get_drvdata(&nd_pfn->dev);
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	u32 start_pad = 0, end_trunc = 0;
	resource_size_t start, size;
	struct nd_namespace_io *nsio;
	struct nd_region *nd_region;
	unsigned long npfns;
	phys_addr_t offset;
	u64 checksum;
	int rc;

	if (!pfn_sb)
		return -ENOMEM;

	nd_pfn->pfn_sb = pfn_sb;
	rc = nd_pfn_validate(nd_pfn);
	if (rc != -ENODEV)
		return rc;

	/* no info block, do init */
	nd_region = to_nd_region(nd_pfn->dev.parent);
	if (nd_region->ro) {
		dev_info(&nd_pfn->dev,
				"%s is read-only, unable to init metadata\n",
				dev_name(&nd_region->dev));
		goto err;
	}

	memset(pfn_sb, 0, sizeof(*pfn_sb));

	/*
	 * Check if pmem collides with 'System RAM' when section aligned and
	 * trim it accordingly.
	 */
	nsio = to_nd_namespace_io(&ndns->dev);
	start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start);
	size = resource_size(&nsio->res);
	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
				IORES_DESC_NONE) == REGION_MIXED) {
		start = nsio->res.start;
		start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
	}

	start = nsio->res.start;
	size = PHYS_SECTION_ALIGN_UP(start + size) - start;
	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
				IORES_DESC_NONE) == REGION_MIXED) {
		size = resource_size(&nsio->res);
		end_trunc = start + size - PHYS_SECTION_ALIGN_DOWN(start + size);
	}

	if (start_pad + end_trunc)
		dev_info(&nd_pfn->dev, "%s section collision, truncate %d bytes\n",
				dev_name(&ndns->dev), start_pad + end_trunc);

	/*
	 * Note, we use 64 here for the standard size of struct page,
	 * debugging options may cause it to be larger in which case the
	 * implementation will limit the pfns advertised through
	 * ->direct_access() to those that are included in the memmap.
	 */
	start += start_pad;
	npfns = (pmem->size - start_pad - end_trunc - SZ_8K) / SZ_4K;
	if (nd_pfn->mode == PFN_MODE_PMEM)
		offset = ALIGN(start + SZ_8K + 64 * npfns, nd_pfn->align)
			- start;
	else if (nd_pfn->mode == PFN_MODE_RAM)
		offset = ALIGN(start + SZ_8K, nd_pfn->align) - start;
	else
		goto err;

	if (offset + start_pad + end_trunc >= pmem->size) {
		dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
				dev_name(&ndns->dev));
		goto err;
	}

	npfns = (pmem->size - offset - start_pad - end_trunc) / SZ_4K;
	pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
	pfn_sb->dataoff = cpu_to_le64(offset);
	pfn_sb->npfns = cpu_to_le64(npfns);
	memcpy(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN);
	memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
	memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
	pfn_sb->version_major = cpu_to_le16(1);
	pfn_sb->version_minor = cpu_to_le16(1);
	pfn_sb->start_pad = cpu_to_le32(start_pad);
	pfn_sb->end_trunc = cpu_to_le32(end_trunc);
	checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
	pfn_sb->checksum = cpu_to_le64(checksum);

	rc = nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb));
	if (rc)
		goto err;

	return 0;
 err:
	nd_pfn->pfn_sb = NULL;
	kfree(pfn_sb);
	return -ENXIO;
}

static int nvdimm_namespace_detach_pfn(struct nd_namespace_common *ndns)
{
	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
	struct pmem_device *pmem;

	/* free pmem disk */
	pmem = dev_get_drvdata(&nd_pfn->dev);
	pmem_detach_disk(pmem);

	/* release nd_pfn resources */
	kfree(nd_pfn->pfn_sb);
	nd_pfn->pfn_sb = NULL;

	return 0;
}

/*
 * We hotplug memory at section granularity, pad the reserved area from
 * the previous section base to the namespace base address.
 */
static unsigned long init_altmap_base(resource_size_t base)
{
	unsigned long base_pfn = __phys_to_pfn(base);

	return PFN_SECTION_ALIGN_DOWN(base_pfn);
}

static unsigned long init_altmap_reserve(resource_size_t base)
{
	unsigned long reserve = __phys_to_pfn(SZ_8K);
	unsigned long base_pfn = __phys_to_pfn(base);

	reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
	return reserve;
}

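/*
 * Re-map the namespace with struct page backing, honoring the offsets
 * recorded in the pfn superblock.  In PFN_MODE_PMEM the struct page
 * array itself lives in the reserved pmem capacity, described to
 * devm_memremap_pages() via a vmem_altmap.
 */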
static int __nvdimm_namespace_attach_pfn(struct nd_pfn *nd_pfn)
{
	int rc;
	struct resource res;
	struct request_queue *q;
	struct pmem_device *pmem;
	struct vmem_altmap *altmap;
	struct device *dev = &nd_pfn->dev;
	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
	u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	resource_size_t base = nsio->res.start + start_pad;
	struct vmem_altmap __altmap = {
		.base_pfn = init_altmap_base(base),
		.reserve = init_altmap_reserve(base),
	};

	pmem = dev_get_drvdata(dev);
	pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
	pmem->pfn_pad = start_pad + end_trunc;
	nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
	if (nd_pfn->mode == PFN_MODE_RAM) {
		if (pmem->data_offset < SZ_8K)
			return -EINVAL;
		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
		altmap = NULL;
	} else if (nd_pfn->mode == PFN_MODE_PMEM) {
		nd_pfn->npfns = (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ PAGE_SIZE;
		if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
			dev_info(&nd_pfn->dev,
					"number of pfns truncated from %lld to %ld\n",
					le64_to_cpu(nd_pfn->pfn_sb->npfns),
					nd_pfn->npfns);
		altmap = &__altmap;
		altmap->free = __phys_to_pfn(pmem->data_offset - SZ_8K);
		altmap->alloc = 0;
	} else {
		rc = -ENXIO;
		goto err;
	}

	/* establish pfn range for lookup, and switch to direct map */
	q = pmem->pmem_queue;
	memcpy(&res, &nsio->res, sizeof(res));
	res.start += start_pad;
	res.end -= end_trunc;
	devm_memunmap(dev, (void __force *) pmem->virt_addr);
	pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, &res,
			&q->q_usage_counter, altmap);
	pmem->pfn_flags |= PFN_MAP;
	if (IS_ERR(pmem->virt_addr)) {
		rc = PTR_ERR(pmem->virt_addr);
		goto err;
	}

	/* attach pmem disk in "pfn-mode" */
	rc = pmem_attach_disk(dev, ndns, pmem);
	if (rc)
		goto err;

	return rc;
 err:
	nvdimm_namespace_detach_pfn(ndns);
	return rc;
}

static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
{
	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
	int rc;

	if (!nd_pfn->uuid || !nd_pfn->ndns)
		return -ENODEV;

	rc = nd_pfn_init(nd_pfn);
	if (rc)
		return rc;
	/* we need a valid pfn_sb before we can init a vmem_altmap */
	return __nvdimm_namespace_attach_pfn(nd_pfn);
}

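/*
 * Probe order: a raw namespace gets a pmem disk directly, while a
 * namespace claimed by a BTT or PFN personality attaches through that
 * personality instead.  If probing detects a BTT or PFN info block on
 * an unclaimed namespace, return -ENXIO and let the claimed device
 * come back as btt-pmem or pfn-pmem.
 */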
static int nd_pmem_probe(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct pmem_device *pmem;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	nsio = to_nd_namespace_io(&ndns->dev);
	pmem = pmem_alloc(dev, &nsio->res, nd_region->id);
	if (IS_ERR(pmem))
		return PTR_ERR(pmem);

	pmem->ndns = ndns;
	dev_set_drvdata(dev, pmem);
	ndns->rw_bytes = pmem_rw_bytes;
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_namespace_add_poison(ndns, &pmem->bb, 0);

	if (is_nd_btt(dev)) {
		/* btt allocates its own request_queue */
		blk_cleanup_queue(pmem->pmem_queue);
		pmem->pmem_queue = NULL;
		return nvdimm_namespace_attach_btt(ndns);
	}

	if (is_nd_pfn(dev))
		return nvdimm_namespace_attach_pfn(ndns);

	if (nd_btt_probe(ndns, pmem) == 0 || nd_pfn_probe(ndns, pmem) == 0) {
		/*
		 * We'll come back as either btt-pmem, or pfn-pmem, so
		 * drop the queue allocation for now.
		 */
		blk_cleanup_queue(pmem->pmem_queue);
		return -ENXIO;
	}

	return pmem_attach_disk(dev, ndns, pmem);
}

static int nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(pmem->ndns);
	else if (is_nd_pfn(dev))
		nvdimm_namespace_detach_pfn(pmem->ndns);
	else
		pmem_detach_disk(pmem);

	return 0;
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

static int __init pmem_init(void)
{
	int error;

	pmem_major = register_blkdev(0, "pmem");
	if (pmem_major < 0)
		return pmem_major;

	error = nd_driver_register(&nd_pmem_driver);
	if (error) {
		unregister_blkdev(pmem_major, "pmem");
		return error;
	}

	return 0;
}
module_init(pmem_init);

static void pmem_exit(void)
{
	driver_unregister(&nd_pmem_driver.drv);
	unregister_blkdev(pmem_major, "pmem");
}
module_exit(pmem_exit);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");