/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/pmem.h>
#include <linux/nd.h>
#include "pfn.h"
#include "nd.h"

struct pmem_device {
	struct request_queue	*pmem_queue;
	struct gendisk		*pmem_disk;
	struct nd_namespace_common *ndns;

	/* One contiguous memory region per device */
	phys_addr_t		phys_addr;
	/* when non-zero this device is hosting a 'pfn' instance */
	phys_addr_t		data_offset;
	void __pmem		*virt_addr;
	size_t			size;
};

static int pmem_major;

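/*
 * Copy one bio_vec worth of data between a page and the pmem mapping.
 * The page is mapped with kmap_atomic() and the pmem-aware memcpy
 * helpers are used so that writes land in persistent memory.
 */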
static void pmem_do_bvec(struct pmem_device *pmem, struct page *page,
			unsigned int len, unsigned int off, int rw,
			sector_t sector)
{
	void *mem = kmap_atomic(page);
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void __pmem *pmem_addr = pmem->virt_addr + pmem_off;

	if (rw == READ) {
		memcpy_from_pmem(mem + off, pmem_addr, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		memcpy_to_pmem(pmem_addr, mem + off, len);
	}

	kunmap_atomic(mem);
}

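/*
 * bio-based I/O entry point: copy each segment of the bio to/from pmem
 * and, for writes, issue wmb_pmem() once so the data is durable before
 * the bio is completed.
 */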
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct block_device *bdev = bio->bi_bdev;
	struct pmem_device *pmem = bdev->bd_disk->private_data;

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter)
		pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len, bvec.bv_offset,
				bio_data_dir(bio), iter.bi_sector);
	if (do_acct)
		nd_iostat_end(bio, start);

	if (bio_data_dir(bio))
		wmb_pmem();

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

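/* Synchronous single-page read/write, used for the rw_page block op */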
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, int rw)
{
	struct pmem_device *pmem = bdev->bd_disk->private_data;

	pmem_do_bvec(pmem, page, PAGE_CACHE_SIZE, 0, rw, sector);
	if (rw & WRITE)
		wmb_pmem();
	page_endio(page, rw & WRITE, 0);

	return 0;
}

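/*
 * DAX entry point: translate a sector into a kernel virtual address and
 * pfn within the pmem mapping, and return how many bytes remain valid
 * past that offset.
 */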
static long pmem_direct_access(struct block_device *bdev, sector_t sector,
		      void __pmem **kaddr, unsigned long *pfn)
{
	struct pmem_device *pmem = bdev->bd_disk->private_data;
	resource_size_t offset = sector * 512 + pmem->data_offset;

	*kaddr = pmem->virt_addr + offset;
	*pfn = (pmem->phys_addr + offset) >> PAGE_SHIFT;

	return pmem->size - offset;
}

static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		pmem_rw_page,
	.direct_access =	pmem_direct_access,
	.revalidate_disk =	nvdimm_revalidate_disk,
};

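/*
 * Allocate the pmem_device and map the namespace resource, either via
 * devm_memremap_pages() when struct pages are wanted for the region, or
 * via a plain devm_memremap() otherwise.
 */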
static struct pmem_device *pmem_alloc(struct device *dev,
		struct resource *res, int id)
{
	struct pmem_device *pmem;

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return ERR_PTR(-ENOMEM);

	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	if (!arch_has_wmb_pmem())
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (!devm_request_mem_region(dev, pmem->phys_addr, pmem->size,
			dev_name(dev))) {
		dev_warn(dev, "could not reserve region [0x%pa:0x%zx]\n",
				&pmem->phys_addr, pmem->size);
		return ERR_PTR(-EBUSY);
	}

	if (pmem_should_map_pages(dev))
		pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, res);
	else
		pmem->virt_addr = (void __pmem *) devm_memremap(dev,
				pmem->phys_addr, pmem->size,
				ARCH_MEMREMAP_PMEM);

	if (IS_ERR(pmem->virt_addr))
		return (void __force *) pmem->virt_addr;

	return pmem;
}

static void pmem_detach_disk(struct pmem_device *pmem)
{
	if (!pmem->pmem_disk)
		return;

	del_gendisk(pmem->pmem_disk);
	put_disk(pmem->pmem_disk);
	blk_cleanup_queue(pmem->pmem_queue);
}

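/*
 * Set up the request queue and gendisk for the namespace and register
 * the block device, sized to the namespace capacity minus any pfn
 * metadata reserved at data_offset.
 */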
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns, struct pmem_device *pmem)
{
	int nid = dev_to_node(dev);
	struct gendisk *disk;

	pmem->pmem_queue = blk_alloc_queue_node(GFP_KERNEL, nid);
	if (!pmem->pmem_queue)
		return -ENOMEM;

	blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
	blk_queue_physical_block_size(pmem->pmem_queue, PAGE_SIZE);
	blk_queue_max_hw_sectors(pmem->pmem_queue, UINT_MAX);
	blk_queue_bounce_limit(pmem->pmem_queue, BLK_BOUNCE_ANY);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, pmem->pmem_queue);

	disk = alloc_disk_node(0, nid);
	if (!disk) {
		blk_cleanup_queue(pmem->pmem_queue);
		return -ENOMEM;
	}

	disk->major		= pmem_major;
	disk->first_minor	= 0;
	disk->fops		= &pmem_fops;
	disk->private_data	= pmem;
	disk->queue		= pmem->pmem_queue;
	disk->flags		= GENHD_FL_EXT_DEVT;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	disk->driverfs_dev = dev;
	set_capacity(disk, (pmem->size - pmem->data_offset) / 512);
	pmem->pmem_disk = disk;

	add_disk(disk);
	revalidate_disk(disk);

	return 0;
}

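/*
 * Byte-granularity access to the namespace, used by the btt and pfn
 * infrastructure to read and write their metadata.
 */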
static int pmem_rw_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *buf, size_t size, int rw)
{
	struct pmem_device *pmem = dev_get_drvdata(ndns->claim);

	if (unlikely(offset + size > pmem->size)) {
		dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
		return -EFAULT;
	}

	if (rw == READ)
		memcpy_from_pmem(buf, pmem->virt_addr + offset, size);
	else {
		memcpy_to_pmem(pmem->virt_addr + offset, buf, size);
		wmb_pmem();
	}

	return 0;
}

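/*
 * Validate an existing pfn superblock or, when none is found, build a
 * new one: reserve room for the struct page memmap in PFN_MODE_PMEM, or
 * just the leading 8K in PFN_MODE_RAM, then write the superblock out.
 */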
static int nd_pfn_init(struct nd_pfn *nd_pfn)
{
	struct nd_pfn_sb *pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL);
	struct pmem_device *pmem = dev_get_drvdata(&nd_pfn->dev);
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	struct nd_region *nd_region;
	unsigned long npfns;
	phys_addr_t offset;
	u64 checksum;
	int rc;

	if (!pfn_sb)
		return -ENOMEM;

	nd_pfn->pfn_sb = pfn_sb;
	rc = nd_pfn_validate(nd_pfn);
	if (rc == 0 || rc == -EBUSY)
		return rc;

	nd_region = to_nd_region(nd_pfn->dev.parent);
	if (nd_region->ro) {
		dev_info(&nd_pfn->dev,
				"%s is read-only, unable to init metadata\n",
				dev_name(&nd_region->dev));
		goto err;
	}

	memset(pfn_sb, 0, sizeof(*pfn_sb));
	npfns = (pmem->size - SZ_8K) / SZ_4K;
	/*
	 * Note, we use 64 here for the standard size of struct page,
	 * debugging options may cause it to be larger in which case the
	 * implementation will limit the pfns advertised through
	 * ->direct_access() to those that are included in the memmap.
	 */
	if (nd_pfn->mode == PFN_MODE_PMEM)
		offset = ALIGN(SZ_8K + 64 * npfns, PMD_SIZE);
	else if (nd_pfn->mode == PFN_MODE_RAM)
		offset = SZ_8K;
	else
		goto err;

	npfns = (pmem->size - offset) / SZ_4K;
	pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
	pfn_sb->dataoff = cpu_to_le64(offset);
	pfn_sb->npfns = cpu_to_le64(npfns);
	memcpy(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN);
	memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
	pfn_sb->version_major = cpu_to_le16(1);
	checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
	pfn_sb->checksum = cpu_to_le64(checksum);

	rc = nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb));
	if (rc)
		goto err;

	return 0;
 err:
	nd_pfn->pfn_sb = NULL;
	kfree(pfn_sb);
	return -ENXIO;
}

static int nvdimm_namespace_detach_pfn(struct nd_namespace_common *ndns)
{
	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
	struct pmem_device *pmem;

	/* free pmem disk */
	pmem = dev_get_drvdata(&nd_pfn->dev);
	pmem_detach_disk(pmem);

	/* release nd_pfn resources */
	kfree(nd_pfn->pfn_sb);
	nd_pfn->pfn_sb = NULL;

	return 0;
}

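/*
 * Bring up the pmem disk on top of a pfn instance: read back the pfn
 * superblock, re-map the namespace with devm_memremap_pages() so the
 * region is backed by struct pages, and attach the disk at data_offset.
 */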
static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
	struct device *dev = &nd_pfn->dev;
	struct vmem_altmap *altmap;
	struct nd_region *nd_region;
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	phys_addr_t offset;
	int rc;

	if (!nd_pfn->uuid || !nd_pfn->ndns)
		return -ENODEV;

	nd_region = to_nd_region(dev->parent);
	rc = nd_pfn_init(nd_pfn);
	if (rc)
		return rc;

	pfn_sb = nd_pfn->pfn_sb;
	offset = le64_to_cpu(pfn_sb->dataoff);
	nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
	if (nd_pfn->mode == PFN_MODE_RAM) {
		if (offset != SZ_8K)
			return -EINVAL;
		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
		altmap = NULL;
	} else {
		rc = -ENXIO;
		goto err;
	}

	/* establish pfn range for lookup, and switch to direct map */
	pmem = dev_get_drvdata(dev);
	devm_memunmap(dev, (void __force *) pmem->virt_addr);
	pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, &nsio->res);
	if (IS_ERR(pmem->virt_addr)) {
		rc = PTR_ERR(pmem->virt_addr);
		goto err;
	}

	/* attach pmem disk in "pfn-mode" */
	pmem->data_offset = offset;
	rc = pmem_attach_disk(dev, ndns, pmem);
	if (rc)
		goto err;

	return rc;
 err:
	nvdimm_namespace_detach_pfn(ndns);
	return rc;
}

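/*
 * Driver probe: claim the namespace, map it, and hand it off to the btt
 * or pfn personality when one is configured (or newly detected),
 * otherwise attach the raw pmem disk directly.
 */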
static int nd_pmem_probe(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct pmem_device *pmem;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	nsio = to_nd_namespace_io(&ndns->dev);
	pmem = pmem_alloc(dev, &nsio->res, nd_region->id);
	if (IS_ERR(pmem))
		return PTR_ERR(pmem);

	pmem->ndns = ndns;
	dev_set_drvdata(dev, pmem);
	ndns->rw_bytes = pmem_rw_bytes;

	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);

	if (is_nd_pfn(dev))
		return nvdimm_namespace_attach_pfn(ndns);

	if (nd_btt_probe(ndns, pmem) == 0) {
		/* we'll come back as btt-pmem */
		return -ENXIO;
	}

	if (nd_pfn_probe(ndns, pmem) == 0) {
		/* we'll come back as pfn-pmem */
		return -ENXIO;
	}

	return pmem_attach_disk(dev, ndns, pmem);
}

static int nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(pmem->ndns);
	else if (is_nd_pfn(dev))
		nvdimm_namespace_detach_pfn(pmem->ndns);
	else
		pmem_detach_disk(pmem);

	return 0;
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

static int __init pmem_init(void)
{
	int error;

	pmem_major = register_blkdev(0, "pmem");
	if (pmem_major < 0)
		return pmem_major;

	error = nd_driver_register(&nd_pmem_driver);
	if (error) {
		unregister_blkdev(pmem_major, "pmem");
		return error;
	}

	return 0;
}
module_init(pmem_init);

static void pmem_exit(void)
{
	driver_unregister(&nd_pmem_driver.drv);
	unregister_blkdev(pmem_major, "pmem");
}
module_exit(pmem_exit);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");