/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/pmem.h>
#include <linux/nd.h>
#include "pfn.h"
#include "nd.h"

struct pmem_device {
	/* One contiguous memory region per device */
	phys_addr_t		phys_addr;
	/* when non-zero this device is hosting a 'pfn' instance */
	phys_addr_t		data_offset;
	u64			pfn_flags;
	void __pmem		*virt_addr;
	/* immutable base size of the namespace */
	size_t			size;
	/* trim size when namespace capacity has been section aligned */
	u32			pfn_pad;
	struct badblocks	bb;
};

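/*
 * Ask the bus provider to clear poison in the given physical range and,
 * on success, drop the corresponding sectors from the badblocks list.
 * The pmem range is invalidated afterwards so that subsequent reads see
 * the cleared media contents.
 */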
static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
		unsigned int len)
{
	struct device *dev = pmem->bb.dev;
	sector_t sector;
	long cleared;

	sector = (offset - pmem->data_offset) / 512;
	cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);

	if (cleared > 0 && cleared / 512) {
		dev_dbg(dev, "%s: %llx clear %ld sector%s\n",
				__func__, (unsigned long long) sector,
				cleared / 512, cleared / 512 > 1 ? "s" : "");
		badblocks_clear(&pmem->bb, sector, cleared / 512);
	}
	invalidate_pmem(pmem->virt_addr + offset, len);
}

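/*
 * Copy one bio_vec worth of data between a page and pmem.  Reads from
 * known-bad pmem fail with -EIO; writes to known-bad pmem are written,
 * the poison is cleared, and the data is written again in case the
 * clear operation left the range in an indeterminate state.
 */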
static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
			unsigned int len, unsigned int off, int rw,
			sector_t sector)
{
	int rc = 0;
	bool bad_pmem = false;
	void *mem = kmap_atomic(page);
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void __pmem *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		bad_pmem = true;

	if (rw == READ) {
		if (unlikely(bad_pmem))
			rc = -EIO;
		else {
			rc = memcpy_from_pmem(mem + off, pmem_addr, len);
			flush_dcache_page(page);
		}
	} else {
		/*
		 * Note that we write the data both before and after
		 * clearing poison.  The write before clear poison
		 * handles situations where the latest written data is
		 * preserved and the clear poison operation simply marks
		 * the address range as valid without changing the data.
		 * In this case application software can assume that an
		 * interrupted write will either return the new good
		 * data or an error.
		 *
		 * However, if pmem_clear_poison() leaves the data in an
		 * indeterminate state we need to perform the write
		 * after clear poison.
		 */
		flush_dcache_page(page);
		memcpy_to_pmem(pmem_addr, mem + off, len);
		if (unlikely(bad_pmem)) {
			pmem_clear_poison(pmem, pmem_off, len);
			memcpy_to_pmem(pmem_addr, mem + off, len);
		}
	}

	kunmap_atomic(mem);
	return rc;
}

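/*
 * make_request entry point: iterate the bio a segment at a time and
 * fail the whole bio on the first segment error.  Writes are flushed
 * to persistence with a single wmb_pmem() before completion rather
 * than once per segment.
 */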
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
	int rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct pmem_device *pmem = q->queuedata;

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset, bio_data_dir(bio),
				iter.bi_sector);
		if (rc) {
			bio->bi_error = rc;
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	if (bio_data_dir(bio))
		wmb_pmem();

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

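/* ->rw_page() entry point: transfer a single page synchronously */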
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, int rw)
{
	struct pmem_device *pmem = bdev->bd_queue->queuedata;
	int rc;

	rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, rw, sector);
	if (rw & WRITE)
		wmb_pmem();

	/*
	 * The ->rw_page interface is subtle and tricky.  The core
	 * retries on any error, so we can only invoke page_endio() in
	 * the successful completion case.  Otherwise, we'll see crashes
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, rw & WRITE, 0);

	return rc;
}

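/*
 * DAX entry point: translate a sector to a kernel virtual address and
 * pfn, and return how many bytes remain mappable from that offset to
 * the end of the (possibly pfn-padded) device.
 */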
static long pmem_direct_access(struct block_device *bdev, sector_t sector,
		      void __pmem **kaddr, pfn_t *pfn)
{
	struct pmem_device *pmem = bdev->bd_queue->queuedata;
	resource_size_t offset = sector * 512 + pmem->data_offset;

	*kaddr = pmem->virt_addr + offset;
	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	return pmem->size - pmem->pfn_pad - offset;
}

static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		pmem_rw_page,
	.direct_access =	pmem_direct_access,
	.revalidate_disk =	nvdimm_revalidate_disk,
};

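/*
 * devm release actions run in reverse order of registration, so
 * registering these after devm_memremap_pages() in pmem_attach_disk()
 * guarantees the queue and disk are torn down before the page mapping
 * is unwound.
 */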
static void pmem_release_queue(void *q)
{
	blk_cleanup_queue(q);
}

static void pmem_release_disk(void *disk)
{
	del_gendisk(disk);
	put_disk(disk);
}

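/*
 * Set up the block device for a pmem namespace: reserve and map the
 * region (via devm_memremap_pages() when struct pages are needed),
 * configure the request queue, and register the gendisk.  All
 * resources are devm-managed, so error paths simply return.
 */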
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct vmem_altmap __altmap, *altmap = NULL;
	struct resource *res = &nsio->res;
	struct nd_pfn *nd_pfn = NULL;
	int nid = dev_to_node(dev);
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	struct resource pfn_res;
	struct request_queue *q;
	struct gendisk *disk;
	void *addr;

	/* while nsio_rw_bytes is active, parse a pfn info block if present */
	if (is_nd_pfn(dev)) {
		nd_pfn = to_nd_pfn(dev);
		altmap = nvdimm_setup_pfn(nd_pfn, &pfn_res, &__altmap);
		if (IS_ERR(altmap))
			return PTR_ERR(altmap);
	}

	/* we're attaching a block device, disable raw namespace access */
	devm_nsio_disable(dev, nsio);

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return -ENOMEM;

	dev_set_drvdata(dev, pmem);
	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	if (!arch_has_wmb_pmem())
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
	if (!q)
		return -ENOMEM;

	pmem->pfn_flags = PFN_DEV;
	if (is_nd_pfn(dev)) {
		addr = devm_memremap_pages(dev, &pfn_res, &q->q_usage_counter,
				altmap);
		pfn_sb = nd_pfn->pfn_sb;
		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
		pmem->pfn_pad = resource_size(res) - resource_size(&pfn_res);
		pmem->pfn_flags |= PFN_MAP;
		res = &pfn_res; /* for badblocks populate */
		res->start += pmem->data_offset;
	} else if (pmem_should_map_pages(dev)) {
		addr = devm_memremap_pages(dev, &nsio->res,
				&q->q_usage_counter, NULL);
		pmem->pfn_flags |= PFN_MAP;
	} else
		addr = devm_memremap(dev, pmem->phys_addr,
				pmem->size, ARCH_MEMREMAP_PMEM);

	/*
	 * At release time the queue must be dead before
	 * devm_memremap_pages is unwound
	 */
	if (devm_add_action(dev, pmem_release_queue, q)) {
		blk_cleanup_queue(q);
		return -ENOMEM;
	}

	if (IS_ERR(addr))
		return PTR_ERR(addr);
	pmem->virt_addr = (void __pmem *) addr;

	blk_queue_make_request(q, pmem_make_request);
	blk_queue_physical_block_size(q, PAGE_SIZE);
	blk_queue_max_hw_sectors(q, UINT_MAX);
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	q->queuedata = pmem;

	disk = alloc_disk_node(0, nid);
	if (!disk)
		return -ENOMEM;
	if (devm_add_action(dev, pmem_release_disk, disk)) {
		put_disk(disk);
		return -ENOMEM;
	}

	disk->fops = &pmem_fops;
	disk->queue = q;
	disk->flags = GENHD_FL_EXT_DEVT;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	disk->driverfs_dev = dev;
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ 512);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(to_nd_region(dev->parent), &pmem->bb, res);
	disk->bb = &pmem->bb;
	add_disk(disk);
	revalidate_disk(disk);

	return 0;
}

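/*
 * Probe: a namespace may come up as a btt or pfn personality when a
 * valid info block is found; otherwise it attaches as a raw pmem disk.
 */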
static int nd_pmem_probe(struct device *dev)
{
	struct nd_namespace_common *ndns;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
		return -ENXIO;

	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);

	if (is_nd_pfn(dev))
		return pmem_attach_disk(dev, ndns);

	/* if we find a valid info-block we'll come back as that personality */
	if (nd_btt_probe(dev, ndns) == 0 || nd_pfn_probe(dev, ndns) == 0)
		return -ENXIO;

	/* ...otherwise we're just a raw pmem device */
	return pmem_attach_disk(dev, ndns);
}

static int nd_pmem_remove(struct device *dev)
{
	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(to_nd_btt(dev));
	return 0;
}

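/*
 * Bus notification: on NVDIMM_REVALIDATE_POISON, recompute the
 * namespace-relative resource (accounting for any pfn start_pad /
 * end_trunc) and refresh the badblocks list.
 */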
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct pmem_device *pmem = dev_get_drvdata(dev);
	resource_size_t offset = 0, end_trunc = 0;
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct resource res;

	if (event != NVDIMM_REVALIDATE_POISON)
		return;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		ndns = nd_btt->ndns;
	} else if (is_nd_pfn(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);
		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

		ndns = nd_pfn->ndns;
		offset = pmem->data_offset + __le32_to_cpu(pfn_sb->start_pad);
		end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
	} else
		ndns = to_ndns(dev);

	nsio = to_nd_namespace_io(&ndns->dev);
	res.start = nsio->res.start + offset;
	res.end = nsio->res.end - end_trunc;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &res);
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

static int __init pmem_init(void)
{
	return nd_driver_register(&nd_pmem_driver);
}
module_init(pmem_init);

static void pmem_exit(void)
{
	driver_unregister(&nd_pmem_driver.drv);
}
module_exit(pmem_exit);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");