/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pmem.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static DEFINE_IDA(region_ida);

static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
		struct nd_region_data *ndrd)
{
	int i, j;

	dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
			nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
	for (i = 0; i < nvdimm->num_flush; i++) {
		struct resource *res = &nvdimm->flush_wpq[i];
		unsigned long pfn = PHYS_PFN(res->start);
		void __iomem *flush_page;

		/* check if flush hints share a page */
		for (j = 0; j < i; j++) {
			struct resource *res_j = &nvdimm->flush_wpq[j];
			unsigned long pfn_j = PHYS_PFN(res_j->start);

			if (pfn == pfn_j)
				break;
		}
		if (j < i)
			flush_page = (void __iomem *) ((unsigned long)
					ndrd->flush_wpq[dimm][j] & PAGE_MASK);
		else
			flush_page = devm_nvdimm_ioremap(dev,
					PFN_PHYS(pfn), PAGE_SIZE);
		if (!flush_page)
			return -ENXIO;
		ndrd->flush_wpq[dimm][i] = flush_page
			+ (res->start & ~PAGE_MASK);
	}

	return 0;
}
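
/*
 * Illustration (hypothetical addresses): given flush hints at
 * 0x18000008 and 0x18000040, the sharing check above finds that both
 * fall in the page at 0x18000000, so a single devm_nvdimm_ioremap()
 * mapping is created and both table slots point into that page at
 * their respective offsets.
 */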

int nd_region_activate(struct nd_region *nd_region)
{
	int i;
	struct nd_region_data *ndrd;
	struct device *dev = &nd_region->dev;
	size_t flush_data_size = sizeof(void *);

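	/*
	 * Sizing sketch (explanatory note): the flush-hint table that
	 * trails the nd_region_data allocation below reserves one NULL
	 * "no-hint" slot per dimm plus one slot per flush hint, so a
	 * dimm without hints still has a well-defined, empty row.
	 */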
	nvdimm_bus_lock(&nd_region->dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		/* at least one null hint slot per-dimm for the "no-hint" case */
		flush_data_size += sizeof(void *);
		if (!nvdimm->num_flush)
			continue;
		flush_data_size += nvdimm->num_flush * sizeof(void *);
	}
	nvdimm_bus_unlock(&nd_region->dev);

	ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
	if (!ndrd)
		return -ENOMEM;
	dev_set_drvdata(dev, ndrd);

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);

		if (rc)
			return rc;
	}

	return 0;
}

static void nd_region_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	u16 i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		put_device(&nvdimm->dev);
	}
	free_percpu(nd_region->lane);
	ida_simple_remove(&region_ida, nd_region->id);
	if (is_nd_blk(dev))
		kfree(to_nd_blk_region(dev));
	else
		kfree(nd_region);
}

static struct device_type nd_blk_device_type = {
	.name = "nd_blk",
	.release = nd_region_release,
};

static struct device_type nd_pmem_device_type = {
	.name = "nd_pmem",
	.release = nd_region_release,
};

static struct device_type nd_volatile_device_type = {
	.name = "nd_volatile",
	.release = nd_region_release,
};

bool is_nd_pmem(struct device *dev)
{
	return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
	return dev ? dev->type == &nd_blk_device_type : false;
}

struct nd_region *to_nd_region(struct device *dev)
{
	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

	WARN_ON(dev->type->release != nd_region_release);
	return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	WARN_ON(!is_nd_blk(dev));
	return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);

void *nd_region_provider_data(struct nd_region *nd_region)
{
	return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
	return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);

void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
	ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);

/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is used as the 'nstype' attribute of a region, as an input to
 * the MODALIAS for namespace devices, and as the bit number for an
 * nvdimm_bus to match namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
	if (is_nd_pmem(&nd_region->dev)) {
		u16 i, alias;

		for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			if (nvdimm->flags & NDD_ALIASING)
				alias++;
		}
		if (alias)
			return ND_DEVICE_NAMESPACE_PMEM;
		else
			return ND_DEVICE_NAMESPACE_IO;
	} else if (is_nd_blk(&nd_region->dev)) {
		return ND_DEVICE_NAMESPACE_BLK;
	}

	return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);
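
/*
 * Example: a pmem region where at least one dimm aliases with blk
 * capacity (NDD_ALIASING) yields label-managed ND_DEVICE_NAMESPACE_PMEM
 * devices, while a region with no aliasing dimms is exposed as a single
 * raw ND_DEVICE_NAMESPACE_IO device.
 */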

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long size = 0;

	if (is_nd_pmem(dev)) {
		size = nd_region->ndr_size;
	} else if (nd_region->ndr_mappings == 1) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];

		size = nd_mapping->size;
	}

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t mappings_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t set_cookie_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (is_nd_pmem(dev) && nd_set)
		/* pass, should be precluded by region_visible */;
	else
		return -ENXIO;

	return sprintf(buf, "%#llx\n", nd_set->cookie);
}
static DEVICE_ATTR_RO(set_cookie);

resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
	resource_size_t blk_max_overlap = 0, available, overlap;
	int i;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

 retry:
	available = 0;
	overlap = blk_max_overlap;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		/* if a dimm is disabled the available capacity is zero */
		if (!ndd)
			return 0;

		if (is_nd_pmem(&nd_region->dev)) {
			available += nd_pmem_available_dpa(nd_region,
					nd_mapping, &overlap);
			if (overlap > blk_max_overlap) {
				blk_max_overlap = overlap;
				goto retry;
			}
		} else if (is_nd_blk(&nd_region->dev)) {
			available += nd_blk_available_dpa(nd_mapping);
		}
	}

	return available;
}
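
/*
 * Note on the retry loop above: blk capacity can alias with pmem, so
 * the free pmem dpa of every dimm is clipped by the largest blk
 * overlap seen so far (blk_max_overlap).  When a mapping reports a
 * larger overlap, the running total is stale and the scan restarts
 * with the new clip value.
 */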

static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	/*
	 * Flush in-flight updates and grab a snapshot of the available
	 * size.  Of course, this value is potentially invalidated once
	 * the nvdimm_bus_lock() is dropped, but that's userspace's
	 * problem to not race itself.
	 */
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);

static ssize_t init_namespaces_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region_data *ndrd = dev_get_drvdata(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (ndrd)
		rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
	else
		rc = -ENXIO;
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(init_namespaces);

static ssize_t namespace_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t btt_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->btt_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(btt_seed);

static ssize_t pfn_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->pfn_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(pfn_seed);

static ssize_t dax_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->dax_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(dax_seed);

static ssize_t read_only_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ro);
}

static ssize_t read_only_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool ro;
	int rc = strtobool(buf, &ro);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;

	nd_region->ro = ro;
	return len;
}
static DEVICE_ATTR_RW(read_only);

static struct attribute *nd_region_attributes[] = {
	&dev_attr_size.attr,
	&dev_attr_nstype.attr,
	&dev_attr_mappings.attr,
	&dev_attr_btt_seed.attr,
	&dev_attr_pfn_seed.attr,
	&dev_attr_dax_seed.attr,
	&dev_attr_read_only.attr,
	&dev_attr_set_cookie.attr,
	&dev_attr_available_size.attr,
	&dev_attr_namespace_seed.attr,
	&dev_attr_init_namespaces.attr,
	NULL,
};

static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	int type = nd_region_to_nstype(nd_region);

	if (!is_nd_pmem(dev) && a == &dev_attr_pfn_seed.attr)
		return 0;

	if (!is_nd_pmem(dev) && a == &dev_attr_dax_seed.attr)
		return 0;

	if (a != &dev_attr_set_cookie.attr
			&& a != &dev_attr_available_size.attr)
		return a->mode;

	if ((type == ND_DEVICE_NAMESPACE_PMEM
				|| type == ND_DEVICE_NAMESPACE_BLK)
			&& a == &dev_attr_available_size.attr)
		return a->mode;
	else if (is_nd_pmem(dev) && nd_set)
		return a->mode;

	return 0;
}

struct attribute_group nd_region_attribute_group = {
	.attrs = nd_region_attributes,
	.is_visible = region_visible,
};
EXPORT_SYMBOL_GPL(nd_region_attribute_group);

u64 nd_region_interleave_set_cookie(struct nd_region *nd_region)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (nd_set)
		return nd_set->cookie;
	return 0;
}

/*
 * Upon successful probe/remove, take/release a reference on the
 * associated interleave set (if present), and plant new btt + namespace
 * seeds.  Also, on the removal of a BLK region, notify the provider to
 * disable the region.
 */
static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
		struct device *dev, bool probe)
{
	struct nd_region *nd_region;

	if (!probe && (is_nd_pmem(dev) || is_nd_blk(dev))) {
		int i;

		nd_region = to_nd_region(dev);
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm_drvdata *ndd = nd_mapping->ndd;
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			kfree(nd_mapping->labels);
			nd_mapping->labels = NULL;
			put_ndd(ndd);
			nd_mapping->ndd = NULL;
			if (ndd)
				atomic_dec(&nvdimm->busy);
		}

		if (is_nd_pmem(dev))
			return;
	}
	if (dev->parent && is_nd_blk(dev->parent) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->ns_seed == dev)
			nd_region_create_blk_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_btt(dev) && probe) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->btt_seed == dev)
			nd_region_create_btt_seed(nd_region);
		if (nd_region->ns_seed == &nd_btt->ndns->dev &&
				is_nd_blk(dev->parent))
			nd_region_create_blk_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_pfn(dev) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->pfn_seed == dev)
			nd_region_create_pfn_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_dax(dev) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->dax_seed == dev)
			nd_region_create_dax_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
}

void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, true);
}

void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, false);
}

static ssize_t mappingN(struct device *dev, char *buf, int n)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm *nvdimm;

	if (n >= nd_region->ndr_mappings)
		return -ENXIO;
	nd_mapping = &nd_region->mapping[n];
	nvdimm = nd_mapping->nvdimm;

	return sprintf(buf, "%s,%llu,%llu\n", dev_name(&nvdimm->dev),
			nd_mapping->start, nd_mapping->size);
}

#define REGION_MAPPING(idx)					\
static ssize_t mapping##idx##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	return mappingN(dev, buf, idx);				\
}								\
static DEVICE_ATTR_RO(mapping##idx)
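
/*
 * For example, REGION_MAPPING(0) below expands to a mapping0_show()
 * wrapper around mappingN() plus the dev_attr_mapping0 attribute
 * referenced in mapping_attributes[].
 */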

/*
 * 32 should be enough for a while; even in the presence of socket
 * interleave, a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);

static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nd_region *nd_region = to_nd_region(dev);

	if (n < nd_region->ndr_mappings)
		return a->mode;
	return 0;
}

static struct attribute *mapping_attributes[] = {
	&dev_attr_mapping0.attr,
	&dev_attr_mapping1.attr,
	&dev_attr_mapping2.attr,
	&dev_attr_mapping3.attr,
	&dev_attr_mapping4.attr,
	&dev_attr_mapping5.attr,
	&dev_attr_mapping6.attr,
	&dev_attr_mapping7.attr,
	&dev_attr_mapping8.attr,
	&dev_attr_mapping9.attr,
	&dev_attr_mapping10.attr,
	&dev_attr_mapping11.attr,
	&dev_attr_mapping12.attr,
	&dev_attr_mapping13.attr,
	&dev_attr_mapping14.attr,
	&dev_attr_mapping15.attr,
	&dev_attr_mapping16.attr,
	&dev_attr_mapping17.attr,
	&dev_attr_mapping18.attr,
	&dev_attr_mapping19.attr,
	&dev_attr_mapping20.attr,
	&dev_attr_mapping21.attr,
	&dev_attr_mapping22.attr,
	&dev_attr_mapping23.attr,
	&dev_attr_mapping24.attr,
	&dev_attr_mapping25.attr,
	&dev_attr_mapping26.attr,
	&dev_attr_mapping27.attr,
	&dev_attr_mapping28.attr,
	&dev_attr_mapping29.attr,
	&dev_attr_mapping30.attr,
	&dev_attr_mapping31.attr,
	NULL,
};

struct attribute_group nd_mapping_attribute_group = {
	.is_visible = mapping_visible,
	.attrs = mapping_attributes,
};
EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);

int nd_blk_region_init(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!is_nd_blk(dev))
		return 0;

	if (nd_region->ndr_mappings < 1) {
		dev_err(dev, "invalid BLK region\n");
		return -ENXIO;
	}

	return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}

/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per-cpu.  For larger systems we need to lock to share lanes.  For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace a lane may be
 * acquired recursively.  We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
	unsigned int cpu, lane;

	cpu = get_cpu();
	if (nd_region->num_lanes < nr_cpu_ids) {
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		lane = cpu % nd_region->num_lanes;
		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (ndl_count->count++ == 0)
			spin_lock(&ndl_lock->lock);
	} else
		lane = cpu;

	return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);

void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
	if (nd_region->num_lanes < nr_cpu_ids) {
		unsigned int cpu = get_cpu();
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (--ndl_count->count == 0)
			spin_unlock(&ndl_lock->lock);
		put_cpu();
	}
	put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);
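
/*
 * Usage sketch (do_window_io() is a hypothetical helper, not part of
 * this file): BTT and BLK I/O paths bracket each data-window or
 * log-slot access with a lane:
 *
 *	unsigned int lane = nd_region_acquire_lane(nd_region);
 *
 *	do_window_io(nd_region, lane);
 *	nd_region_release_lane(nd_region, lane);
 */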

static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc, struct device_type *dev_type,
		const char *caller)
{
	struct nd_region *nd_region;
	struct device *dev;
	void *region_buf;
	unsigned int i;
	int ro = 0;

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		if ((nd_mapping->start | nd_mapping->size) % SZ_4K) {
			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
					caller, dev_name(&nvdimm->dev), i);

			return NULL;
		}

		if (nvdimm->flags & NDD_UNARMED)
			ro = 1;
	}

	if (dev_type == &nd_blk_device_type) {
		struct nd_blk_region_desc *ndbr_desc;
		struct nd_blk_region *ndbr;

		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		if (ndbr) {
			nd_region = &ndbr->nd_region;
			ndbr->enable = ndbr_desc->enable;
			ndbr->do_io = ndbr_desc->do_io;
		}
		region_buf = ndbr;
	} else {
		nd_region = kzalloc(sizeof(struct nd_region)
				+ sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		region_buf = nd_region;
	}

	if (!region_buf)
		return NULL;
	nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
	if (nd_region->id < 0)
		goto err_id;

	nd_region->lane = alloc_percpu(struct nd_percpu_lane);
	if (!nd_region->lane)
		goto err_percpu;

	for (i = 0; i < nr_cpu_ids; i++) {
		struct nd_percpu_lane *ndl;

		ndl = per_cpu_ptr(nd_region->lane, i);
		spin_lock_init(&ndl->lock);
		ndl->count = 0;
	}

	memcpy(nd_region->mapping, ndr_desc->nd_mapping,
			sizeof(struct nd_mapping) * ndr_desc->num_mappings);
	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		get_device(&nvdimm->dev);
	}
	nd_region->ndr_mappings = ndr_desc->num_mappings;
	nd_region->provider_data = ndr_desc->provider_data;
	nd_region->nd_set = ndr_desc->nd_set;
	nd_region->num_lanes = ndr_desc->num_lanes;
	nd_region->flags = ndr_desc->flags;
	nd_region->ro = ro;
	nd_region->numa_node = ndr_desc->numa_node;
	ida_init(&nd_region->ns_ida);
	ida_init(&nd_region->btt_ida);
	ida_init(&nd_region->pfn_ida);
	ida_init(&nd_region->dax_ida);
	dev = &nd_region->dev;
	dev_set_name(dev, "region%d", nd_region->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = dev_type;
	dev->groups = ndr_desc->attr_groups;
	nd_region->ndr_size = resource_size(ndr_desc->res);
	nd_region->ndr_start = ndr_desc->res->start;
	nd_device_register(dev);

	return nd_region;

 err_percpu:
	ida_simple_remove(&region_ida, nd_region->id);
 err_id:
	kfree(region_buf);
	return NULL;
}

struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);

struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	if (ndr_desc->num_mappings > 1)
		return NULL;
	ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);

/**
 * nvdimm_flush - flush any posted write queues between the cpu and pmem media
 * @nd_region: blk or interleaved pmem region
 */
void nvdimm_flush(struct nd_region *nd_region)
{
	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
	int i;

	/*
	 * The first wmb() is needed to 'sfence' all previous writes
	 * such that they are architecturally visible for the platform
	 * buffer flush.  Note that we've already arranged for pmem
	 * writes to avoid the cache via arch_memcpy_to_pmem().  The
	 * final wmb() ensures ordering for the NVDIMM flush write.
	 */
	wmb();
	for (i = 0; i < nd_region->ndr_mappings; i++)
		if (ndrd->flush_wpq[i][0])
			writeq(1, ndrd->flush_wpq[i][0]);
	wmb();
}
EXPORT_SYMBOL_GPL(nvdimm_flush);
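
/*
 * Usage sketch (assuming memcpy_to_pmem() as the cache-bypassing
 * copy): a pmem write path pairs its data movement with a flush of
 * the posted write queues:
 *
 *	memcpy_to_pmem(pmem_addr, src, len);
 *	nvdimm_flush(nd_region);
 */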

/**
 * nvdimm_has_flush - determine write flushing requirements
 * @nd_region: blk or interleaved pmem region
 *
 * Returns 1 if writes require flushing
 * Returns 0 if writes do not require flushing
 * Returns -ENXIO if flushing capability cannot be determined
 */
int nvdimm_has_flush(struct nd_region *nd_region)
{
	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
	int i;

	/* no nvdimm == flushing capability unknown */
	if (nd_region->ndr_mappings == 0)
		return -ENXIO;

	for (i = 0; i < nd_region->ndr_mappings; i++)
		/* flush hints present, flushing required */
		if (ndrd->flush_wpq[i][0])
			return 1;

	/*
	 * The platform defines dimm devices without hints; assume a
	 * platform persistence mechanism like ADR.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_has_flush);
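
/*
 * Usage sketch: a region-attached driver can act on the tri-state
 * result at probe time, for example:
 *
 *	if (nvdimm_has_flush(nd_region) < 0)
 *		dev_warn(dev, "unable to guarantee persistence of writes\n");
 */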

void __exit nd_region_devs_exit(void)
{
	ida_destroy(&region_ida);
}