/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

static DEFINE_IDA(region_ida);

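/*
 * Release callback shared by all region device types: drop the
 * reference taken on each mapping's nvdimm at creation time, then
 * retire the region id and free the region.
 */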
static void nd_region_release(struct device *dev)
{
        struct nd_region *nd_region = to_nd_region(dev);
        u16 i;

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;

                put_device(&nvdimm->dev);
        }
        ida_simple_remove(&region_ida, nd_region->id);
        kfree(nd_region);
}

static struct device_type nd_blk_device_type = {
        .name = "nd_blk",
        .release = nd_region_release,
};

static struct device_type nd_pmem_device_type = {
        .name = "nd_pmem",
        .release = nd_region_release,
};

static struct device_type nd_volatile_device_type = {
        .name = "nd_volatile",
        .release = nd_region_release,
};

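/* identify a region device by its device_type */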
bool is_nd_pmem(struct device *dev)
{
        return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
        return dev ? dev->type == &nd_blk_device_type : false;
}

struct nd_region *to_nd_region(struct device *dev)
{
        struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

        WARN_ON(dev->type->release != nd_region_release);
        return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

/**
 * nd_region_to_nstype() - map a region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * The returned value is also the region's 'nstype' sysfs attribute, an
 * input to the MODALIAS for namespace devices, and the bit number for
 * an nvdimm_bus to match namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
        if (is_nd_pmem(&nd_region->dev)) {
                u16 i, alias;

                for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
                        struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                        struct nvdimm *nvdimm = nd_mapping->nvdimm;

                        if (nvdimm->flags & NDD_ALIASING)
                                alias++;
                }
                if (alias)
                        return ND_DEVICE_NAMESPACE_PMEM;
                else
                        return ND_DEVICE_NAMESPACE_IO;
        } else if (is_nd_blk(&nd_region->dev)) {
                return ND_DEVICE_NAMESPACE_BLK;
        }

        return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);

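/*
 * device_for_each_child() callback: report -EBUSY when a sibling
 * namespace already claims @data as its uuid
 */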
static int is_uuid_busy(struct device *dev, void *data)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        u8 *uuid = data;

        switch (nd_region_to_nstype(nd_region)) {
        case ND_DEVICE_NAMESPACE_PMEM: {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                if (!nspm->uuid)
                        break;
                if (memcmp(uuid, nspm->uuid, NSLABEL_UUID_LEN) == 0)
                        return -EBUSY;
                break;
        }
        case ND_DEVICE_NAMESPACE_BLK: {
                /* TODO: blk namespace support */
                break;
        }
        default:
                break;
        }

        return 0;
}

static int is_namespace_uuid_busy(struct device *dev, void *data)
{
        if (is_nd_pmem(dev) || is_nd_blk(dev))
                return device_for_each_child(dev, data, is_uuid_busy);
        return 0;
}

/**
 * nd_is_uuid_unique - verify that no other namespace has @uuid
 * @dev: any device on a nvdimm_bus
 * @uuid: uuid to check
 */
bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

        if (!nvdimm_bus)
                return false;
        WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
        if (device_for_each_child(&nvdimm_bus->dev, uuid,
                                is_namespace_uuid_busy) != 0)
                return false;
        return true;
}

static ssize_t size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        unsigned long long size = 0;

        if (is_nd_pmem(dev)) {
                size = nd_region->ndr_size;
        } else if (nd_region->ndr_mappings == 1) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[0];

                size = nd_mapping->size;
        }

        return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t mappings_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t set_cookie_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        struct nd_interleave_set *nd_set = nd_region->nd_set;

        if (is_nd_pmem(dev) && nd_set)
                /* pass, should be precluded by region_visible */;
        else
                return -ENXIO;

        return sprintf(buf, "%#llx\n", nd_set->cookie);
}
static DEVICE_ATTR_RO(set_cookie);

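/*
 * Sum the free dimm-physical-address space across all of the region's
 * mappings.  BLK capacity may alias the tail of a PMEM mapping, so the
 * scan restarts whenever a mapping reports a larger BLK overlap than
 * previously assumed, until blk_max_overlap stabilizes.
 */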
resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
        resource_size_t blk_max_overlap = 0, available, overlap;
        int i;

        WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

 retry:
        available = 0;
        overlap = blk_max_overlap;
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

                /* if a dimm is disabled the available capacity is zero */
                if (!ndd)
                        return 0;

                if (is_nd_pmem(&nd_region->dev)) {
                        available += nd_pmem_available_dpa(nd_region,
                                        nd_mapping, &overlap);
                        if (overlap > blk_max_overlap) {
                                blk_max_overlap = overlap;
                                goto retry;
                        }
                } else if (is_nd_blk(&nd_region->dev)) {
                        /* TODO: BLK Namespace support */
                }
        }

        return available;
}

static ssize_t available_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        unsigned long long available = 0;

        /*
         * Flush in-flight updates and grab a snapshot of the available
         * size.  Of course, this value is potentially invalidated the
         * moment nvdimm_bus_lock() is dropped, but that's userspace's
         * problem to not race itself.
         */
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        available = nd_region_available_dpa(nd_region);
        nvdimm_bus_unlock(dev);

        return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);

static ssize_t init_namespaces_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region_namespaces *num_ns = dev_get_drvdata(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        if (num_ns)
                rc = sprintf(buf, "%d/%d\n", num_ns->active, num_ns->count);
        else
                rc = -ENXIO;
        nvdimm_bus_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(init_namespaces);

static ssize_t namespace_seed_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        if (nd_region->ns_seed)
                rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
        else
                rc = sprintf(buf, "\n");
        nvdimm_bus_unlock(dev);
        return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static struct attribute *nd_region_attributes[] = {
        &dev_attr_size.attr,
        &dev_attr_nstype.attr,
        &dev_attr_mappings.attr,
        &dev_attr_set_cookie.attr,
        &dev_attr_available_size.attr,
        &dev_attr_namespace_seed.attr,
        &dev_attr_init_namespaces.attr,
        NULL,
};

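/*
 * set_cookie is only shown for PMEM regions with an interleave set;
 * available_size is only shown for region types that can host
 * dynamically allocated (PMEM- or BLK-labelled) namespaces.
 */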
static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, typeof(*dev), kobj);
        struct nd_region *nd_region = to_nd_region(dev);
        struct nd_interleave_set *nd_set = nd_region->nd_set;
        int type = nd_region_to_nstype(nd_region);

        if (a != &dev_attr_set_cookie.attr
                        && a != &dev_attr_available_size.attr)
                return a->mode;

        if ((type == ND_DEVICE_NAMESPACE_PMEM
                        || type == ND_DEVICE_NAMESPACE_BLK)
                        && a == &dev_attr_available_size.attr)
                return a->mode;
        else if (is_nd_pmem(dev) && nd_set)
                return a->mode;

        return 0;
}

struct attribute_group nd_region_attribute_group = {
        .attrs = nd_region_attributes,
        .is_visible = region_visible,
};
EXPORT_SYMBOL_GPL(nd_region_attribute_group);

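/*
 * The interleave-set cookie identifies the dimm configuration backing
 * a PMEM region, so that namespace label validation can compare it
 * against the cookie recorded in each label.
 */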
u64 nd_region_interleave_set_cookie(struct nd_region *nd_region)
{
        struct nd_interleave_set *nd_set = nd_region->nd_set;

        if (nd_set)
                return nd_set->cookie;
        return 0;
}

/*
 * Upon successful probe/remove, take/release a reference on the
 * associated interleave set (if present)
 */
static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
                struct device *dev, bool probe)
{
        if (!probe && (is_nd_pmem(dev) || is_nd_blk(dev))) {
                struct nd_region *nd_region = to_nd_region(dev);
                int i;

                for (i = 0; i < nd_region->ndr_mappings; i++) {
                        struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                        struct nvdimm_drvdata *ndd = nd_mapping->ndd;
                        struct nvdimm *nvdimm = nd_mapping->nvdimm;

                        kfree(nd_mapping->labels);
                        nd_mapping->labels = NULL;
                        put_ndd(ndd);
                        nd_mapping->ndd = NULL;
                        atomic_dec(&nvdimm->busy);
                }
        }
}

void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
        nd_region_notify_driver_action(nvdimm_bus, dev, true);
}

void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
        nd_region_notify_driver_action(nvdimm_bus, dev, false);
}

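/*
 * Emit the "<dimm-name>,<start>,<size>" tuple for the region's nth dimm
 * mapping; backs the sysfs mapping0..mapping31 attributes defined below.
 */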
static ssize_t mappingN(struct device *dev, char *buf, int n)
{
        struct nd_region *nd_region = to_nd_region(dev);
        struct nd_mapping *nd_mapping;
        struct nvdimm *nvdimm;

        if (n >= nd_region->ndr_mappings)
                return -ENXIO;
        nd_mapping = &nd_region->mapping[n];
        nvdimm = nd_mapping->nvdimm;

        return sprintf(buf, "%s,%llu,%llu\n", dev_name(&nvdimm->dev),
                        nd_mapping->start, nd_mapping->size);
}

#define REGION_MAPPING(idx)                                     \
static ssize_t mapping##idx##_show(struct device *dev,         \
                struct device_attribute *attr, char *buf)      \
{                                                               \
        return mappingN(dev, buf, idx);                         \
}                                                               \
static DEVICE_ATTR_RO(mapping##idx)

/*
 * 32 mappings should be enough for a while; even in the presence of
 * socket interleave a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);

static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, struct device, kobj);
        struct nd_region *nd_region = to_nd_region(dev);

        if (n < nd_region->ndr_mappings)
                return a->mode;
        return 0;
}

static struct attribute *mapping_attributes[] = {
        &dev_attr_mapping0.attr,
        &dev_attr_mapping1.attr,
        &dev_attr_mapping2.attr,
        &dev_attr_mapping3.attr,
        &dev_attr_mapping4.attr,
        &dev_attr_mapping5.attr,
        &dev_attr_mapping6.attr,
        &dev_attr_mapping7.attr,
        &dev_attr_mapping8.attr,
        &dev_attr_mapping9.attr,
        &dev_attr_mapping10.attr,
        &dev_attr_mapping11.attr,
        &dev_attr_mapping12.attr,
        &dev_attr_mapping13.attr,
        &dev_attr_mapping14.attr,
        &dev_attr_mapping15.attr,
        &dev_attr_mapping16.attr,
        &dev_attr_mapping17.attr,
        &dev_attr_mapping18.attr,
        &dev_attr_mapping19.attr,
        &dev_attr_mapping20.attr,
        &dev_attr_mapping21.attr,
        &dev_attr_mapping22.attr,
        &dev_attr_mapping23.attr,
        &dev_attr_mapping24.attr,
        &dev_attr_mapping25.attr,
        &dev_attr_mapping26.attr,
        &dev_attr_mapping27.attr,
        &dev_attr_mapping28.attr,
        &dev_attr_mapping29.attr,
        &dev_attr_mapping30.attr,
        &dev_attr_mapping31.attr,
        NULL,
};

struct attribute_group nd_mapping_attribute_group = {
        .is_visible = mapping_visible,
        .attrs = mapping_attributes,
};
EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);

void *nd_region_provider_data(struct nd_region *nd_region)
{
        return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

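/*
 * Common region constructor: validate that every mapping is 4K aligned,
 * allocate the region together with its trailing array of nd_mappings,
 * pin each backing nvdimm, and register the device on the nvdimm_bus.
 */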
static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
                struct nd_region_desc *ndr_desc, struct device_type *dev_type,
                const char *caller)
{
        struct nd_region *nd_region;
        struct device *dev;
        u16 i;

        for (i = 0; i < ndr_desc->num_mappings; i++) {
                struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;

                if ((nd_mapping->start | nd_mapping->size) % SZ_4K) {
                        dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
                                        caller, dev_name(&nvdimm->dev), i);

                        return NULL;
                }
        }

        nd_region = kzalloc(sizeof(struct nd_region)
                        + sizeof(struct nd_mapping) * ndr_desc->num_mappings,
                        GFP_KERNEL);
        if (!nd_region)
                return NULL;
        nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
        if (nd_region->id < 0) {
                kfree(nd_region);
                return NULL;
        }

        memcpy(nd_region->mapping, ndr_desc->nd_mapping,
                        sizeof(struct nd_mapping) * ndr_desc->num_mappings);
        for (i = 0; i < ndr_desc->num_mappings; i++) {
                struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;

                get_device(&nvdimm->dev);
        }
        nd_region->ndr_mappings = ndr_desc->num_mappings;
        nd_region->provider_data = ndr_desc->provider_data;
        nd_region->nd_set = ndr_desc->nd_set;
        dev = &nd_region->dev;
        dev_set_name(dev, "region%d", nd_region->id);
        dev->parent = &nvdimm_bus->dev;
        dev->type = dev_type;
        dev->groups = ndr_desc->attr_groups;
        nd_region->ndr_size = resource_size(ndr_desc->res);
        nd_region->ndr_start = ndr_desc->res->start;
        nd_device_register(dev);

        return nd_region;
}

struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
                struct nd_region_desc *ndr_desc)
{
        return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
                        __func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);
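
/*
 * Illustrative caller-side sketch (the names 'res', 'mappings', and
 * 'groups' are assumed, not from this file): a bus provider fills a
 * struct nd_region_desc and hands it to one of the create helpers:
 *
 *      struct nd_region_desc ndr_desc = {
 *              .res = &res,
 *              .nd_mapping = mappings,
 *              .num_mappings = ARRAY_SIZE(mappings),
 *              .attr_groups = groups,
 *      };
 *      region = nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc);
 */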

struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
                struct nd_region_desc *ndr_desc)
{
        if (ndr_desc->num_mappings > 1)
                return NULL;
        return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
                        __func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
                struct nd_region_desc *ndr_desc)
{
        return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
                        __func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);