/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/pmem.h>
#include <linux/list.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

static void namespace_io_release(struct device *dev)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

	kfree(nsio);
}

static void namespace_pmem_release(struct device *dev)
{
	struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);

	if (nspm->id >= 0)
		ida_simple_remove(&nd_region->ns_ida, nspm->id);
	kfree(nspm->alt_name);
	kfree(nspm->uuid);
	kfree(nspm);
}

static void namespace_blk_release(struct device *dev)
{
	struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);

	if (nsblk->id >= 0)
		ida_simple_remove(&nd_region->ns_ida, nsblk->id);
	kfree(nsblk->alt_name);
	kfree(nsblk->uuid);
	kfree(nsblk->res);
	kfree(nsblk);
}

static struct device_type namespace_io_device_type = {
	.name = "nd_namespace_io",
	.release = namespace_io_release,
};

static struct device_type namespace_pmem_device_type = {
	.name = "nd_namespace_pmem",
	.release = namespace_pmem_release,
};

static struct device_type namespace_blk_device_type = {
	.name = "nd_namespace_blk",
	.release = namespace_blk_release,
};

static bool is_namespace_pmem(const struct device *dev)
{
	return dev ? dev->type == &namespace_pmem_device_type : false;
}

static bool is_namespace_blk(const struct device *dev)
{
	return dev ? dev->type == &namespace_blk_device_type : false;
}

static bool is_namespace_io(const struct device *dev)
{
	return dev ? dev->type == &namespace_io_device_type : false;
}

static int is_uuid_busy(struct device *dev, void *data)
{
	u8 *uuid1 = data, *uuid2 = NULL;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid2 = nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid2 = nsblk->uuid;
	} else if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		uuid2 = nd_btt->uuid;
	} else if (is_nd_pfn(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);

		uuid2 = nd_pfn->uuid;
	}

	if (uuid2 && memcmp(uuid1, uuid2, NSLABEL_UUID_LEN) == 0)
		return -EBUSY;

	return 0;
}

static int is_namespace_uuid_busy(struct device *dev, void *data)
{
	if (is_nd_pmem(dev) || is_nd_blk(dev))
		return device_for_each_child(dev, data, is_uuid_busy);
	return 0;
}

/**
 * nd_is_uuid_unique - verify that no other namespace has @uuid
 * @dev: any device on a nvdimm_bus
 * @uuid: uuid to check
 */
bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return false;
	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
	if (device_for_each_child(&nvdimm_bus->dev, uuid,
				is_namespace_uuid_busy) != 0)
		return false;
	return true;
}
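
/*
 * Usage sketch (illustrative, not copied from a particular caller):
 * callers are expected to hold the nvdimm bus lock while probing uuid
 * uniqueness, e.g. something like:
 *
 *	nvdimm_bus_lock(dev);
 *	if (nd_is_uuid_unique(dev, uuid))
 *		... accept the new uuid ...
 *	nvdimm_bus_unlock(dev);
 *
 * The WARN_ON_ONCE() above only checks that expectation; it does not
 * take the lock itself.
 */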

bool pmem_should_map_pages(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_io *nsio;

	if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
		return false;

	if (!test_bit(ND_REGION_PAGEMAP, &nd_region->flags))
		return false;

	if (is_nd_pfn(dev) || is_nd_btt(dev))
		return false;

	nsio = to_nd_namespace_io(dev);
	if (region_intersects(nsio->res.start, resource_size(&nsio->res),
				IORESOURCE_SYSTEM_RAM,
				IORES_DESC_NONE) == REGION_MIXED)
		return false;

#ifdef ARCH_MEMREMAP_PMEM
	return ARCH_MEMREMAP_PMEM == MEMREMAP_WB;
#else
	return false;
#endif
}
EXPORT_SYMBOL(pmem_should_map_pages);

const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
		char *name)
{
	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
	const char *suffix = NULL;

	if (ndns->claim && is_nd_btt(ndns->claim))
		suffix = "s";

	if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) {
		sprintf(name, "pmem%d%s", nd_region->id, suffix ? suffix : "");
	} else if (is_namespace_blk(&ndns->dev)) {
		struct nd_namespace_blk *nsblk;

		nsblk = to_nd_namespace_blk(&ndns->dev);
		sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id,
				suffix ? suffix : "");
	} else {
		return NULL;
	}

	return name;
}
EXPORT_SYMBOL(nvdimm_namespace_disk_name);
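
/*
 * For example (hypothetical region/namespace ids): a pmem or io namespace
 * in region 3 yields the disk name "pmem3", or "pmem3s" when a BTT claims
 * it, while a blk namespace with id 1 in region 3 yields "ndblk3.1" /
 * "ndblk3.1s".
 */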

const u8 *nd_dev_to_uuid(struct device *dev)
{
	static const u8 null_uuid[16];

	if (!dev)
		return null_uuid;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		return nsblk->uuid;
	} else
		return null_uuid;
}
EXPORT_SYMBOL(nd_dev_to_uuid);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t __alt_name_store(struct device *dev, const char *buf,
		const size_t len)
{
	char *input, *pos, *alt_name, **ns_altname;
	ssize_t rc;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_altname = &nspm->alt_name;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		ns_altname = &nsblk->alt_name;
	} else
		return -ENXIO;

	if (dev->driver || to_ndns(dev)->claim)
		return -EBUSY;

	input = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!input)
		return -ENOMEM;

	input[len] = '\0';
	pos = strim(input);
	if (strlen(pos) + 1 > NSLABEL_NAME_LEN) {
		rc = -EINVAL;
		goto out;
	}

	alt_name = kzalloc(NSLABEL_NAME_LEN, GFP_KERNEL);
	if (!alt_name) {
		rc = -ENOMEM;
		goto out;
	}
	kfree(*ns_altname);
	*ns_altname = alt_name;
	sprintf(*ns_altname, "%s", pos);
	rc = len;

out:
	kfree(input);
	return rc;
}

static resource_size_t nd_namespace_blk_size(struct nd_namespace_blk *nsblk)
{
	struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_id label_id;
	resource_size_t size = 0;
	struct resource *res;

	if (!nsblk->uuid)
		return 0;
	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0)
			size += resource_size(res);
	return size;
}

static bool __nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
{
	struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_id label_id;
	struct resource *res;
	int count, i;

	if (!nsblk->uuid || !nsblk->lbasize || !ndd)
		return false;

	count = 0;
	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
			continue;
		/*
		 * Resources with unacknowledged adjustments indicate a
		 * failure to update labels
		 */
		if (res->flags & DPA_RESOURCE_ADJUSTED)
			return false;
		count++;
	}

	/* These values match after a successful label update */
	if (count != nsblk->num_resources)
		return false;

	for (i = 0; i < nsblk->num_resources; i++) {
		struct resource *found = NULL;

		for_each_dpa_resource(ndd, res)
			if (res == nsblk->res[i]) {
				found = res;
				break;
			}
		/* stale resource */
		if (!found)
			return false;
	}

	return true;
}

resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
{
	resource_size_t size;

	nvdimm_bus_lock(&nsblk->common.dev);
	size = __nd_namespace_blk_validate(nsblk);
	nvdimm_bus_unlock(&nsblk->common.dev);

	return size;
}
EXPORT_SYMBOL(nd_namespace_blk_validate);


static int nd_namespace_label_update(struct nd_region *nd_region,
		struct device *dev)
{
	dev_WARN_ONCE(dev, dev->driver || to_ndns(dev)->claim,
			"namespace must be idle during label update\n");
	if (dev->driver || to_ndns(dev)->claim)
		return 0;

	/*
	 * Only allow label writes that will result in a valid namespace
	 * or deletion of an existing namespace.
	 */
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
		resource_size_t size = resource_size(&nspm->nsio.res);

		if (size == 0 && nspm->uuid)
			/* delete allocation */;
		else if (!nspm->uuid)
			return 0;

		return nd_pmem_namespace_label_update(nd_region, nspm, size);
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
		resource_size_t size = nd_namespace_blk_size(nsblk);

		if (size == 0 && nsblk->uuid)
			/* delete allocation */;
		else if (!nsblk->uuid || !nsblk->lbasize)
			return 0;

		return nd_blk_namespace_label_update(nd_region, nsblk, size);
	} else
		return -ENXIO;
}

static ssize_t alt_name_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	ssize_t rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __alt_name_store(dev, buf, len);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "%s: %s(%zd)\n", __func__, rc < 0 ? "fail " : "", rc);
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc < 0 ? rc : len;
}

static ssize_t alt_name_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	char *ns_altname;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_altname = nspm->alt_name;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		ns_altname = nsblk->alt_name;
	} else
		return -ENXIO;

	return sprintf(buf, "%s\n", ns_altname ? ns_altname : "");
}
static DEVICE_ATTR_RW(alt_name);

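/*
 * The DPA resource bookkeeping below keys off the label_id string produced
 * by nd_label_gen_id(): roughly "pmem-<uuid>" for interleaved namespaces
 * and "blk-<uuid>" for local (NSLABEL_FLAG_LOCAL) namespaces, which is why
 * the helpers test strncmp(label_id->id, "pmem", 4) / "blk".
 */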
static int scan_free(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
		resource_size_t n)
{
	bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	int rc = 0;

	while (n) {
		struct resource *res, *last;
		resource_size_t new_start;

		last = NULL;
		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id->id) == 0)
				last = res;
		res = last;
		if (!res)
			return 0;

		if (n >= resource_size(res)) {
			n -= resource_size(res);
			nd_dbg_dpa(nd_region, ndd, res, "delete %d\n", rc);
			nvdimm_free_dpa(ndd, res);
			/* retry with last resource deleted */
			continue;
		}

		/*
		 * Keep BLK allocations relegated to high DPA as much as
		 * possible
		 */
		if (is_blk)
			new_start = res->start + n;
		else
			new_start = res->start;

		rc = adjust_resource(res, new_start, resource_size(res) - n);
		if (rc == 0)
			res->flags |= DPA_RESOURCE_ADJUSTED;
		nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc);
		break;
	}

	return rc;
}

/**
 * shrink_dpa_allocation - for each dimm in region free n bytes for label_id
 * @nd_region: the set of dimms to reclaim @n bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to release
 *
 * Assumes resources are ordered. Starting from the end try to
 * adjust_resource() the allocation to @n, but if @n is larger than the
 * allocation delete it and find the 'new' last allocation in the label
 * set.
 */
static int shrink_dpa_allocation(struct nd_region *nd_region,
		struct nd_label_id *label_id, resource_size_t n)
{
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		int rc;

		rc = scan_free(nd_region, nd_mapping, label_id, n);
		if (rc)
			return rc;
	}

	return 0;
}

static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
		struct nd_region *nd_region, struct nd_mapping *nd_mapping,
		resource_size_t n)
{
	bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	resource_size_t first_dpa;
	struct resource *res;
	int rc = 0;

	/* allocate blk from highest dpa first */
	if (is_blk)
		first_dpa = nd_mapping->start + nd_mapping->size - n;
	else
		first_dpa = nd_mapping->start;

	/* first resource allocation for this label-id or dimm */
	res = nvdimm_allocate_dpa(ndd, label_id, first_dpa, n);
	if (!res)
		rc = -EBUSY;

	nd_dbg_dpa(nd_region, ndd, res, "init %d\n", rc);
	return rc ? n : 0;
}

static bool space_valid(bool is_pmem, bool is_reserve,
		struct nd_label_id *label_id, struct resource *res)
{
	/*
	 * For BLK-space any space is valid, for PMEM-space, it must be
	 * contiguous with an existing allocation unless we are
	 * reserving pmem.
	 */
	if (is_reserve || !is_pmem)
		return true;
	if (!res || strcmp(res->name, label_id->id) == 0)
		return true;
	return false;
}

enum alloc_loc {
	ALLOC_ERR = 0, ALLOC_BEFORE, ALLOC_MID, ALLOC_AFTER,
};

static resource_size_t scan_allocate(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
		resource_size_t n)
{
	resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1;
	bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	const resource_size_t to_allocate = n;
	struct resource *res;
	int first;

 retry:
	first = 0;
	for_each_dpa_resource(ndd, res) {
		resource_size_t allocate, available = 0, free_start, free_end;
		struct resource *next = res->sibling, *new_res = NULL;
		enum alloc_loc loc = ALLOC_ERR;
		const char *action;
		int rc = 0;

		/* ignore resources outside this nd_mapping */
		if (res->start > mapping_end)
			continue;
		if (res->end < nd_mapping->start)
			continue;

		/* space at the beginning of the mapping */
		if (!first++ && res->start > nd_mapping->start) {
			free_start = nd_mapping->start;
			available = res->start - free_start;
			if (space_valid(is_pmem, is_reserve, label_id, NULL))
				loc = ALLOC_BEFORE;
		}

		/* space between allocations */
		if (!loc && next) {
			free_start = res->start + resource_size(res);
			free_end = min(mapping_end, next->start - 1);
			if (space_valid(is_pmem, is_reserve, label_id, res)
					&& free_start < free_end) {
				available = free_end + 1 - free_start;
				loc = ALLOC_MID;
			}
		}

		/* space at the end of the mapping */
		if (!loc && !next) {
			free_start = res->start + resource_size(res);
			free_end = mapping_end;
			if (space_valid(is_pmem, is_reserve, label_id, res)
					&& free_start < free_end) {
				available = free_end + 1 - free_start;
				loc = ALLOC_AFTER;
			}
		}

		if (!loc || !available)
			continue;
		allocate = min(available, n);
		switch (loc) {
		case ALLOC_BEFORE:
			if (strcmp(res->name, label_id->id) == 0) {
				/* adjust current resource up */
				if (is_pmem && !is_reserve)
					return n;
				rc = adjust_resource(res, res->start - allocate,
						resource_size(res) + allocate);
				action = "cur grow up";
			} else
				action = "allocate";
			break;
		case ALLOC_MID:
			if (strcmp(next->name, label_id->id) == 0) {
				/* adjust next resource up */
				if (is_pmem && !is_reserve)
					return n;
				rc = adjust_resource(next, next->start
						- allocate, resource_size(next)
						+ allocate);
				new_res = next;
				action = "next grow up";
			} else if (strcmp(res->name, label_id->id) == 0) {
				action = "grow down";
			} else
				action = "allocate";
			break;
		case ALLOC_AFTER:
			if (strcmp(res->name, label_id->id) == 0)
				action = "grow down";
			else
				action = "allocate";
			break;
		default:
			return n;
		}

		if (strcmp(action, "allocate") == 0) {
			/* BLK allocate bottom up */
			if (!is_pmem)
				free_start += available - allocate;
			else if (!is_reserve && free_start != nd_mapping->start)
				return n;

			new_res = nvdimm_allocate_dpa(ndd, label_id,
					free_start, allocate);
			if (!new_res)
				rc = -EBUSY;
		} else if (strcmp(action, "grow down") == 0) {
			/* adjust current resource down */
			rc = adjust_resource(res, res->start, resource_size(res)
					+ allocate);
			if (rc == 0)
				res->flags |= DPA_RESOURCE_ADJUSTED;
		}

		if (!new_res)
			new_res = res;

		nd_dbg_dpa(nd_region, ndd, new_res, "%s(%d) %d\n",
				action, loc, rc);

		if (rc)
			return n;

		n -= allocate;
		if (n) {
			/*
			 * Retry scan with newly inserted resources.
			 * For example, if we did an ALLOC_BEFORE
			 * insertion there may also have been space
			 * available for an ALLOC_AFTER insertion, so we
			 * need to check this same resource again
			 */
			goto retry;
		} else
			return 0;
	}

	/*
	 * If we allocated nothing in the BLK case it may be because we are in
	 * an initial "pmem-reserve pass". Only do an initial BLK allocation
	 * when none of the DPA space is reserved.
	 */
	if ((is_pmem || !ndd->dpa.child) && n == to_allocate)
		return init_dpa_allocation(label_id, nd_region, nd_mapping, n);
	return n;
}
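
/*
 * Sketch of the contract above (paraphrased from the code, not an
 * authoritative summary): scan_allocate() returns the number of bytes that
 * could *not* be placed, so 0 means the full request landed. A pmem request
 * that cannot stay contiguous with its existing "pmem-<uuid>" resource bails
 * out early by returning @n untouched, and the caller (grow_dpa_allocation)
 * retries after reserving or releasing free pmem capacity.
 */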

static int merge_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res;

	if (strncmp("pmem", label_id->id, 4) == 0)
		return 0;
 retry:
	for_each_dpa_resource(ndd, res) {
		int rc;
		struct resource *next = res->sibling;
		resource_size_t end = res->start + resource_size(res);

		if (!next || strcmp(res->name, label_id->id) != 0
				|| strcmp(next->name, label_id->id) != 0
				|| end != next->start)
			continue;
		end += resource_size(next);
		nvdimm_free_dpa(ndd, next);
		rc = adjust_resource(res, res->start, end - res->start);
		nd_dbg_dpa(nd_region, ndd, res, "merge %d\n", rc);
		if (rc)
			return rc;
		res->flags |= DPA_RESOURCE_ADJUSTED;
		goto retry;
	}

	return 0;
}

static int __reserve_free_pmem(struct device *dev, void *data)
{
	struct nvdimm *nvdimm = data;
	struct nd_region *nd_region;
	struct nd_label_id label_id;
	int i;

	if (!is_nd_pmem(dev))
		return 0;

	nd_region = to_nd_region(dev);
	if (nd_region->ndr_mappings == 0)
		return 0;

	memset(&label_id, 0, sizeof(label_id));
	strcat(label_id.id, "pmem-reserve");
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		resource_size_t n, rem = 0;

		if (nd_mapping->nvdimm != nvdimm)
			continue;

		n = nd_pmem_available_dpa(nd_region, nd_mapping, &rem);
		if (n == 0)
			return 0;
		rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
		dev_WARN_ONCE(&nd_region->dev, rem,
				"pmem reserve underrun: %#llx of %#llx bytes\n",
				(unsigned long long) n - rem,
				(unsigned long long) n);
		return rem ? -ENXIO : 0;
	}

	return 0;
}

static void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res, *_res;

	for_each_dpa_resource_safe(ndd, res, _res)
		if (strcmp(res->name, "pmem-reserve") == 0)
			nvdimm_free_dpa(ndd, res);
}

static int reserve_free_pmem(struct nvdimm_bus *nvdimm_bus,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm *nvdimm = nd_mapping->nvdimm;
	int rc;

	rc = device_for_each_child(&nvdimm_bus->dev, nvdimm,
			__reserve_free_pmem);
	if (rc)
		release_free_pmem(nvdimm_bus, nd_mapping);
	return rc;
}

/**
 * grow_dpa_allocation - for each dimm allocate n bytes for @label_id
 * @nd_region: the set of dimms to allocate @n more bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to add to the existing allocation
 *
 * Assumes resources are ordered. For BLK regions, first consume
 * BLK-only available DPA free space, then consume PMEM-aliased DPA
 * space starting at the highest DPA. For PMEM regions start
 * allocations from the start of an interleave set and end at the first
 * BLK allocation or the end of the interleave set, whichever comes
 * first.
 */
static int grow_dpa_allocation(struct nd_region *nd_region,
		struct nd_label_id *label_id, resource_size_t n)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		resource_size_t rem = n;
		int rc, j;

		/*
		 * In the BLK case try once with all unallocated PMEM
		 * reserved, and once without
		 */
		for (j = is_pmem; j < 2; j++) {
			bool blk_only = j == 0;

			if (blk_only) {
				rc = reserve_free_pmem(nvdimm_bus, nd_mapping);
				if (rc)
					return rc;
			}
			rem = scan_allocate(nd_region, nd_mapping,
					label_id, rem);
			if (blk_only)
				release_free_pmem(nvdimm_bus, nd_mapping);

			/* try again and allow encroachments into PMEM */
			if (rem == 0)
				break;
		}

		dev_WARN_ONCE(&nd_region->dev, rem,
				"allocation underrun: %#llx of %#llx bytes\n",
				(unsigned long long) n - rem,
				(unsigned long long) n);
		if (rem)
			return -ENXIO;

		rc = merge_dpa(nd_region, nd_mapping, label_id);
		if (rc)
			return rc;
	}

	return 0;
}

static void nd_namespace_pmem_set_resource(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm, resource_size_t size)
{
	struct resource *res = &nspm->nsio.res;
	resource_size_t offset = 0;

	if (size && !nspm->uuid) {
		WARN_ON_ONCE(1);
		size = 0;
	}

	if (size && nspm->uuid) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_label_id label_id;
		struct resource *res;

		if (!ndd) {
			size = 0;
			goto out;
		}

		nd_label_gen_id(&label_id, nspm->uuid, 0);

		/* calculate a spa offset from the dpa allocation offset */
		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id.id) == 0) {
				offset = (res->start - nd_mapping->start)
					* nd_region->ndr_mappings;
				goto out;
			}

		WARN_ON_ONCE(1);
		size = 0;
	}

 out:
	res->start = nd_region->ndr_start + offset;
	res->end = res->start + size - 1;
}

static bool uuid_not_set(const u8 *uuid, struct device *dev, const char *where)
{
	if (!uuid) {
		dev_dbg(dev, "%s: uuid not set\n", where);
		return true;
	}
	return false;
}

static ssize_t __size_store(struct device *dev, unsigned long long val)
{
	resource_size_t allocated = 0, available = 0;
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_mapping *nd_mapping;
	struct nvdimm_drvdata *ndd;
	struct nd_label_id label_id;
	u32 flags = 0, remainder;
	u8 *uuid = NULL;
	int rc, i;

	if (dev->driver || to_ndns(dev)->claim)
		return -EBUSY;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = nsblk->uuid;
		flags = NSLABEL_FLAG_LOCAL;
	}

	/*
	 * We need a uuid for the allocation-label and dimm(s) on which
	 * to store the label.
	 */
	if (uuid_not_set(uuid, dev, __func__))
		return -ENXIO;
	if (nd_region->ndr_mappings == 0) {
		dev_dbg(dev, "%s: not associated with dimm(s)\n", __func__);
		return -ENXIO;
	}

	div_u64_rem(val, SZ_4K * nd_region->ndr_mappings, &remainder);
	if (remainder) {
		dev_dbg(dev, "%llu is not %dK aligned\n", val,
				(SZ_4K * nd_region->ndr_mappings) / SZ_1K);
		return -EINVAL;
	}

	nd_label_gen_id(&label_id, uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		ndd = to_ndd(nd_mapping);

		/*
		 * All dimms in an interleave set, or the base dimm for a blk
		 * region, need to be enabled for the size to be changed.
		 */
		if (!ndd)
			return -ENXIO;

		allocated += nvdimm_allocated_dpa(ndd, &label_id);
	}
	available = nd_region_available_dpa(nd_region);

	if (val > available + allocated)
		return -ENOSPC;

	if (val == allocated)
		return 0;

	val = div_u64(val, nd_region->ndr_mappings);
	allocated = div_u64(allocated, nd_region->ndr_mappings);
	if (val < allocated)
		rc = shrink_dpa_allocation(nd_region, &label_id,
				allocated - val);
	else
		rc = grow_dpa_allocation(nd_region, &label_id, val - allocated);

	if (rc)
		return rc;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		nd_namespace_pmem_set_resource(nd_region, nspm,
				val * nd_region->ndr_mappings);
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		/*
		 * Try to delete the namespace if we deleted all of its
		 * allocation, this is not the seed device for the
		 * region, and it is not actively claimed by a btt
		 * instance.
		 */
		if (val == 0 && nd_region->ns_seed != dev
				&& !nsblk->common.claim)
			nd_device_unregister(dev, ND_ASYNC);
	}

	return rc;
}

static ssize_t size_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	unsigned long long val;
	u8 **uuid = NULL;
	int rc;

	rc = kstrtoull(buf, 0, &val);
	if (rc)
		return rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __size_store(dev, val);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = &nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = &nsblk->uuid;
	}

	if (rc == 0 && val == 0 && uuid) {
		/* setting size zero == 'delete namespace' */
		kfree(*uuid);
		*uuid = NULL;
	}

	dev_dbg(dev, "%s: %llx %s (%d)\n", __func__, val, rc < 0
			? "fail" : "success", rc);

	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc < 0 ? rc : len;
}
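
/*
 * Illustrative usage (sysfs path is an example layout, not taken from this
 * file): resizing a pmem namespace from userspace boils down to a write
 * such as
 *
 *	echo $((2 * 1024 * 1024 * 1024)) > \
 *		/sys/bus/nd/devices/region0/namespace0.0/size
 *
 * __size_store() then splits the 2G request evenly across the interleave
 * set (e.g. 1G of DPA per dimm for a two-way set), after checking that the
 * value is a multiple of SZ_4K * ndr_mappings.
 */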

resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
	struct device *dev = &ndns->dev;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return resource_size(&nspm->nsio.res);
	} else if (is_namespace_blk(dev)) {
		return nd_namespace_blk_size(to_nd_namespace_blk(dev));
	} else if (is_namespace_io(dev)) {
		struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

		return resource_size(&nsio->res);
	} else
		WARN_ONCE(1, "unknown namespace type\n");
	return 0;
}

resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
	resource_size_t size;

	nvdimm_bus_lock(&ndns->dev);
	size = __nvdimm_namespace_capacity(ndns);
	nvdimm_bus_unlock(&ndns->dev);

	return size;
}
EXPORT_SYMBOL(nvdimm_namespace_capacity);

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%llu\n", (unsigned long long)
			nvdimm_namespace_capacity(to_ndns(dev)));
}
static DEVICE_ATTR(size, S_IRUGO, size_show, size_store);

static u8 *namespace_to_uuid(struct device *dev)
{
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		return nsblk->uuid;
	} else
		return ERR_PTR(-ENXIO);
}

static ssize_t uuid_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u8 *uuid = namespace_to_uuid(dev);

	if (IS_ERR(uuid))
		return PTR_ERR(uuid);
	if (uuid)
		return sprintf(buf, "%pUb\n", uuid);
	return sprintf(buf, "\n");
}

/**
 * namespace_update_uuid - check for a unique uuid and whether we're "renaming"
 * @nd_region: parent region so we can update all dimms in the set
 * @dev: namespace type for generating label_id
 * @new_uuid: incoming uuid
 * @old_uuid: reference to the uuid storage location in the namespace object
 */
static int namespace_update_uuid(struct nd_region *nd_region,
		struct device *dev, u8 *new_uuid, u8 **old_uuid)
{
	u32 flags = is_namespace_blk(dev) ? NSLABEL_FLAG_LOCAL : 0;
	struct nd_label_id old_label_id;
	struct nd_label_id new_label_id;
	int i;

	if (!nd_is_uuid_unique(dev, new_uuid))
		return -EINVAL;

	if (*old_uuid == NULL)
		goto out;

	/*
	 * If we've already written a label with this uuid, then it's
	 * too late to rename because we can't reliably update the uuid
	 * without losing the old namespace. Userspace must delete this
	 * namespace to abandon the old uuid.
	 */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		/*
		 * This check by itself is sufficient because old_uuid
		 * would be NULL above if this uuid did not exist in the
		 * currently written set.
		 *
		 * FIXME: can we delete uuid with zero dpa allocated?
		 */
		if (list_empty(&nd_mapping->labels))
			return -EBUSY;
	}

	nd_label_gen_id(&old_label_id, *old_uuid, flags);
	nd_label_gen_id(&new_label_id, new_uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;

		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, old_label_id.id) == 0)
				sprintf((void *) res->name, "%s",
						new_label_id.id);
	}
	kfree(*old_uuid);
 out:
	*old_uuid = new_uuid;
	return 0;
}

static ssize_t uuid_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	u8 *uuid = NULL;
	ssize_t rc = 0;
	u8 **ns_uuid;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_uuid = &nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		ns_uuid = &nsblk->uuid;
	} else
		return -ENXIO;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	if (to_ndns(dev)->claim)
		rc = -EBUSY;
	if (rc >= 0)
		rc = nd_uuid_store(dev, &uuid, buf, len);
	if (rc >= 0)
		rc = namespace_update_uuid(nd_region, dev, uuid, ns_uuid);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	else
		kfree(uuid);
	dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
			rc, buf, buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc < 0 ? rc : len;
}
static DEVICE_ATTR_RW(uuid);

static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct resource *res;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		res = &nspm->nsio.res;
	} else if (is_namespace_io(dev)) {
		struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

		res = &nsio->res;
	} else
		return -ENXIO;

	/* no address to convey if the namespace has no allocation */
	if (resource_size(res) == 0)
		return -ENXIO;
	return sprintf(buf, "%#llx\n", (unsigned long long) res->start);
}
static DEVICE_ATTR_RO(resource);

static const unsigned long ns_lbasize_supported[] = { 512, 520, 528,
	4096, 4104, 4160, 4224, 0 };

static ssize_t sector_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

	if (!is_namespace_blk(dev))
		return -ENXIO;

	return nd_sector_size_show(nsblk->lbasize, ns_lbasize_supported, buf);
}

static ssize_t sector_size_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);
	ssize_t rc = 0;

	if (!is_namespace_blk(dev))
		return -ENXIO;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	if (to_ndns(dev)->claim)
		rc = -EBUSY;
	if (rc >= 0)
		rc = nd_sector_size_store(dev, buf, &nsblk->lbasize,
				ns_lbasize_supported);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "%s: result: %zd %s: %s%s", __func__,
			rc, rc < 0 ? "tried" : "wrote", buf,
			buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc ? rc : len;
}
static DEVICE_ATTR_RW(sector_size);

static ssize_t dpa_extents_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_label_id label_id;
	int count = 0, i;
	u8 *uuid = NULL;
	u32 flags = 0;

	nvdimm_bus_lock(dev);
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = nspm->uuid;
		flags = 0;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = nsblk->uuid;
		flags = NSLABEL_FLAG_LOCAL;
	}

	if (!uuid)
		goto out;

	nd_label_gen_id(&label_id, uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;

		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id.id) == 0)
				count++;
	}
 out:
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(dpa_extents);

static ssize_t holder_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_namespace_common *ndns = to_ndns(dev);
	ssize_t rc;

	device_lock(dev);
	rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : "");
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(holder);

static ssize_t mode_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_namespace_common *ndns = to_ndns(dev);
	struct device *claim;
	char *mode;
	ssize_t rc;

	device_lock(dev);
	claim = ndns->claim;
	if (claim && is_nd_btt(claim))
		mode = "safe";
	else if (claim && is_nd_pfn(claim))
		mode = "memory";
	else if (claim && is_nd_dax(claim))
		mode = "dax";
	else if (!claim && pmem_should_map_pages(dev))
		mode = "memory";
	else
		mode = "raw";
	rc = sprintf(buf, "%s\n", mode);
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(mode);

static ssize_t force_raw_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool force_raw;
	int rc = strtobool(buf, &force_raw);

	if (rc)
		return rc;

	to_ndns(dev)->force_raw = force_raw;
	return len;
}

static ssize_t force_raw_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", to_ndns(dev)->force_raw);
}
static DEVICE_ATTR_RW(force_raw);

static struct attribute *nd_namespace_attributes[] = {
	&dev_attr_nstype.attr,
	&dev_attr_size.attr,
	&dev_attr_mode.attr,
	&dev_attr_uuid.attr,
	&dev_attr_holder.attr,
	&dev_attr_resource.attr,
	&dev_attr_alt_name.attr,
	&dev_attr_force_raw.attr,
	&dev_attr_sector_size.attr,
	&dev_attr_dpa_extents.attr,
	NULL,
};

static umode_t namespace_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);

	if (a == &dev_attr_resource.attr) {
		if (is_namespace_blk(dev))
			return 0;
		return a->mode;
	}

	if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
		if (a == &dev_attr_size.attr)
			return S_IWUSR | S_IRUGO;

		if (is_namespace_pmem(dev) && a == &dev_attr_sector_size.attr)
			return 0;

		return a->mode;
	}

	if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr
			|| a == &dev_attr_holder.attr
			|| a == &dev_attr_force_raw.attr
			|| a == &dev_attr_mode.attr)
		return a->mode;

	return 0;
}

static struct attribute_group nd_namespace_attribute_group = {
	.attrs = nd_namespace_attributes,
	.is_visible = namespace_visible,
};

static const struct attribute_group *nd_namespace_attribute_groups[] = {
	&nd_device_attribute_group,
	&nd_namespace_attribute_group,
	&nd_numa_attribute_group,
	NULL,
};

struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
{
	struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
	struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL;
	struct nd_dax *nd_dax = is_nd_dax(dev) ? to_nd_dax(dev) : NULL;
	struct nd_namespace_common *ndns = NULL;
	resource_size_t size;

	if (nd_btt || nd_pfn || nd_dax) {
		if (nd_btt)
			ndns = nd_btt->ndns;
		else if (nd_pfn)
			ndns = nd_pfn->ndns;
		else if (nd_dax)
			ndns = nd_dax->nd_pfn.ndns;

		if (!ndns)
			return ERR_PTR(-ENODEV);

		/*
		 * Flush any in-progress probes / removals in the driver
		 * for the raw personality of this namespace.
		 */
		device_lock(&ndns->dev);
		device_unlock(&ndns->dev);
		if (ndns->dev.driver) {
			dev_dbg(&ndns->dev, "is active, can't bind %s\n",
					dev_name(dev));
			return ERR_PTR(-EBUSY);
		}
		if (dev_WARN_ONCE(&ndns->dev, ndns->claim != dev,
					"host (%s) vs claim (%s) mismatch\n",
					dev_name(dev),
					dev_name(ndns->claim)))
			return ERR_PTR(-ENXIO);
	} else {
		ndns = to_ndns(dev);
		if (ndns->claim) {
			dev_dbg(dev, "claimed by %s, failing probe\n",
					dev_name(ndns->claim));

			return ERR_PTR(-ENXIO);
		}
	}

	size = nvdimm_namespace_capacity(ndns);
	if (size < ND_MIN_NAMESPACE_SIZE) {
		dev_dbg(&ndns->dev, "%pa, too small must be at least %#x\n",
				&size, ND_MIN_NAMESPACE_SIZE);
		return ERR_PTR(-ENODEV);
	}

	if (is_namespace_pmem(&ndns->dev)) {
		struct nd_namespace_pmem *nspm;

		nspm = to_nd_namespace_pmem(&ndns->dev);
		if (uuid_not_set(nspm->uuid, &ndns->dev, __func__))
			return ERR_PTR(-ENODEV);
	} else if (is_namespace_blk(&ndns->dev)) {
		struct nd_namespace_blk *nsblk;

		nsblk = to_nd_namespace_blk(&ndns->dev);
		if (uuid_not_set(nsblk->uuid, &ndns->dev, __func__))
			return ERR_PTR(-ENODEV);
		if (!nsblk->lbasize) {
			dev_dbg(&ndns->dev, "%s: sector size not set\n",
					__func__);
			return ERR_PTR(-ENODEV);
		}
		if (!nd_namespace_blk_validate(nsblk))
			return ERR_PTR(-ENODEV);
	}

	return ndns;
}
EXPORT_SYMBOL(nvdimm_namespace_common_probe);
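
/*
 * Typical caller pattern (an illustrative sketch of how consumers of this
 * export, e.g. the btt/pfn drivers, use it; not copied verbatim from them):
 *
 *	struct nd_namespace_common *ndns;
 *
 *	ndns = nvdimm_namespace_common_probe(claim_dev);
 *	if (IS_ERR(ndns))
 *		return PTR_ERR(ndns);
 *	... ndns is idle, correctly claimed, and at least
 *	    ND_MIN_NAMESPACE_SIZE bytes ...
 */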
1498
Dan Williams3d880022015-05-31 15:02:11 -04001499static struct device **create_namespace_io(struct nd_region *nd_region)
1500{
1501 struct nd_namespace_io *nsio;
1502 struct device *dev, **devs;
1503 struct resource *res;
1504
1505 nsio = kzalloc(sizeof(*nsio), GFP_KERNEL);
1506 if (!nsio)
1507 return NULL;
1508
1509 devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
1510 if (!devs) {
1511 kfree(nsio);
1512 return NULL;
1513 }
1514
Dan Williams8c2f7e82015-06-25 04:20:04 -04001515 dev = &nsio->common.dev;
Dan Williams3d880022015-05-31 15:02:11 -04001516 dev->type = &namespace_io_device_type;
1517 dev->parent = &nd_region->dev;
1518 res = &nsio->res;
1519 res->name = dev_name(&nd_region->dev);
1520 res->flags = IORESOURCE_MEM;
1521 res->start = nd_region->ndr_start;
1522 res->end = res->start + nd_region->ndr_size - 1;
1523
1524 devs[0] = dev;
1525 return devs;
1526}
1527
Dan Williamsbf9bccc2015-06-17 17:14:46 -04001528static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
1529 u64 cookie, u16 pos)
1530{
1531 struct nd_namespace_label *found = NULL;
1532 int i;
1533
1534 for (i = 0; i < nd_region->ndr_mappings; i++) {
1535 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
Dan Williamsae8219f2016-09-19 16:04:21 -07001536 struct nd_label_ent *label_ent;
Dan Williamsbf9bccc2015-06-17 17:14:46 -04001537 bool found_uuid = false;
Dan Williamsbf9bccc2015-06-17 17:14:46 -04001538
Dan Williamsae8219f2016-09-19 16:04:21 -07001539 list_for_each_entry(label_ent, &nd_mapping->labels, list) {
1540 struct nd_namespace_label *nd_label = label_ent->label;
1541 u16 position, nlabel;
1542 u64 isetcookie;
1543
1544 if (!nd_label)
1545 continue;
1546 isetcookie = __le64_to_cpu(nd_label->isetcookie);
1547 position = __le16_to_cpu(nd_label->position);
1548 nlabel = __le16_to_cpu(nd_label->nlabel);
Dan Williamsbf9bccc2015-06-17 17:14:46 -04001549
1550 if (isetcookie != cookie)
1551 continue;
1552
1553 if (memcmp(nd_label->uuid, uuid, NSLABEL_UUID_LEN) != 0)
1554 continue;
1555
1556 if (found_uuid) {
1557 dev_dbg(to_ndd(nd_mapping)->dev,
1558 "%s duplicate entry for uuid\n",
1559 __func__);
1560 return false;
1561 }
1562 found_uuid = true;
1563 if (nlabel != nd_region->ndr_mappings)
1564 continue;
1565 if (position != pos)
1566 continue;
1567 found = nd_label;
1568 break;
1569 }
1570 if (found)
1571 break;
1572 }
1573 return found != NULL;
1574}
1575
1576static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
1577{
Dan Williamsbf9bccc2015-06-17 17:14:46 -04001578 int i;
1579
1580 if (!pmem_id)
1581 return -ENODEV;
1582
1583 for (i = 0; i < nd_region->ndr_mappings; i++) {
1584 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
Dan Williams0e3b0d12016-10-06 23:13:15 -07001585 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
Dan Williamsae8219f2016-09-19 16:04:21 -07001586 struct nd_namespace_label *nd_label = NULL;
Dan Williamsbf9bccc2015-06-17 17:14:46 -04001587 u64 hw_start, hw_end, pmem_start, pmem_end;
Dan Williamsae8219f2016-09-19 16:04:21 -07001588 struct nd_label_ent *label_ent;
Dan Williamsbf9bccc2015-06-17 17:14:46 -04001589
Dan Williams8a5f50d2016-09-22 15:42:59 -07001590 WARN_ON(!mutex_is_locked(&nd_mapping->lock));
Dan Williamsae8219f2016-09-19 16:04:21 -07001591 list_for_each_entry(label_ent, &nd_mapping->labels, list) {
1592 nd_label = label_ent->label;
1593 if (!nd_label)
1594 continue;
Dan Williamsbf9bccc2015-06-17 17:14:46 -04001595 if (memcmp(nd_label->uuid, pmem_id, NSLABEL_UUID_LEN) == 0)
1596 break;
Dan Williamsae8219f2016-09-19 16:04:21 -07001597 nd_label = NULL;
1598 }
Dan Williamsbf9bccc2015-06-17 17:14:46 -04001599
1600 if (!nd_label) {
1601 WARN_ON(1);
1602 return -EINVAL;
1603 }
1604
Dan Williamsbf9bccc2015-06-17 17:14:46 -04001605 /*
1606 * Check that this label is compliant with the dpa
1607 * range published in NFIT
1608 */
1609 hw_start = nd_mapping->start;
1610 hw_end = hw_start + nd_mapping->size;
Dan Williamsae8219f2016-09-19 16:04:21 -07001611 pmem_start = __le64_to_cpu(nd_label->dpa);
1612 pmem_end = pmem_start + __le64_to_cpu(nd_label->rawsize);
Dan Williams0e3b0d12016-10-06 23:13:15 -07001613 if (pmem_start >= hw_start && pmem_start < hw_end
1614 && pmem_end <= hw_end && pmem_end > hw_start)
Dan Williamsbf9bccc2015-06-17 17:14:46 -04001615 /* pass */;
Dan Williams0e3b0d12016-10-06 23:13:15 -07001616 else {
1617 dev_dbg(&nd_region->dev, "%s invalid label for %pUb\n",
1618 dev_name(ndd->dev), nd_label->uuid);
Dan Williamsbf9bccc2015-06-17 17:14:46 -04001619 return -EINVAL;
Dan Williams0e3b0d12016-10-06 23:13:15 -07001620 }
Dan Williamsbf9bccc2015-06-17 17:14:46 -04001621
Dan Williams8a5f50d2016-09-22 15:42:59 -07001622 /* move recently validated label to the front of the list */
1623 list_move(&label_ent->list, &nd_mapping->labels);
Dan Williamsbf9bccc2015-06-17 17:14:46 -04001624 }
1625 return 0;
1626}
1627
1628/**
Dan Williams8a5f50d2016-09-22 15:42:59 -07001629 * create_namespace_pmem - validate interleave set labelling, retrieve label0
Dan Williamsbf9bccc2015-06-17 17:14:46 -04001630 * @nd_region: region with mappings to validate
1632 * @nd_label: target pmem namespace label to evaluate
Dan Williamsbf9bccc2015-06-17 17:14:46 -04001633 */
Dan Williams8a5f50d2016-09-22 15:42:59 -07001634struct device *create_namespace_pmem(struct nd_region *nd_region,
1635 struct nd_namespace_label *nd_label)
Dan Williamsbf9bccc2015-06-17 17:14:46 -04001636{
1637 u64 cookie = nd_region_interleave_set_cookie(nd_region);
Dan Williamsae8219f2016-09-19 16:04:21 -07001638 struct nd_label_ent *label_ent;
Dan Williams8a5f50d2016-09-22 15:42:59 -07001639 struct nd_namespace_pmem *nspm;
Dan Williamsae8219f2016-09-19 16:04:21 -07001640 struct nd_mapping *nd_mapping;
Dan Williamsbf9bccc2015-06-17 17:14:46 -04001641 resource_size_t size = 0;
Dan Williams8a5f50d2016-09-22 15:42:59 -07001642 struct resource *res;
1643 struct device *dev;
Dan Williamsae8219f2016-09-19 16:04:21 -07001644 int rc = 0;
Dan Williamsbf9bccc2015-06-17 17:14:46 -04001645 u16 i;
1646
Dan Williams47652182016-09-15 18:08:05 -07001647 if (cookie == 0) {
1648 dev_dbg(&nd_region->dev, "invalid interleave-set-cookie\n");
Dan Williams8a5f50d2016-09-22 15:42:59 -07001649 return ERR_PTR(-ENXIO);
Dan Williams47652182016-09-15 18:08:05 -07001650 }
Dan Williamsbf9bccc2015-06-17 17:14:46 -04001651
Dan Williams8a5f50d2016-09-22 15:42:59 -07001652 if (__le64_to_cpu(nd_label->isetcookie) != cookie) {
1653 dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n",
1654 nd_label->uuid);
1655 return ERR_PTR(-EAGAIN);
Dan Williamsae8219f2016-09-19 16:04:21 -07001656 }
Dan Williamsbf9bccc2015-06-17 17:14:46 -04001657
Dan Williams8a5f50d2016-09-22 15:42:59 -07001658 nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
1659 if (!nspm)
1660 return ERR_PTR(-ENOMEM);
Dan Williamsbf9bccc2015-06-17 17:14:46 -04001661
Dan Williams0e3b0d12016-10-06 23:13:15 -07001662 nspm->id = -1;
Dan Williams8a5f50d2016-09-22 15:42:59 -07001663 dev = &nspm->nsio.common.dev;
1664 dev->type = &namespace_pmem_device_type;
1665 dev->parent = &nd_region->dev;
1666 res = &nspm->nsio.res;
1667 res->name = dev_name(&nd_region->dev);
1668 res->flags = IORESOURCE_MEM;
1669
1670 for (i = 0; i < nd_region->ndr_mappings; i++)
1671 if (!has_uuid_at_pos(nd_region, nd_label->uuid, cookie, i))
Dan Williamsae8219f2016-09-19 16:04:21 -07001672 break;
Dan Williams8a5f50d2016-09-22 15:42:59 -07001673 if (i < nd_region->ndr_mappings) {
Dan Williams0e3b0d12016-10-06 23:13:15 -07001674 struct nvdimm_drvdata *ndd = to_ndd(&nd_region->mapping[i]);
1675
Dan Williams8a5f50d2016-09-22 15:42:59 -07001676 /*
1677 * Give up if we don't find an instance of a uuid at each
1678 * position (from 0 to nd_region->ndr_mappings - 1), or if we
1679 * find a dimm with two instances of the same uuid.
1680 */
Dan Williams0e3b0d12016-10-06 23:13:15 -07001681 dev_err(&nd_region->dev, "%s missing label for %pUb\n",
1682 dev_name(ndd->dev), nd_label->uuid);
Dan Williams8a5f50d2016-09-22 15:42:59 -07001683 rc = -EINVAL;
Dan Williamsae8219f2016-09-19 16:04:21 -07001684 goto err;
Dan Williams8a5f50d2016-09-22 15:42:59 -07001685 }
Dan Williamsbf9bccc2015-06-17 17:14:46 -04001686
1687 /*
1688 * Fix up each mapping's 'labels' to have the validated pmem label for
1689 * that position at labels[0], and NULL at labels[1]. In the process,
1690 * check that the namespace aligns with the interleave set. We know
1691 * that it does not overlap with any blk namespaces by virtue of
1692 * the dimm being enabled (i.e. nd_label_reserve_dpa()
1693 * succeeded).
1694 */
Dan Williams8a5f50d2016-09-22 15:42:59 -07001695 rc = select_pmem_id(nd_region, nd_label->uuid);
Dan Williamsbf9bccc2015-06-17 17:14:46 -04001696 if (rc)
1697 goto err;
1698
1699 /* Calculate total size and populate namespace properties from label0 */
1700 for (i = 0; i < nd_region->ndr_mappings; i++) {
Dan Williamsae8219f2016-09-19 16:04:21 -07001701 struct nd_namespace_label *label0;
1702
1703 nd_mapping = &nd_region->mapping[i];
Dan Williamsae8219f2016-09-19 16:04:21 -07001704 label_ent = list_first_entry_or_null(&nd_mapping->labels,
1705 typeof(*label_ent), list);
1706 label0 = label_ent ? label_ent->label : NULL;
Dan Williamsae8219f2016-09-19 16:04:21 -07001707
1708 if (!label0) {
1709 WARN_ON(1);
1710 continue;
1711 }
Dan Williamsbf9bccc2015-06-17 17:14:46 -04001712
1713 size += __le64_to_cpu(label0->rawsize);
1714 if (__le16_to_cpu(label0->position) != 0)
1715 continue;
1716 WARN_ON(nspm->alt_name || nspm->uuid);
1717 nspm->alt_name = kmemdup((void __force *) label0->name,
1718 NSLABEL_NAME_LEN, GFP_KERNEL);
1719 nspm->uuid = kmemdup((void __force *) label0->uuid,
1720 NSLABEL_UUID_LEN, GFP_KERNEL);
1721 }
1722
1723 if (!nspm->alt_name || !nspm->uuid) {
1724 rc = -ENOMEM;
1725 goto err;
1726 }
1727
Dan Williams0e3b0d12016-10-06 23:13:15 -07001728 nd_namespace_pmem_set_resource(nd_region, nspm, size);
Dan Williamsbf9bccc2015-06-17 17:14:46 -04001729
Dan Williams8a5f50d2016-09-22 15:42:59 -07001730 return dev;
Dan Williamsbf9bccc2015-06-17 17:14:46 -04001731 err:
Dan Williams8a5f50d2016-09-22 15:42:59 -07001732 namespace_pmem_release(dev);
Dan Williamsbf9bccc2015-06-17 17:14:46 -04001733 switch (rc) {
1734 case -EINVAL:
1735 dev_dbg(&nd_region->dev, "%s: invalid label(s)\n", __func__);
1736 break;
1737 case -ENODEV:
1738 dev_dbg(&nd_region->dev, "%s: label not found\n", __func__);
1739 break;
1740 default:
1741 dev_dbg(&nd_region->dev, "%s: unexpected err: %d\n",
1742 __func__, rc);
1743 break;
1744 }
Dan Williams8a5f50d2016-09-22 15:42:59 -07001745 return ERR_PTR(rc);
Dan Williamsbf9bccc2015-06-17 17:14:46 -04001746}
1747
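/*
 * nsblk_add_resource() - track another dpa extent for a blk namespace
 *
 * Grow the nsblk->res pointer array by one slot and point the new slot
 * at the dimm's dpa resource that carries this namespace's label_id and
 * starts at @start.  Returns NULL if the reallocation fails or no
 * matching dpa resource is found.
 */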
Dan Williams1b40e092015-05-01 13:34:01 -04001748struct resource *nsblk_add_resource(struct nd_region *nd_region,
1749 struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
1750 resource_size_t start)
1751{
1752 struct nd_label_id label_id;
1753 struct resource *res;
1754
1755 nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
1756 res = krealloc(nsblk->res,
1757 sizeof(void *) * (nsblk->num_resources + 1),
1758 GFP_KERNEL);
1759 if (!res)
1760 return NULL;
1761 nsblk->res = (struct resource **) res;
1762 for_each_dpa_resource(ndd, res)
1763 if (strcmp(res->name, label_id.id) == 0
1764 && res->start == start) {
1765 nsblk->res[nsblk->num_resources++] = res;
1766 return res;
1767 }
1768 return NULL;
1769}
1770
1771static struct device *nd_namespace_blk_create(struct nd_region *nd_region)
1772{
1773 struct nd_namespace_blk *nsblk;
1774 struct device *dev;
1775
1776 if (!is_nd_blk(&nd_region->dev))
1777 return NULL;
1778
1779 nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
1780 if (!nsblk)
1781 return NULL;
1782
Dan Williams8c2f7e82015-06-25 04:20:04 -04001783 dev = &nsblk->common.dev;
Dan Williams1b40e092015-05-01 13:34:01 -04001784 dev->type = &namespace_blk_device_type;
1785 nsblk->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
1786 if (nsblk->id < 0) {
1787 kfree(nsblk);
1788 return NULL;
1789 }
1790 dev_set_name(dev, "namespace%d.%d", nd_region->id, nsblk->id);
1791 dev->parent = &nd_region->dev;
1792 dev->groups = nd_namespace_attribute_groups;
1793
Dan Williams8c2f7e82015-06-25 04:20:04 -04001794 return &nsblk->common.dev;
Dan Williams1b40e092015-05-01 13:34:01 -04001795}
1796
1797void nd_region_create_blk_seed(struct nd_region *nd_region)
1798{
1799 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
1800 nd_region->ns_seed = nd_namespace_blk_create(nd_region);
1801 /*
1802 * Seed creation failures are not fatal; provisioning is simply
1803 * disabled until memory becomes available.
1804 */
1805 if (!nd_region->ns_seed)
1806 dev_err(&nd_region->dev, "failed to create blk namespace\n");
1807 else
1808 nd_device_register(nd_region->ns_seed);
1809}
1810
Dan Williamscd034122016-03-11 10:15:36 -08001811void nd_region_create_dax_seed(struct nd_region *nd_region)
1812{
1813 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
1814 nd_region->dax_seed = nd_dax_create(nd_region);
1815 /*
1816 * Seed creation failures are not fatal; provisioning is simply
1817 * disabled until memory becomes available.
1818 */
1819 if (!nd_region->dax_seed)
1820 dev_err(&nd_region->dev, "failed to create dax namespace\n");
1821}
1822
Dan Williams2dc43332015-12-13 11:41:36 -08001823void nd_region_create_pfn_seed(struct nd_region *nd_region)
1824{
1825 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
1826 nd_region->pfn_seed = nd_pfn_create(nd_region);
1827 /*
1828 * Seed creation failures are not fatal; provisioning is simply
1829 * disabled until memory becomes available.
1830 */
1831 if (!nd_region->pfn_seed)
1832 dev_err(&nd_region->dev, "failed to create pfn namespace\n");
1833}
1834
Dan Williams8c2f7e82015-06-25 04:20:04 -04001835void nd_region_create_btt_seed(struct nd_region *nd_region)
1836{
1837 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
1838 nd_region->btt_seed = nd_btt_create(nd_region);
1839 /*
1840 * Seed creation failures are not fatal; provisioning is simply
1841 * disabled until memory becomes available.
1842 */
1843 if (!nd_region->btt_seed)
1844 dev_err(&nd_region->dev, "failed to create btt namespace\n");
1845}
1846
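/*
 * add_namespace_resource() - attach a label's dpa extent to an existing device
 *
 * Scan the namespace devices discovered so far for one whose uuid matches
 * @nd_label.  For a blk namespace the label's dpa extent is appended via
 * nsblk_add_resource(); a uuid collision with any other namespace type is
 * reported as a conflict.  Returns the index of the matching device,
 * @count when there is no match, or a negative errno on failure.
 */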
Dan Williams8a5f50d2016-09-22 15:42:59 -07001847static int add_namespace_resource(struct nd_region *nd_region,
1848 struct nd_namespace_label *nd_label, struct device **devs,
1849 int count)
Dan Williams1b40e092015-05-01 13:34:01 -04001850{
Dan Williams8a5f50d2016-09-22 15:42:59 -07001851 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
Dan Williamsae8219f2016-09-19 16:04:21 -07001852 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
Dan Williams8a5f50d2016-09-22 15:42:59 -07001853 int i;
1854
1855 for (i = 0; i < count; i++) {
1856 u8 *uuid = namespace_to_uuid(devs[i]);
1857 struct resource *res;
1858
1859 if (IS_ERR_OR_NULL(uuid)) {
1860 WARN_ON(1);
1861 continue;
1862 }
1863
1864 if (memcmp(uuid, nd_label->uuid, NSLABEL_UUID_LEN) != 0)
1865 continue;
1866 if (is_namespace_blk(devs[i])) {
1867 res = nsblk_add_resource(nd_region, ndd,
1868 to_nd_namespace_blk(devs[i]),
1869 __le64_to_cpu(nd_label->dpa));
1870 if (!res)
1871 return -ENXIO;
1872 nd_dbg_dpa(nd_region, ndd, res, "%d assign\n", count);
1873 } else {
1874 dev_err(&nd_region->dev,
1875 "error: conflicting extents for uuid: %pUb\n",
1876 nd_label->uuid);
1877 return -ENXIO;
1878 }
1879 break;
1880 }
1881
1882 return i;
1883}
1884
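/*
 * create_namespace_blk() - allocate a blk namespace device from @nd_label
 *
 * Instantiate an nd_namespace_blk, copy the label's lbasize, uuid, and
 * optional name, and record the label's starting dpa extent for the new
 * device.  Returns an ERR_PTR() on allocation or extent-lookup failure.
 */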
1885struct device *create_namespace_blk(struct nd_region *nd_region,
1886 struct nd_namespace_label *nd_label, int count)
1887{
1889 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
1890 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
Dan Williams1b40e092015-05-01 13:34:01 -04001891 struct nd_namespace_blk *nsblk;
Dan Williams8a5f50d2016-09-22 15:42:59 -07001892 char name[NSLABEL_NAME_LEN];
1893 struct device *dev = NULL;
1894 struct resource *res;
1895
1896 nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
1897 if (!nsblk)
1898 return ERR_PTR(-ENOMEM);
1899 dev = &nsblk->common.dev;
1900 dev->type = &namespace_blk_device_type;
1901 dev->parent = &nd_region->dev;
1902 nsblk->id = -1;
1903 nsblk->lbasize = __le64_to_cpu(nd_label->lbasize);
1904 nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN,
1905 GFP_KERNEL);
1906 if (!nsblk->uuid)
1907 goto blk_err;
1908 memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
1909 if (name[0])
1910 nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
1911 GFP_KERNEL);
1912 res = nsblk_add_resource(nd_region, ndd, nsblk,
1913 __le64_to_cpu(nd_label->dpa));
1914 if (!res)
1915 goto blk_err;
1916 nd_dbg_dpa(nd_region, ndd, res, "%d: assign\n", count);
1917 return dev;
1918 blk_err:
1919 namespace_blk_release(dev);
1920 return ERR_PTR(-ENXIO);
1921}
1922
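/*
 * cmp_dpa() - sort() comparator for namespace devices
 *
 * Compares the start of the first dpa resource for blk namespaces and
 * the start of the io resource for pmem namespaces; plain io namespaces
 * always compare equal.
 */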
Dan Williams6ff3e912016-10-05 14:04:15 -07001923static int cmp_dpa(const void *a, const void *b)
1924{
1925 const struct device *dev_a = *(const struct device **) a;
1926 const struct device *dev_b = *(const struct device **) b;
1927 struct nd_namespace_blk *nsblk_a, *nsblk_b;
1928 struct nd_namespace_pmem *nspm_a, *nspm_b;
1929
1930 if (is_namespace_io(dev_a))
1931 return 0;
1932
1933 if (is_namespace_blk(dev_a)) {
1934 nsblk_a = to_nd_namespace_blk(dev_a);
1935 nsblk_b = to_nd_namespace_blk(dev_b);
1936
1937 return memcmp(&nsblk_a->res[0]->start, &nsblk_b->res[0]->start,
1938 sizeof(resource_size_t));
1939 }
1940
1941 nspm_a = to_nd_namespace_pmem(dev_a);
1942 nspm_b = to_nd_namespace_pmem(dev_b);
1943
1944 return memcmp(&nspm_a->nsio.res.start, &nspm_b->nsio.res.start,
1945 sizeof(resource_size_t));
1946}
1947
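/*
 * scan_labels() - convert the labels on mapping 0 into namespace devices
 *
 * Iterate the first mapping's label list, matching blk labels (LOCAL
 * flag set) to blk regions and pmem labels to pmem regions, and build a
 * NULL-terminated array of namespace devices.  If no label yields a
 * device, a zero-sized seed namespace is published for userspace to
 * configure; for pmem regions any labels beyond the selected set are
 * dropped from each mapping.  The result is sorted by starting address
 * when more than one device is found.
 */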
Dan Williams8a5f50d2016-09-22 15:42:59 -07001948static struct device **scan_labels(struct nd_region *nd_region)
1949{
1950 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
1951 struct device *dev, **devs = NULL;
1952 struct nd_label_ent *label_ent, *e;
Dan Williamsae8219f2016-09-19 16:04:21 -07001953 int i, count = 0;
Dan Williams1b40e092015-05-01 13:34:01 -04001954
Dan Williams8a5f50d2016-09-22 15:42:59 -07001955 /* "safe" because create_namespace_pmem() might list_move() label_ent */
1956 list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
Dan Williamsae8219f2016-09-19 16:04:21 -07001957 struct nd_namespace_label *nd_label = label_ent->label;
Dan Williams1b40e092015-05-01 13:34:01 -04001958 struct device **__devs;
Dan Williamsae8219f2016-09-19 16:04:21 -07001959 u32 flags;
Dan Williams1b40e092015-05-01 13:34:01 -04001960
Dan Williamsae8219f2016-09-19 16:04:21 -07001961 if (!nd_label)
1962 continue;
1963 flags = __le32_to_cpu(nd_label->flags);
Dan Williams8a5f50d2016-09-22 15:42:59 -07001964 if (is_nd_blk(&nd_region->dev)
1965 == !!(flags & NSLABEL_FLAG_LOCAL))
1966 /* pass, region matches label type */;
Dan Williams1b40e092015-05-01 13:34:01 -04001967 else
1968 continue;
1969
Dan Williams8a5f50d2016-09-22 15:42:59 -07001970 i = add_namespace_resource(nd_region, nd_label, devs, count);
1971 if (i < 0)
1972 goto err;
Dan Williams1b40e092015-05-01 13:34:01 -04001973 if (i < count)
1974 continue;
1975 __devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
1976 if (!__devs)
1977 goto err;
1978 memcpy(__devs, devs, sizeof(dev) * count);
1979 kfree(devs);
1980 devs = __devs;
1981
Dan Williams8a5f50d2016-09-22 15:42:59 -07001982 if (is_nd_blk(&nd_region->dev)) {
1983 dev = create_namespace_blk(nd_region, nd_label, count);
1984 if (IS_ERR(dev))
1985 goto err;
1986 devs[count++] = dev;
1987 } else {
1988 dev = create_namespace_pmem(nd_region, nd_label);
1989 if (IS_ERR(dev)) {
1990 switch (PTR_ERR(dev)) {
1991 case -EAGAIN:
1992 /* skip invalid labels */
1993 continue;
1994 case -ENODEV:
1995 /* fallthrough to seed creation */
1996 break;
1997 default:
1998 goto err;
1999 }
2000 } else
2001 devs[count++] = dev;
2002
2003 /* we only expect one valid pmem label set per region */
2004 break;
2005 }
Dan Williams1b40e092015-05-01 13:34:01 -04002006 }
2007
Dan Williams8a5f50d2016-09-22 15:42:59 -07002008 dev_dbg(&nd_region->dev, "%s: discovered %d %s namespace%s\n",
2009 __func__, count, is_nd_blk(&nd_region->dev)
2010 ? "blk" : "pmem", count == 1 ? "" : "s");
Dan Williams1b40e092015-05-01 13:34:01 -04002011
2012 if (count == 0) {
2013 /* Publish a zero-sized namespace for userspace to configure. */
Dan Williamsae8219f2016-09-19 16:04:21 -07002014 nd_mapping_free_labels(nd_mapping);
Dan Williams1b40e092015-05-01 13:34:01 -04002015
2016 devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
2017 if (!devs)
2018 goto err;
Dan Williams8a5f50d2016-09-22 15:42:59 -07002019 if (is_nd_blk(&nd_region->dev)) {
2020 struct nd_namespace_blk *nsblk;
2021
2022 nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
2023 if (!nsblk)
2024 goto err;
2025 dev = &nsblk->common.dev;
2026 dev->type = &namespace_blk_device_type;
2027 } else {
2028 struct nd_namespace_pmem *nspm;
2029
2030 nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
2031 if (!nspm)
2032 goto err;
2033 dev = &nspm->nsio.common.dev;
2034 dev->type = &namespace_pmem_device_type;
Dan Williams0e3b0d12016-10-06 23:13:15 -07002035 nd_namespace_pmem_set_resource(nd_region, nspm, 0);
Dan Williams8a5f50d2016-09-22 15:42:59 -07002036 }
Dan Williams1b40e092015-05-01 13:34:01 -04002037 dev->parent = &nd_region->dev;
2038 devs[count++] = dev;
Dan Williams8a5f50d2016-09-22 15:42:59 -07002039 } else if (is_nd_pmem(&nd_region->dev)) {
2040 /* clean unselected labels */
2041 for (i = 0; i < nd_region->ndr_mappings; i++) {
Dan Williams0e3b0d12016-10-06 23:13:15 -07002042 struct list_head *l, *e;
2043 LIST_HEAD(list);
2044 int j;
2045
Dan Williams8a5f50d2016-09-22 15:42:59 -07002046 nd_mapping = &nd_region->mapping[i];
2047 if (list_empty(&nd_mapping->labels)) {
2048 WARN_ON(1);
2049 continue;
2050 }
Dan Williams0e3b0d12016-10-06 23:13:15 -07002051
2052 j = count;
2053 list_for_each_safe(l, e, &nd_mapping->labels) {
2054 if (!j--)
2055 break;
2056 list_move_tail(l, &list);
2057 }
Dan Williams8a5f50d2016-09-22 15:42:59 -07002058 nd_mapping_free_labels(nd_mapping);
Dan Williams0e3b0d12016-10-06 23:13:15 -07002059 list_splice_init(&list, &nd_mapping->labels);
Dan Williams8a5f50d2016-09-22 15:42:59 -07002060 }
Dan Williams1b40e092015-05-01 13:34:01 -04002061 }
2062
Dan Williams6ff3e912016-10-05 14:04:15 -07002063 if (count > 1)
2064 sort(devs, count, sizeof(struct device *), cmp_dpa, NULL);
2065
Dan Williams1b40e092015-05-01 13:34:01 -04002066 return devs;
2067
Dan Williamsae8219f2016-09-19 16:04:21 -07002068 err:
Dan Williams8a5f50d2016-09-22 15:42:59 -07002069 for (i = 0; devs && devs[i]; i++)
2070 if (is_nd_blk(&nd_region->dev))
2071 namespace_blk_release(devs[i]);
2072 else
2073 namespace_pmem_release(devs[i]);
Dan Williams1b40e092015-05-01 13:34:01 -04002074 kfree(devs);
2075 return NULL;
2076}
2077
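/*
 * create_namespaces() - run scan_labels() with every mapping lock held
 *
 * Take each nd_mapping->lock in mapping order (mutex_lock_nested() with
 * the mapping index as the lockdep subclass keeps the ordered
 * acquisition from triggering false lockdep reports), scan the labels,
 * then drop the locks in reverse order.
 */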
Dan Williams8a5f50d2016-09-22 15:42:59 -07002078static struct device **create_namespaces(struct nd_region *nd_region)
Dan Williamsae8219f2016-09-19 16:04:21 -07002079{
2080 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
2081 struct device **devs;
Dan Williams8a5f50d2016-09-22 15:42:59 -07002082 int i;
Dan Williamsae8219f2016-09-19 16:04:21 -07002083
2084 if (nd_region->ndr_mappings == 0)
2085 return NULL;
2086
Dan Williams8a5f50d2016-09-22 15:42:59 -07002087 /* lock down all mappings while we scan labels */
2088 for (i = 0; i < nd_region->ndr_mappings; i++) {
2089 nd_mapping = &nd_region->mapping[i];
2090 mutex_lock_nested(&nd_mapping->lock, i);
2091 }
2092
2093 devs = scan_labels(nd_region);
2094
2095 for (i = 0; i < nd_region->ndr_mappings; i++) {
2096 int reverse = nd_region->ndr_mappings - 1 - i;
2097
2098 nd_mapping = &nd_region->mapping[reverse];
2099 mutex_unlock(&nd_mapping->lock);
2100 }
Dan Williamsae8219f2016-09-19 16:04:21 -07002101
2102 return devs;
2103}
2104
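/*
 * init_active_labels() - populate nd_mapping->labels from each dimm
 *
 * For every mapping, pin the dimm and its label data, then copy each
 * active label into a freshly allocated nd_label_ent on the mapping's
 * list.  A disabled dimm that aliases DPA fails the region probe, and an
 * allocation failure unwinds the partially built list with -ENOMEM.
 */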
Dan Williamsbf9bccc2015-06-17 17:14:46 -04002105static int init_active_labels(struct nd_region *nd_region)
2106{
2107 int i;
2108
2109 for (i = 0; i < nd_region->ndr_mappings; i++) {
2110 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
2111 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
2112 struct nvdimm *nvdimm = nd_mapping->nvdimm;
Dan Williamsae8219f2016-09-19 16:04:21 -07002113 struct nd_label_ent *label_ent;
Dan Williamsbf9bccc2015-06-17 17:14:46 -04002114 int count, j;
2115
2116 /*
2117 * If the dimm is disabled then prevent the region from
2118 * being activated if it aliases DPA.
2119 */
2120 if (!ndd) {
2121 if ((nvdimm->flags & NDD_ALIASING) == 0)
2122 return 0;
2123 dev_dbg(&nd_region->dev, "%s: is disabled, failing probe\n",
2124 dev_name(&nd_mapping->nvdimm->dev));
2125 return -ENXIO;
2126 }
2127 nd_mapping->ndd = ndd;
2128 atomic_inc(&nvdimm->busy);
2129 get_ndd(ndd);
2130
2131 count = nd_label_active_count(ndd);
2132 dev_dbg(ndd->dev, "%s: %d\n", __func__, count);
2133 if (!count)
2134 continue;
Dan Williamsbf9bccc2015-06-17 17:14:46 -04002135 for (j = 0; j < count; j++) {
2136 struct nd_namespace_label *label;
2137
Dan Williamsae8219f2016-09-19 16:04:21 -07002138 label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
2139 if (!label_ent)
2140 break;
Dan Williamsbf9bccc2015-06-17 17:14:46 -04002141 label = nd_label_active(ndd, j);
Dan Williamsae8219f2016-09-19 16:04:21 -07002142 label_ent->label = label;
2143
2144 mutex_lock(&nd_mapping->lock);
2145 list_add_tail(&label_ent->list, &nd_mapping->labels);
2146 mutex_unlock(&nd_mapping->lock);
Dan Williamsbf9bccc2015-06-17 17:14:46 -04002147 }
Dan Williamsae8219f2016-09-19 16:04:21 -07002148
2149 if (j >= count)
2150 continue;
2151
2152 mutex_lock(&nd_mapping->lock);
2153 nd_mapping_free_labels(nd_mapping);
2154 mutex_unlock(&nd_mapping->lock);
2155 return -ENOMEM;
Dan Williamsbf9bccc2015-06-17 17:14:46 -04002156 }
2157
2158 return 0;
2159}
2160
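/*
 * nd_region_register_namespaces() - discover and register namespace devices
 *
 * Typically called from the region probe path.  Namespace devices are
 * created according to the region type (io, pmem, or blk), given ids,
 * and handed to nd_device_register().  The count of devices that could
 * not be registered is returned through @err; the return value is the
 * number of devices registered, or a negative errno when discovery
 * fails outright.
 */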
Dan Williams3d880022015-05-31 15:02:11 -04002161int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
2162{
2163 struct device **devs = NULL;
Dan Williamsbf9bccc2015-06-17 17:14:46 -04002164 int i, rc = 0, type;
Dan Williams3d880022015-05-31 15:02:11 -04002165
2166 *err = 0;
Dan Williamsbf9bccc2015-06-17 17:14:46 -04002167 nvdimm_bus_lock(&nd_region->dev);
2168 rc = init_active_labels(nd_region);
2169 if (rc) {
2170 nvdimm_bus_unlock(&nd_region->dev);
2171 return rc;
2172 }
2173
2174 type = nd_region_to_nstype(nd_region);
2175 switch (type) {
Dan Williams3d880022015-05-31 15:02:11 -04002176 case ND_DEVICE_NAMESPACE_IO:
2177 devs = create_namespace_io(nd_region);
2178 break;
Dan Williamsbf9bccc2015-06-17 17:14:46 -04002179 case ND_DEVICE_NAMESPACE_PMEM:
Dan Williams1b40e092015-05-01 13:34:01 -04002180 case ND_DEVICE_NAMESPACE_BLK:
Dan Williams8a5f50d2016-09-22 15:42:59 -07002181 devs = create_namespaces(nd_region);
Dan Williams1b40e092015-05-01 13:34:01 -04002182 break;
Dan Williams3d880022015-05-31 15:02:11 -04002183 default:
2184 break;
2185 }
Dan Williamsbf9bccc2015-06-17 17:14:46 -04002186 nvdimm_bus_unlock(&nd_region->dev);
Dan Williams3d880022015-05-31 15:02:11 -04002187
2188 if (!devs)
2189 return -ENODEV;
2190
Dan Williams3d880022015-05-31 15:02:11 -04002191 for (i = 0; devs[i]; i++) {
2192 struct device *dev = devs[i];
Dan Williams1b40e092015-05-01 13:34:01 -04002193 int id;
Dan Williams3d880022015-05-31 15:02:11 -04002194
Dan Williams1b40e092015-05-01 13:34:01 -04002195 if (type == ND_DEVICE_NAMESPACE_BLK) {
2196 struct nd_namespace_blk *nsblk;
2197
2198 nsblk = to_nd_namespace_blk(dev);
2199 id = ida_simple_get(&nd_region->ns_ida, 0, 0,
2200 GFP_KERNEL);
2201 nsblk->id = id;
Dan Williams0e3b0d12016-10-06 23:13:15 -07002202 } else if (type == ND_DEVICE_NAMESPACE_PMEM) {
2203 struct nd_namespace_pmem *nspm;
2204
2205 nspm = to_nd_namespace_pmem(dev);
2206 id = ida_simple_get(&nd_region->ns_ida, 0, 0,
2207 GFP_KERNEL);
2208 nspm->id = id;
Dan Williams1b40e092015-05-01 13:34:01 -04002209 } else
2210 id = i;
2211
2212 if (id < 0)
2213 break;
2214 dev_set_name(dev, "namespace%d.%d", nd_region->id, id);
Dan Williams3d880022015-05-31 15:02:11 -04002215 dev->groups = nd_namespace_attribute_groups;
2216 nd_device_register(dev);
2217 }
Dan Williams1b40e092015-05-01 13:34:01 -04002218 if (i)
2219 nd_region->ns_seed = devs[0];
2220
2221 if (devs[i]) {
2222 int j;
2223
2224 for (j = i; devs[j]; j++) {
2225 struct device *dev = devs[j];
2226
2227 device_initialize(dev);
2228 put_device(dev);
2229 }
2230 *err = j - i;
2231 /*
2232 * All of the namespaces we tried to register failed, so
2233 * fail region activation.
2234 */
2235 if (*err == 0)
2236 rc = -ENODEV;
2237 }
Dan Williams3d880022015-05-31 15:02:11 -04002238 kfree(devs);
2239
Dan Williams1b40e092015-05-01 13:34:01 -04002240 if (rc == -ENODEV)
2241 return rc;
2242
Dan Williams3d880022015-05-31 15:02:11 -04002243 return i;
2244}