/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

static void namespace_io_release(struct device *dev)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

	kfree(nsio);
}

static void namespace_pmem_release(struct device *dev)
{
	struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

	kfree(nspm->alt_name);
	kfree(nspm->uuid);
	kfree(nspm);
}

static void namespace_blk_release(struct device *dev)
{
	struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);

	if (nsblk->id >= 0)
		ida_simple_remove(&nd_region->ns_ida, nsblk->id);
	kfree(nsblk->alt_name);
	kfree(nsblk->uuid);
	kfree(nsblk->res);
	kfree(nsblk);
}

static struct device_type namespace_io_device_type = {
	.name = "nd_namespace_io",
	.release = namespace_io_release,
};

static struct device_type namespace_pmem_device_type = {
	.name = "nd_namespace_pmem",
	.release = namespace_pmem_release,
};

static struct device_type namespace_blk_device_type = {
	.name = "nd_namespace_blk",
	.release = namespace_blk_release,
};

static bool is_namespace_pmem(struct device *dev)
{
	return dev ? dev->type == &namespace_pmem_device_type : false;
}

static bool is_namespace_blk(struct device *dev)
{
	return dev ? dev->type == &namespace_blk_device_type : false;
}

static bool is_namespace_io(struct device *dev)
{
	return dev ? dev->type == &namespace_io_device_type : false;
}

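/**
 * nvdimm_namespace_disk_name - build the expected disk name for a namespace
 * @ndns: namespace to name
 * @name: output buffer, sized by the caller to hold the formatted name
 *
 * For example (assuming region id 0 and blk namespace id 1): "pmem0"
 * for a pmem/io namespace, "ndblk0.1" for a blk namespace, with an "s"
 * suffix (e.g. "pmem0s") when the namespace is claimed by a btt
 * instance.
 */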
const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
		char *name)
{
	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
	const char *suffix = "";

	if (ndns->claim && is_nd_btt(ndns->claim))
		suffix = "s";

	if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev))
		sprintf(name, "pmem%d%s", nd_region->id, suffix);
	else if (is_namespace_blk(&ndns->dev)) {
		struct nd_namespace_blk *nsblk;

		nsblk = to_nd_namespace_blk(&ndns->dev);
		sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id, suffix);
	} else {
		return NULL;
	}

	return name;
}
EXPORT_SYMBOL(nvdimm_namespace_disk_name);

const u8 *nd_dev_to_uuid(struct device *dev)
{
	static const u8 null_uuid[16];

	if (!dev)
		return null_uuid;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		return nsblk->uuid;
	} else
		return null_uuid;
}
EXPORT_SYMBOL(nd_dev_to_uuid);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

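/*
 * Validate and record a new alternate name for the namespace. Expects
 * the namespace to be idle (no driver bound, no claim) and the callers
 * in this file hold the nvdimm bus lock. The trimmed input must fit in
 * NSLABEL_NAME_LEN, including the terminating NUL.
 */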
static ssize_t __alt_name_store(struct device *dev, const char *buf,
		const size_t len)
{
	char *input, *pos, *alt_name, **ns_altname;
	ssize_t rc;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_altname = &nspm->alt_name;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		ns_altname = &nsblk->alt_name;
	} else
		return -ENXIO;

	if (dev->driver || to_ndns(dev)->claim)
		return -EBUSY;

	input = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!input)
		return -ENOMEM;

	input[len] = '\0';
	pos = strim(input);
	if (strlen(pos) + 1 > NSLABEL_NAME_LEN) {
		rc = -EINVAL;
		goto out;
	}

	alt_name = kzalloc(NSLABEL_NAME_LEN, GFP_KERNEL);
	if (!alt_name) {
		rc = -ENOMEM;
		goto out;
	}
	kfree(*ns_altname);
	*ns_altname = alt_name;
	sprintf(*ns_altname, "%s", pos);
	rc = len;

out:
	kfree(input);
	return rc;
}

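/*
 * Sum the sizes of all DPA resources tagged with this blk namespace's
 * label id across the (single) mapping of a blk region. Returns 0 when
 * no uuid has been assigned yet.
 */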
static resource_size_t nd_namespace_blk_size(struct nd_namespace_blk *nsblk)
{
	struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_id label_id;
	resource_size_t size = 0;
	struct resource *res;

	if (!nsblk->uuid)
		return 0;
	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0)
			size += resource_size(res);
	return size;
}

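/*
 * Check that the in-memory resource list for this blk namespace still
 * matches the labels on media: every resource must be acknowledged
 * (its label updated), the resource count must match num_resources,
 * and no stale resource pointers may remain.
 */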
static bool __nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
{
	struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_id label_id;
	struct resource *res;
	int count, i;

	if (!nsblk->uuid || !nsblk->lbasize || !ndd)
		return false;

	count = 0;
	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
			continue;
		/*
		 * Resources with unacknowledged adjustments indicate a
		 * failure to update labels
		 */
		if (res->flags & DPA_RESOURCE_ADJUSTED)
			return false;
		count++;
	}

	/* These values match after a successful label update */
	if (count != nsblk->num_resources)
		return false;

	for (i = 0; i < nsblk->num_resources; i++) {
		struct resource *found = NULL;

		for_each_dpa_resource(ndd, res)
			if (res == nsblk->res[i]) {
				found = res;
				break;
			}
		/* stale resource */
		if (!found)
			return false;
	}

	return true;
}

resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
{
	resource_size_t size;

	nvdimm_bus_lock(&nsblk->common.dev);
	size = __nd_namespace_blk_validate(nsblk);
	nvdimm_bus_unlock(&nsblk->common.dev);

	return size;
}
EXPORT_SYMBOL(nd_namespace_blk_validate);

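/*
 * Write (or delete) the labels that describe this namespace. Only
 * called with the namespace idle; a zero size with a uuid present
 * means "delete the allocation", while a missing uuid means there is
 * nothing to write yet.
 */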
static int nd_namespace_label_update(struct nd_region *nd_region,
		struct device *dev)
{
	dev_WARN_ONCE(dev, dev->driver || to_ndns(dev)->claim,
			"namespace must be idle during label update\n");
	if (dev->driver || to_ndns(dev)->claim)
		return 0;

	/*
	 * Only allow label writes that will result in a valid namespace
	 * or deletion of an existing namespace.
	 */
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
		resource_size_t size = resource_size(&nspm->nsio.res);

		if (size == 0 && nspm->uuid)
			/* delete allocation */;
		else if (!nspm->uuid)
			return 0;

		return nd_pmem_namespace_label_update(nd_region, nspm, size);
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
		resource_size_t size = nd_namespace_blk_size(nsblk);

		if (size == 0 && nsblk->uuid)
			/* delete allocation */;
		else if (!nsblk->uuid || !nsblk->lbasize)
			return 0;

		return nd_blk_namespace_label_update(nd_region, nsblk, size);
	} else
		return -ENXIO;
}

static ssize_t alt_name_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	ssize_t rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __alt_name_store(dev, buf, len);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "%s: %s(%zd)\n", __func__, rc < 0 ? "fail " : "", rc);
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc < 0 ? rc : len;
}

static ssize_t alt_name_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	char *ns_altname;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_altname = nspm->alt_name;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		ns_altname = nsblk->alt_name;
	} else
		return -ENXIO;

	return sprintf(buf, "%s\n", ns_altname ? ns_altname : "");
}
static DEVICE_ATTR_RW(alt_name);

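/*
 * Release up to @n bytes from the tail of the label_id's allocation on
 * one dimm. Whole trailing resources are deleted; a final partial
 * resource is shrunk with adjust_resource() and flagged
 * DPA_RESOURCE_ADJUSTED so the pending label update is visible.
 */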
static int scan_free(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
		resource_size_t n)
{
	bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	int rc = 0;

	while (n) {
		struct resource *res, *last;
		resource_size_t new_start;

		last = NULL;
		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id->id) == 0)
				last = res;
		res = last;
		if (!res)
			return 0;

		if (n >= resource_size(res)) {
			n -= resource_size(res);
			nd_dbg_dpa(nd_region, ndd, res, "delete %d\n", rc);
			nvdimm_free_dpa(ndd, res);
			/* retry with last resource deleted */
			continue;
		}

		/*
		 * Keep BLK allocations relegated to high DPA as much as
		 * possible
		 */
		if (is_blk)
			new_start = res->start + n;
		else
			new_start = res->start;

		rc = adjust_resource(res, new_start, resource_size(res) - n);
		if (rc == 0)
			res->flags |= DPA_RESOURCE_ADJUSTED;
		nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc);
		break;
	}

	return rc;
}

/**
 * shrink_dpa_allocation - for each dimm in region free n bytes for label_id
 * @nd_region: the set of dimms to reclaim @n bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to release
 *
 * Assumes resources are ordered. Starting from the end try to
 * adjust_resource() the allocation to @n, but if @n is larger than the
 * allocation delete it and find the 'new' last allocation in the label
 * set.
 */
static int shrink_dpa_allocation(struct nd_region *nd_region,
		struct nd_label_id *label_id, resource_size_t n)
{
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		int rc;

		rc = scan_free(nd_region, nd_mapping, label_id, n);
		if (rc)
			return rc;
	}

	return 0;
}

static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
		struct nd_region *nd_region, struct nd_mapping *nd_mapping,
		resource_size_t n)
{
	bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	resource_size_t first_dpa;
	struct resource *res;
	int rc = 0;

	/* allocate blk from highest dpa first */
	if (is_blk)
		first_dpa = nd_mapping->start + nd_mapping->size - n;
	else
		first_dpa = nd_mapping->start;

	/* first resource allocation for this label-id or dimm */
	res = nvdimm_allocate_dpa(ndd, label_id, first_dpa, n);
	if (!res)
		rc = -EBUSY;

	nd_dbg_dpa(nd_region, ndd, res, "init %d\n", rc);
	return rc ? n : 0;
}

static bool space_valid(bool is_pmem, bool is_reserve,
		struct nd_label_id *label_id, struct resource *res)
{
	/*
	 * For BLK-space any space is valid, for PMEM-space, it must be
	 * contiguous with an existing allocation unless we are
	 * reserving pmem.
	 */
	if (is_reserve || !is_pmem)
		return true;
	if (!res || strcmp(res->name, label_id->id) == 0)
		return true;
	return false;
}

enum alloc_loc {
	ALLOC_ERR = 0, ALLOC_BEFORE, ALLOC_MID, ALLOC_AFTER,
};

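/*
 * Walk the dimm's DPA resource tree looking for free space for
 * @label_id: before the first allocation, between allocations, or
 * after the last one. Grow an adjacent allocation with the same label
 * where possible, otherwise start a new resource, and keep retrying
 * until @n bytes are placed or no valid space remains. Returns the
 * number of bytes that could not be allocated.
 */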
static resource_size_t scan_allocate(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
		resource_size_t n)
{
	resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1;
	bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	const resource_size_t to_allocate = n;
	struct resource *res;
	int first;

 retry:
	first = 0;
	for_each_dpa_resource(ndd, res) {
		resource_size_t allocate, available = 0, free_start, free_end;
		struct resource *next = res->sibling, *new_res = NULL;
		enum alloc_loc loc = ALLOC_ERR;
		const char *action;
		int rc = 0;

		/* ignore resources outside this nd_mapping */
		if (res->start > mapping_end)
			continue;
		if (res->end < nd_mapping->start)
			continue;

		/* space at the beginning of the mapping */
		if (!first++ && res->start > nd_mapping->start) {
			free_start = nd_mapping->start;
			available = res->start - free_start;
			if (space_valid(is_pmem, is_reserve, label_id, NULL))
				loc = ALLOC_BEFORE;
		}

		/* space between allocations */
		if (!loc && next) {
			free_start = res->start + resource_size(res);
			free_end = min(mapping_end, next->start - 1);
			if (space_valid(is_pmem, is_reserve, label_id, res)
					&& free_start < free_end) {
				available = free_end + 1 - free_start;
				loc = ALLOC_MID;
			}
		}

		/* space at the end of the mapping */
		if (!loc && !next) {
			free_start = res->start + resource_size(res);
			free_end = mapping_end;
			if (space_valid(is_pmem, is_reserve, label_id, res)
					&& free_start < free_end) {
				available = free_end + 1 - free_start;
				loc = ALLOC_AFTER;
			}
		}

		if (!loc || !available)
			continue;
		allocate = min(available, n);
		switch (loc) {
		case ALLOC_BEFORE:
			if (strcmp(res->name, label_id->id) == 0) {
				/* adjust current resource up */
				if (is_pmem && !is_reserve)
					return n;
				rc = adjust_resource(res, res->start - allocate,
						resource_size(res) + allocate);
				action = "cur grow up";
			} else
				action = "allocate";
			break;
		case ALLOC_MID:
			if (strcmp(next->name, label_id->id) == 0) {
				/* adjust next resource up */
				if (is_pmem && !is_reserve)
					return n;
				rc = adjust_resource(next, next->start
						- allocate, resource_size(next)
						+ allocate);
				new_res = next;
				action = "next grow up";
			} else if (strcmp(res->name, label_id->id) == 0) {
				action = "grow down";
			} else
				action = "allocate";
			break;
		case ALLOC_AFTER:
			if (strcmp(res->name, label_id->id) == 0)
				action = "grow down";
			else
				action = "allocate";
			break;
		default:
			return n;
		}

		if (strcmp(action, "allocate") == 0) {
			/* BLK allocate bottom up */
			if (!is_pmem)
				free_start += available - allocate;
			else if (!is_reserve && free_start != nd_mapping->start)
				return n;

			new_res = nvdimm_allocate_dpa(ndd, label_id,
					free_start, allocate);
			if (!new_res)
				rc = -EBUSY;
		} else if (strcmp(action, "grow down") == 0) {
			/* adjust current resource down */
			rc = adjust_resource(res, res->start, resource_size(res)
					+ allocate);
			if (rc == 0)
				res->flags |= DPA_RESOURCE_ADJUSTED;
		}

		if (!new_res)
			new_res = res;

		nd_dbg_dpa(nd_region, ndd, new_res, "%s(%d) %d\n",
				action, loc, rc);

		if (rc)
			return n;

		n -= allocate;
		if (n) {
			/*
			 * Retry scan with newly inserted resources.
			 * For example, if we did an ALLOC_BEFORE
			 * insertion there may also have been space
			 * available for an ALLOC_AFTER insertion, so we
			 * need to check this same resource again
			 */
			goto retry;
		} else
			return 0;
	}

	/*
	 * If we allocated nothing in the BLK case it may be because we are in
	 * an initial "pmem-reserve pass". Only do an initial BLK allocation
	 * when none of the DPA space is reserved.
	 */
	if ((is_pmem || !ndd->dpa.child) && n == to_allocate)
		return init_dpa_allocation(label_id, nd_region, nd_mapping, n);
	return n;
}

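/*
 * Coalesce physically adjacent BLK resources that share a label id
 * into a single resource so the resource list mirrors what a label
 * update will record. PMEM allocations are always contiguous and need
 * no merging.
 */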
static int merge_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res;

	if (strncmp("pmem", label_id->id, 4) == 0)
		return 0;
 retry:
	for_each_dpa_resource(ndd, res) {
		int rc;
		struct resource *next = res->sibling;
		resource_size_t end = res->start + resource_size(res);

		if (!next || strcmp(res->name, label_id->id) != 0
				|| strcmp(next->name, label_id->id) != 0
				|| end != next->start)
			continue;
		end += resource_size(next);
		nvdimm_free_dpa(ndd, next);
		rc = adjust_resource(res, res->start, end - res->start);
		nd_dbg_dpa(nd_region, ndd, res, "merge %d\n", rc);
		if (rc)
			return rc;
		res->flags |= DPA_RESOURCE_ADJUSTED;
		goto retry;
	}

	return 0;
}

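/*
 * device_for_each_child() callback: for every pmem region that maps
 * @data (an nvdimm), claim all remaining free PMEM capacity under the
 * reserved label id "pmem-reserve" so a subsequent BLK allocation pass
 * cannot encroach on it.
 */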
static int __reserve_free_pmem(struct device *dev, void *data)
{
	struct nvdimm *nvdimm = data;
	struct nd_region *nd_region;
	struct nd_label_id label_id;
	int i;

	if (!is_nd_pmem(dev))
		return 0;

	nd_region = to_nd_region(dev);
	if (nd_region->ndr_mappings == 0)
		return 0;

	memset(&label_id, 0, sizeof(label_id));
	strcat(label_id.id, "pmem-reserve");
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		resource_size_t n, rem = 0;

		if (nd_mapping->nvdimm != nvdimm)
			continue;

		n = nd_pmem_available_dpa(nd_region, nd_mapping, &rem);
		if (n == 0)
			return 0;
		rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
		dev_WARN_ONCE(&nd_region->dev, rem,
				"pmem reserve underrun: %#llx of %#llx bytes\n",
				(unsigned long long) n - rem,
				(unsigned long long) n);
		return rem ? -ENXIO : 0;
	}

	return 0;
}

static void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res, *_res;

	for_each_dpa_resource_safe(ndd, res, _res)
		if (strcmp(res->name, "pmem-reserve") == 0)
			nvdimm_free_dpa(ndd, res);
}

static int reserve_free_pmem(struct nvdimm_bus *nvdimm_bus,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm *nvdimm = nd_mapping->nvdimm;
	int rc;

	rc = device_for_each_child(&nvdimm_bus->dev, nvdimm,
			__reserve_free_pmem);
	if (rc)
		release_free_pmem(nvdimm_bus, nd_mapping);
	return rc;
}

/**
 * grow_dpa_allocation - for each dimm allocate n bytes for @label_id
 * @nd_region: the set of dimms to allocate @n more bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to add to the existing allocation
 *
 * Assumes resources are ordered. For BLK regions, first consume
 * BLK-only available DPA free space, then consume PMEM-aliased DPA
 * space starting at the highest DPA. For PMEM regions start
 * allocations from the start of an interleave set and end at the first
 * BLK allocation or the end of the interleave set, whichever comes
 * first.
 */
static int grow_dpa_allocation(struct nd_region *nd_region,
		struct nd_label_id *label_id, resource_size_t n)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		resource_size_t rem = n;
		int rc, j;

		/*
		 * In the BLK case try once with all unallocated PMEM
		 * reserved, and once without
		 */
		for (j = is_pmem; j < 2; j++) {
			bool blk_only = j == 0;

			if (blk_only) {
				rc = reserve_free_pmem(nvdimm_bus, nd_mapping);
				if (rc)
					return rc;
			}
			rem = scan_allocate(nd_region, nd_mapping,
					label_id, rem);
			if (blk_only)
				release_free_pmem(nvdimm_bus, nd_mapping);

			/* try again and allow encroachments into PMEM */
			if (rem == 0)
				break;
		}

		dev_WARN_ONCE(&nd_region->dev, rem,
				"allocation underrun: %#llx of %#llx bytes\n",
				(unsigned long long) n - rem,
				(unsigned long long) n);
		if (rem)
			return -ENXIO;

		rc = merge_dpa(nd_region, nd_mapping, label_id);
		if (rc)
			return rc;
	}

	return 0;
}

static void nd_namespace_pmem_set_size(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm, resource_size_t size)
{
	struct resource *res = &nspm->nsio.res;

	res->start = nd_region->ndr_start;
	res->end = nd_region->ndr_start + size - 1;
}

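/*
 * Resize the namespace to @val bytes. The request must be a multiple
 * of SZ_4K * ndr_mappings, and all dimms in the interleave set must be
 * enabled. The per-dimm share is then grown or shrunk to match, and a
 * blk namespace that shrinks to zero is unregistered unless it is the
 * region's seed device or claimed by a btt.
 */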
static ssize_t __size_store(struct device *dev, unsigned long long val)
{
	resource_size_t allocated = 0, available = 0;
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_mapping *nd_mapping;
	struct nvdimm_drvdata *ndd;
	struct nd_label_id label_id;
	u32 flags = 0, remainder;
	u8 *uuid = NULL;
	int rc, i;

	if (dev->driver || to_ndns(dev)->claim)
		return -EBUSY;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = nsblk->uuid;
		flags = NSLABEL_FLAG_LOCAL;
	}

	/*
	 * We need a uuid for the allocation-label and dimm(s) on which
	 * to store the label.
	 */
	if (!uuid || nd_region->ndr_mappings == 0)
		return -ENXIO;

	div_u64_rem(val, SZ_4K * nd_region->ndr_mappings, &remainder);
	if (remainder) {
		dev_dbg(dev, "%llu is not %dK aligned\n", val,
				(SZ_4K * nd_region->ndr_mappings) / SZ_1K);
		return -EINVAL;
	}

	nd_label_gen_id(&label_id, uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		ndd = to_ndd(nd_mapping);

		/*
		 * All dimms in an interleave set, or the base dimm for a blk
		 * region, need to be enabled for the size to be changed.
		 */
		if (!ndd)
			return -ENXIO;

		allocated += nvdimm_allocated_dpa(ndd, &label_id);
	}
	available = nd_region_available_dpa(nd_region);

	if (val > available + allocated)
		return -ENOSPC;

	if (val == allocated)
		return 0;

	val = div_u64(val, nd_region->ndr_mappings);
	allocated = div_u64(allocated, nd_region->ndr_mappings);
	if (val < allocated)
		rc = shrink_dpa_allocation(nd_region, &label_id,
				allocated - val);
	else
		rc = grow_dpa_allocation(nd_region, &label_id, val - allocated);

	if (rc)
		return rc;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		nd_namespace_pmem_set_size(nd_region, nspm,
				val * nd_region->ndr_mappings);
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		/*
		 * Try to delete the namespace if we deleted all of its
		 * allocation, this is not the seed device for the
		 * region, and it is not actively claimed by a btt
		 * instance.
		 */
		if (val == 0 && nd_region->ns_seed != dev
				&& !nsblk->common.claim)
			nd_device_unregister(dev, ND_ASYNC);
	}

	return rc;
}

static ssize_t size_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	unsigned long long val;
	u8 **uuid = NULL;
	int rc;

	rc = kstrtoull(buf, 0, &val);
	if (rc)
		return rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __size_store(dev, val);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = &nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = &nsblk->uuid;
	}

	if (rc == 0 && val == 0 && uuid) {
		/* setting size zero == 'delete namespace' */
		kfree(*uuid);
		*uuid = NULL;
	}

	dev_dbg(dev, "%s: %llx %s (%d)\n", __func__, val, rc < 0
			? "fail" : "success", rc);

	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc < 0 ? rc : len;
}

resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
	struct device *dev = &ndns->dev;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return resource_size(&nspm->nsio.res);
	} else if (is_namespace_blk(dev)) {
		return nd_namespace_blk_size(to_nd_namespace_blk(dev));
	} else if (is_namespace_io(dev)) {
		struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

		return resource_size(&nsio->res);
	} else
		WARN_ONCE(1, "unknown namespace type\n");
	return 0;
}

resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
	resource_size_t size;

	nvdimm_bus_lock(&ndns->dev);
	size = __nvdimm_namespace_capacity(ndns);
	nvdimm_bus_unlock(&ndns->dev);

	return size;
}
EXPORT_SYMBOL(nvdimm_namespace_capacity);

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%llu\n", (unsigned long long)
			nvdimm_namespace_capacity(to_ndns(dev)));
}
static DEVICE_ATTR(size, S_IRUGO, size_show, size_store);

static ssize_t uuid_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u8 *uuid;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = nsblk->uuid;
	} else
		return -ENXIO;

	if (uuid)
		return sprintf(buf, "%pUb\n", uuid);
	return sprintf(buf, "\n");
}

/**
 * namespace_update_uuid - check for a unique uuid and whether we're "renaming"
 * @nd_region: parent region so we can update all dimms in the set
 * @dev: namespace type for generating label_id
 * @new_uuid: incoming uuid
 * @old_uuid: reference to the uuid storage location in the namespace object
 */
static int namespace_update_uuid(struct nd_region *nd_region,
		struct device *dev, u8 *new_uuid, u8 **old_uuid)
{
	u32 flags = is_namespace_blk(dev) ? NSLABEL_FLAG_LOCAL : 0;
	struct nd_label_id old_label_id;
	struct nd_label_id new_label_id;
	int i;

	if (!nd_is_uuid_unique(dev, new_uuid))
		return -EINVAL;

	if (*old_uuid == NULL)
		goto out;

	/*
	 * If we've already written a label with this uuid, then it's
	 * too late to rename because we can't reliably update the uuid
	 * without losing the old namespace. Userspace must delete this
	 * namespace to abandon the old uuid.
	 */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		/*
		 * This check by itself is sufficient because old_uuid
		 * would be NULL above if this uuid did not exist in the
		 * currently written set.
		 *
		 * FIXME: can we delete uuid with zero dpa allocated?
		 */
		if (nd_mapping->labels)
			return -EBUSY;
	}

	nd_label_gen_id(&old_label_id, *old_uuid, flags);
	nd_label_gen_id(&new_label_id, new_uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;

		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, old_label_id.id) == 0)
				sprintf((void *) res->name, "%s",
						new_label_id.id);
	}
	kfree(*old_uuid);
 out:
	*old_uuid = new_uuid;
	return 0;
}

static ssize_t uuid_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	u8 *uuid = NULL;
	ssize_t rc = 0;
	u8 **ns_uuid;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_uuid = &nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		ns_uuid = &nsblk->uuid;
	} else
		return -ENXIO;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	if (to_ndns(dev)->claim)
		rc = -EBUSY;
	if (rc >= 0)
		rc = nd_uuid_store(dev, &uuid, buf, len);
	if (rc >= 0)
		rc = namespace_update_uuid(nd_region, dev, uuid, ns_uuid);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	else
		kfree(uuid);
	dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
			rc, buf, buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc < 0 ? rc : len;
}
static DEVICE_ATTR_RW(uuid);

static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct resource *res;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		res = &nspm->nsio.res;
	} else if (is_namespace_io(dev)) {
		struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

		res = &nsio->res;
	} else
		return -ENXIO;

	/* no address to convey if the namespace has no allocation */
	if (resource_size(res) == 0)
		return -ENXIO;
	return sprintf(buf, "%#llx\n", (unsigned long long) res->start);
}
static DEVICE_ATTR_RO(resource);

static const unsigned long ns_lbasize_supported[] = { 512, 520, 528,
	4096, 4104, 4160, 4224, 0 };

static ssize_t sector_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

	if (!is_namespace_blk(dev))
		return -ENXIO;

	return nd_sector_size_show(nsblk->lbasize, ns_lbasize_supported, buf);
}

static ssize_t sector_size_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);
	ssize_t rc = 0;

	if (!is_namespace_blk(dev))
		return -ENXIO;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	if (to_ndns(dev)->claim)
		rc = -EBUSY;
	if (rc >= 0)
		rc = nd_sector_size_store(dev, buf, &nsblk->lbasize,
				ns_lbasize_supported);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "%s: result: %zd %s: %s%s", __func__,
			rc, rc < 0 ? "tried" : "wrote", buf,
			buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc ? rc : len;
}
static DEVICE_ATTR_RW(sector_size);

static ssize_t dpa_extents_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_label_id label_id;
	int count = 0, i;
	u8 *uuid = NULL;
	u32 flags = 0;

	nvdimm_bus_lock(dev);
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = nspm->uuid;
		flags = 0;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = nsblk->uuid;
		flags = NSLABEL_FLAG_LOCAL;
	}

	if (!uuid)
		goto out;

	nd_label_gen_id(&label_id, uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;

		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id.id) == 0)
				count++;
	}
 out:
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(dpa_extents);

static ssize_t holder_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_namespace_common *ndns = to_ndns(dev);
	ssize_t rc;

	device_lock(dev);
	rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : "");
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(holder);

static ssize_t force_raw_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool force_raw;
	int rc = strtobool(buf, &force_raw);

	if (rc)
		return rc;

	to_ndns(dev)->force_raw = force_raw;
	return len;
}

static ssize_t force_raw_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", to_ndns(dev)->force_raw);
}
static DEVICE_ATTR_RW(force_raw);

static struct attribute *nd_namespace_attributes[] = {
	&dev_attr_nstype.attr,
	&dev_attr_size.attr,
	&dev_attr_uuid.attr,
	&dev_attr_holder.attr,
	&dev_attr_resource.attr,
	&dev_attr_alt_name.attr,
	&dev_attr_force_raw.attr,
	&dev_attr_sector_size.attr,
	&dev_attr_dpa_extents.attr,
	NULL,
};

static umode_t namespace_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);

	if (a == &dev_attr_resource.attr) {
		if (is_namespace_blk(dev))
			return 0;
		return a->mode;
	}

	if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
		if (a == &dev_attr_size.attr)
			return S_IWUSR | S_IRUGO;

		if (is_namespace_pmem(dev) && a == &dev_attr_sector_size.attr)
			return 0;

		return a->mode;
	}

	if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr
			|| a == &dev_attr_holder.attr
			|| a == &dev_attr_force_raw.attr)
		return a->mode;

	return 0;
}

static struct attribute_group nd_namespace_attribute_group = {
	.attrs = nd_namespace_attributes,
	.is_visible = namespace_visible,
};

static const struct attribute_group *nd_namespace_attribute_groups[] = {
	&nd_device_attribute_group,
	&nd_namespace_attribute_group,
	&nd_numa_attribute_group,
	NULL,
};

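/**
 * nvdimm_namespace_common_probe - validate a namespace for a claiming driver
 * @dev: either a namespace device or a claiming device (e.g. a btt)
 *
 * Resolve @dev to the underlying namespace, confirm it is idle and
 * unclaimed (or claimed by the caller), and verify that it is large
 * enough and fully configured (uuid set, blk labels valid) before
 * handing it to a personality driver.
 */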
struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
{
	struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
	struct nd_namespace_common *ndns;
	resource_size_t size;

	if (nd_btt) {
		ndns = nd_btt->ndns;
		if (!ndns)
			return ERR_PTR(-ENODEV);

		/*
		 * Flush any in-progress probes / removals in the driver
		 * for the raw personality of this namespace.
		 */
		device_lock(&ndns->dev);
		device_unlock(&ndns->dev);
		if (ndns->dev.driver) {
			dev_dbg(&ndns->dev, "is active, can't bind %s\n",
					dev_name(&nd_btt->dev));
			return ERR_PTR(-EBUSY);
		}
		if (dev_WARN_ONCE(&ndns->dev, ndns->claim != &nd_btt->dev,
					"host (%s) vs claim (%s) mismatch\n",
					dev_name(&nd_btt->dev),
					dev_name(ndns->claim)))
			return ERR_PTR(-ENXIO);
	} else {
		ndns = to_ndns(dev);
		if (ndns->claim) {
			dev_dbg(dev, "claimed by %s, failing probe\n",
					dev_name(ndns->claim));

			return ERR_PTR(-ENXIO);
		}
	}

	size = nvdimm_namespace_capacity(ndns);
	if (size < ND_MIN_NAMESPACE_SIZE) {
		dev_dbg(&ndns->dev, "%pa, too small must be at least %#x\n",
				&size, ND_MIN_NAMESPACE_SIZE);
		return ERR_PTR(-ENODEV);
	}

	if (is_namespace_pmem(&ndns->dev)) {
		struct nd_namespace_pmem *nspm;

		nspm = to_nd_namespace_pmem(&ndns->dev);
		if (!nspm->uuid) {
			dev_dbg(&ndns->dev, "%s: uuid not set\n", __func__);
			return ERR_PTR(-ENODEV);
		}
	} else if (is_namespace_blk(&ndns->dev)) {
		struct nd_namespace_blk *nsblk;

		nsblk = to_nd_namespace_blk(&ndns->dev);
		if (!nd_namespace_blk_validate(nsblk))
			return ERR_PTR(-ENODEV);
	}

	return ndns;
}
EXPORT_SYMBOL(nvdimm_namespace_common_probe);

static struct device **create_namespace_io(struct nd_region *nd_region)
{
	struct nd_namespace_io *nsio;
	struct device *dev, **devs;
	struct resource *res;

	nsio = kzalloc(sizeof(*nsio), GFP_KERNEL);
	if (!nsio)
		return NULL;

	devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
	if (!devs) {
		kfree(nsio);
		return NULL;
	}

	dev = &nsio->common.dev;
	dev->type = &namespace_io_device_type;
	dev->parent = &nd_region->dev;
	res = &nsio->res;
	res->name = dev_name(&nd_region->dev);
	res->flags = IORESOURCE_MEM;
	res->start = nd_region->ndr_start;
	res->end = res->start + nd_region->ndr_size - 1;

	devs[0] = dev;
	return devs;
}

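/*
 * Scan every mapping in the region for a label that matches @uuid,
 * @cookie, and interleave position @pos, while also rejecting sets
 * where a dimm carries two labels with the same uuid.
 */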
static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
		u64 cookie, u16 pos)
{
	struct nd_namespace_label *found = NULL;
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nd_namespace_label *nd_label;
		bool found_uuid = false;
		int l;

		for_each_label(l, nd_label, nd_mapping->labels) {
			u64 isetcookie = __le64_to_cpu(nd_label->isetcookie);
			u16 position = __le16_to_cpu(nd_label->position);
			u16 nlabel = __le16_to_cpu(nd_label->nlabel);

			if (isetcookie != cookie)
				continue;

			if (memcmp(nd_label->uuid, uuid, NSLABEL_UUID_LEN) != 0)
				continue;

			if (found_uuid) {
				dev_dbg(to_ndd(nd_mapping)->dev,
						"%s duplicate entry for uuid\n",
						__func__);
				return false;
			}
			found_uuid = true;
			if (nlabel != nd_region->ndr_mappings)
				continue;
			if (position != pos)
				continue;
			found = nd_label;
			break;
		}
		if (found)
			break;
	}
	return found != NULL;
}

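/*
 * Pin each mapping's label list down to the single label whose uuid
 * matches @pmem_id, after checking that the label's dpa range falls
 * within the range published in the NFIT for that dimm.
 */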
static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
{
	struct nd_namespace_label *select = NULL;
	int i;

	if (!pmem_id)
		return -ENODEV;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nd_namespace_label *nd_label;
		u64 hw_start, hw_end, pmem_start, pmem_end;
		int l;

		for_each_label(l, nd_label, nd_mapping->labels)
			if (memcmp(nd_label->uuid, pmem_id, NSLABEL_UUID_LEN) == 0)
				break;

		if (!nd_label) {
			WARN_ON(1);
			return -EINVAL;
		}

		select = nd_label;
		/*
		 * Check that this label is compliant with the dpa
		 * range published in NFIT
		 */
		hw_start = nd_mapping->start;
		hw_end = hw_start + nd_mapping->size;
		pmem_start = __le64_to_cpu(select->dpa);
		pmem_end = pmem_start + __le64_to_cpu(select->rawsize);
		if (pmem_start == hw_start && pmem_end <= hw_end)
			/* pass */;
		else
			return -EINVAL;

		nd_mapping->labels[0] = select;
		nd_mapping->labels[1] = NULL;
	}
	return 0;
}

/**
 * find_pmem_label_set - validate interleave set labelling, retrieve label0
 * @nd_region: region with mappings to validate
 */
static int find_pmem_label_set(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm)
{
	u64 cookie = nd_region_interleave_set_cookie(nd_region);
	struct nd_namespace_label *nd_label;
	u8 select_id[NSLABEL_UUID_LEN];
	resource_size_t size = 0;
	u8 *pmem_id = NULL;
	int rc = -ENODEV, l;
	u16 i;

	if (cookie == 0)
		return -ENXIO;

	/*
	 * Find a complete set of labels by uuid. By definition we can start
	 * with any mapping as the reference label
	 */
	for_each_label(l, nd_label, nd_region->mapping[0].labels) {
		u64 isetcookie = __le64_to_cpu(nd_label->isetcookie);

		if (isetcookie != cookie)
			continue;

		for (i = 0; i < nd_region->ndr_mappings; i++)
			if (!has_uuid_at_pos(nd_region, nd_label->uuid,
						cookie, i))
				break;
		if (i < nd_region->ndr_mappings) {
			/*
			 * Give up if we don't find an instance of a
			 * uuid at each position (from 0 to
			 * nd_region->ndr_mappings - 1), or if we find a
			 * dimm with two instances of the same uuid.
			 */
			rc = -EINVAL;
			goto err;
		} else if (pmem_id) {
			/*
			 * If there is more than one valid uuid set, we
			 * need userspace to clean this up.
			 */
			rc = -EBUSY;
			goto err;
		}
		memcpy(select_id, nd_label->uuid, NSLABEL_UUID_LEN);
		pmem_id = select_id;
	}

	/*
	 * Fix up each mapping's 'labels' to have the validated pmem label for
	 * that position at labels[0], and NULL at labels[1]. In the process,
	 * check that the namespace aligns with interleave-set. We know
	 * that it does not overlap with any blk namespaces by virtue of
	 * the dimm being enabled (i.e. nd_label_reserve_dpa()
	 * succeeded).
	 */
	rc = select_pmem_id(nd_region, pmem_id);
	if (rc)
		goto err;

	/* Calculate total size and populate namespace properties from label0 */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nd_namespace_label *label0 = nd_mapping->labels[0];

		size += __le64_to_cpu(label0->rawsize);
		if (__le16_to_cpu(label0->position) != 0)
			continue;
		WARN_ON(nspm->alt_name || nspm->uuid);
		nspm->alt_name = kmemdup((void __force *) label0->name,
				NSLABEL_NAME_LEN, GFP_KERNEL);
		nspm->uuid = kmemdup((void __force *) label0->uuid,
				NSLABEL_UUID_LEN, GFP_KERNEL);
	}

	if (!nspm->alt_name || !nspm->uuid) {
		rc = -ENOMEM;
		goto err;
	}

	nd_namespace_pmem_set_size(nd_region, nspm, size);

	return 0;
 err:
	switch (rc) {
	case -EINVAL:
		dev_dbg(&nd_region->dev, "%s: invalid label(s)\n", __func__);
		break;
	case -ENODEV:
		dev_dbg(&nd_region->dev, "%s: label not found\n", __func__);
		break;
	default:
		dev_dbg(&nd_region->dev, "%s: unexpected err: %d\n",
				__func__, rc);
		break;
	}
	return rc;
}

static struct device **create_namespace_pmem(struct nd_region *nd_region)
{
	struct nd_namespace_pmem *nspm;
	struct device *dev, **devs;
	struct resource *res;
	int rc;

	nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
	if (!nspm)
		return NULL;

	dev = &nspm->nsio.common.dev;
	dev->type = &namespace_pmem_device_type;
	dev->parent = &nd_region->dev;
	res = &nspm->nsio.res;
	res->name = dev_name(&nd_region->dev);
	res->flags = IORESOURCE_MEM;
	rc = find_pmem_label_set(nd_region, nspm);
	if (rc == -ENODEV) {
		int i;

		/* Pass, try to permit namespace creation... */
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];

			kfree(nd_mapping->labels);
			nd_mapping->labels = NULL;
		}

		/* Publish a zero-sized namespace for userspace to configure. */
		nd_namespace_pmem_set_size(nd_region, nspm, 0);

		rc = 0;
	} else if (rc)
		goto err;

	devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
	if (!devs)
		goto err;

	devs[0] = dev;
	return devs;

 err:
	namespace_pmem_release(&nspm->nsio.common.dev);
	return NULL;
}

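/*
 * Grow the blk namespace's resource array by one slot and point the
 * new slot at the dimm's DPA resource that carries this namespace's
 * label id and starts at @start. Returns NULL if no such resource
 * exists or the array could not be resized.
 */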
struct resource *nsblk_add_resource(struct nd_region *nd_region,
		struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
		resource_size_t start)
{
	struct nd_label_id label_id;
	struct resource *res;

	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
	res = krealloc(nsblk->res,
			sizeof(void *) * (nsblk->num_resources + 1),
			GFP_KERNEL);
	if (!res)
		return NULL;
	nsblk->res = (struct resource **) res;
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0
				&& res->start == start) {
			nsblk->res[nsblk->num_resources++] = res;
			return res;
		}
	return NULL;
}

static struct device *nd_namespace_blk_create(struct nd_region *nd_region)
{
	struct nd_namespace_blk *nsblk;
	struct device *dev;

	if (!is_nd_blk(&nd_region->dev))
		return NULL;

	nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
	if (!nsblk)
		return NULL;

	dev = &nsblk->common.dev;
	dev->type = &namespace_blk_device_type;
	nsblk->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
	if (nsblk->id < 0) {
		kfree(nsblk);
		return NULL;
	}
	dev_set_name(dev, "namespace%d.%d", nd_region->id, nsblk->id);
	dev->parent = &nd_region->dev;
	dev->groups = nd_namespace_attribute_groups;

	return &nsblk->common.dev;
}

void nd_region_create_blk_seed(struct nd_region *nd_region)
{
	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	nd_region->ns_seed = nd_namespace_blk_create(nd_region);
	/*
	 * Seed creation failures are not fatal, provisioning is simply
	 * disabled until memory becomes available
	 */
	if (!nd_region->ns_seed)
		dev_err(&nd_region->dev, "failed to create blk namespace\n");
	else
		nd_device_register(nd_region->ns_seed);
}

void nd_region_create_btt_seed(struct nd_region *nd_region)
{
	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	nd_region->btt_seed = nd_btt_create(nd_region);
	/*
	 * Seed creation failures are not fatal, provisioning is simply
	 * disabled until memory becomes available
	 */
	if (!nd_region->btt_seed)
		dev_err(&nd_region->dev, "failed to create btt namespace\n");
}

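/*
 * Build the set of blk namespace devices for a region from the active
 * labels on its (single) dimm: group labels by uuid into one
 * nd_namespace_blk each, attaching every matching DPA resource. When
 * no labels are found, publish a single zero-sized seed namespace for
 * userspace to configure.
 */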
static struct device **create_namespace_blk(struct nd_region *nd_region)
{
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nd_namespace_label *nd_label;
	struct device *dev, **devs = NULL;
	struct nd_namespace_blk *nsblk;
	struct nvdimm_drvdata *ndd;
	int i, l, count = 0;
	struct resource *res;

	if (nd_region->ndr_mappings == 0)
		return NULL;

	ndd = to_ndd(nd_mapping);
	for_each_label(l, nd_label, nd_mapping->labels) {
		u32 flags = __le32_to_cpu(nd_label->flags);
		char name[NSLABEL_NAME_LEN];
		struct device **__devs;

		if (flags & NSLABEL_FLAG_LOCAL)
			/* pass */;
		else
			continue;

		for (i = 0; i < count; i++) {
			nsblk = to_nd_namespace_blk(devs[i]);
			if (memcmp(nsblk->uuid, nd_label->uuid,
						NSLABEL_UUID_LEN) == 0) {
				res = nsblk_add_resource(nd_region, ndd, nsblk,
						__le64_to_cpu(nd_label->dpa));
				if (!res)
					goto err;
				nd_dbg_dpa(nd_region, ndd, res, "%s assign\n",
					dev_name(&nsblk->common.dev));
				break;
			}
		}
		if (i < count)
			continue;
		__devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
		if (!__devs)
			goto err;
		memcpy(__devs, devs, sizeof(dev) * count);
		kfree(devs);
		devs = __devs;

		nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
		if (!nsblk)
			goto err;
		dev = &nsblk->common.dev;
		dev->type = &namespace_blk_device_type;
		dev->parent = &nd_region->dev;
		dev_set_name(dev, "namespace%d.%d", nd_region->id, count);
		devs[count++] = dev;
		nsblk->id = -1;
		nsblk->lbasize = __le64_to_cpu(nd_label->lbasize);
		nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN,
				GFP_KERNEL);
		if (!nsblk->uuid)
			goto err;
		memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
		if (name[0])
			nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
					GFP_KERNEL);
		res = nsblk_add_resource(nd_region, ndd, nsblk,
				__le64_to_cpu(nd_label->dpa));
		if (!res)
			goto err;
		nd_dbg_dpa(nd_region, ndd, res, "%s assign\n",
				dev_name(&nsblk->common.dev));
	}

	dev_dbg(&nd_region->dev, "%s: discovered %d blk namespace%s\n",
			__func__, count, count == 1 ? "" : "s");

	if (count == 0) {
		/* Publish a zero-sized namespace for userspace to configure. */
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];

			kfree(nd_mapping->labels);
			nd_mapping->labels = NULL;
		}

		devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
		if (!devs)
			goto err;
		nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
		if (!nsblk)
			goto err;
		dev = &nsblk->common.dev;
		dev->type = &namespace_blk_device_type;
		dev->parent = &nd_region->dev;
		devs[count++] = dev;
	}

	return devs;

err:
	for (i = 0; i < count; i++) {
		nsblk = to_nd_namespace_blk(devs[i]);
		namespace_blk_release(&nsblk->common.dev);
	}
	kfree(devs);
	return NULL;
}

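/*
 * Cache the active label set of every dimm in the region, pinning each
 * nvdimm_drvdata. A disabled dimm is tolerated only when it does not
 * alias DPA (no labels to lose); otherwise region activation fails.
 */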
static int init_active_labels(struct nd_region *nd_region)
{
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		int count, j;

		/*
		 * If the dimm is disabled then prevent the region from
		 * being activated if it aliases DPA.
		 */
		if (!ndd) {
			if ((nvdimm->flags & NDD_ALIASING) == 0)
				return 0;
			dev_dbg(&nd_region->dev, "%s: is disabled, failing probe\n",
					dev_name(&nd_mapping->nvdimm->dev));
			return -ENXIO;
		}
		nd_mapping->ndd = ndd;
		atomic_inc(&nvdimm->busy);
		get_ndd(ndd);

		count = nd_label_active_count(ndd);
		dev_dbg(ndd->dev, "%s: %d\n", __func__, count);
		if (!count)
			continue;
		nd_mapping->labels = kcalloc(count + 1, sizeof(void *),
				GFP_KERNEL);
		if (!nd_mapping->labels)
			return -ENOMEM;
		for (j = 0; j < count; j++) {
			struct nd_namespace_label *label;

			label = nd_label_active(ndd, j);
			nd_mapping->labels[j] = label;
		}
	}

	return 0;
}

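/**
 * nd_region_register_namespaces - scan labels and register namespace devices
 * @nd_region: region to scan
 * @err: output, number of namespace devices that could not be registered
 *
 * Returns the number of namespaces registered, or a negative error
 * code if none could be created. Devices past the first registration
 * failure are released rather than registered.
 */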
int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
{
	struct device **devs = NULL;
	int i, rc = 0, type;

	*err = 0;
	nvdimm_bus_lock(&nd_region->dev);
	rc = init_active_labels(nd_region);
	if (rc) {
		nvdimm_bus_unlock(&nd_region->dev);
		return rc;
	}

	type = nd_region_to_nstype(nd_region);
	switch (type) {
	case ND_DEVICE_NAMESPACE_IO:
		devs = create_namespace_io(nd_region);
		break;
	case ND_DEVICE_NAMESPACE_PMEM:
		devs = create_namespace_pmem(nd_region);
		break;
	case ND_DEVICE_NAMESPACE_BLK:
		devs = create_namespace_blk(nd_region);
		break;
	default:
		break;
	}
	nvdimm_bus_unlock(&nd_region->dev);

	if (!devs)
		return -ENODEV;

	for (i = 0; devs[i]; i++) {
		struct device *dev = devs[i];
		int id;

		if (type == ND_DEVICE_NAMESPACE_BLK) {
			struct nd_namespace_blk *nsblk;

			nsblk = to_nd_namespace_blk(dev);
			id = ida_simple_get(&nd_region->ns_ida, 0, 0,
					GFP_KERNEL);
			nsblk->id = id;
		} else
			id = i;

		if (id < 0)
			break;
		dev_set_name(dev, "namespace%d.%d", nd_region->id, id);
		dev->groups = nd_namespace_attribute_groups;
		nd_device_register(dev);
	}
	if (i)
		nd_region->ns_seed = devs[0];

	if (devs[i]) {
		int j;

		for (j = i; devs[j]; j++) {
			struct device *dev = devs[j];

			device_initialize(dev);
			put_device(dev);
		}
		*err = j - i;
		/*
		 * All of the namespaces we tried to register failed, so
		 * fail region activation.
		 */
		if (*err == 0)
			rc = -ENODEV;
	}
	kfree(devs);

	if (rc == -ENODEV)
		return rc;

	return i;
}