/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

static DEFINE_IDA(region_ida);

static void nd_region_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	u16 i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		put_device(&nvdimm->dev);
	}
	free_percpu(nd_region->lane);
	ida_simple_remove(&region_ida, nd_region->id);
	if (is_nd_blk(dev))
		kfree(to_nd_blk_region(dev));
	else
		kfree(nd_region);
}

static struct device_type nd_blk_device_type = {
	.name = "nd_blk",
	.release = nd_region_release,
};

static struct device_type nd_pmem_device_type = {
	.name = "nd_pmem",
	.release = nd_region_release,
};

static struct device_type nd_volatile_device_type = {
	.name = "nd_volatile",
	.release = nd_region_release,
};

bool is_nd_pmem(struct device *dev)
{
	return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
	return dev ? dev->type == &nd_blk_device_type : false;
}

struct nd_region *to_nd_region(struct device *dev)
{
	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

	WARN_ON(dev->type->release != nd_region_release);
	return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	WARN_ON(!is_nd_blk(dev));
	return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);

void *nd_region_provider_data(struct nd_region *nd_region)
{
	return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
	return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);

void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
	ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);

/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is the 'nstype' attribute of a region as well as an input to
 * the MODALIAS for namespace devices, and the bit number for a
 * nvdimm_bus to match namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
	if (is_nd_pmem(&nd_region->dev)) {
		u16 i, alias;

		for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			if (nvdimm->flags & NDD_ALIASING)
				alias++;
		}
		if (alias)
			return ND_DEVICE_NAMESPACE_PMEM;
		else
			return ND_DEVICE_NAMESPACE_IO;
	} else if (is_nd_blk(&nd_region->dev)) {
		return ND_DEVICE_NAMESPACE_BLK;
	}

	return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);
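
/*
 * Example (hypothetical topology): a PMEM region whose backing DIMMs
 * all set NDD_ALIASING reports ND_DEVICE_NAMESPACE_PMEM and hosts
 * label-backed PMEM namespaces; with no aliased capacity it reports
 * ND_DEVICE_NAMESPACE_IO, i.e. a raw namespace (e.g. /dev/pmem0).
 * BLK regions always report ND_DEVICE_NAMESPACE_BLK.
 */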

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long size = 0;

	if (is_nd_pmem(dev)) {
		size = nd_region->ndr_size;
	} else if (nd_region->ndr_mappings == 1) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];

		size = nd_mapping->size;
	}

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t mappings_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t set_cookie_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (is_nd_pmem(dev) && nd_set)
		/* pass, should be precluded by region_visible */;
	else
		return -ENXIO;

	return sprintf(buf, "%#llx\n", nd_set->cookie);
}
static DEVICE_ATTR_RO(set_cookie);

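/*
 * Note on the retry loop in nd_region_available_dpa() below: in a PMEM
 * region, BLK capacity may alias (shadow) part of each DIMM's PMEM
 * allocation. A pass over the mappings can discover a larger BLK
 * overlap than the current assumption, in which case the accounting
 * restarts so that every mapping deducts the same, maximal aliased
 * span. Hypothetical sizes: if mapping0 reports 2G of BLK overlap and
 * mapping1 reports 4G, the loop re-runs with blk_max_overlap = 4G.
 */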
resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
	resource_size_t blk_max_overlap = 0, available, overlap;
	int i;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

 retry:
	available = 0;
	overlap = blk_max_overlap;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		/* if a dimm is disabled the available capacity is zero */
		if (!ndd)
			return 0;

		if (is_nd_pmem(&nd_region->dev)) {
			available += nd_pmem_available_dpa(nd_region,
					nd_mapping, &overlap);
			if (overlap > blk_max_overlap) {
				blk_max_overlap = overlap;
				goto retry;
			}
		} else if (is_nd_blk(&nd_region->dev)) {
			available += nd_blk_available_dpa(nd_mapping);
		}
	}

	return available;
}

static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	/*
	 * Flush in-flight updates and grab a snapshot of the available
	 * size. Of course, this value is potentially invalidated the
	 * moment nvdimm_bus_lock() is dropped, but that's userspace's
	 * problem to not race itself.
	 */
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);

static ssize_t init_namespaces_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region_namespaces *num_ns = dev_get_drvdata(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (num_ns)
		rc = sprintf(buf, "%d/%d\n", num_ns->active, num_ns->count);
	else
		rc = -ENXIO;
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(init_namespaces);

static ssize_t namespace_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t btt_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->btt_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(btt_seed);

static ssize_t pfn_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->pfn_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(pfn_seed);

static ssize_t dax_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->dax_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(dax_seed);

static ssize_t read_only_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ro);
}

static ssize_t read_only_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool ro;
	int rc = strtobool(buf, &ro);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;

	nd_region->ro = ro;
	return len;
}
static DEVICE_ATTR_RW(read_only);

static struct attribute *nd_region_attributes[] = {
	&dev_attr_size.attr,
	&dev_attr_nstype.attr,
	&dev_attr_mappings.attr,
	&dev_attr_btt_seed.attr,
	&dev_attr_pfn_seed.attr,
	&dev_attr_dax_seed.attr,
	&dev_attr_read_only.attr,
	&dev_attr_set_cookie.attr,
	&dev_attr_available_size.attr,
	&dev_attr_namespace_seed.attr,
	&dev_attr_init_namespaces.attr,
	NULL,
};

static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	int type = nd_region_to_nstype(nd_region);

	if (!is_nd_pmem(dev) && a == &dev_attr_pfn_seed.attr)
		return 0;

	if (!is_nd_pmem(dev) && a == &dev_attr_dax_seed.attr)
		return 0;

	if (a != &dev_attr_set_cookie.attr
			&& a != &dev_attr_available_size.attr)
		return a->mode;

	if ((type == ND_DEVICE_NAMESPACE_PMEM
				|| type == ND_DEVICE_NAMESPACE_BLK)
			&& a == &dev_attr_available_size.attr)
		return a->mode;
	else if (is_nd_pmem(dev) && nd_set)
		return a->mode;

	return 0;
}

struct attribute_group nd_region_attribute_group = {
	.attrs = nd_region_attributes,
	.is_visible = region_visible,
};
EXPORT_SYMBOL_GPL(nd_region_attribute_group);

u64 nd_region_interleave_set_cookie(struct nd_region *nd_region)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (nd_set)
		return nd_set->cookie;
	return 0;
}

/*
 * Upon successful probe/remove, take/release a reference on the
 * associated interleave set (if present), and plant new btt + namespace
 * seeds. Also, on the removal of a BLK region, notify the provider to
 * disable the region.
 */
static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
		struct device *dev, bool probe)
{
	struct nd_region *nd_region;

	if (!probe && (is_nd_pmem(dev) || is_nd_blk(dev))) {
		int i;

		nd_region = to_nd_region(dev);
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm_drvdata *ndd = nd_mapping->ndd;
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			kfree(nd_mapping->labels);
			nd_mapping->labels = NULL;
			put_ndd(ndd);
			nd_mapping->ndd = NULL;
			if (ndd)
				atomic_dec(&nvdimm->busy);
		}

		if (is_nd_pmem(dev))
			return;

		to_nd_blk_region(dev)->disable(nvdimm_bus, dev);
	}
	if (dev->parent && is_nd_blk(dev->parent) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->ns_seed == dev)
			nd_region_create_blk_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_btt(dev) && probe) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->btt_seed == dev)
			nd_region_create_btt_seed(nd_region);
		if (nd_region->ns_seed == &nd_btt->ndns->dev &&
				is_nd_blk(dev->parent))
			nd_region_create_blk_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_pfn(dev) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->pfn_seed == dev)
			nd_region_create_pfn_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_dax(dev) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->dax_seed == dev)
			nd_region_create_dax_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
}

void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, true);
}

void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, false);
}

static ssize_t mappingN(struct device *dev, char *buf, int n)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm *nvdimm;

	if (n >= nd_region->ndr_mappings)
		return -ENXIO;
	nd_mapping = &nd_region->mapping[n];
	nvdimm = nd_mapping->nvdimm;

	return sprintf(buf, "%s,%llu,%llu\n", dev_name(&nvdimm->dev),
			nd_mapping->start, nd_mapping->size);
}
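
/*
 * Example output (hypothetical DIMM name and sizes):
 *
 *	# cat /sys/bus/nd/devices/region0/mapping0
 *	nmem0,0,4294967296
 *
 * ...i.e. "<dimm-device>,<dpa-start>,<size>".
 */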

#define REGION_MAPPING(idx) \
static ssize_t mapping##idx##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	return mappingN(dev, buf, idx);				\
}								\
static DEVICE_ATTR_RO(mapping##idx)
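
/*
 * For reference, REGION_MAPPING(0) expands to:
 *
 *	static ssize_t mapping0_show(struct device *dev,
 *			struct device_attribute *attr, char *buf)
 *	{
 *		return mappingN(dev, buf, 0);
 *	}
 *	static DEVICE_ATTR_RO(mapping0);
 */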

/*
 * 32 should be enough for a while; even in the presence of socket
 * interleave, a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);

static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nd_region *nd_region = to_nd_region(dev);

	if (n < nd_region->ndr_mappings)
		return a->mode;
	return 0;
}

static struct attribute *mapping_attributes[] = {
	&dev_attr_mapping0.attr,
	&dev_attr_mapping1.attr,
	&dev_attr_mapping2.attr,
	&dev_attr_mapping3.attr,
	&dev_attr_mapping4.attr,
	&dev_attr_mapping5.attr,
	&dev_attr_mapping6.attr,
	&dev_attr_mapping7.attr,
	&dev_attr_mapping8.attr,
	&dev_attr_mapping9.attr,
	&dev_attr_mapping10.attr,
	&dev_attr_mapping11.attr,
	&dev_attr_mapping12.attr,
	&dev_attr_mapping13.attr,
	&dev_attr_mapping14.attr,
	&dev_attr_mapping15.attr,
	&dev_attr_mapping16.attr,
	&dev_attr_mapping17.attr,
	&dev_attr_mapping18.attr,
	&dev_attr_mapping19.attr,
	&dev_attr_mapping20.attr,
	&dev_attr_mapping21.attr,
	&dev_attr_mapping22.attr,
	&dev_attr_mapping23.attr,
	&dev_attr_mapping24.attr,
	&dev_attr_mapping25.attr,
	&dev_attr_mapping26.attr,
	&dev_attr_mapping27.attr,
	&dev_attr_mapping28.attr,
	&dev_attr_mapping29.attr,
	&dev_attr_mapping30.attr,
	&dev_attr_mapping31.attr,
	NULL,
};

struct attribute_group nd_mapping_attribute_group = {
	.is_visible = mapping_visible,
	.attrs = mapping_attributes,
};
EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);

int nd_blk_region_init(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!is_nd_blk(dev))
		return 0;

	if (nd_region->ndr_mappings < 1) {
		dev_err(dev, "invalid BLK region\n");
		return -ENXIO;
	}

	return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}

/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per-cpu. For larger systems we need to lock to share lanes. For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace a lane may be
 * acquired recursively. We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
	unsigned int cpu, lane;

	cpu = get_cpu();
	if (nd_region->num_lanes < nr_cpu_ids) {
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		lane = cpu % nd_region->num_lanes;
		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (ndl_count->count++ == 0)
			spin_lock(&ndl_lock->lock);
	} else
		lane = cpu;

	return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);

void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
	if (nd_region->num_lanes < nr_cpu_ids) {
		unsigned int cpu = get_cpu();
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (--ndl_count->count == 0)
			spin_unlock(&ndl_lock->lock);
		put_cpu();
	}
	/* pairs with the get_cpu() in nd_region_acquire_lane() */
	put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);
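
/*
 * Typical lane usage (sketch, hypothetical caller): a BLK or BTT I/O
 * path brackets access to its per-lane resources, e.g.:
 *
 *	lane = nd_region_acquire_lane(nd_region);
 *	... issue I/O through the lane's data window / BTT log slot ...
 *	nd_region_release_lane(nd_region, lane);
 */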

static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc, struct device_type *dev_type,
		const char *caller)
{
	struct nd_region *nd_region;
	struct device *dev;
	void *region_buf;
	unsigned int i;
	int ro = 0;

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		if ((nd_mapping->start | nd_mapping->size) % SZ_4K) {
			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
					caller, dev_name(&nvdimm->dev), i);

			return NULL;
		}

		if (nvdimm->flags & NDD_UNARMED)
			ro = 1;
	}

	if (dev_type == &nd_blk_device_type) {
		struct nd_blk_region_desc *ndbr_desc;
		struct nd_blk_region *ndbr;

		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		if (ndbr) {
			nd_region = &ndbr->nd_region;
			ndbr->enable = ndbr_desc->enable;
			ndbr->disable = ndbr_desc->disable;
			ndbr->do_io = ndbr_desc->do_io;
		}
		region_buf = ndbr;
	} else {
		nd_region = kzalloc(sizeof(struct nd_region)
				+ sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		region_buf = nd_region;
	}

	if (!region_buf)
		return NULL;
	nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
	if (nd_region->id < 0)
		goto err_id;

	nd_region->lane = alloc_percpu(struct nd_percpu_lane);
	if (!nd_region->lane)
		goto err_percpu;

	for (i = 0; i < nr_cpu_ids; i++) {
		struct nd_percpu_lane *ndl;

		ndl = per_cpu_ptr(nd_region->lane, i);
		spin_lock_init(&ndl->lock);
		ndl->count = 0;
	}

	memcpy(nd_region->mapping, ndr_desc->nd_mapping,
			sizeof(struct nd_mapping) * ndr_desc->num_mappings);
	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		get_device(&nvdimm->dev);
	}
	nd_region->ndr_mappings = ndr_desc->num_mappings;
	nd_region->provider_data = ndr_desc->provider_data;
	nd_region->nd_set = ndr_desc->nd_set;
	nd_region->num_lanes = ndr_desc->num_lanes;
	nd_region->flags = ndr_desc->flags;
	nd_region->ro = ro;
	nd_region->numa_node = ndr_desc->numa_node;
	ida_init(&nd_region->ns_ida);
	ida_init(&nd_region->btt_ida);
	ida_init(&nd_region->pfn_ida);
	ida_init(&nd_region->dax_ida);
	dev = &nd_region->dev;
	dev_set_name(dev, "region%d", nd_region->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = dev_type;
	dev->groups = ndr_desc->attr_groups;
	nd_region->ndr_size = resource_size(ndr_desc->res);
	nd_region->ndr_start = ndr_desc->res->start;
	nd_device_register(dev);

	return nd_region;

 err_percpu:
	ida_simple_remove(&region_ida, nd_region->id);
 err_id:
	kfree(region_buf);
	return NULL;
}

struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);
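
/*
 * Provider-side sketch (hypothetical bus setup, not taken from an
 * in-tree driver) of registering a PMEM region; 'bus', 'res', and
 * 'mappings' are assumed to be prepared by the bus implementation:
 *
 *	struct nd_region_desc ndr_desc;
 *
 *	memset(&ndr_desc, 0, sizeof(ndr_desc));
 *	ndr_desc.res = &res;
 *	ndr_desc.nd_mapping = mappings;
 *	ndr_desc.num_mappings = num_mappings;
 *	ndr_desc.numa_node = NUMA_NO_NODE;
 *	if (!nvdimm_pmem_region_create(bus, &ndr_desc))
 *		return -ENXIO;
 */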

struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	if (ndr_desc->num_mappings > 1)
		return NULL;
	ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);

void __exit nd_region_devs_exit(void)
{
	ida_destroy(&region_ida);
}