/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

static DEFINE_IDA(region_ida);

static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
		struct nd_region_data *ndrd)
{
	int i, j;

	dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
			nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
	for (i = 0; i < nvdimm->num_flush; i++) {
		struct resource *res = &nvdimm->flush_wpq[i];
		unsigned long pfn = PHYS_PFN(res->start);
		void __iomem *flush_page;

		/* check if flush hints share a page */
		for (j = 0; j < i; j++) {
			struct resource *res_j = &nvdimm->flush_wpq[j];
			unsigned long pfn_j = PHYS_PFN(res_j->start);

			if (pfn == pfn_j)
				break;
		}

		if (j < i)
			flush_page = (void __iomem *) ((unsigned long)
					ndrd->flush_wpq[dimm][j] & PAGE_MASK);
		else
			/* devm_nvdimm_ioremap() takes a physical address, not a pfn */
			flush_page = devm_nvdimm_ioremap(dev,
					PFN_PHYS(pfn), PAGE_SIZE);
		if (!flush_page)
			return -ENXIO;
		ndrd->flush_wpq[dimm][i] = flush_page
			+ (res->start & ~PAGE_MASK);
	}

	return 0;
}

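/*
 * Worked example for the loop above (hypothetical addresses): flush
 * hints at 0x10000040 and 0x10000080 share pfn 0x10000 (with 4K
 * pages), so the page is ioremapped once and both table entries reuse
 * that mapping:
 *
 *	ndrd->flush_wpq[dimm][0] = flush_page + 0x40;
 *	ndrd->flush_wpq[dimm][1] = flush_page + 0x80;
 *
 * i.e. each entry is the shared page mapping plus that hint's offset
 * within the page (res->start & ~PAGE_MASK).
 */
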
int nd_region_activate(struct nd_region *nd_region)
{
	int i;
	struct nd_region_data *ndrd;
	struct device *dev = &nd_region->dev;
	size_t flush_data_size = sizeof(void *);

	nvdimm_bus_lock(&nd_region->dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		/* at least one null hint slot per-dimm for the "no-hint" case */
		flush_data_size += sizeof(void *);
		if (!nvdimm->num_flush)
			continue;
		flush_data_size += nvdimm->num_flush * sizeof(void *);
	}
	nvdimm_bus_unlock(&nd_region->dev);

	ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
	if (!ndrd)
		return -ENOMEM;
	dev_set_drvdata(dev, ndrd);

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);

		if (rc)
			return rc;
	}

	return 0;
}

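/*
 * Allocation-size sketch for the flush hint table above (illustrative
 * numbers): a region mapping two DIMMs that advertise 2 and 0 flush
 * addresses reserves 1 (base) + 2 (one null "no-hint" slot per DIMM)
 * + 2 (hints on the first DIMM) = 5 pointer slots appended to
 * struct nd_region_data.
 */
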
static void nd_region_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	u16 i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		put_device(&nvdimm->dev);
	}
	free_percpu(nd_region->lane);
	ida_simple_remove(&region_ida, nd_region->id);
	if (is_nd_blk(dev))
		kfree(to_nd_blk_region(dev));
	else
		kfree(nd_region);
}

static struct device_type nd_blk_device_type = {
	.name = "nd_blk",
	.release = nd_region_release,
};

static struct device_type nd_pmem_device_type = {
	.name = "nd_pmem",
	.release = nd_region_release,
};

static struct device_type nd_volatile_device_type = {
	.name = "nd_volatile",
	.release = nd_region_release,
};

bool is_nd_pmem(struct device *dev)
{
	return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
	return dev ? dev->type == &nd_blk_device_type : false;
}

struct nd_region *to_nd_region(struct device *dev)
{
	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

	WARN_ON(dev->type->release != nd_region_release);
	return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	WARN_ON(!is_nd_blk(dev));
	return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);

void *nd_region_provider_data(struct nd_region *nd_region)
{
	return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
	return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);

void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
	ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);

/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is the 'nstype' attribute of a region as well as an input to
 * the MODALIAS for namespace devices, and the bit number for an
 * nvdimm_bus to match namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
	if (is_nd_pmem(&nd_region->dev)) {
		u16 i, alias;

		for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			if (nvdimm->flags & NDD_ALIASING)
				alias++;
		}
		if (alias)
			return ND_DEVICE_NAMESPACE_PMEM;
		else
			return ND_DEVICE_NAMESPACE_IO;
	} else if (is_nd_blk(&nd_region->dev)) {
		return ND_DEVICE_NAMESPACE_BLK;
	}

	return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);

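/*
 * Example: a PMEM region where at least one DIMM sets NDD_ALIASING
 * types its namespaces as ND_DEVICE_NAMESPACE_PMEM (label-managed);
 * with no aliasing the simpler ND_DEVICE_NAMESPACE_IO type is used.
 * The namespace device's MODALIAS then carries this integer (assumed
 * "nd:t<nstype>" per the uapi ND_DEVICE_MODALIAS_FMT, defined outside
 * this file) so the matching namespace driver autoloads.
 */
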
static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long size = 0;

	if (is_nd_pmem(dev)) {
		size = nd_region->ndr_size;
	} else if (nd_region->ndr_mappings == 1) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];

		size = nd_mapping->size;
	}

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t mappings_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t set_cookie_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (is_nd_pmem(dev) && nd_set)
		/* pass, should be precluded by region_visible */;
	else
		return -ENXIO;

	return sprintf(buf, "%#llx\n", nd_set->cookie);
}
static DEVICE_ATTR_RO(set_cookie);

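/*
 * Sum the free dimm-physical-address space across this region's
 * mappings. For PMEM, nd_pmem_available_dpa() also reports via
 * @overlap how much of a mapping is aliased by BLK capacity; if the
 * scan discovers a larger overlap than previously assumed, restart so
 * every mapping is re-evaluated against the worst-case overlap.
 */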
resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
	resource_size_t blk_max_overlap = 0, available, overlap;
	int i;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

 retry:
	available = 0;
	overlap = blk_max_overlap;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		/* if a dimm is disabled the available capacity is zero */
		if (!ndd)
			return 0;

		if (is_nd_pmem(&nd_region->dev)) {
			available += nd_pmem_available_dpa(nd_region,
					nd_mapping, &overlap);
			if (overlap > blk_max_overlap) {
				blk_max_overlap = overlap;
				goto retry;
			}
		} else if (is_nd_blk(&nd_region->dev)) {
			available += nd_blk_available_dpa(nd_mapping);
		}
	}

	return available;
}

static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	/*
	 * Flush in-flight updates and grab a snapshot of the available
	 * size. Of course, this value is potentially invalidated the
	 * moment the nvdimm_bus_lock() is dropped, but that's
	 * userspace's problem to not race itself.
	 */
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);

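/*
 * Userspace consumes this attribute through sysfs, e.g. (path assumed
 * from the "nd" bus name and the "region%d" device naming used below;
 * the byte count is illustrative):
 *
 *	$ cat /sys/bus/nd/devices/region0/available_size
 *	67108864
 *
 * Per the comment above, the snapshot may be stale as soon as the bus
 * lock drops.
 */
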
static ssize_t init_namespaces_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region_data *ndrd = dev_get_drvdata(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (ndrd)
		rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
	else
		rc = -ENXIO;
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(init_namespaces);

static ssize_t namespace_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t btt_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->btt_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(btt_seed);

static ssize_t pfn_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->pfn_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(pfn_seed);

static ssize_t dax_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->dax_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(dax_seed);

static ssize_t read_only_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ro);
}

static ssize_t read_only_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool ro;
	int rc = strtobool(buf, &ro);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;

	nd_region->ro = ro;
	return len;
}
static DEVICE_ATTR_RW(read_only);

static struct attribute *nd_region_attributes[] = {
	&dev_attr_size.attr,
	&dev_attr_nstype.attr,
	&dev_attr_mappings.attr,
	&dev_attr_btt_seed.attr,
	&dev_attr_pfn_seed.attr,
	&dev_attr_dax_seed.attr,
	&dev_attr_read_only.attr,
	&dev_attr_set_cookie.attr,
	&dev_attr_available_size.attr,
	&dev_attr_namespace_seed.attr,
	&dev_attr_init_namespaces.attr,
	NULL,
};

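/*
 * Attribute visibility rules, as implemented below: pfn_seed and
 * dax_seed only apply to PMEM regions; set_cookie requires a PMEM
 * region with an interleave set; available_size is shown when the
 * region's namespace type is label-managed (PMEM or BLK), or when the
 * region is PMEM with an interleave set.
 */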
static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	int type = nd_region_to_nstype(nd_region);

	if (!is_nd_pmem(dev) && a == &dev_attr_pfn_seed.attr)
		return 0;

	if (!is_nd_pmem(dev) && a == &dev_attr_dax_seed.attr)
		return 0;

	if (a != &dev_attr_set_cookie.attr
			&& a != &dev_attr_available_size.attr)
		return a->mode;

	if ((type == ND_DEVICE_NAMESPACE_PMEM
				|| type == ND_DEVICE_NAMESPACE_BLK)
			&& a == &dev_attr_available_size.attr)
		return a->mode;
	else if (is_nd_pmem(dev) && nd_set)
		return a->mode;

	return 0;
}

struct attribute_group nd_region_attribute_group = {
	.attrs = nd_region_attributes,
	.is_visible = region_visible,
};
EXPORT_SYMBOL_GPL(nd_region_attribute_group);

u64 nd_region_interleave_set_cookie(struct nd_region *nd_region)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (nd_set)
		return nd_set->cookie;
	return 0;
}

/*
 * Upon successful probe/remove, take/release a reference on the
 * associated interleave set (if present), and plant new btt + namespace
 * seeds.  Also, on the removal of a BLK region, notify the provider to
 * disable the region.
 */
static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
		struct device *dev, bool probe)
{
	struct nd_region *nd_region;

	if (!probe && (is_nd_pmem(dev) || is_nd_blk(dev))) {
		int i;

		nd_region = to_nd_region(dev);
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm_drvdata *ndd = nd_mapping->ndd;
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			kfree(nd_mapping->labels);
			nd_mapping->labels = NULL;
			put_ndd(ndd);
			nd_mapping->ndd = NULL;
			if (ndd)
				atomic_dec(&nvdimm->busy);
		}

		if (is_nd_pmem(dev))
			return;
	}
	if (dev->parent && is_nd_blk(dev->parent) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->ns_seed == dev)
			nd_region_create_blk_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_btt(dev) && probe) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->btt_seed == dev)
			nd_region_create_btt_seed(nd_region);
		if (nd_region->ns_seed == &nd_btt->ndns->dev &&
				is_nd_blk(dev->parent))
			nd_region_create_blk_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_pfn(dev) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->pfn_seed == dev)
			nd_region_create_pfn_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_dax(dev) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->dax_seed == dev)
			nd_region_create_dax_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
}

void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, true);
}

void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, false);
}

static ssize_t mappingN(struct device *dev, char *buf, int n)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm *nvdimm;

	if (n >= nd_region->ndr_mappings)
		return -ENXIO;
	nd_mapping = &nd_region->mapping[n];
	nvdimm = nd_mapping->nvdimm;

	return sprintf(buf, "%s,%llu,%llu\n", dev_name(&nvdimm->dev),
			nd_mapping->start, nd_mapping->size);
}

#define REGION_MAPPING(idx) \
static ssize_t mapping##idx##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	return mappingN(dev, buf, idx);				\
}								\
static DEVICE_ATTR_RO(mapping##idx)

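/*
 * For example, REGION_MAPPING(0) expands to a mapping0_show() wrapper
 * and dev_attr_mapping0; reading the resulting sysfs attribute yields
 * "<dimm-device>,<start-dpa>,<size>", e.g. "nmem0,0,34359738368"
 * (illustrative values; the "nmem%d" naming comes from the dimm
 * device registration, not this file).
 */
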
/*
 * 32 should be enough for a while; even in the presence of socket
 * interleave, a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);

static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nd_region *nd_region = to_nd_region(dev);

	if (n < nd_region->ndr_mappings)
		return a->mode;
	return 0;
}

static struct attribute *mapping_attributes[] = {
	&dev_attr_mapping0.attr,
	&dev_attr_mapping1.attr,
	&dev_attr_mapping2.attr,
	&dev_attr_mapping3.attr,
	&dev_attr_mapping4.attr,
	&dev_attr_mapping5.attr,
	&dev_attr_mapping6.attr,
	&dev_attr_mapping7.attr,
	&dev_attr_mapping8.attr,
	&dev_attr_mapping9.attr,
	&dev_attr_mapping10.attr,
	&dev_attr_mapping11.attr,
	&dev_attr_mapping12.attr,
	&dev_attr_mapping13.attr,
	&dev_attr_mapping14.attr,
	&dev_attr_mapping15.attr,
	&dev_attr_mapping16.attr,
	&dev_attr_mapping17.attr,
	&dev_attr_mapping18.attr,
	&dev_attr_mapping19.attr,
	&dev_attr_mapping20.attr,
	&dev_attr_mapping21.attr,
	&dev_attr_mapping22.attr,
	&dev_attr_mapping23.attr,
	&dev_attr_mapping24.attr,
	&dev_attr_mapping25.attr,
	&dev_attr_mapping26.attr,
	&dev_attr_mapping27.attr,
	&dev_attr_mapping28.attr,
	&dev_attr_mapping29.attr,
	&dev_attr_mapping30.attr,
	&dev_attr_mapping31.attr,
	NULL,
};

struct attribute_group nd_mapping_attribute_group = {
	.is_visible = mapping_visible,
	.attrs = mapping_attributes,
};
EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);

int nd_blk_region_init(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!is_nd_blk(dev))
		return 0;

	if (nd_region->ndr_mappings < 1) {
		dev_err(dev, "invalid BLK region\n");
		return -ENXIO;
	}

	return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}

/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per-cpu.  For larger systems we need to lock to share lanes.  For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace a lane may be
 * acquired recursively.  We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
	unsigned int cpu, lane;

	cpu = get_cpu();
	if (nd_region->num_lanes < nr_cpu_ids) {
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		lane = cpu % nd_region->num_lanes;
		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (ndl_count->count++ == 0)
			spin_lock(&ndl_lock->lock);
	} else
		lane = cpu;

	return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);

void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
	if (nd_region->num_lanes < nr_cpu_ids) {
		unsigned int cpu = get_cpu();
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (--ndl_count->count == 0)
			spin_unlock(&ndl_lock->lock);
		put_cpu();
	}
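	/* pair with the unconditional get_cpu() in nd_region_acquire_lane() */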
	put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);

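/*
 * Illustrative caller pattern (a sketch, not code from this file): a
 * BLK or BTT I/O path brackets its per-lane resource usage as:
 *
 *	lane = nd_region_acquire_lane(nd_region);
 *	... use the lane's data window / BTT log slot ...
 *	nd_region_release_lane(nd_region, lane);
 *
 * Preemption is disabled for the duration (get_cpu() in acquire,
 * put_cpu() in release), so the critical section must not sleep.
 */
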
static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc, struct device_type *dev_type,
		const char *caller)
{
	struct nd_region *nd_region;
	struct device *dev;
	void *region_buf;
	unsigned int i;
	int ro = 0;

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		if ((nd_mapping->start | nd_mapping->size) % SZ_4K) {
			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
					caller, dev_name(&nvdimm->dev), i);

			return NULL;
		}

		if (nvdimm->flags & NDD_UNARMED)
			ro = 1;
	}

	if (dev_type == &nd_blk_device_type) {
		struct nd_blk_region_desc *ndbr_desc;
		struct nd_blk_region *ndbr;

		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		if (ndbr) {
			nd_region = &ndbr->nd_region;
			ndbr->enable = ndbr_desc->enable;
			ndbr->do_io = ndbr_desc->do_io;
		}
		region_buf = ndbr;
	} else {
		nd_region = kzalloc(sizeof(struct nd_region)
				+ sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		region_buf = nd_region;
	}

	if (!region_buf)
		return NULL;
	nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
	if (nd_region->id < 0)
		goto err_id;

	nd_region->lane = alloc_percpu(struct nd_percpu_lane);
	if (!nd_region->lane)
		goto err_percpu;

	for (i = 0; i < nr_cpu_ids; i++) {
		struct nd_percpu_lane *ndl;

		ndl = per_cpu_ptr(nd_region->lane, i);
		spin_lock_init(&ndl->lock);
		ndl->count = 0;
	}

	memcpy(nd_region->mapping, ndr_desc->nd_mapping,
			sizeof(struct nd_mapping) * ndr_desc->num_mappings);
	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		get_device(&nvdimm->dev);
	}
	nd_region->ndr_mappings = ndr_desc->num_mappings;
	nd_region->provider_data = ndr_desc->provider_data;
	nd_region->nd_set = ndr_desc->nd_set;
	nd_region->num_lanes = ndr_desc->num_lanes;
	nd_region->flags = ndr_desc->flags;
	nd_region->ro = ro;
	nd_region->numa_node = ndr_desc->numa_node;
	ida_init(&nd_region->ns_ida);
	ida_init(&nd_region->btt_ida);
	ida_init(&nd_region->pfn_ida);
	ida_init(&nd_region->dax_ida);
	dev = &nd_region->dev;
	dev_set_name(dev, "region%d", nd_region->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = dev_type;
	dev->groups = ndr_desc->attr_groups;
	nd_region->ndr_size = resource_size(ndr_desc->res);
	nd_region->ndr_start = ndr_desc->res->start;
	nd_device_register(dev);

	return nd_region;

 err_percpu:
	ida_simple_remove(&region_ida, nd_region->id);
 err_id:
	kfree(region_buf);
	return NULL;
}

struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);

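/*
 * A minimal provider-side sketch (hypothetical caller, using only the
 * nd_region_desc fields this file consumes):
 *
 *	struct nd_region_desc ndr_desc = { 0 };
 *
 *	ndr_desc.res = &res;			// region physical range
 *	ndr_desc.nd_mapping = mappings;		// per-dimm DPA translations
 *	ndr_desc.num_mappings = count;
 *	ndr_desc.numa_node = nid;
 *	ndr_desc.provider_data = priv;
 *	if (!nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc))
 *		return -ENXIO;
 *
 * Note that mappings must be SZ_4K aligned, and any DIMM flagged
 * NDD_UNARMED forces the region read-only, per nd_region_create()
 * above.
 */
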
struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	if (ndr_desc->num_mappings > 1)
		return NULL;
	ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);

void __exit nd_region_devs_exit(void)
{
	ida_destroy(&region_ida);
}