/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static DEFINE_IDA(region_ida);
static DEFINE_PER_CPU(int, flush_idx);
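/*
 * Map the write-pending-queue (WPQ) flush hint addresses that a DIMM
 * contributes to this region. Hints whose physical addresses fall in
 * the same page share a single ioremap mapping; each table slot stores
 * that page mapping plus the hint's offset within the page.
 */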
static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
                struct nd_region_data *ndrd)
{
        int i, j;

        dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
                        nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
        for (i = 0; i < (1 << ndrd->hints_shift); i++) {
                struct resource *res = &nvdimm->flush_wpq[i];
                unsigned long pfn = PHYS_PFN(res->start);
                void __iomem *flush_page;

                /* check if flush hints share a page */
                for (j = 0; j < i; j++) {
                        struct resource *res_j = &nvdimm->flush_wpq[j];
                        unsigned long pfn_j = PHYS_PFN(res_j->start);

                        if (pfn == pfn_j)
                                break;
                }

                if (j < i)
                        flush_page = (void __iomem *) ((unsigned long)
                                        ndrd_get_flush_wpq(ndrd, dimm, j)
                                        & PAGE_MASK);
                else
                        flush_page = devm_nvdimm_ioremap(dev,
                                        PFN_PHYS(pfn), PAGE_SIZE);
                if (!flush_page)
                        return -ENXIO;
                ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
                                + (res->start & ~PAGE_MASK));
        }

        return 0;
}

int nd_region_activate(struct nd_region *nd_region)
{
        int i, j, num_flush = 0;
        struct nd_region_data *ndrd;
        struct device *dev = &nd_region->dev;
        size_t flush_data_size = sizeof(void *);

        nvdimm_bus_lock(&nd_region->dev);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;

                /* at least one null hint slot per-dimm for the "no-hint" case */
                flush_data_size += sizeof(void *);
                num_flush = min_not_zero(num_flush, nvdimm->num_flush);
                if (!nvdimm->num_flush)
                        continue;
                flush_data_size += nvdimm->num_flush * sizeof(void *);
        }
        nvdimm_bus_unlock(&nd_region->dev);

        ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
        if (!ndrd)
                return -ENOMEM;
        dev_set_drvdata(dev, ndrd);

        if (!num_flush)
                return 0;

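        /*
         * hints_shift determines the per-DIMM stride of the flush-hint
         * table; lookups mask their hint index down to this power of two
         * (see ndrd_get_flush_wpq()).
         */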
        ndrd->hints_shift = ilog2(num_flush);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;
                int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);

                if (rc)
                        return rc;
        }

        /*
         * Clear out duplicate entries so that a flush hint address shared
         * by multiple DIMMs is only written to once per flush.
         */
        for (i = 0; i < nd_region->ndr_mappings - 1; i++) {
                /* ignore if NULL already */
                if (!ndrd_get_flush_wpq(ndrd, i, 0))
                        continue;

                for (j = i + 1; j < nd_region->ndr_mappings; j++)
                        if (ndrd_get_flush_wpq(ndrd, i, 0) ==
                            ndrd_get_flush_wpq(ndrd, j, 0))
                                ndrd_set_flush_wpq(ndrd, j, 0, NULL);
        }

        return 0;
}

static void nd_region_release(struct device *dev)
{
        struct nd_region *nd_region = to_nd_region(dev);
        u16 i;

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;

                put_device(&nvdimm->dev);
        }
        free_percpu(nd_region->lane);
        ida_simple_remove(&region_ida, nd_region->id);
        if (is_nd_blk(dev))
                kfree(to_nd_blk_region(dev));
        else
                kfree(nd_region);
}

static struct device_type nd_blk_device_type = {
        .name = "nd_blk",
        .release = nd_region_release,
};

static struct device_type nd_pmem_device_type = {
        .name = "nd_pmem",
        .release = nd_region_release,
};

static struct device_type nd_volatile_device_type = {
        .name = "nd_volatile",
        .release = nd_region_release,
};

bool is_nd_pmem(struct device *dev)
{
        return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
        return dev ? dev->type == &nd_blk_device_type : false;
}

bool is_nd_volatile(struct device *dev)
{
        return dev ? dev->type == &nd_volatile_device_type : false;
}

struct nd_region *to_nd_region(struct device *dev)
{
        struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

        WARN_ON(dev->type->release != nd_region_release);
        return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

struct device *nd_region_dev(struct nd_region *nd_region)
{
        if (!nd_region)
                return NULL;
        return &nd_region->dev;
}
EXPORT_SYMBOL_GPL(nd_region_dev);

struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
        struct nd_region *nd_region = to_nd_region(dev);

        WARN_ON(!is_nd_blk(dev));
        return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);

void *nd_region_provider_data(struct nd_region *nd_region)
{
        return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
        return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);

void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
        ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);

/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is also the 'nstype' attribute of a region, an input to the
 * MODALIAS for namespace devices, and the bit number for an nvdimm_bus
 * to match namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
        if (is_memory(&nd_region->dev)) {
                u16 i, alias;

                for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
                        struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                        struct nvdimm *nvdimm = nd_mapping->nvdimm;

                        if (test_bit(NDD_ALIASING, &nvdimm->flags))
                                alias++;
                }
                if (alias)
                        return ND_DEVICE_NAMESPACE_PMEM;
                else
                        return ND_DEVICE_NAMESPACE_IO;
        } else if (is_nd_blk(&nd_region->dev)) {
                return ND_DEVICE_NAMESPACE_BLK;
        }

        return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);

static ssize_t size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        unsigned long long size = 0;

        if (is_memory(dev)) {
                size = nd_region->ndr_size;
        } else if (nd_region->ndr_mappings == 1) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[0];

                size = nd_mapping->size;
        }

        return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

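/*
 * 'deep_flush' attribute: reads report whether the region has flush
 * hints (the nvdimm_has_flush() result), writes of '1' trigger an
 * explicit nvdimm_flush() of the region.
 */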
static ssize_t deep_flush_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        /*
         * NOTE: in the nvdimm_has_flush() error case this attribute is
         * not visible.
         */
        return sprintf(buf, "%d\n", nvdimm_has_flush(nd_region));
}

static ssize_t deep_flush_store(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t len)
{
        bool flush;
        int rc = strtobool(buf, &flush);
        struct nd_region *nd_region = to_nd_region(dev);

        if (rc)
                return rc;
        if (!flush)
                return -EINVAL;
        nvdimm_flush(nd_region);

        return len;
}
static DEVICE_ATTR_RW(deep_flush);

static ssize_t mappings_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t set_cookie_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        struct nd_interleave_set *nd_set = nd_region->nd_set;
        ssize_t rc = 0;

        if (is_memory(dev) && nd_set)
                /* pass, should be precluded by region_visible */;
        else
                return -ENXIO;

        /*
         * The cookie to show depends on which specification of the
         * labels we are using. If there are no labels then default to
         * the v1.1 namespace label cookie definition. To read all this
         * data we need to wait for probing to settle.
         */
        device_lock(dev);
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        if (nd_region->ndr_mappings) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[0];
                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

                if (ndd) {
                        struct nd_namespace_index *nsindex;

                        nsindex = to_namespace_index(ndd, ndd->ns_current);
                        rc = sprintf(buf, "%#llx\n",
                                        nd_region_interleave_set_cookie(nd_region,
                                                nsindex));
                }
        }
        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        if (rc)
                return rc;
        return sprintf(buf, "%#llx\n", nd_set->cookie1);
}
static DEVICE_ATTR_RO(set_cookie);

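/*
 * Sum the free DPA (DIMM-physical-address) capacity across all of the
 * region's mappings. For pmem, capacity shadowed by BLK allocations is
 * unusable, so the loop retries with the worst-case BLK overlap seen so
 * far until the result stabilizes.
 */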
resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
        resource_size_t blk_max_overlap = 0, available, overlap;
        int i;

        WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

 retry:
        available = 0;
        overlap = blk_max_overlap;
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

                /* if a dimm is disabled the available capacity is zero */
                if (!ndd)
                        return 0;

                if (is_memory(&nd_region->dev)) {
                        available += nd_pmem_available_dpa(nd_region,
                                        nd_mapping, &overlap);
                        if (overlap > blk_max_overlap) {
                                blk_max_overlap = overlap;
                                goto retry;
                        }
                } else if (is_nd_blk(&nd_region->dev))
                        available += nd_blk_available_dpa(nd_region);
        }

        return available;
}

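/*
 * For pmem the largest allocatable extent is bounded by the smallest
 * contiguous free range on any single DIMM, multiplied by the
 * interleave width; for blk it is the plain sum of free capacity.
 */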
resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region)
{
        resource_size_t available = 0;
        int i;

        if (is_memory(&nd_region->dev))
                available = PHYS_ADDR_MAX;

        WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];

                if (is_memory(&nd_region->dev))
                        available = min(available,
                                        nd_pmem_max_contiguous_dpa(nd_region,
                                                        nd_mapping));
                else if (is_nd_blk(&nd_region->dev))
                        available += nd_blk_available_dpa(nd_region);
        }
        if (is_memory(&nd_region->dev))
                return available * nd_region->ndr_mappings;
        return available;
}

static ssize_t available_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        unsigned long long available = 0;

        /*
         * Flush in-flight updates and grab a snapshot of the available
         * size. Of course, this value is potentially invalidated the
         * moment the nvdimm_bus_lock() is dropped, but that's userspace's
         * problem to not race itself.
         */
        device_lock(dev);
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        available = nd_region_available_dpa(nd_region);
        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);

static ssize_t max_available_extent_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        unsigned long long available = 0;

        device_lock(dev);
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        available = nd_region_allocatable_dpa(nd_region);
        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(max_available_extent);

static ssize_t init_namespaces_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region_data *ndrd = dev_get_drvdata(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        if (ndrd)
                rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
        else
                rc = -ENXIO;
        nvdimm_bus_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(init_namespaces);

static ssize_t namespace_seed_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        if (nd_region->ns_seed)
                rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
        else
                rc = sprintf(buf, "\n");
        nvdimm_bus_unlock(dev);
        return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t btt_seed_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        if (nd_region->btt_seed)
                rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
        else
                rc = sprintf(buf, "\n");
        nvdimm_bus_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(btt_seed);

static ssize_t pfn_seed_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        if (nd_region->pfn_seed)
                rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
        else
                rc = sprintf(buf, "\n");
        nvdimm_bus_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(pfn_seed);

static ssize_t dax_seed_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        if (nd_region->dax_seed)
                rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
        else
                rc = sprintf(buf, "\n");
        nvdimm_bus_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(dax_seed);

static ssize_t read_only_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        return sprintf(buf, "%d\n", nd_region->ro);
}

static ssize_t read_only_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        bool ro;
        int rc = strtobool(buf, &ro);
        struct nd_region *nd_region = to_nd_region(dev);

        if (rc)
                return rc;

        nd_region->ro = ro;
        return len;
}
static DEVICE_ATTR_RW(read_only);

static ssize_t region_badblocks_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        ssize_t rc;

        device_lock(dev);
        if (dev->driver)
                rc = badblocks_show(&nd_region->bb, buf, 0);
        else
                rc = -ENXIO;
        device_unlock(dev);

        return rc;
}
static DEVICE_ATTR(badblocks, 0444, region_badblocks_show, NULL);

static ssize_t resource_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        return sprintf(buf, "%#llx\n", nd_region->ndr_start);
}
static DEVICE_ATTR_RO(resource);

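/*
 * Report how far a write must travel to be power-fail safe on this
 * platform: 'cpu_cache' means data is persistent once it reaches the
 * CPU cache, 'memory_controller' once it is accepted by the memory
 * controller.
 */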
static ssize_t persistence_domain_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        if (test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags))
                return sprintf(buf, "cpu_cache\n");
        else if (test_bit(ND_REGION_PERSIST_MEMCTRL, &nd_region->flags))
                return sprintf(buf, "memory_controller\n");
        else
                return sprintf(buf, "\n");
}
static DEVICE_ATTR_RO(persistence_domain);

static struct attribute *nd_region_attributes[] = {
        &dev_attr_size.attr,
        &dev_attr_nstype.attr,
        &dev_attr_mappings.attr,
        &dev_attr_btt_seed.attr,
        &dev_attr_pfn_seed.attr,
        &dev_attr_dax_seed.attr,
        &dev_attr_deep_flush.attr,
        &dev_attr_read_only.attr,
        &dev_attr_set_cookie.attr,
        &dev_attr_available_size.attr,
        &dev_attr_max_available_extent.attr,
        &dev_attr_namespace_seed.attr,
        &dev_attr_init_namespaces.attr,
        &dev_attr_badblocks.attr,
        &dev_attr_resource.attr,
        &dev_attr_persistence_domain.attr,
        NULL,
};

static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, typeof(*dev), kobj);
        struct nd_region *nd_region = to_nd_region(dev);
        struct nd_interleave_set *nd_set = nd_region->nd_set;
        int type = nd_region_to_nstype(nd_region);

        if (!is_memory(dev) && a == &dev_attr_pfn_seed.attr)
                return 0;

        if (!is_memory(dev) && a == &dev_attr_dax_seed.attr)
                return 0;

        if (!is_memory(dev) && a == &dev_attr_badblocks.attr)
                return 0;

        if (a == &dev_attr_resource.attr) {
                if (is_memory(dev))
                        return 0400;
                else
                        return 0;
        }

        if (a == &dev_attr_deep_flush.attr) {
                int has_flush = nvdimm_has_flush(nd_region);

                if (has_flush == 1)
                        return a->mode;
                else if (has_flush == 0)
                        return 0444;
                else
                        return 0;
        }

        if (a == &dev_attr_persistence_domain.attr) {
                if ((nd_region->flags & (BIT(ND_REGION_PERSIST_CACHE)
                                        | BIT(ND_REGION_PERSIST_MEMCTRL))) == 0)
                        return 0;
                return a->mode;
        }

        if (a != &dev_attr_set_cookie.attr
                        && a != &dev_attr_available_size.attr)
                return a->mode;

        if ((type == ND_DEVICE_NAMESPACE_PMEM
                                || type == ND_DEVICE_NAMESPACE_BLK)
                        && a == &dev_attr_available_size.attr)
                return a->mode;
        else if (is_memory(dev) && nd_set)
                return a->mode;

        return 0;
}

struct attribute_group nd_region_attribute_group = {
        .attrs = nd_region_attributes,
        .is_visible = region_visible,
};
EXPORT_SYMBOL_GPL(nd_region_attribute_group);

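/*
 * Select the interleave set cookie that matches the namespace label
 * specification in use: v1.1 labels validate against cookie1, newer
 * label formats against cookie2.
 */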
u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
                struct nd_namespace_index *nsindex)
{
        struct nd_interleave_set *nd_set = nd_region->nd_set;

        if (!nd_set)
                return 0;

        if (nsindex && __le16_to_cpu(nsindex->major) == 1
                        && __le16_to_cpu(nsindex->minor) == 1)
                return nd_set->cookie1;
        return nd_set->cookie2;
}

u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
{
        struct nd_interleave_set *nd_set = nd_region->nd_set;

        if (nd_set)
                return nd_set->altcookie;
        return 0;
}

void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
{
        struct nd_label_ent *label_ent, *e;

        lockdep_assert_held(&nd_mapping->lock);
        list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
                list_del(&label_ent->list);
                kfree(label_ent);
        }
}

/*
 * Upon successful probe/remove, take/release a reference on the
 * associated interleave set (if present), and plant new btt + namespace
 * seeds. Also, on the removal of a BLK region, notify the provider to
 * disable the region.
 */
static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
                struct device *dev, bool probe)
{
        struct nd_region *nd_region;

        if (!probe && is_nd_region(dev)) {
                int i;

                nd_region = to_nd_region(dev);
                for (i = 0; i < nd_region->ndr_mappings; i++) {
                        struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                        struct nvdimm_drvdata *ndd = nd_mapping->ndd;
                        struct nvdimm *nvdimm = nd_mapping->nvdimm;

                        mutex_lock(&nd_mapping->lock);
                        nd_mapping_free_labels(nd_mapping);
                        mutex_unlock(&nd_mapping->lock);

                        put_ndd(ndd);
                        nd_mapping->ndd = NULL;
                        if (ndd)
                                atomic_dec(&nvdimm->busy);
                }
        }
        if (dev->parent && is_nd_region(dev->parent) && probe) {
                nd_region = to_nd_region(dev->parent);
                nvdimm_bus_lock(dev);
                if (nd_region->ns_seed == dev)
                        nd_region_create_ns_seed(nd_region);
                nvdimm_bus_unlock(dev);
        }
        if (is_nd_btt(dev) && probe) {
                struct nd_btt *nd_btt = to_nd_btt(dev);

                nd_region = to_nd_region(dev->parent);
                nvdimm_bus_lock(dev);
                if (nd_region->btt_seed == dev)
                        nd_region_create_btt_seed(nd_region);
                if (nd_region->ns_seed == &nd_btt->ndns->dev)
                        nd_region_create_ns_seed(nd_region);
                nvdimm_bus_unlock(dev);
        }
        if (is_nd_pfn(dev) && probe) {
                struct nd_pfn *nd_pfn = to_nd_pfn(dev);

                nd_region = to_nd_region(dev->parent);
                nvdimm_bus_lock(dev);
                if (nd_region->pfn_seed == dev)
                        nd_region_create_pfn_seed(nd_region);
                if (nd_region->ns_seed == &nd_pfn->ndns->dev)
                        nd_region_create_ns_seed(nd_region);
                nvdimm_bus_unlock(dev);
        }
        if (is_nd_dax(dev) && probe) {
                struct nd_dax *nd_dax = to_nd_dax(dev);

                nd_region = to_nd_region(dev->parent);
                nvdimm_bus_lock(dev);
                if (nd_region->dax_seed == dev)
                        nd_region_create_dax_seed(nd_region);
                if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
                        nd_region_create_ns_seed(nd_region);
                nvdimm_bus_unlock(dev);
        }
}

void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
        nd_region_notify_driver_action(nvdimm_bus, dev, true);
}

void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
        nd_region_notify_driver_action(nvdimm_bus, dev, false);
}

static ssize_t mappingN(struct device *dev, char *buf, int n)
{
        struct nd_region *nd_region = to_nd_region(dev);
        struct nd_mapping *nd_mapping;
        struct nvdimm *nvdimm;

        if (n >= nd_region->ndr_mappings)
                return -ENXIO;
        nd_mapping = &nd_region->mapping[n];
        nvdimm = nd_mapping->nvdimm;

        return sprintf(buf, "%s,%llu,%llu,%d\n", dev_name(&nvdimm->dev),
                        nd_mapping->start, nd_mapping->size,
                        nd_mapping->position);
}

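/*
 * Each mapping%d attribute emits "<dimm>,<dpa-start>,<size>,<position>"
 * for the corresponding DIMM in the interleave set.
 */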
#define REGION_MAPPING(idx) \
static ssize_t mapping##idx##_show(struct device *dev, \
                struct device_attribute *attr, char *buf) \
{ \
        return mappingN(dev, buf, idx); \
} \
static DEVICE_ATTR_RO(mapping##idx)

/*
 * 32 should be enough for a while; even in the presence of socket
 * interleave a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);

static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, struct device, kobj);
        struct nd_region *nd_region = to_nd_region(dev);

        if (n < nd_region->ndr_mappings)
                return a->mode;
        return 0;
}

static struct attribute *mapping_attributes[] = {
        &dev_attr_mapping0.attr,
        &dev_attr_mapping1.attr,
        &dev_attr_mapping2.attr,
        &dev_attr_mapping3.attr,
        &dev_attr_mapping4.attr,
        &dev_attr_mapping5.attr,
        &dev_attr_mapping6.attr,
        &dev_attr_mapping7.attr,
        &dev_attr_mapping8.attr,
        &dev_attr_mapping9.attr,
        &dev_attr_mapping10.attr,
        &dev_attr_mapping11.attr,
        &dev_attr_mapping12.attr,
        &dev_attr_mapping13.attr,
        &dev_attr_mapping14.attr,
        &dev_attr_mapping15.attr,
        &dev_attr_mapping16.attr,
        &dev_attr_mapping17.attr,
        &dev_attr_mapping18.attr,
        &dev_attr_mapping19.attr,
        &dev_attr_mapping20.attr,
        &dev_attr_mapping21.attr,
        &dev_attr_mapping22.attr,
        &dev_attr_mapping23.attr,
        &dev_attr_mapping24.attr,
        &dev_attr_mapping25.attr,
        &dev_attr_mapping26.attr,
        &dev_attr_mapping27.attr,
        &dev_attr_mapping28.attr,
        &dev_attr_mapping29.attr,
        &dev_attr_mapping30.attr,
        &dev_attr_mapping31.attr,
        NULL,
};

struct attribute_group nd_mapping_attribute_group = {
        .is_visible = mapping_visible,
        .attrs = mapping_attributes,
};
EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);

int nd_blk_region_init(struct nd_region *nd_region)
{
        struct device *dev = &nd_region->dev;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

        if (!is_nd_blk(dev))
                return 0;

        if (nd_region->ndr_mappings < 1) {
                dev_dbg(dev, "invalid BLK region\n");
                return -ENXIO;
        }

        return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}

/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per-cpu. For larger systems we need to lock to share lanes. For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace a lane may be
 * acquired recursively. We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
        unsigned int cpu, lane;

        cpu = get_cpu();
        if (nd_region->num_lanes < nr_cpu_ids) {
                struct nd_percpu_lane *ndl_lock, *ndl_count;

                lane = cpu % nd_region->num_lanes;
                ndl_count = per_cpu_ptr(nd_region->lane, cpu);
                ndl_lock = per_cpu_ptr(nd_region->lane, lane);
                if (ndl_count->count++ == 0)
                        spin_lock(&ndl_lock->lock);
        } else
                lane = cpu;

        return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);

void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
        if (nd_region->num_lanes < nr_cpu_ids) {
                unsigned int cpu = get_cpu();
                struct nd_percpu_lane *ndl_lock, *ndl_count;

                ndl_count = per_cpu_ptr(nd_region->lane, cpu);
                ndl_lock = per_cpu_ptr(nd_region->lane, lane);
                if (--ndl_count->count == 0)
                        spin_unlock(&ndl_lock->lock);
                put_cpu();
        }
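        /* the final put_cpu() balances the get_cpu() in nd_region_acquire_lane() */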
        put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);

static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
                struct nd_region_desc *ndr_desc, struct device_type *dev_type,
                const char *caller)
{
        struct nd_region *nd_region;
        struct device *dev;
        void *region_buf;
        unsigned int i;
        int ro = 0;

        for (i = 0; i < ndr_desc->num_mappings; i++) {
                struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
                struct nvdimm *nvdimm = mapping->nvdimm;

                if ((mapping->start | mapping->size) % SZ_4K) {
                        dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
                                        caller, dev_name(&nvdimm->dev), i);

                        return NULL;
                }

                if (test_bit(NDD_UNARMED, &nvdimm->flags))
                        ro = 1;
        }

        if (dev_type == &nd_blk_device_type) {
                struct nd_blk_region_desc *ndbr_desc;
                struct nd_blk_region *ndbr;

                ndbr_desc = to_blk_region_desc(ndr_desc);
                ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
                                * ndr_desc->num_mappings,
                                GFP_KERNEL);
                if (ndbr) {
                        nd_region = &ndbr->nd_region;
                        ndbr->enable = ndbr_desc->enable;
                        ndbr->do_io = ndbr_desc->do_io;
                }
                region_buf = ndbr;
        } else {
                nd_region = kzalloc(sizeof(struct nd_region)
                                + sizeof(struct nd_mapping)
                                * ndr_desc->num_mappings,
                                GFP_KERNEL);
                region_buf = nd_region;
        }

        if (!region_buf)
                return NULL;
        nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
        if (nd_region->id < 0)
                goto err_id;

        nd_region->lane = alloc_percpu(struct nd_percpu_lane);
        if (!nd_region->lane)
                goto err_percpu;

        for (i = 0; i < nr_cpu_ids; i++) {
                struct nd_percpu_lane *ndl;

                ndl = per_cpu_ptr(nd_region->lane, i);
                spin_lock_init(&ndl->lock);
                ndl->count = 0;
        }

        for (i = 0; i < ndr_desc->num_mappings; i++) {
                struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
                struct nvdimm *nvdimm = mapping->nvdimm;

                nd_region->mapping[i].nvdimm = nvdimm;
                nd_region->mapping[i].start = mapping->start;
                nd_region->mapping[i].size = mapping->size;
                nd_region->mapping[i].position = mapping->position;
                INIT_LIST_HEAD(&nd_region->mapping[i].labels);
                mutex_init(&nd_region->mapping[i].lock);

                get_device(&nvdimm->dev);
        }
        nd_region->ndr_mappings = ndr_desc->num_mappings;
        nd_region->provider_data = ndr_desc->provider_data;
        nd_region->nd_set = ndr_desc->nd_set;
        nd_region->num_lanes = ndr_desc->num_lanes;
        nd_region->flags = ndr_desc->flags;
        nd_region->ro = ro;
        nd_region->numa_node = ndr_desc->numa_node;
        ida_init(&nd_region->ns_ida);
        ida_init(&nd_region->btt_ida);
        ida_init(&nd_region->pfn_ida);
        ida_init(&nd_region->dax_ida);
        dev = &nd_region->dev;
        dev_set_name(dev, "region%d", nd_region->id);
        dev->parent = &nvdimm_bus->dev;
        dev->type = dev_type;
        dev->groups = ndr_desc->attr_groups;
        dev->of_node = ndr_desc->of_node;
        nd_region->ndr_size = resource_size(ndr_desc->res);
        nd_region->ndr_start = ndr_desc->res->start;
        nd_device_register(dev);

        return nd_region;

 err_percpu:
        ida_simple_remove(&region_ida, nd_region->id);
 err_id:
        kfree(region_buf);
        return NULL;
}

struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
                struct nd_region_desc *ndr_desc)
{
        ndr_desc->num_lanes = ND_MAX_LANES;
        return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
                        __func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);

struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
                struct nd_region_desc *ndr_desc)
{
        if (ndr_desc->num_mappings > 1)
                return NULL;
        ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
        return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
                        __func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
                struct nd_region_desc *ndr_desc)
{
        ndr_desc->num_lanes = ND_MAX_LANES;
        return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
                        __func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);

/**
 * nvdimm_flush - flush any posted write queues between the cpu and pmem media
 * @nd_region: blk or interleaved pmem region
 */
void nvdimm_flush(struct nd_region *nd_region)
{
        struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
        int i, idx;

        /*
         * Try to encourage some diversity in flush hint addresses
         * across cpus assuming a limited number of flush hints.
         */
        idx = this_cpu_read(flush_idx);
        idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8));
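        /*
         * idx may exceed the per-DIMM hint count; the table accessor
         * masks it down to a valid slot (see ndrd_get_flush_wpq()).
         */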

        /*
         * The first wmb() is needed to 'sfence' all previous writes
         * such that they are architecturally visible for the platform
         * buffer flush. Note that we've already arranged for pmem
         * writes to avoid the cache via memcpy_flushcache(). The final
         * wmb() ensures ordering for the NVDIMM flush write.
         */
        wmb();
        for (i = 0; i < nd_region->ndr_mappings; i++)
                if (ndrd_get_flush_wpq(ndrd, i, 0))
                        writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
        wmb();
}
EXPORT_SYMBOL_GPL(nvdimm_flush);

/**
 * nvdimm_has_flush - determine write flushing requirements
 * @nd_region: blk or interleaved pmem region
 *
 * Returns 1 if writes require flushing
 * Returns 0 if writes do not require flushing
 * Returns -ENXIO if flushing capability can not be determined
 */
int nvdimm_has_flush(struct nd_region *nd_region)
{
        int i;

        /* no nvdimm or pmem api == flushing capability unknown */
        if (nd_region->ndr_mappings == 0
                        || !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
                return -ENXIO;

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;

                /* flush hints present / available */
                if (nvdimm->num_flush)
                        return 1;
        }

        /*
         * The platform defines dimm devices without hints, assume
         * platform persistence mechanism like ADR
         */
        return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_has_flush);

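/*
 * A pmem region requires explicit cache management unless the platform
 * puts the CPU cache inside the persistence domain
 * (ND_REGION_PERSIST_CACHE).
 */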
int nvdimm_has_cache(struct nd_region *nd_region)
{
        return is_nd_pmem(&nd_region->dev) &&
                !test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags);
}
EXPORT_SYMBOL_GPL(nvdimm_has_cache);

struct conflict_context {
        struct nd_region *nd_region;
        resource_size_t start, size;
};

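/*
 * device_for_each_child() callback: report -EBUSY if the proposed
 * [start, start + size) range intersects another memory region's span.
 */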
static int region_conflict(struct device *dev, void *data)
{
        struct nd_region *nd_region;
        struct conflict_context *ctx = data;
        resource_size_t res_end, region_end, region_start;

        if (!is_memory(dev))
                return 0;

        nd_region = to_nd_region(dev);
        if (nd_region == ctx->nd_region)
                return 0;

        res_end = ctx->start + ctx->size;
        region_start = nd_region->ndr_start;
        region_end = region_start + nd_region->ndr_size;
        if (ctx->start >= region_start && ctx->start < region_end)
                return -EBUSY;
        if (res_end > region_start && res_end <= region_end)
                return -EBUSY;
        return 0;
}

int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
                resource_size_t size)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
        struct conflict_context ctx = {
                .nd_region = nd_region,
                .start = start,
                .size = size,
        };

        return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);
}

void __exit nd_region_devs_exit(void)
{
        ida_destroy(&region_ida);
}