/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/pmem.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static DEFINE_IDA(region_ida);
static DEFINE_PER_CPU(int, flush_idx);

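/*
 * Map each of a dimm's flush hint addresses into the region's flush_wpq
 * table. Hints that fall in the same physical page share a single
 * devm-managed mapping.
 */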
static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
		struct nd_region_data *ndrd)
{
	int i, j;

	dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
			nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
	for (i = 0; i < (1 << ndrd->hints_shift); i++) {
		struct resource *res = &nvdimm->flush_wpq[i];
		unsigned long pfn = PHYS_PFN(res->start);
		void __iomem *flush_page;

		/* check if flush hints share a page */
		for (j = 0; j < i; j++) {
			struct resource *res_j = &nvdimm->flush_wpq[j];
			unsigned long pfn_j = PHYS_PFN(res_j->start);

			if (pfn == pfn_j)
				break;
		}

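		/*
		 * An early break (j < i) means hint i lives in a page that
		 * is already mapped for hint j; reuse that mapping instead
		 * of ioremapping the same page twice.
		 */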
		if (j < i)
			flush_page = (void __iomem *) ((unsigned long)
					ndrd_get_flush_wpq(ndrd, dimm, j)
					& PAGE_MASK);
		else
			flush_page = devm_nvdimm_ioremap(dev,
					PFN_PHYS(pfn), PAGE_SIZE);
		if (!flush_page)
			return -ENXIO;
		ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
				+ (res->start & ~PAGE_MASK));
	}

	return 0;
}

int nd_region_activate(struct nd_region *nd_region)
{
	int i, j, num_flush = 0;
	struct nd_region_data *ndrd;
	struct device *dev = &nd_region->dev;
	size_t flush_data_size = sizeof(void *);

	nvdimm_bus_lock(&nd_region->dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		/* at least one null hint slot per-dimm for the "no-hint" case */
		flush_data_size += sizeof(void *);
		num_flush = min_not_zero(num_flush, nvdimm->num_flush);
		if (!nvdimm->num_flush)
			continue;
		flush_data_size += nvdimm->num_flush * sizeof(void *);
	}
	nvdimm_bus_unlock(&nd_region->dev);

	ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
	if (!ndrd)
		return -ENOMEM;
	dev_set_drvdata(dev, ndrd);

	if (!num_flush)
		return 0;

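	/*
	 * num_flush is the smallest non-zero hint count across the dimms;
	 * record it as a power-of-two shift for the per-dimm hint table
	 * lookups done by ndrd_{get,set}_flush_wpq().
	 */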
	ndrd->hints_shift = ilog2(num_flush);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);

		if (rc)
			return rc;
	}

	/*
	 * Clear out entries that are duplicates so that a shared set of
	 * flush hints is only written once per nvdimm_flush().
	 */
	for (i = 0; i < nd_region->ndr_mappings - 1; i++) {
		/* ignore if NULL already */
		if (!ndrd_get_flush_wpq(ndrd, i, 0))
			continue;

		for (j = i + 1; j < nd_region->ndr_mappings; j++)
			if (ndrd_get_flush_wpq(ndrd, i, 0) ==
			    ndrd_get_flush_wpq(ndrd, j, 0))
				ndrd_set_flush_wpq(ndrd, j, 0, NULL);
	}

	return 0;
}

static void nd_region_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	u16 i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		put_device(&nvdimm->dev);
	}
	free_percpu(nd_region->lane);
	ida_simple_remove(&region_ida, nd_region->id);
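	/* an nd_blk_region embeds its nd_region, so free the container */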
	if (is_nd_blk(dev))
		kfree(to_nd_blk_region(dev));
	else
		kfree(nd_region);
}

static struct device_type nd_blk_device_type = {
	.name = "nd_blk",
	.release = nd_region_release,
};

static struct device_type nd_pmem_device_type = {
	.name = "nd_pmem",
	.release = nd_region_release,
};

static struct device_type nd_volatile_device_type = {
	.name = "nd_volatile",
	.release = nd_region_release,
};

bool is_nd_pmem(struct device *dev)
{
	return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
	return dev ? dev->type == &nd_blk_device_type : false;
}

struct nd_region *to_nd_region(struct device *dev)
{
	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

	WARN_ON(dev->type->release != nd_region_release);
	return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	WARN_ON(!is_nd_blk(dev));
	return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);

void *nd_region_provider_data(struct nd_region *nd_region)
{
	return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
	return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);

void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
	ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);

/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is the 'nstype' attribute of a region as well, an input to the
 * MODALIAS for namespace devices, and the bit number for an nvdimm_bus to
 * match namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
	if (is_nd_pmem(&nd_region->dev)) {
		u16 i, alias;

		for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			if (test_bit(NDD_ALIASING, &nvdimm->flags))
				alias++;
		}
		if (alias)
			return ND_DEVICE_NAMESPACE_PMEM;
		else
			return ND_DEVICE_NAMESPACE_IO;
	} else if (is_nd_blk(&nd_region->dev)) {
		return ND_DEVICE_NAMESPACE_BLK;
	}

	return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long size = 0;

	if (is_nd_pmem(dev)) {
		size = nd_region->ndr_size;
	} else if (nd_region->ndr_mappings == 1) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];

		size = nd_mapping->size;
	}

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t deep_flush_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	/*
	 * NOTE: in the nvdimm_has_flush() error case this attribute is
	 * not visible.
	 */
	return sprintf(buf, "%d\n", nvdimm_has_flush(nd_region));
}

static ssize_t deep_flush_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	bool flush;
	int rc = strtobool(buf, &flush);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;
	if (!flush)
		return -EINVAL;
	nvdimm_flush(nd_region);

	return len;
}
static DEVICE_ATTR_RW(deep_flush);
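
/*
 * Illustrative usage from userspace (assuming the standard libnvdimm
 * sysfs layout):
 *
 *	echo 1 > /sys/bus/nd/devices/region0/deep_flush
 */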

static ssize_t mappings_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t set_cookie_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (is_nd_pmem(dev) && nd_set)
		/* pass, should be precluded by region_visible */;
	else
		return -ENXIO;

	return sprintf(buf, "%#llx\n", nd_set->cookie);
}
static DEVICE_ATTR_RO(set_cookie);

resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
	resource_size_t blk_max_overlap = 0, available, overlap;
	int i;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

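	/*
	 * Restart the accounting whenever a pmem mapping reports a larger
	 * blk overlap than previously assumed.
	 */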
 retry:
	available = 0;
	overlap = blk_max_overlap;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		/* if a dimm is disabled the available capacity is zero */
		if (!ndd)
			return 0;

		if (is_nd_pmem(&nd_region->dev)) {
			available += nd_pmem_available_dpa(nd_region,
					nd_mapping, &overlap);
			if (overlap > blk_max_overlap) {
				blk_max_overlap = overlap;
				goto retry;
			}
		} else if (is_nd_blk(&nd_region->dev))
			available += nd_blk_available_dpa(nd_region);
	}

	return available;
}

static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	/*
	 * Flush in-flight updates and grab a snapshot of the available
	 * size. Of course, this value is potentially invalidated the
	 * moment the nvdimm_bus_lock() is dropped, but that's userspace's
	 * problem to not race itself.
	 */
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);

static ssize_t init_namespaces_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region_data *ndrd = dev_get_drvdata(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (ndrd)
		rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
	else
		rc = -ENXIO;
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(init_namespaces);

static ssize_t namespace_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t btt_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->btt_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(btt_seed);

static ssize_t pfn_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->pfn_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(pfn_seed);

static ssize_t dax_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->dax_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(dax_seed);

static ssize_t read_only_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ro);
}

static ssize_t read_only_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool ro;
	int rc = strtobool(buf, &ro);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;

	nd_region->ro = ro;
	return len;
}
static DEVICE_ATTR_RW(read_only);

static ssize_t region_badblocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return badblocks_show(&nd_region->bb, buf, 0);
}

static DEVICE_ATTR(badblocks, 0444, region_badblocks_show, NULL);

static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%#llx\n", nd_region->ndr_start);
}
static DEVICE_ATTR_RO(resource);

static struct attribute *nd_region_attributes[] = {
	&dev_attr_size.attr,
	&dev_attr_nstype.attr,
	&dev_attr_mappings.attr,
	&dev_attr_btt_seed.attr,
	&dev_attr_pfn_seed.attr,
	&dev_attr_dax_seed.attr,
	&dev_attr_deep_flush.attr,
	&dev_attr_read_only.attr,
	&dev_attr_set_cookie.attr,
	&dev_attr_available_size.attr,
	&dev_attr_namespace_seed.attr,
	&dev_attr_init_namespaces.attr,
	&dev_attr_badblocks.attr,
	&dev_attr_resource.attr,
	NULL,
};

static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	int type = nd_region_to_nstype(nd_region);

	if (!is_nd_pmem(dev) && a == &dev_attr_pfn_seed.attr)
		return 0;

	if (!is_nd_pmem(dev) && a == &dev_attr_dax_seed.attr)
		return 0;

	if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr)
		return 0;

	if (!is_nd_pmem(dev) && a == &dev_attr_resource.attr)
		return 0;

	if (a == &dev_attr_deep_flush.attr) {
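		/*
		 * deep_flush is writable when flush hints exist, read-only
		 * when flushing is a platform no-op, and hidden when the
		 * capability cannot be determined.
		 */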
		int has_flush = nvdimm_has_flush(nd_region);

		if (has_flush == 1)
			return a->mode;
		else if (has_flush == 0)
			return 0444;
		else
			return 0;
	}

	if (a != &dev_attr_set_cookie.attr
			&& a != &dev_attr_available_size.attr)
		return a->mode;

	if ((type == ND_DEVICE_NAMESPACE_PMEM
				|| type == ND_DEVICE_NAMESPACE_BLK)
			&& a == &dev_attr_available_size.attr)
		return a->mode;
	else if (is_nd_pmem(dev) && nd_set)
		return a->mode;

	return 0;
}

struct attribute_group nd_region_attribute_group = {
	.attrs = nd_region_attributes,
	.is_visible = region_visible,
};
EXPORT_SYMBOL_GPL(nd_region_attribute_group);

u64 nd_region_interleave_set_cookie(struct nd_region *nd_region)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (nd_set)
		return nd_set->cookie;
	return 0;
}

u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (nd_set)
		return nd_set->altcookie;
	return 0;
}

void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
{
	struct nd_label_ent *label_ent, *e;

	lockdep_assert_held(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		list_del(&label_ent->list);
		kfree(label_ent);
	}
}

/*
 * Upon successful probe/remove, take/release a reference on the
 * associated interleave set (if present), and plant new btt + namespace
 * seeds. Also, on the removal of a BLK region, notify the provider to
 * disable the region.
 */
static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
		struct device *dev, bool probe)
{
	struct nd_region *nd_region;

	if (!probe && (is_nd_pmem(dev) || is_nd_blk(dev))) {
		int i;

		nd_region = to_nd_region(dev);
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm_drvdata *ndd = nd_mapping->ndd;
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			mutex_lock(&nd_mapping->lock);
			nd_mapping_free_labels(nd_mapping);
			mutex_unlock(&nd_mapping->lock);

			put_ndd(ndd);
			nd_mapping->ndd = NULL;
			if (ndd)
				atomic_dec(&nvdimm->busy);
		}

		if (is_nd_pmem(dev))
			return;
	}
	if (dev->parent && (is_nd_blk(dev->parent) || is_nd_pmem(dev->parent))
			&& probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->ns_seed == dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_btt(dev) && probe) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->btt_seed == dev)
			nd_region_create_btt_seed(nd_region);
		if (nd_region->ns_seed == &nd_btt->ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_pfn(dev) && probe) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->pfn_seed == dev)
			nd_region_create_pfn_seed(nd_region);
		if (nd_region->ns_seed == &nd_pfn->ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_dax(dev) && probe) {
		struct nd_dax *nd_dax = to_nd_dax(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->dax_seed == dev)
			nd_region_create_dax_seed(nd_region);
		if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
}

void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, true);
}

void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, false);
}

static ssize_t mappingN(struct device *dev, char *buf, int n)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm *nvdimm;

	if (n >= nd_region->ndr_mappings)
		return -ENXIO;
	nd_mapping = &nd_region->mapping[n];
	nvdimm = nd_mapping->nvdimm;

	return sprintf(buf, "%s,%llu,%llu\n", dev_name(&nvdimm->dev),
			nd_mapping->start, nd_mapping->size);
}

#define REGION_MAPPING(idx) \
static ssize_t mapping##idx##_show(struct device *dev, \
		struct device_attribute *attr, char *buf) \
{ \
	return mappingN(dev, buf, idx); \
} \
static DEVICE_ATTR_RO(mapping##idx)

/*
 * 32 should be enough for a while; even in the presence of socket
 * interleave, a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);

static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nd_region *nd_region = to_nd_region(dev);

	if (n < nd_region->ndr_mappings)
		return a->mode;
	return 0;
}

static struct attribute *mapping_attributes[] = {
	&dev_attr_mapping0.attr,
	&dev_attr_mapping1.attr,
	&dev_attr_mapping2.attr,
	&dev_attr_mapping3.attr,
	&dev_attr_mapping4.attr,
	&dev_attr_mapping5.attr,
	&dev_attr_mapping6.attr,
	&dev_attr_mapping7.attr,
	&dev_attr_mapping8.attr,
	&dev_attr_mapping9.attr,
	&dev_attr_mapping10.attr,
	&dev_attr_mapping11.attr,
	&dev_attr_mapping12.attr,
	&dev_attr_mapping13.attr,
	&dev_attr_mapping14.attr,
	&dev_attr_mapping15.attr,
	&dev_attr_mapping16.attr,
	&dev_attr_mapping17.attr,
	&dev_attr_mapping18.attr,
	&dev_attr_mapping19.attr,
	&dev_attr_mapping20.attr,
	&dev_attr_mapping21.attr,
	&dev_attr_mapping22.attr,
	&dev_attr_mapping23.attr,
	&dev_attr_mapping24.attr,
	&dev_attr_mapping25.attr,
	&dev_attr_mapping26.attr,
	&dev_attr_mapping27.attr,
	&dev_attr_mapping28.attr,
	&dev_attr_mapping29.attr,
	&dev_attr_mapping30.attr,
	&dev_attr_mapping31.attr,
	NULL,
};

struct attribute_group nd_mapping_attribute_group = {
	.is_visible = mapping_visible,
	.attrs = mapping_attributes,
};
EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);

int nd_blk_region_init(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!is_nd_blk(dev))
		return 0;

	if (nd_region->ndr_mappings < 1) {
		dev_err(dev, "invalid BLK region\n");
		return -ENXIO;
	}

	return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}

/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per-cpu. For larger systems we need to lock to share lanes. For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace a lane may be
 * acquired recursively. We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
	unsigned int cpu, lane;

	cpu = get_cpu();
	if (nd_region->num_lanes < nr_cpu_ids) {
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		lane = cpu % nd_region->num_lanes;
		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (ndl_count->count++ == 0)
			spin_lock(&ndl_lock->lock);
	} else
		lane = cpu;

	return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);

void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
	if (nd_region->num_lanes < nr_cpu_ids) {
		unsigned int cpu = get_cpu();
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (--ndl_count->count == 0)
			spin_unlock(&ndl_lock->lock);
		put_cpu();
	}
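	/* match the get_cpu() in nd_region_acquire_lane() */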
	put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);
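
/*
 * Minimal usage sketch (illustrative, not a caller in this file): a
 * driver serializing access to a per-lane resource would bracket it as:
 *
 *	lane = nd_region_acquire_lane(nd_region);
 *	... access the BLK data window / BTT log slot for this lane ...
 *	nd_region_release_lane(nd_region, lane);
 */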

static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc, struct device_type *dev_type,
		const char *caller)
{
	struct nd_region *nd_region;
	struct device *dev;
	void *region_buf;
	unsigned int i;
	int ro = 0;

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		if ((mapping->start | mapping->size) % SZ_4K) {
			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
					caller, dev_name(&nvdimm->dev), i);

			return NULL;
		}

		if (test_bit(NDD_UNARMED, &nvdimm->flags))
			ro = 1;
	}

	if (dev_type == &nd_blk_device_type) {
		struct nd_blk_region_desc *ndbr_desc;
		struct nd_blk_region *ndbr;

		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		if (ndbr) {
			nd_region = &ndbr->nd_region;
			ndbr->enable = ndbr_desc->enable;
			ndbr->do_io = ndbr_desc->do_io;
		}
		region_buf = ndbr;
	} else {
		nd_region = kzalloc(sizeof(struct nd_region)
				+ sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		region_buf = nd_region;
	}

	if (!region_buf)
		return NULL;
	nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
	if (nd_region->id < 0)
		goto err_id;

	nd_region->lane = alloc_percpu(struct nd_percpu_lane);
	if (!nd_region->lane)
		goto err_percpu;

	for (i = 0; i < nr_cpu_ids; i++) {
		struct nd_percpu_lane *ndl;

		ndl = per_cpu_ptr(nd_region->lane, i);
		spin_lock_init(&ndl->lock);
		ndl->count = 0;
	}

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		nd_region->mapping[i].nvdimm = nvdimm;
		nd_region->mapping[i].start = mapping->start;
		nd_region->mapping[i].size = mapping->size;
		INIT_LIST_HEAD(&nd_region->mapping[i].labels);
		mutex_init(&nd_region->mapping[i].lock);

		get_device(&nvdimm->dev);
	}
	nd_region->ndr_mappings = ndr_desc->num_mappings;
	nd_region->provider_data = ndr_desc->provider_data;
	nd_region->nd_set = ndr_desc->nd_set;
	nd_region->num_lanes = ndr_desc->num_lanes;
	nd_region->flags = ndr_desc->flags;
	nd_region->ro = ro;
	nd_region->numa_node = ndr_desc->numa_node;
	ida_init(&nd_region->ns_ida);
	ida_init(&nd_region->btt_ida);
	ida_init(&nd_region->pfn_ida);
	ida_init(&nd_region->dax_ida);
	dev = &nd_region->dev;
	dev_set_name(dev, "region%d", nd_region->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = dev_type;
	dev->groups = ndr_desc->attr_groups;
	nd_region->ndr_size = resource_size(ndr_desc->res);
	nd_region->ndr_start = ndr_desc->res->start;
	nd_device_register(dev);

	return nd_region;

 err_percpu:
	ida_simple_remove(&region_ida, nd_region->id);
 err_id:
	kfree(region_buf);
	return NULL;
}

struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);

struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	if (ndr_desc->num_mappings > 1)
		return NULL;
	ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);

/**
 * nvdimm_flush - flush any posted write queues between the cpu and pmem media
 * @nd_region: blk or interleaved pmem region
 */
void nvdimm_flush(struct nd_region *nd_region)
{
	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
	int i, idx;

	/*
	 * Try to encourage some diversity in flush hint addresses
	 * across cpus assuming a limited number of flush hints.
	 */
	idx = this_cpu_read(flush_idx);
	idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8));

	/*
	 * The first wmb() is needed to 'sfence' all previous writes
	 * such that they are architecturally visible for the platform
	 * buffer flush. Note that we've already arranged for pmem
	 * writes to avoid the cache via arch_memcpy_to_pmem(). The
	 * final wmb() ensures ordering for the NVDIMM flush write.
	 */
	wmb();
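	/* slot 0 is NULL for mappings deduplicated in nd_region_activate() */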
	for (i = 0; i < nd_region->ndr_mappings; i++)
		if (ndrd_get_flush_wpq(ndrd, i, 0))
			writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
	wmb();
}
EXPORT_SYMBOL_GPL(nvdimm_flush);

/**
 * nvdimm_has_flush - determine write flushing requirements
 * @nd_region: blk or interleaved pmem region
 *
 * Returns 1 if writes require flushing
 * Returns 0 if writes do not require flushing
 * Returns -ENXIO if flushing capability can not be determined
 */
int nvdimm_has_flush(struct nd_region *nd_region)
{
	int i;

	/* no nvdimm == flushing capability unknown */
	if (nd_region->ndr_mappings == 0)
		return -ENXIO;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		/* flush hints present / available */
		if (nvdimm->num_flush)
			return 1;
	}

	/*
	 * The platform defines dimm devices without hints; assume a
	 * platform persistence mechanism like ADR.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_has_flush);

void __exit nd_region_devs_exit(void)
{
	ida_destroy(&region_ida);
}