/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "label.h"
#include "nd.h"

static DEFINE_IDA(dimm_ida);

/*
 * Retrieve the dimm handle and check whether this dimm supports
 * get_config_data commands, i.e. whether its label area is readable.
 */
int nvdimm_check_config_data(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!nvdimm->cmd_mask ||
	    !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
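		/*
		 * An aliased dimm relies on its label area to resolve
		 * BLK / PMEM overlap, so a missing get_config_data
		 * command is a hard error (-ENXIO); for any other dimm
		 * the command is merely unsupported (-ENOTTY).
		 */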
		if (nvdimm->flags & NDD_ALIASING)
			return -ENXIO;
		else
			return -ENOTTY;
	}

	return 0;
}

static int validate_dimm(struct nvdimm_drvdata *ndd)
{
	int rc;

	if (!ndd)
		return -EINVAL;

	rc = nvdimm_check_config_data(ndd->dev);
	if (rc)
		dev_dbg(ndd->dev, "%pf: %s error: %d\n",
				__builtin_return_address(0), __func__, rc);
	return rc;
}

/**
 * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
 * @ndd: dimm to initialize
 */
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{
	struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc;
	int rc = validate_dimm(ndd);

	if (rc)
		return rc;

	if (cmd->config_size)
		return 0; /* already valid */

	memset(cmd, 0, sizeof(*cmd));
	nd_desc = nvdimm_bus->nd_desc;
	return nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
			ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), NULL);
}

int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nd_cmd_get_config_data_hdr *cmd;
	struct nvdimm_bus_descriptor *nd_desc;
	int rc = validate_dimm(ndd);
	u32 max_cmd_size, config_size;
	size_t offset;

	if (rc)
		return rc;

	if (ndd->data)
		return 0;

	if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0
			|| ndd->nsarea.config_size < ND_LABEL_MIN_SIZE) {
		dev_dbg(ndd->dev, "failed to init config data area: (%d:%d)\n",
				ndd->nsarea.max_xfer, ndd->nsarea.config_size);
		return -ENXIO;
	}

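	/*
	 * Cache the entire config area; fall back to vmalloc() since the
	 * area may be too large for a physically contiguous kmalloc()
	 * allocation.
	 */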
	ndd->data = kmalloc(ndd->nsarea.config_size, GFP_KERNEL);
	if (!ndd->data)
		ndd->data = vmalloc(ndd->nsarea.config_size);

	if (!ndd->data)
		return -ENOMEM;

	max_cmd_size = min_t(u32, PAGE_SIZE, ndd->nsarea.max_xfer);
	cmd = kzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	nd_desc = nvdimm_bus->nd_desc;
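	/*
	 * Read the config area into the cache in chunks: one
	 * ND_CMD_GET_CONFIG_DATA call per iteration, each bounded by the
	 * dimm's reported max_xfer and PAGE_SIZE.
	 */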
	for (config_size = ndd->nsarea.config_size, offset = 0;
			config_size; config_size -= cmd->in_length,
			offset += cmd->in_length) {
		cmd->in_length = min(config_size, max_cmd_size);
		cmd->in_offset = offset;
		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_GET_CONFIG_DATA, cmd,
				cmd->in_length + sizeof(*cmd), NULL);
		if (rc || cmd->status) {
			rc = -ENXIO;
			break;
		}
		memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length);
	}
	dev_dbg(ndd->dev, "%s: len: %zu rc: %d\n", __func__, offset, rc);
	kfree(cmd);

	return rc;
}

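/*
 * Write @len bytes at @offset into the dimm's config area. Only the
 * on-dimm copy is updated here; callers are expected to keep the
 * ndd->data cache coherent themselves.
 */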
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
		void *buf, size_t len)
{
	int rc = validate_dimm(ndd);
	size_t max_cmd_size, buf_offset;
	struct nd_cmd_set_config_hdr *cmd;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	if (rc)
		return rc;

	if (!ndd->data)
		return -ENXIO;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, PAGE_SIZE, len);
	max_cmd_size = min_t(u32, max_cmd_size, ndd->nsarea.max_xfer);
	cmd = kzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	for (buf_offset = 0; len; len -= cmd->in_length,
			buf_offset += cmd->in_length) {
		size_t cmd_size;
		u32 *status;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);
		memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);

		/* status is output in the last 4-bytes of the command buffer */
		cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);
		status = ((void *) cmd) + cmd_size - sizeof(u32);

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, NULL);
		if (rc || *status) {
			rc = rc ? rc : -ENXIO;
			break;
		}
	}
	kfree(cmd);

	return rc;
}

void nvdimm_set_aliasing(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	nvdimm->flags |= NDD_ALIASING;
}

static void nvdimm_release(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	ida_simple_remove(&dimm_ida, nvdimm->id);
	kfree(nvdimm);
}

static struct device_type nvdimm_device_type = {
	.name = "nvdimm",
	.release = nvdimm_release,
};

bool is_nvdimm(struct device *dev)
{
	return dev->type == &nvdimm_device_type;
}

struct nvdimm *to_nvdimm(struct device *dev)
{
	struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);

	WARN_ON(!is_nvdimm(dev));
	return nvdimm;
}
EXPORT_SYMBOL_GPL(to_nvdimm);

struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr)
{
	struct nd_region *nd_region = &ndbr->nd_region;
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];

	return nd_mapping->nvdimm;
}
EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm);

struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
	struct nvdimm *nvdimm = nd_mapping->nvdimm;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	return dev_get_drvdata(&nvdimm->dev);
}
EXPORT_SYMBOL(to_ndd);

void nvdimm_drvdata_release(struct kref *kref)
{
	struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
	struct device *dev = ndd->dev;
	struct resource *res, *_r;

	dev_dbg(dev, "%s\n", __func__);

	nvdimm_bus_lock(dev);
	for_each_dpa_resource_safe(ndd, res, _r)
		nvdimm_free_dpa(ndd, res);
	nvdimm_bus_unlock(dev);

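	/* ndd->data may be kmalloc'd or vmalloc'd, kvfree() handles both */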
	kvfree(ndd->data);
	kfree(ndd);
	put_device(dev);
}

void get_ndd(struct nvdimm_drvdata *ndd)
{
	kref_get(&ndd->kref);
}

void put_ndd(struct nvdimm_drvdata *ndd)
{
	if (ndd)
		kref_put(&ndd->kref, nvdimm_drvdata_release);
}

const char *nvdimm_name(struct nvdimm *nvdimm)
{
	return dev_name(&nvdimm->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_name);

struct kobject *nvdimm_kobj(struct nvdimm *nvdimm)
{
	return &nvdimm->dev.kobj;
}
EXPORT_SYMBOL_GPL(nvdimm_kobj);

unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
{
	return nvdimm->cmd_mask;
}
EXPORT_SYMBOL_GPL(nvdimm_cmd_mask);

void *nvdimm_provider_data(struct nvdimm *nvdimm)
{
	if (nvdimm)
		return nvdimm->provider_data;
	return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_provider_data);

static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	int cmd, len = 0;

	if (!nvdimm->cmd_mask)
		return sprintf(buf, "\n");

	for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	/*
	 * The state may be in the process of changing; userspace should
	 * quiesce probing if it wants a static answer.
	 */
	nvdimm_bus_lock(dev);
	nvdimm_bus_unlock(dev);
	return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
			? "active" : "idle");
}
static DEVICE_ATTR_RO(state);

static ssize_t available_slots_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
	ssize_t rc;
	u32 nfree;

	if (!ndd)
		return -ENXIO;

	nvdimm_bus_lock(dev);
	nfree = nd_label_nfree(ndd);
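	/*
	 * One slot is held back from the free count (likely reserved for
	 * an in-flight label update); the unsigned "nfree - 1 > nfree"
	 * compare below catches an unexpected nfree of zero.
	 */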
	if (nfree - 1 > nfree) {
		dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
		nfree = 0;
	} else
		nfree--;
	rc = sprintf(buf, "%d\n", nfree);
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(available_slots);

static struct attribute *nvdimm_attributes[] = {
	&dev_attr_state.attr,
	&dev_attr_commands.attr,
	&dev_attr_available_slots.attr,
	NULL,
};

struct attribute_group nvdimm_attribute_group = {
	.attrs = nvdimm_attributes,
};
EXPORT_SYMBOL_GPL(nvdimm_attribute_group);

struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
		const struct attribute_group **groups, unsigned long flags,
		unsigned long cmd_mask, int num_flush,
		struct resource *flush_wpq)
{
	struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
	struct device *dev;

	if (!nvdimm)
		return NULL;

	nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
	if (nvdimm->id < 0) {
		kfree(nvdimm);
		return NULL;
	}
	nvdimm->provider_data = provider_data;
	nvdimm->flags = flags;
	nvdimm->cmd_mask = cmd_mask;
	nvdimm->num_flush = num_flush;
	nvdimm->flush_wpq = flush_wpq;
	atomic_set(&nvdimm->busy, 0);
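	/* the dimm surfaces in sysfs as nmem<id> on the nvdimm bus */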
	dev = &nvdimm->dev;
	dev_set_name(dev, "nmem%d", nvdimm->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = &nvdimm_device_type;
	dev->devt = MKDEV(nvdimm_major, nvdimm->id);
	dev->groups = groups;
	nd_device_register(dev);

	return nvdimm;
}
EXPORT_SYMBOL_GPL(nvdimm_create);
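
/*
 * Minimal usage sketch for a bus provider (hypothetical caller, no
 * flush hints, no aliasing flags):
 *
 *	nvdimm = nvdimm_create(nvdimm_bus, provider_data, groups,
 *			0, cmd_mask, 0, NULL);
 *	if (!nvdimm)
 *		return -ENOMEM;
 */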

int alias_dpa_busy(struct device *dev, void *data)
{
	resource_size_t map_end, blk_start, new, busy;
	struct blk_alloc_info *info = data;
	struct nd_mapping *nd_mapping;
	struct nd_region *nd_region;
	struct nvdimm_drvdata *ndd;
	struct resource *res;
	int i;

	if (!is_nd_pmem(dev))
		return 0;

	nd_region = to_nd_region(dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
			break;
	}

	if (i >= nd_region->ndr_mappings)
		return 0;

	ndd = to_ndd(nd_mapping);
	map_end = nd_mapping->start + nd_mapping->size - 1;
	blk_start = nd_mapping->start;

	/*
	 * In the allocation case ->res is set to free space that we are
	 * looking to validate against PMEM aliasing collision rules
	 * (i.e. BLK is allocated after all aliased PMEM).
	 */
	if (info->res) {
		if (info->res->start >= nd_mapping->start
				&& info->res->start < map_end)
			/* pass */;
		else
			return 0;
	}

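	/*
	 * Each time a pmem allocation is found at or above the current
	 * blk_start candidate, push blk_start past it and rescan: blk
	 * capacity may only be claimed above all aliased pmem.
	 */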
 retry:
	/*
	 * Find the free dpa from the end of the last pmem allocation to
	 * the end of the interleave-set mapping that is not already
	 * covered by a blk allocation.
	 */
	busy = 0;
	for_each_dpa_resource(ndd, res) {
		if ((res->start >= blk_start && res->start < map_end)
				|| (res->end >= blk_start
					&& res->end <= map_end)) {
			if (strncmp(res->name, "pmem", 4) == 0) {
				new = max(blk_start, min(map_end + 1,
							res->end + 1));
				if (new != blk_start) {
					blk_start = new;
					goto retry;
				}
			} else
				busy += min(map_end, res->end)
					- max(nd_mapping->start, res->start) + 1;
		} else if (nd_mapping->start > res->start
				&& map_end < res->end) {
			/* total eclipse of the PMEM region mapping */
			busy += nd_mapping->size;
			break;
		}
	}

	/* update the free space range with the probed blk_start */
	if (info->res && blk_start > info->res->start) {
		info->res->start = max(info->res->start, blk_start);
		if (info->res->start > info->res->end)
			info->res->end = info->res->start - 1;
		return 1;
	}

	info->available -= blk_start - nd_mapping->start + busy;

	return 0;
}

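/*
 * Adjust info->busy for any overlap between the candidate blk
 * allocation (info->res) and this pmem region's aliased mapping on the
 * same dimm.
 */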
static int blk_dpa_busy(struct device *dev, void *data)
{
	struct blk_alloc_info *info = data;
	struct nd_mapping *nd_mapping;
	struct nd_region *nd_region;
	resource_size_t map_end;
	int i;

	if (!is_nd_pmem(dev))
		return 0;

	nd_region = to_nd_region(dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
			break;
	}

	if (i >= nd_region->ndr_mappings)
		return 0;

	map_end = nd_mapping->start + nd_mapping->size - 1;
	if (info->res->start >= nd_mapping->start
			&& info->res->start < map_end) {
		if (info->res->end <= map_end) {
			info->busy = 0;
			return 1;
		} else {
			info->busy -= info->res->end - map_end;
			return 0;
		}
	} else if (info->res->end >= nd_mapping->start
			&& info->res->end <= map_end) {
		info->busy -= nd_mapping->start - info->res->start;
		return 0;
	} else {
		info->busy -= nd_mapping->size;
		return 0;
	}
}

/**
 * nd_blk_available_dpa - account the unused dpa of BLK region
 * @nd_region: BLK region; mapping[0] holds the dpa-resource-root + labels
 *
 * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges, but
 * we arrange for them to never start at a lower dpa than the last
 * PMEM allocation in an aliased region.
 */
resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct blk_alloc_info info = {
		.nd_mapping = nd_mapping,
		.available = nd_mapping->size,
		.res = NULL,
	};
	struct resource *res;

	if (!ndd)
		return 0;

	device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);

	/* now account for busy blk allocations in unaliased dpa */
	for_each_dpa_resource(ndd, res) {
		if (strncmp(res->name, "blk", 3) != 0)
			continue;

		info.res = res;
		info.busy = resource_size(res);
		device_for_each_child(&nvdimm_bus->dev, &info, blk_dpa_busy);
		info.available -= info.busy;
	}

	return info.available;
}

/**
 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
 * @nd_mapping: container of dpa-resource-root + labels
 * @nd_region: constrain available space check to this reference region
 * @overlap: calculate available space assuming this level of overlap
 *
 * Validate that a PMEM label, if present, aligns with the start of an
 * interleave set and truncate the available size at the lowest BLK
 * overlap point.
 *
 * The expectation is that this routine is called multiple times as it
 * probes for the largest BLK encroachment for any single member DIMM of
 * the interleave set. Once that value is determined the PMEM-limit for
 * the set can be established.
 */
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, resource_size_t *overlap)
{
	resource_size_t map_start, map_end, busy = 0, available, blk_start;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res;
	const char *reason;

	if (!ndd)
		return 0;

	map_start = nd_mapping->start;
	map_end = map_start + nd_mapping->size - 1;
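	/* seed blk_start with the largest blk encroachment probed so far */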
	blk_start = max(map_start, map_end + 1 - *overlap);
	for_each_dpa_resource(ndd, res) {
		if (res->start >= map_start && res->start < map_end) {
			if (strncmp(res->name, "blk", 3) == 0)
				blk_start = min(blk_start,
						max(map_start, res->start));
			else if (res->end > map_end) {
				reason = "misaligned to iset";
				goto err;
			} else
				busy += resource_size(res);
		} else if (res->end >= map_start && res->end <= map_end) {
			if (strncmp(res->name, "blk", 3) == 0) {
				/*
				 * If a BLK allocation overlaps the start of
				 * PMEM the entire interleave set may now only
				 * be used for BLK.
				 */
				blk_start = map_start;
			} else
				busy += resource_size(res);
		} else if (map_start > res->start && map_start < res->end) {
			/* total eclipse of the mapping */
			busy += nd_mapping->size;
			blk_start = map_start;
		}
	}

	*overlap = map_end + 1 - blk_start;
	available = blk_start - map_start;
	if (busy < available)
		return available - busy;
	return 0;

 err:
	nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason);
	return 0;
}

void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
{
	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	kfree(res->name);
	__release_region(&ndd->dpa, res->start, resource_size(res));
}

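/*
 * Reserve @n bytes of dimm-physical-address space at @start. The
 * kmemdup()'d label_id doubles as the resource name and is freed again
 * by nvdimm_free_dpa().
 */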
struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, resource_size_t start,
		resource_size_t n)
{
	char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
	struct resource *res;

	if (!name)
		return NULL;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	res = __request_region(&ndd->dpa, start, n, name, 0);
	if (!res)
		kfree(name);
	return res;
}

/**
 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
 * @ndd: container of dpa-resource-root + labels
 * @label_id: dpa resource name of the form {pmem|blk}-<human readable uuid>
 */
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id)
{
	resource_size_t allocated = 0;
	struct resource *res;

	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id->id) == 0)
			allocated += resource_size(res);

	return allocated;
}

static int count_dimms(struct device *dev, void *c)
{
	int *count = c;

	if (is_nvdimm(dev))
		(*count)++;
	return 0;
}

int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
{
	int count = 0;
	/* Flush any possible dimm registration failures */
	nd_synchronize();

	device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
	dev_dbg(&nvdimm_bus->dev, "%s: count: %d\n", __func__, count);
	if (count != dimm_count)
		return -ENXIO;
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);

void __exit nvdimm_devs_exit(void)
{
	ida_destroy(&dimm_ida);
}