/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/slab.h>
#include <linux/io.h>
#include "nd-core.h"
#include "nd.h"

static DEFINE_IDA(region_ida);

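/*
 * Reverse nd_region_create(): drop the reference taken on each
 * mapping's dimm-device, return the region id to the ida, and free
 * the region.  Runs when the last reference to the region device is
 * dropped.
 */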
static void nd_region_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	u16 i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		put_device(&nvdimm->dev);
	}
	ida_simple_remove(&region_ida, nd_region->id);
	kfree(nd_region);
}

static struct device_type nd_blk_device_type = {
	.name = "nd_blk",
	.release = nd_region_release,
};

static struct device_type nd_pmem_device_type = {
	.name = "nd_pmem",
	.release = nd_region_release,
};

static struct device_type nd_volatile_device_type = {
	.name = "nd_volatile",
	.release = nd_region_release,
};

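/* a region's personality (pmem, blk, or volatile) is carried by its device_type */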
bool is_nd_pmem(struct device *dev)
{
	return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
	return dev ? dev->type == &nd_blk_device_type : false;
}

struct nd_region *to_nd_region(struct device *dev)
{
	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

	WARN_ON(dev->type->release != nd_region_release);
	return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is the 'nstype' attribute of a region as well as an input to
 * the MODALIAS for namespace devices, and the bit number for an
 * nvdimm_bus to match namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
	if (is_nd_pmem(&nd_region->dev)) {
		u16 i, alias;

		for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			if (nvdimm->flags & NDD_ALIASING)
				alias++;
		}
		if (alias)
			return ND_DEVICE_NAMESPACE_PMEM;
		else
			return ND_DEVICE_NAMESPACE_IO;
	} else if (is_nd_blk(&nd_region->dev)) {
		return ND_DEVICE_NAMESPACE_BLK;
	}

	return 0;
}

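/*
 * A pmem region's size is the span of the interleave set; a blk
 * region's size is that of its single dimm mapping.
 */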
static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long size = 0;

	if (is_nd_pmem(dev)) {
		size = nd_region->ndr_size;
	} else if (nd_region->ndr_mappings == 1) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];

		size = nd_mapping->size;
	}

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t mappings_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

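/*
 * The region driver publishes an active/total namespace count via
 * drvdata; take the bus lock so that count is stable while it is read.
 */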
static ssize_t init_namespaces_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region_namespaces *num_ns = dev_get_drvdata(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (num_ns)
		rc = sprintf(buf, "%d/%d\n", num_ns->active, num_ns->count);
	else
		rc = -ENXIO;
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(init_namespaces);

static struct attribute *nd_region_attributes[] = {
	&dev_attr_size.attr,
	&dev_attr_nstype.attr,
	&dev_attr_mappings.attr,
	&dev_attr_init_namespaces.attr,
	NULL,
};

struct attribute_group nd_region_attribute_group = {
	.attrs = nd_region_attributes,
};
EXPORT_SYMBOL_GPL(nd_region_attribute_group);

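/* emit one mapping as "<dimm-device-name>,<start>,<size>" */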
static ssize_t mappingN(struct device *dev, char *buf, int n)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm *nvdimm;

	if (n >= nd_region->ndr_mappings)
		return -ENXIO;
	nd_mapping = &nd_region->mapping[n];
	nvdimm = nd_mapping->nvdimm;

	return sprintf(buf, "%s,%llu,%llu\n", dev_name(&nvdimm->dev),
			nd_mapping->start, nd_mapping->size);
}

#define REGION_MAPPING(idx) \
static ssize_t mapping##idx##_show(struct device *dev, \
		struct device_attribute *attr, char *buf) \
{ \
	return mappingN(dev, buf, idx); \
} \
static DEVICE_ATTR_RO(mapping##idx)

/*
 * 32 should be enough for a while; even in the presence of socket
 * interleave, a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);

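/* only expose as many mappingN attributes as the region has mappings */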
static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nd_region *nd_region = to_nd_region(dev);

	if (n < nd_region->ndr_mappings)
		return a->mode;
	return 0;
}

static struct attribute *mapping_attributes[] = {
	&dev_attr_mapping0.attr,
	&dev_attr_mapping1.attr,
	&dev_attr_mapping2.attr,
	&dev_attr_mapping3.attr,
	&dev_attr_mapping4.attr,
	&dev_attr_mapping5.attr,
	&dev_attr_mapping6.attr,
	&dev_attr_mapping7.attr,
	&dev_attr_mapping8.attr,
	&dev_attr_mapping9.attr,
	&dev_attr_mapping10.attr,
	&dev_attr_mapping11.attr,
	&dev_attr_mapping12.attr,
	&dev_attr_mapping13.attr,
	&dev_attr_mapping14.attr,
	&dev_attr_mapping15.attr,
	&dev_attr_mapping16.attr,
	&dev_attr_mapping17.attr,
	&dev_attr_mapping18.attr,
	&dev_attr_mapping19.attr,
	&dev_attr_mapping20.attr,
	&dev_attr_mapping21.attr,
	&dev_attr_mapping22.attr,
	&dev_attr_mapping23.attr,
	&dev_attr_mapping24.attr,
	&dev_attr_mapping25.attr,
	&dev_attr_mapping26.attr,
	&dev_attr_mapping27.attr,
	&dev_attr_mapping28.attr,
	&dev_attr_mapping29.attr,
	&dev_attr_mapping30.attr,
	&dev_attr_mapping31.attr,
	NULL,
};

struct attribute_group nd_mapping_attribute_group = {
	.is_visible = mapping_visible,
	.attrs = mapping_attributes,
};
EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);

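/* retrieve the opaque bus-provider context recorded at region creation */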
void *nd_region_provider_data(struct nd_region *nd_region)
{
	return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

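/*
 * Common region construction: validate that every mapping is 4K
 * aligned, allocate the region with a trailing array of mappings,
 * take a reference on each backing dimm-device, and register the
 * new region device on the bus.
 */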
static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc, struct device_type *dev_type,
		const char *caller)
{
	struct nd_region *nd_region;
	struct device *dev;
	u16 i;

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		if ((nd_mapping->start | nd_mapping->size) % SZ_4K) {
			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
					caller, dev_name(&nvdimm->dev), i);

			return NULL;
		}
	}

	nd_region = kzalloc(sizeof(struct nd_region)
			+ sizeof(struct nd_mapping) * ndr_desc->num_mappings,
			GFP_KERNEL);
	if (!nd_region)
		return NULL;
	nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
	if (nd_region->id < 0) {
		kfree(nd_region);
		return NULL;
	}

	memcpy(nd_region->mapping, ndr_desc->nd_mapping,
			sizeof(struct nd_mapping) * ndr_desc->num_mappings);
	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		get_device(&nvdimm->dev);
	}
	nd_region->ndr_mappings = ndr_desc->num_mappings;
	nd_region->provider_data = ndr_desc->provider_data;
	dev = &nd_region->dev;
	dev_set_name(dev, "region%d", nd_region->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = dev_type;
	dev->groups = ndr_desc->attr_groups;
	nd_region->ndr_size = resource_size(ndr_desc->res);
	nd_region->ndr_start = ndr_desc->res->start;
	nd_device_register(dev);

	return nd_region;
}

struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);
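
/*
 * Example (sketch, not taken from a real bus provider): registering a
 * single-dimm pmem region.  Only the nd_region_desc fields consumed by
 * nd_region_create() are shown, and the mapping values are
 * illustrative; 'res' and 'nvdimm' are assumed to be supplied by the
 * caller, and both start and size must be 4K aligned.
 *
 *	struct nd_mapping mapping = {
 *		.nvdimm = nvdimm,
 *		.start = 0,			// dimm offset of the mapping
 *		.size = resource_size(res),	// 4K-aligned capacity
 *	};
 *	struct nd_region_desc ndr_desc = {
 *		.res = res,
 *		.nd_mapping = &mapping,
 *		.num_mappings = 1,
 *	};
 *
 *	if (!nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc))
 *		return -ENXIO;
 */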

struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	if (ndr_desc->num_mappings > 1)
		return NULL;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);