/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "nd.h"

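/* ida providing the unique ids behind the "nmem%d" device names */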
static DEFINE_IDA(dimm_ida);

/*
 * Validate that the dimm has a command mask and that it advertises the
 * get_config_data command needed to read its config area.
 */
static int __validate_dimm(struct nvdimm_drvdata *ndd)
{
	struct nvdimm *nvdimm;

	if (!ndd)
		return -EINVAL;

	nvdimm = to_nvdimm(ndd->dev);

	if (!nvdimm->dsm_mask)
		return -ENXIO;
	if (!test_bit(ND_CMD_GET_CONFIG_DATA, nvdimm->dsm_mask))
		return -ENXIO;

	return 0;
}

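/*
 * Wrapper that logs the caller and error code via dev_dbg() when
 * __validate_dimm() rejects a dimm.
 */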
static int validate_dimm(struct nvdimm_drvdata *ndd)
{
	int rc = __validate_dimm(ndd);

	if (rc && ndd)
		dev_dbg(ndd->dev, "%pf: %s error: %d\n",
				__builtin_return_address(0), __func__, rc);
	return rc;
}

/**
 * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
 * @ndd: dimm io descriptor to initialize
 */
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{
	struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc;
	int rc = validate_dimm(ndd);

	if (rc)
		return rc;

	if (cmd->config_size)
		return 0; /* already valid */

	memset(cmd, 0, sizeof(*cmd));
	nd_desc = nvdimm_bus->nd_desc;
	return nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
			ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd));
}

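/*
 * nvdimm_init_config_data - read the dimm's config data area into ndd->data
 *
 * Allocates a buffer of nsarea.config_size bytes and fills it with
 * ND_CMD_GET_CONFIG_DATA transfers bounded by PAGE_SIZE and
 * nsarea.max_xfer.
 */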
int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nd_cmd_get_config_data_hdr *cmd;
	struct nvdimm_bus_descriptor *nd_desc;
	int rc = validate_dimm(ndd);
	u32 max_cmd_size, config_size;
	size_t offset;

	if (rc)
		return rc;

	if (ndd->data)
		return 0;

	if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0)
		return -ENXIO;

	ndd->data = kmalloc(ndd->nsarea.config_size, GFP_KERNEL);
	if (!ndd->data)
		ndd->data = vmalloc(ndd->nsarea.config_size);

	if (!ndd->data)
		return -ENOMEM;

	max_cmd_size = min_t(u32, PAGE_SIZE, ndd->nsarea.max_xfer);
	cmd = kzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	nd_desc = nvdimm_bus->nd_desc;
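	/* read the config data area one bounded chunk at a time */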
	for (config_size = ndd->nsarea.config_size, offset = 0;
			config_size; config_size -= cmd->in_length,
			offset += cmd->in_length) {
		cmd->in_length = min(config_size, max_cmd_size);
		cmd->in_offset = offset;
		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_GET_CONFIG_DATA, cmd,
				cmd->in_length + sizeof(*cmd));
		if (rc || cmd->status) {
			rc = -ENXIO;
			break;
		}
		memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length);
	}
	dev_dbg(ndd->dev, "%s: len: %zu rc: %d\n", __func__, offset, rc);
	kfree(cmd);

	return rc;
}

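/* device release callback: return the id to dimm_ida and free the nvdimm */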
static void nvdimm_release(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	ida_simple_remove(&dimm_ida, nvdimm->id);
	kfree(nvdimm);
}

static struct device_type nvdimm_device_type = {
	.name = "nvdimm",
	.release = nvdimm_release,
};

bool is_nvdimm(struct device *dev)
{
	return dev->type == &nvdimm_device_type;
}

struct nvdimm *to_nvdimm(struct device *dev)
{
	struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);

	WARN_ON(!is_nvdimm(dev));
	return nvdimm;
}
EXPORT_SYMBOL_GPL(to_nvdimm);

const char *nvdimm_name(struct nvdimm *nvdimm)
{
	return dev_name(&nvdimm->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_name);

void *nvdimm_provider_data(struct nvdimm *nvdimm)
{
	if (nvdimm)
		return nvdimm->provider_data;
	return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_provider_data);

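/*
 * 'commands' sysfs attribute: space-separated names of the commands set
 * in this dimm's dsm_mask.
 */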
static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	int cmd, len = 0;

	if (!nvdimm->dsm_mask)
		return sprintf(buf, "\n");

	for_each_set_bit(cmd, nvdimm->dsm_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);

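/*
 * Default dimm sysfs attributes; the group is exported so that bus
 * providers can include it in the attribute groups they pass to
 * nvdimm_create() (an assumption about callers outside this file).
 */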
static struct attribute *nvdimm_attributes[] = {
	&dev_attr_commands.attr,
	NULL,
};

struct attribute_group nvdimm_attribute_group = {
	.attrs = nvdimm_attributes,
};
EXPORT_SYMBOL_GPL(nvdimm_attribute_group);

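/*
 * nvdimm_create - allocate and register a dimm device on @nvdimm_bus
 *
 * Picks an id from dimm_ida, names the device "nmem<id>", and hands it to
 * nd_device_register().  Returns NULL on allocation failure.  A bus
 * provider would typically call something like (hypothetical
 * provider-side names):
 *
 *	nvdimm = nvdimm_create(nvdimm_bus, my_dimm_data, my_dimm_groups,
 *			0, my_dsm_mask);
 */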
struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
		const struct attribute_group **groups, unsigned long flags,
		unsigned long *dsm_mask)
{
	struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
	struct device *dev;

	if (!nvdimm)
		return NULL;

	nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
	if (nvdimm->id < 0) {
		kfree(nvdimm);
		return NULL;
	}
	nvdimm->provider_data = provider_data;
	nvdimm->flags = flags;
	nvdimm->dsm_mask = dsm_mask;

	dev = &nvdimm->dev;
	dev_set_name(dev, "nmem%d", nvdimm->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = &nvdimm_device_type;
	dev->devt = MKDEV(nvdimm_major, nvdimm->id);
	dev->groups = groups;
	nd_device_register(dev);

	return nvdimm;
}
EXPORT_SYMBOL_GPL(nvdimm_create);

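/* device_for_each_child() callback: count nvdimm children of the bus */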
static int count_dimms(struct device *dev, void *c)
{
	int *count = c;

	if (is_nvdimm(dev))
		(*count)++;
	return 0;
}

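/*
 * Verify that the expected number of dimms registered on the bus;
 * nd_synchronize() is called first so that any in-flight registrations
 * (and failures) are accounted for.
 */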
int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
{
	int count = 0;
	/* Flush any possible dimm registration failures */
	nd_synchronize();

	device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
	dev_dbg(&nvdimm_bus->dev, "%s: count: %d\n", __func__, count);
	if (count != dimm_count)
		return -ENXIO;
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);