/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/pmem.h>
#include <linux/io.h>
#include <linux/nd.h>
#include <asm/cacheflush.h>
#include "nfit.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");

static unsigned int scrub_timeout = NFIT_ARS_TIMEOUT;
module_param(scrub_timeout, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_timeout, "Initial scrub timeout in seconds");

/* after three payloads of overflow, it's dead jim */
static unsigned int scrub_overflow_abort = 3;
module_param(scrub_overflow_abort, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_overflow_abort,
		"Number of times we overflow ARS results before abort");

static struct workqueue_struct *nfit_wq;

struct nfit_table_prev {
	struct list_head spas;
	struct list_head memdevs;
	struct list_head dcrs;
	struct list_head bdws;
	struct list_head idts;
	struct list_head flushes;
};

static u8 nfit_uuid[NFIT_UUID_MAX][16];

const u8 *to_nfit_uuid(enum nfit_uuids id)
{
	return nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);

static struct acpi_nfit_desc *to_acpi_nfit_desc(
		struct nvdimm_bus_descriptor *nd_desc)
{
	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}

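/*
 * Translate the ACPI-defined status of an ARS (Address Range Scrub)
 * command into a Linux errno.  The low 16 bits of ->status carry the
 * command completion code, the upper 16 bits carry the per-command
 * extended status.
 */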
static int xlat_status(void *buf, unsigned int cmd)
{
	struct nd_cmd_ars_status *ars_status;
	struct nd_cmd_ars_start *ars_start;
	struct nd_cmd_ars_cap *ars_cap;
	u16 flags;

	switch (cmd) {
	case ND_CMD_ARS_CAP:
		ars_cap = buf;
		if ((ars_cap->status & 0xffff) == NFIT_ARS_CAP_NONE)
			return -ENOTTY;

		/* Command failed */
		if (ars_cap->status & 0xffff)
			return -EIO;

		/* No supported scan types for this range */
		flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
		if ((ars_cap->status >> 16 & flags) == 0)
			return -ENOTTY;
		break;
	case ND_CMD_ARS_START:
		ars_start = buf;
		/* ARS is in progress */
		if ((ars_start->status & 0xffff) == NFIT_ARS_START_BUSY)
			return -EBUSY;

		/* Command failed */
		if (ars_start->status & 0xffff)
			return -EIO;
		break;
	case ND_CMD_ARS_STATUS:
		ars_status = buf;
		/* Command failed */
		if (ars_status->status & 0xffff)
			return -EIO;
		/* Check extended status (Upper two bytes) */
		if (ars_status->status == NFIT_ARS_STATUS_DONE)
			return 0;

		/* ARS is in progress */
		if (ars_status->status == NFIT_ARS_STATUS_BUSY)
			return -EBUSY;

		/* No ARS performed for the current boot */
		if (ars_status->status == NFIT_ARS_STATUS_NONE)
			return -EAGAIN;

		/*
		 * ARS interrupted, either we overflowed or some other
		 * agent wants the scan to stop.  If we didn't overflow
		 * then just continue with the returned results.
		 */
		if (ars_status->status == NFIT_ARS_STATUS_INTR) {
			if (ars_status->flags & NFIT_ARS_F_OVERFLOW)
				return -ENOSPC;
			return 0;
		}

		/* Unknown status */
		if (ars_status->status >> 16)
			return -EIO;
		break;
	default:
		break;
	}

	return 0;
}

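/*
 * acpi_nfit_ctl: marshal a libnvdimm command payload into the ACPI _DSM
 * calling convention.  The input fields already packed in @buf are
 * wrapped in an ACPI buffer/package, the _DSM is evaluated, and the
 * output fields are copied back into @buf after the input envelope.
 * When @cmd_rc is provided, the command-specific status is translated
 * via xlat_status().
 */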
static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int buf_len, int *cmd_rc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	const struct nd_cmd_desc *desc = NULL;
	union acpi_object in_obj, in_buf, *out_obj;
	struct device *dev = acpi_desc->dev;
	const char *cmd_name, *dimm_name;
	unsigned long dsm_mask;
	acpi_handle handle;
	const u8 *uuid;
	u32 offset;
	int rc, i;

	if (nvdimm) {
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_device *adev = nfit_mem->adev;

		if (!adev)
			return -ENOTTY;
		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		uuid = to_nfit_uuid(NFIT_DEV_DIMM);
		handle = adev->handle;
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		dsm_mask = nd_desc->dsm_mask;
		desc = nd_cmd_bus_desc(cmd);
		uuid = to_nfit_uuid(NFIT_DEV_BUS);
		handle = adev->handle;
		dimm_name = "bus";
	}

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
		return -ENOTTY;

	if (!test_bit(cmd, &dsm_mask))
		return -ENOTTY;

	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
				i, buf);

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %s input length: %d\n", __func__,
				dimm_name, cmd_name, in_buf.buffer.length);
		print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
				4, in_buf.buffer.pointer, min_t(u32, 128,
					in_buf.buffer.length), true);
	}

	out_obj = acpi_evaluate_dsm(handle, uuid, 1, cmd, &in_obj);
	if (!out_obj) {
		dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name,
				cmd_name);
		return -EINVAL;
	}

	if (out_obj->package.type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n",
				__func__, dimm_name, cmd_name, out_obj->type);
		rc = -EINVAL;
		goto out;
	}

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__,
				dimm_name, cmd_name, out_obj->buffer.length);
		print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
				4, out_obj->buffer.pointer, min_t(u32, 128,
					out_obj->buffer.length), true);
	}

	for (i = 0, offset = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
				(u32 *) out_obj->buffer.pointer);

		if (offset + out_size > out_obj->buffer.length) {
			dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			break;
		}

		if (in_buf.buffer.length + offset + out_size > buf_len) {
			dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		memcpy(buf + in_buf.buffer.length + offset,
				out_obj->buffer.pointer + offset, out_size);
		offset += out_size;
	}
	if (offset + in_buf.buffer.length < buf_len) {
		if (i >= 1) {
			/*
			 * status valid, return the number of bytes left
			 * unfilled in the output buffer
			 */
			rc = buf_len - offset - in_buf.buffer.length;
			if (cmd_rc)
				*cmd_rc = xlat_status(buf, cmd);
		} else {
			dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
					__func__, dimm_name, cmd_name, buf_len,
					offset);
			rc = -ENXIO;
		}
	} else
		rc = 0;

 out:
	ACPI_FREE(out_obj);

	return rc;
}

static const char *spa_type_name(u16 type)
{
	static const char *to_name[] = {
		[NFIT_SPA_VOLATILE] = "volatile",
		[NFIT_SPA_PM] = "pmem",
		[NFIT_SPA_DCR] = "dimm-control-region",
		[NFIT_SPA_BDW] = "block-data-window",
		[NFIT_SPA_VDISK] = "volatile-disk",
		[NFIT_SPA_VCD] = "volatile-cd",
		[NFIT_SPA_PDISK] = "persistent-disk",
		[NFIT_SPA_PCD] = "persistent-cd",
	};

	if (type > NFIT_SPA_PCD)
		return "unknown";

	return to_name[type];
}

static int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
	int i;

	for (i = 0; i < NFIT_UUID_MAX; i++)
		if (memcmp(to_nfit_uuid(i), spa->range_guid, 16) == 0)
			return i;
	return -1;
}

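/*
 * The add_* helpers below reconcile a freshly read NFIT with the tables
 * seen in a previous enumeration (@prev): an identical table is simply
 * moved back onto the live acpi_desc list, otherwise a new
 * devm-allocated wrapper is appended.  Returning false indicates an
 * allocation failure.
 */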
static bool add_spa(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_system_address *spa)
{
	size_t length = min_t(size_t, sizeof(*spa), spa->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &prev->spas, list) {
		if (memcmp(nfit_spa->spa, spa, length) == 0) {
			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
			return true;
		}
	}

	nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa), GFP_KERNEL);
	if (!nfit_spa)
		return false;
	INIT_LIST_HEAD(&nfit_spa->list);
	nfit_spa->spa = spa;
	list_add_tail(&nfit_spa->list, &acpi_desc->spas);
	dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__,
			spa->range_index,
			spa_type_name(nfit_spa_type(spa)));
	return true;
}

static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_memory_map *memdev)
{
	size_t length = min_t(size_t, sizeof(*memdev), memdev->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &prev->memdevs, list)
		if (memcmp(nfit_memdev->memdev, memdev, length) == 0) {
			list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
			return true;
		}

	nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev), GFP_KERNEL);
	if (!nfit_memdev)
		return false;
	INIT_LIST_HEAD(&nfit_memdev->list);
	nfit_memdev->memdev = memdev;
	list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
	dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d\n",
			__func__, memdev->device_handle, memdev->range_index,
			memdev->region_index);
	return true;
}

static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_control_region *dcr)
{
	size_t length = min_t(size_t, sizeof(*dcr), dcr->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_dcr *nfit_dcr;

	list_for_each_entry(nfit_dcr, &prev->dcrs, list)
		if (memcmp(nfit_dcr->dcr, dcr, length) == 0) {
			list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
			return true;
		}

	nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr), GFP_KERNEL);
	if (!nfit_dcr)
		return false;
	INIT_LIST_HEAD(&nfit_dcr->list);
	nfit_dcr->dcr = dcr;
	list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
	dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__,
			dcr->region_index, dcr->windows);
	return true;
}

static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_data_region *bdw)
{
	size_t length = min_t(size_t, sizeof(*bdw), bdw->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_bdw *nfit_bdw;

	list_for_each_entry(nfit_bdw, &prev->bdws, list)
		if (memcmp(nfit_bdw->bdw, bdw, length) == 0) {
			list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
			return true;
		}

	nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw), GFP_KERNEL);
	if (!nfit_bdw)
		return false;
	INIT_LIST_HEAD(&nfit_bdw->list);
	nfit_bdw->bdw = bdw;
	list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
	dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__,
			bdw->region_index, bdw->windows);
	return true;
}

static bool add_idt(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_interleave *idt)
{
	size_t length = min_t(size_t, sizeof(*idt), idt->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_idt *nfit_idt;

	list_for_each_entry(nfit_idt, &prev->idts, list)
		if (memcmp(nfit_idt->idt, idt, length) == 0) {
			list_move_tail(&nfit_idt->list, &acpi_desc->idts);
			return true;
		}

	nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt), GFP_KERNEL);
	if (!nfit_idt)
		return false;
	INIT_LIST_HEAD(&nfit_idt->list);
	nfit_idt->idt = idt;
	list_add_tail(&nfit_idt->list, &acpi_desc->idts);
	dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__,
			idt->interleave_index, idt->line_count);
	return true;
}

static bool add_flush(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_flush_address *flush)
{
	size_t length = min_t(size_t, sizeof(*flush), flush->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_flush *nfit_flush;

	list_for_each_entry(nfit_flush, &prev->flushes, list)
		if (memcmp(nfit_flush->flush, flush, length) == 0) {
			list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
			return true;
		}

	nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush), GFP_KERNEL);
	if (!nfit_flush)
		return false;
	INIT_LIST_HEAD(&nfit_flush->list);
	nfit_flush->flush = flush;
	list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
	dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__,
			flush->device_handle, flush->hint_count);
	return true;
}

static void *add_table(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev, void *table, const void *end)
{
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_header *hdr;
	void *err = ERR_PTR(-ENOMEM);

	if (table >= end)
		return NULL;

	hdr = table;
	if (!hdr->length) {
		dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
				hdr->type);
		return NULL;
	}

	switch (hdr->type) {
	case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
		if (!add_spa(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_MEMORY_MAP:
		if (!add_memdev(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_CONTROL_REGION:
		if (!add_dcr(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_DATA_REGION:
		if (!add_bdw(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_INTERLEAVE:
		if (!add_idt(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
		if (!add_flush(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_SMBIOS:
		dev_dbg(dev, "%s: smbios\n", __func__);
		break;
	default:
		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
		break;
	}

	return table + hdr->length;
}

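/*
 * Find the block-data-window SPA range that backs this DIMM's control
 * region by matching memdev entries on range index, device handle, and
 * control-region index.
 */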
static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem)
{
	u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
	u16 dcr = nfit_mem->dcr->region_index;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		u16 range_index = nfit_spa->spa->range_index;
		int type = nfit_spa_type(nfit_spa->spa);
		struct nfit_memdev *nfit_memdev;

		if (type != NFIT_SPA_BDW)
			continue;

		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			if (nfit_memdev->memdev->range_index != range_index)
				continue;
			if (nfit_memdev->memdev->device_handle != device_handle)
				continue;
			if (nfit_memdev->memdev->region_index != dcr)
				continue;

			nfit_mem->spa_bdw = nfit_spa->spa;
			return;
		}
	}

	dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
			nfit_mem->spa_dcr->range_index);
	nfit_mem->bdw = NULL;
}

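/*
 * Resolve the optional block-window resources for a DIMM: the BDW
 * table, its SPA range, the matching memdev, the interleave table, and
 * the flush-hint address table.
 */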
static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
	struct nfit_memdev *nfit_memdev;
	struct nfit_flush *nfit_flush;
	struct nfit_bdw *nfit_bdw;
	struct nfit_idt *nfit_idt;
	u16 idt_idx, range_index;

	list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
		if (nfit_bdw->bdw->region_index != dcr)
			continue;
		nfit_mem->bdw = nfit_bdw->bdw;
		break;
	}

	if (!nfit_mem->bdw)
		return;

	nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);

	if (!nfit_mem->spa_bdw)
		return;

	range_index = nfit_mem->spa_bdw->range_index;
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		if (nfit_memdev->memdev->range_index != range_index ||
				nfit_memdev->memdev->region_index != dcr)
			continue;
		nfit_mem->memdev_bdw = nfit_memdev->memdev;
		idt_idx = nfit_memdev->memdev->interleave_index;
		list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
			if (nfit_idt->idt->interleave_index != idt_idx)
				continue;
			nfit_mem->idt_bdw = nfit_idt->idt;
			break;
		}

		list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
			if (nfit_flush->flush->device_handle !=
					nfit_memdev->memdev->device_handle)
				continue;
			nfit_mem->nfit_flush = nfit_flush;
			break;
		}
		break;
	}
}

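/*
 * For a given SPA-DCR or SPA-PM range, create (or reuse) an nfit_mem
 * object per referenced DIMM and attach its control region, interleave
 * description, and block-window resources.
 */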
static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_mem *nfit_mem, *found;
	struct nfit_memdev *nfit_memdev;
	int type = nfit_spa_type(spa);

	switch (type) {
	case NFIT_SPA_DCR:
	case NFIT_SPA_PM:
		break;
	default:
		return 0;
	}

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct nfit_dcr *nfit_dcr;
		u32 device_handle;
		u16 dcr;

		if (nfit_memdev->memdev->range_index != spa->range_index)
			continue;
		found = NULL;
		dcr = nfit_memdev->memdev->region_index;
		device_handle = nfit_memdev->memdev->device_handle;
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
			if (__to_nfit_memdev(nfit_mem)->device_handle
					== device_handle) {
				found = nfit_mem;
				break;
			}

		if (found)
			nfit_mem = found;
		else {
			nfit_mem = devm_kzalloc(acpi_desc->dev,
					sizeof(*nfit_mem), GFP_KERNEL);
			if (!nfit_mem)
				return -ENOMEM;
			INIT_LIST_HEAD(&nfit_mem->list);
			list_add(&nfit_mem->list, &acpi_desc->dimms);
		}

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != dcr)
				continue;
			/*
			 * Record the control region for the dimm.  For
			 * the ACPI 6.1 case, where there are separate
			 * control regions for the pmem vs blk
			 * interfaces, be sure to record the extended
			 * blk details.
			 */
			if (!nfit_mem->dcr)
				nfit_mem->dcr = nfit_dcr->dcr;
			else if (nfit_mem->dcr->windows == 0
					&& nfit_dcr->dcr->windows)
				nfit_mem->dcr = nfit_dcr->dcr;
			break;
		}

		if (dcr && !nfit_mem->dcr) {
			dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
					spa->range_index, dcr);
			return -ENODEV;
		}

		if (type == NFIT_SPA_DCR) {
			struct nfit_idt *nfit_idt;
			u16 idt_idx;

			/* multiple dimms may share a SPA when interleaved */
			nfit_mem->spa_dcr = spa;
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
			idt_idx = nfit_memdev->memdev->interleave_index;
			list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
				if (nfit_idt->idt->interleave_index != idt_idx)
					continue;
				nfit_mem->idt_dcr = nfit_idt->idt;
				break;
			}
			nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
		} else {
			/*
			 * A single dimm may belong to multiple SPA-PM
			 * ranges, record at least one in addition to
			 * any SPA-DCR range.
			 */
			nfit_mem->memdev_pmem = nfit_memdev->memdev;
		}
	}

	return 0;
}

static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
{
	struct nfit_mem *a = container_of(_a, typeof(*a), list);
	struct nfit_mem *b = container_of(_b, typeof(*b), list);
	u32 handleA, handleB;

	handleA = __to_nfit_memdev(a)->device_handle;
	handleB = __to_nfit_memdev(b)->device_handle;
	if (handleA < handleB)
		return -1;
	else if (handleA > handleB)
		return 1;
	return 0;
}

static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;

	/*
	 * For each SPA-DCR or SPA-PMEM address range find its
	 * corresponding MEMDEV(s).  From each MEMDEV find the
	 * corresponding DCR.  Then, if we're operating on a SPA-DCR,
	 * try to find a SPA-BDW and a corresponding BDW that references
	 * the DCR.  Throw it all into an nfit_mem object.  Note, that
	 * BDWs are optional.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		int rc;

		rc = nfit_mem_dcr_init(acpi_desc, nfit_spa->spa);
		if (rc)
			return rc;
	}

	list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);

	return 0;
}

static ssize_t revision_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
}
static DEVICE_ATTR_RO(revision);

static struct attribute *acpi_nfit_attributes[] = {
	&dev_attr_revision.attr,
	NULL,
};

static struct attribute_group acpi_nfit_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_attributes,
};

static const struct attribute_group *acpi_nfit_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&acpi_nfit_attribute_group,
	NULL,
};

static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return __to_nfit_memdev(nfit_mem);
}

static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return nfit_mem->dcr;
}

static ssize_t handle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->device_handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t phys_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->vendor_id);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->revision_id);
}
static DEVICE_ATTR_RO(rev_id);

static ssize_t device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->device_id);
}
static DEVICE_ATTR_RO(device);

static ssize_t format_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->code);
}
static DEVICE_ATTR_RO(format);

static ssize_t serial_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->serial_number);
}
static DEVICE_ATTR_RO(serial);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u16 flags = to_nfit_memdev(dev)->flags;

	return sprintf(buf, "%s%s%s%s%s\n",
		flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
		flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
		flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
		flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
		flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "");
}
static DEVICE_ATTR_RO(flags);

static struct attribute *acpi_nfit_dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_format.attr,
	&dev_attr_serial.attr,
	&dev_attr_rev_id.attr,
	&dev_attr_flags.attr,
	NULL,
};

static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);

	if (to_nfit_dcr(dev))
		return a->mode;
	else
		return 0;
}

static struct attribute_group acpi_nfit_dimm_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_dimm_attributes,
	.is_visible = acpi_nfit_dimm_attr_visible,
};

static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
	&nvdimm_attribute_group,
	&nd_device_attribute_group,
	&acpi_nfit_dimm_attribute_group,
	NULL,
};

static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
		u32 device_handle)
{
	struct nfit_mem *nfit_mem;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
		if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
			return nfit_mem->nvdimm;

	return NULL;
}

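/*
 * Locate the ACPI companion device for a DIMM by its NFIT device handle
 * and probe which _DSM functions it implements to build the per-DIMM
 * command mask.
 */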
static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, u32 device_handle)
{
	struct acpi_device *adev, *adev_dimm;
	struct device *dev = acpi_desc->dev;
	const u8 *uuid = to_nfit_uuid(NFIT_DEV_DIMM);
	int i;

	nfit_mem->dsm_mask = acpi_desc->dimm_dsm_force_en;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return 0;

	adev_dimm = acpi_find_child_device(adev, device_handle, false);
	nfit_mem->adev = adev_dimm;
	if (!adev_dimm) {
		dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
				device_handle);
		return force_enable_dimms ? 0 : -ENODEV;
	}

	for (i = ND_CMD_SMART; i <= ND_CMD_VENDOR; i++)
		if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nfit_mem->dsm_mask);

	return 0;
}

static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_mem *nfit_mem;
	int dimm_count = 0;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct nvdimm *nvdimm;
		unsigned long flags = 0;
		u32 device_handle;
		u16 mem_flags;
		int rc;

		device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
		nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
		if (nvdimm) {
			dimm_count++;
			continue;
		}

		if (nfit_mem->bdw && nfit_mem->memdev_pmem)
			flags |= NDD_ALIASING;

		mem_flags = __to_nfit_memdev(nfit_mem)->flags;
		if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
			flags |= NDD_UNARMED;

		rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
		if (rc)
			continue;

		nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
				acpi_nfit_dimm_attribute_groups,
				flags, &nfit_mem->dsm_mask);
		if (!nvdimm)
			return -ENOMEM;

		nfit_mem->nvdimm = nvdimm;
		dimm_count++;

		if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
			continue;

		dev_info(acpi_desc->dev, "%s flags:%s%s%s%s\n",
			nvdimm_name(nvdimm),
			mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
			mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
			mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
			mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "");

	}

	return nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
}

static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	const u8 *uuid = to_nfit_uuid(NFIT_DEV_BUS);
	struct acpi_device *adev;
	int i;

	nd_desc->dsm_mask = acpi_desc->bus_dsm_force_en;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return;

	for (i = ND_CMD_ARS_CAP; i <= ND_CMD_ARS_STATUS; i++)
		if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nd_desc->dsm_mask);
}

static ssize_t range_index_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);

	return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
}
static DEVICE_ATTR_RO(range_index);

static struct attribute *acpi_nfit_region_attributes[] = {
	&dev_attr_range_index.attr,
	NULL,
};

static struct attribute_group acpi_nfit_region_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_region_attributes,
};

static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
	&nd_region_attribute_group,
	&nd_mapping_attribute_group,
	&nd_device_attribute_group,
	&nd_numa_attribute_group,
	&acpi_nfit_region_attribute_group,
	NULL,
};

/* enough info to uniquely specify an interleave set */
struct nfit_set_info {
	struct nfit_set_info_map {
		u64 region_offset;
		u32 serial_number;
		u32 pad;
	} mapping[0];
};

static size_t sizeof_nfit_set_info(int num_mappings)
{
	return sizeof(struct nfit_set_info)
		+ num_mappings * sizeof(struct nfit_set_info_map);
}

static int cmp_map(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	return memcmp(&map0->region_offset, &map1->region_offset,
			sizeof(u64));
}

/* Retrieve the nth entry referencing this spa */
static struct acpi_nfit_memory_map *memdev_from_spa(
		struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
{
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
		if (nfit_memdev->memdev->range_index == range_index)
			if (n-- == 0)
				return nfit_memdev->memdev;
	return NULL;
}

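/*
 * The interleave-set cookie is an nd_fletcher64() checksum over the
 * (region-offset, DIMM serial-number) tuples of the region's mappings,
 * sorted by region offset so the result is independent of mapping
 * order.
 */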
static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc,
		struct acpi_nfit_system_address *spa)
{
	int i, spa_type = nfit_spa_type(spa);
	struct device *dev = acpi_desc->dev;
	struct nd_interleave_set *nd_set;
	u16 nr = ndr_desc->num_mappings;
	struct nfit_set_info *info;

	if (spa_type == NFIT_SPA_PM || spa_type == NFIT_SPA_VOLATILE)
		/* pass */;
	else
		return 0;

	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set)
		return -ENOMEM;

	info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nfit_set_info_map *map = &info->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
				spa->range_index, i);

		if (!memdev || !nfit_mem->dcr) {
			dev_err(dev, "%s: failed to find DCR\n", __func__);
			return -ENODEV;
		}

		map->region_offset = memdev->region_offset;
		map->serial_number = nfit_mem->dcr->serial_number;
	}

	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map, NULL);
	nd_set->cookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
	ndr_desc->nd_set = nd_set;
	devm_kfree(dev, info);

	return 0;
}

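/*
 * Convert a lane-relative offset into an offset within the interleaved
 * system-physical-address span.  Illustrative example (values are made
 * up): with line_size = 256, num_lines = 2, and table_size = 4096,
 * offset 520 yields line_no = 2, sub_line_offset = 8, line_index = 0,
 * table_skip_count = 1, so the result is base_offset +
 * line_offset[0] * 256 + 4096 + 8.
 */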
static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
{
	struct acpi_nfit_interleave *idt = mmio->idt;
	u32 sub_line_offset, line_index, line_offset;
	u64 line_no, table_skip_count, table_offset;

	line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
	table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
	line_offset = idt->line_offset[line_index]
		* mmio->line_size;
	table_offset = table_skip_count * mmio->table_size;

	return mmio->base_offset + line_offset + table_offset + sub_line_offset;
}

static void wmb_blk(struct nfit_blk *nfit_blk)
{
	if (nfit_blk->nvdimm_flush) {
		/*
		 * The first wmb() is needed to 'sfence' all previous writes
		 * such that they are architecturally visible for the platform
		 * buffer flush.  Note that we've already arranged for pmem
		 * writes to avoid the cache via arch_memcpy_to_pmem().  The
		 * final wmb() ensures ordering for the NVDIMM flush write.
		 */
		wmb();
		writeq(1, nfit_blk->nvdimm_flush);
		wmb();
	} else
		wmb_pmem();
}

static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
	u64 offset = nfit_blk->stat_offset + mmio->size * bw;

	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	return readl(mmio->addr.base + offset);
}

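/*
 * Program the block-control-window command register for one lane: the
 * 64-bit command encodes the cache-line-aligned DPA in bits 0-47, the
 * transfer length in cache lines in bits 48-55, and the read/write bit
 * in bit 56, per the BCW_* constants below.
 */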
static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
		resource_size_t dpa, unsigned int len, unsigned int write)
{
	u64 cmd, offset;
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];

	enum {
		BCW_OFFSET_MASK = (1ULL << 48)-1,
		BCW_LEN_SHIFT = 48,
		BCW_LEN_MASK = (1ULL << 8) - 1,
		BCW_CMD_SHIFT = 56,
	};

	cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
	len = len >> L1_CACHE_SHIFT;
	cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
	cmd |= ((u64) write) << BCW_CMD_SHIFT;

	offset = nfit_blk->cmd_offset + mmio->size * bw;
	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	writeq(cmd, mmio->addr.base + offset);
	wmb_blk(nfit_blk);

	if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
		readq(mmio->addr.base + offset);
}

static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
		resource_size_t dpa, void *iobuf, size_t len, int rw,
		unsigned int lane)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	unsigned int copied = 0;
	u64 base_offset;
	int rc;

	base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
		+ lane * mmio->size;
	write_blk_ctl(nfit_blk, lane, dpa, len, rw);
	while (len) {
		unsigned int c;
		u64 offset;

		if (mmio->num_lines) {
			u32 line_offset;

			offset = to_interleave_offset(base_offset + copied,
					mmio);
			div_u64_rem(offset, mmio->line_size, &line_offset);
			c = min_t(size_t, len, mmio->line_size - line_offset);
		} else {
			offset = base_offset + nfit_blk->bdw_offset;
			c = len;
		}

		if (rw)
			memcpy_to_pmem(mmio->addr.aperture + offset,
					iobuf + copied, c);
		else {
			if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
				mmio_flush_range((void __force *)
					mmio->addr.aperture + offset, c);

			memcpy_from_pmem(iobuf + copied,
					mmio->addr.aperture + offset, c);
		}

		copied += c;
		len -= c;
	}

	if (rw)
		wmb_blk(nfit_blk);

	rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
	return rc;
}

static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
		resource_size_t dpa, void *iobuf, u64 len, int rw)
{
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	struct nd_region *nd_region = nfit_blk->nd_region;
	unsigned int lane, copied = 0;
	int rc = 0;

	lane = nd_region_acquire_lane(nd_region);
	while (len) {
		u64 c = min(len, mmio->size);

		rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
				iobuf + copied, c, rw, lane);
		if (rc)
			break;

		copied += c;
		len -= c;
	}
	nd_region_release_lane(nd_region, lane);

	return rc;
}

static void nfit_spa_mapping_release(struct kref *kref)
{
	struct nfit_spa_mapping *spa_map = to_spa_map(kref);
	struct acpi_nfit_system_address *spa = spa_map->spa;
	struct acpi_nfit_desc *acpi_desc = spa_map->acpi_desc;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
	dev_dbg(acpi_desc->dev, "%s: SPA%d\n", __func__, spa->range_index);
	if (spa_map->type == SPA_MAP_APERTURE)
		memunmap((void __force *)spa_map->addr.aperture);
	else
		iounmap(spa_map->addr.base);
	release_mem_region(spa->address, spa->length);
	list_del(&spa_map->list);
	kfree(spa_map);
}

static struct nfit_spa_mapping *find_spa_mapping(
		struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_spa_mapping *spa_map;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
	list_for_each_entry(spa_map, &acpi_desc->spa_maps, list)
		if (spa_map->spa == spa)
			return spa_map;

	return NULL;
}

static void nfit_spa_unmap(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_spa_mapping *spa_map;

	mutex_lock(&acpi_desc->spa_map_mutex);
	spa_map = find_spa_mapping(acpi_desc, spa);

	if (spa_map)
		kref_put(&spa_map->kref, nfit_spa_mapping_release);
	mutex_unlock(&acpi_desc->spa_map_mutex);
}

static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa, enum spa_map_type type)
{
	resource_size_t start = spa->address;
	resource_size_t n = spa->length;
	struct nfit_spa_mapping *spa_map;
	struct resource *res;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));

	spa_map = find_spa_mapping(acpi_desc, spa);
	if (spa_map) {
		kref_get(&spa_map->kref);
		return spa_map->addr.base;
	}

	spa_map = kzalloc(sizeof(*spa_map), GFP_KERNEL);
	if (!spa_map)
		return NULL;

	INIT_LIST_HEAD(&spa_map->list);
	spa_map->spa = spa;
	kref_init(&spa_map->kref);
	spa_map->acpi_desc = acpi_desc;

	res = request_mem_region(start, n, dev_name(acpi_desc->dev));
	if (!res)
		goto err_mem;

	spa_map->type = type;
	if (type == SPA_MAP_APERTURE)
		spa_map->addr.aperture = (void __pmem *)memremap(start, n,
							ARCH_MEMREMAP_PMEM);
	else
		spa_map->addr.base = ioremap_nocache(start, n);

	if (!spa_map->addr.base)
		goto err_map;

	list_add_tail(&spa_map->list, &acpi_desc->spa_maps);
	return spa_map->addr.base;

 err_map:
	release_mem_region(start, n);
 err_mem:
	kfree(spa_map);
	return NULL;
}

/**
 * nfit_spa_map - interleave-aware managed-mappings of acpi_nfit_system_address ranges
 * @nvdimm_bus: NFIT-bus that provided the spa table entry
 * @nfit_spa: spa table to map
 * @type: aperture or control region
 *
 * In the case where block-data-window apertures and
 * dimm-control-regions are interleaved they will end up sharing a
 * single request_mem_region() + ioremap() for the address range.  In
 * the style of devm nfit_spa_map() mappings are automatically dropped
 * when all region devices referencing the same mapping are disabled /
 * unbound.
 */
static void __iomem *nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa, enum spa_map_type type)
{
	void __iomem *iomem;

	mutex_lock(&acpi_desc->spa_map_mutex);
	iomem = __nfit_spa_map(acpi_desc, spa, type);
	mutex_unlock(&acpi_desc->spa_map_mutex);

	return iomem;
}

static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
		struct acpi_nfit_interleave *idt, u16 interleave_ways)
{
	if (idt) {
		mmio->num_lines = idt->line_count;
		mmio->line_size = idt->line_size;
		if (interleave_ways == 0)
			return -ENXIO;
		mmio->table_size = mmio->num_lines * interleave_ways
			* mmio->line_size;
	}

	return 0;
}

static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
{
	struct nd_cmd_dimm_flags flags;
	int rc;

	memset(&flags, 0, sizeof(flags));
	rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
			sizeof(flags), NULL);

	if (rc >= 0 && flags.status == 0)
		nfit_blk->dimm_flags = flags.flags;
	else if (rc == -ENOTTY) {
		/* fall back to a conservative default */
		nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH;
		rc = 0;
	} else
		rc = -ENXIO;

	return rc;
}

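/*
 * Enable a BLK region: map the block-data-window aperture and the
 * dimm-control-region, set up interleave geometry for both, read the
 * DIMM flags, and map the first flush-hint address if one is provided.
 */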
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001430static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
1431 struct device *dev)
1432{
1433 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
1434 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
1435 struct nd_blk_region *ndbr = to_nd_blk_region(dev);
Ross Zwislerc2ad2952015-07-10 11:06:13 -06001436 struct nfit_flush *nfit_flush;
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001437 struct nfit_blk_mmio *mmio;
1438 struct nfit_blk *nfit_blk;
1439 struct nfit_mem *nfit_mem;
1440 struct nvdimm *nvdimm;
1441 int rc;
1442
1443 nvdimm = nd_blk_region_to_dimm(ndbr);
1444 nfit_mem = nvdimm_provider_data(nvdimm);
1445 if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
1446 dev_dbg(dev, "%s: missing%s%s%s\n", __func__,
1447 nfit_mem ? "" : " nfit_mem",
Dan Williams193ccca2015-06-30 16:09:39 -04001448 (nfit_mem && nfit_mem->dcr) ? "" : " dcr",
1449 (nfit_mem && nfit_mem->bdw) ? "" : " bdw");
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001450 return -ENXIO;
1451 }
1452
1453 nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL);
1454 if (!nfit_blk)
1455 return -ENOMEM;
1456 nd_blk_region_set_provider_data(ndbr, nfit_blk);
1457 nfit_blk->nd_region = to_nd_region(dev);
1458
1459 /* map block aperture memory */
1460 nfit_blk->bdw_offset = nfit_mem->bdw->offset;
1461 mmio = &nfit_blk->mmio[BDW];
Ross Zwisler67a3e8f2015-08-27 13:14:20 -06001462 mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw,
Ross Zwislerc2ad2952015-07-10 11:06:13 -06001463 SPA_MAP_APERTURE);
Ross Zwisler67a3e8f2015-08-27 13:14:20 -06001464 if (!mmio->addr.base) {
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001465 dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
1466 nvdimm_name(nvdimm));
1467 return -ENOMEM;
1468 }
1469 mmio->size = nfit_mem->bdw->size;
1470 mmio->base_offset = nfit_mem->memdev_bdw->region_offset;
1471 mmio->idt = nfit_mem->idt_bdw;
1472 mmio->spa = nfit_mem->spa_bdw;
1473 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw,
1474 nfit_mem->memdev_bdw->interleave_ways);
1475 if (rc) {
1476 dev_dbg(dev, "%s: %s failed to init bdw interleave\n",
1477 __func__, nvdimm_name(nvdimm));
1478 return rc;
1479 }
1480
1481 /* map block control memory */
1482 nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
1483 nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
1484 mmio = &nfit_blk->mmio[DCR];
Ross Zwisler67a3e8f2015-08-27 13:14:20 -06001485 mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr,
Ross Zwislerc2ad2952015-07-10 11:06:13 -06001486 SPA_MAP_CONTROL);
Ross Zwisler67a3e8f2015-08-27 13:14:20 -06001487 if (!mmio->addr.base) {
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001488 dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,
1489 nvdimm_name(nvdimm));
1490 return -ENOMEM;
1491 }
1492 mmio->size = nfit_mem->dcr->window_size;
1493 mmio->base_offset = nfit_mem->memdev_dcr->region_offset;
1494 mmio->idt = nfit_mem->idt_dcr;
1495 mmio->spa = nfit_mem->spa_dcr;
1496 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr,
1497 nfit_mem->memdev_dcr->interleave_ways);
1498 if (rc) {
1499 dev_dbg(dev, "%s: %s failed to init dcr interleave\n",
1500 __func__, nvdimm_name(nvdimm));
1501 return rc;
1502 }
1503
Ross Zwislerf0f2c072015-07-10 11:06:14 -06001504 rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
1505 if (rc < 0) {
1506 dev_dbg(dev, "%s: %s failed get DIMM flags\n",
1507 __func__, nvdimm_name(nvdimm));
1508 return rc;
1509 }
1510
Ross Zwislerc2ad2952015-07-10 11:06:13 -06001511 nfit_flush = nfit_mem->nfit_flush;
1512 if (nfit_flush && nfit_flush->flush->hint_count != 0) {
1513 nfit_blk->nvdimm_flush = devm_ioremap_nocache(dev,
1514 nfit_flush->flush->hint_address[0], 8);
1515 if (!nfit_blk->nvdimm_flush)
1516 return -ENOMEM;
1517 }
1518
1519 if (!arch_has_wmb_pmem() && !nfit_blk->nvdimm_flush)
1520 dev_warn(dev, "unable to guarantee persistence of writes\n");
1521
1522 if (mmio->line_size == 0)
1523 return 0;
1524
1525 if ((u32) nfit_blk->cmd_offset % mmio->line_size
1526 + 8 > mmio->line_size) {
1527 dev_dbg(dev, "cmd_offset crosses interleave boundary\n");
1528 return -ENXIO;
1529 } else if ((u32) nfit_blk->stat_offset % mmio->line_size
1530 + 8 > mmio->line_size) {
1531 dev_dbg(dev, "stat_offset crosses interleave boundary\n");
1532 return -ENXIO;
1533 }
1534
1535 return 0;
1536}
1537
1538static void acpi_nfit_blk_region_disable(struct nvdimm_bus *nvdimm_bus,
1539 struct device *dev)
1540{
1541 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
1542 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
1543 struct nd_blk_region *ndbr = to_nd_blk_region(dev);
1544 struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
1545 int i;
1546
1547 if (!nfit_blk)
1548 return; /* never enabled */
1549
1550 /* auto-free BLK spa mappings */
1551 for (i = 0; i < 2; i++) {
1552 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[i];
1553
1554 if (mmio->addr.base)
1555 nfit_spa_unmap(acpi_desc, mmio->spa);
1556 }
1557 nd_blk_region_set_provider_data(ndbr, NULL);
1558 /* devm will free nfit_blk */
1559}
1560
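/*
 * Query the platform's Address Range Scrub capability for a SPA range.
 * The translated firmware status is returned so the caller can size its
 * ars_status buffer from cmd->max_ars_out.
 */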
1561static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
1562 struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
1563{
1564 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
1565 struct acpi_nfit_system_address *spa = nfit_spa->spa;
1566 int cmd_rc, rc;
1567
1568 cmd->address = spa->address;
1569 cmd->length = spa->length;
1570 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
1571 sizeof(*cmd), &cmd_rc);
1572 if (rc < 0)
1573 return rc;
1574 return cmd_rc;
1575}
1576
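/*
 * Kick off a scrub of an entire SPA range.  The requested scrub type
 * must match the range type (persistent vs volatile).
 */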
1577static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa)
1578{
1579 int rc;
1580 int cmd_rc;
1581 struct nd_cmd_ars_start ars_start;
1582 struct acpi_nfit_system_address *spa = nfit_spa->spa;
1583 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
1584
1585 memset(&ars_start, 0, sizeof(ars_start));
1586 ars_start.address = spa->address;
1587 ars_start.length = spa->length;
1588 if (nfit_spa_type(spa) == NFIT_SPA_PM)
1589 ars_start.type = ND_ARS_PERSISTENT;
1590 else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
1591 ars_start.type = ND_ARS_VOLATILE;
1592 else
1593 return -ENOTTY;
1594
1595 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
1596 sizeof(ars_start), &cmd_rc);
1597
1598 if (rc < 0)
1599 return rc;
1600 return cmd_rc;
1601}
1602
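/*
 * Resume a scrub whose results overflowed the status buffer, restarting
 * from the address/length reported by the previous ND_CMD_ARS_STATUS.
 */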
1603static int ars_continue(struct acpi_nfit_desc *acpi_desc)
1604{
1605 int rc, cmd_rc;
1606 struct nd_cmd_ars_start ars_start;
1607 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
1608 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
1609
1610 memset(&ars_start, 0, sizeof(ars_start));
1611 ars_start.address = ars_status->restart_address;
1612 ars_start.length = ars_status->restart_length;
1613 ars_start.type = ars_status->type;
1614 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
1615 sizeof(ars_start), &cmd_rc);
1616 if (rc < 0)
1617 return rc;
1618 return cmd_rc;
1619}
1620
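/*
 * Pull the current scrub results into acpi_desc->ars_status and return
 * the translated firmware status (e.g. -EBUSY while the scrub is still
 * running, -ENOSPC when more records are pending).
 */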
1621static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
1622{
1623 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
1624 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
1625 int rc, cmd_rc;
1626
1627 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status,
1628 acpi_desc->ars_status_size, &cmd_rc);
1629 if (rc < 0)
1630 return rc;
1631 return cmd_rc;
1632}
1633
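/* Feed each error record from the last scrub to the nvdimm core as poison. */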
1634static int ars_status_process_records(struct nvdimm_bus *nvdimm_bus,
1635 struct nd_cmd_ars_status *ars_status)
1636{
1637 int rc;
1638 u32 i;
1639
1640 for (i = 0; i < ars_status->num_records; i++) {
1641 rc = nvdimm_bus_add_poison(nvdimm_bus,
1642 ars_status->records[i].err_address,
1643 ars_status->records[i].length);
1644 if (rc)
1645 return rc;
1646 }
1647
1648 return 0;
1649}
1650
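/*
 * Fill in one nd_mapping for a SPA range.  PMEM and volatile ranges map
 * straight from the memdev entry; a DCR (BLK) range also registers its
 * block-window region here since it does not depend on scrub results.
 */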
1651static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
1652 struct nd_mapping *nd_mapping, struct nd_region_desc *ndr_desc,
1653 struct acpi_nfit_memory_map *memdev,
1654 struct nfit_spa *nfit_spa)
1655{
1656 struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
1657 memdev->device_handle);
1658 struct acpi_nfit_system_address *spa = nfit_spa->spa;
1659 struct nd_blk_region_desc *ndbr_desc;
1660 struct nfit_mem *nfit_mem;
1661 int blk_valid = 0;
1662
1663 if (!nvdimm) {
1664 dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
1665 spa->range_index, memdev->device_handle);
1666 return -ENODEV;
1667 }
1668
1669 nd_mapping->nvdimm = nvdimm;
1670 switch (nfit_spa_type(spa)) {
1671 case NFIT_SPA_PM:
1672 case NFIT_SPA_VOLATILE:
1673 nd_mapping->start = memdev->address;
1674 nd_mapping->size = memdev->region_size;
1675 break;
1676 case NFIT_SPA_DCR:
1677 nfit_mem = nvdimm_provider_data(nvdimm);
1678 if (!nfit_mem || !nfit_mem->bdw) {
1679 dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
1680 spa->range_index, nvdimm_name(nvdimm));
1681 } else {
1682 nd_mapping->size = nfit_mem->bdw->capacity;
1683 nd_mapping->start = nfit_mem->bdw->start_address;
1684 ndr_desc->num_lanes = nfit_mem->bdw->windows;
1685 blk_valid = 1;
1686 }
1687
1688 ndr_desc->nd_mapping = nd_mapping;
1689 ndr_desc->num_mappings = blk_valid;
1690 ndbr_desc = to_blk_region_desc(ndr_desc);
1691 ndbr_desc->enable = acpi_nfit_blk_region_enable;
1692 ndbr_desc->disable = acpi_nfit_blk_region_disable;
1693 ndbr_desc->do_io = acpi_desc->blk_do_io;
1694 nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
1695 ndr_desc);
1696 if (!nfit_spa->nd_region)
1697 return -ENOMEM;
1698 break;
1699 }
1700
1701 return 0;
1702}
1703
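/*
 * Register one SPA range with the nvdimm core: collect every memdev that
 * references the range into the mapping array, establish the interleave
 * set cookie, and create a pmem, volatile, or blk region as appropriate.
 */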
1704static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
1705 struct nfit_spa *nfit_spa)
1706{
1707 static struct nd_mapping nd_mappings[ND_MAX_MAPPINGS];
1708 struct acpi_nfit_system_address *spa = nfit_spa->spa;
1709 struct nd_blk_region_desc ndbr_desc;
1710 struct nd_region_desc *ndr_desc;
1711 struct nfit_memdev *nfit_memdev;
1712 struct nvdimm_bus *nvdimm_bus;
1713 struct resource res;
1714 int count = 0, rc;
1715
1716 if (nfit_spa->nd_region)
1717 return 0;
1718
1719 if (spa->range_index == 0) {
1720 dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n",
1721 __func__);
1722 return 0;
1723 }
1724
1725 memset(&res, 0, sizeof(res));
1726 memset(&nd_mappings, 0, sizeof(nd_mappings));
1727 memset(&ndbr_desc, 0, sizeof(ndbr_desc));
1728 res.start = spa->address;
1729 res.end = res.start + spa->length - 1;
1730 ndr_desc = &ndbr_desc.ndr_desc;
1731 ndr_desc->res = &res;
1732 ndr_desc->provider_data = nfit_spa;
1733 ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
1734 if (spa->flags & ACPI_NFIT_PROXIMITY_VALID)
1735 ndr_desc->numa_node = acpi_map_pxm_to_online_node(
1736 spa->proximity_domain);
1737 else
1738 ndr_desc->numa_node = NUMA_NO_NODE;
1739
1740 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
1741 struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
1742 struct nd_mapping *nd_mapping;
1743
1744 if (memdev->range_index != spa->range_index)
1745 continue;
1746 if (count >= ND_MAX_MAPPINGS) {
1747 dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
1748 spa->range_index, ND_MAX_MAPPINGS);
1749 return -ENXIO;
1750 }
1751 nd_mapping = &nd_mappings[count++];
1752 rc = acpi_nfit_init_mapping(acpi_desc, nd_mapping, ndr_desc,
1753 memdev, nfit_spa);
1754 if (rc)
1755 goto out;
1756 }
1757
1758 ndr_desc->nd_mapping = nd_mappings;
1759 ndr_desc->num_mappings = count;
1760 rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
1761 if (rc)
1762 goto out;
1763
1764 nvdimm_bus = acpi_desc->nvdimm_bus;
1765 if (nfit_spa_type(spa) == NFIT_SPA_PM) {
1766 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
1767 ndr_desc);
1768 if (!nfit_spa->nd_region)
1769 rc = -ENOMEM;
1770 } else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) {
1771 nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
1772 ndr_desc);
1773 if (!nfit_spa->nd_region)
1774 rc = -ENOMEM;
1775 }
1776
1777 out:
1778 if (rc)
1779 dev_err(acpi_desc->dev, "failed to register spa range %d\n",
1780 nfit_spa->spa->range_index);
1781 return rc;
1782}
1783
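/* Make sure the shared ars_status buffer can hold max_ars bytes. */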
1784static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc,
1785 u32 max_ars)
1786{
1787 struct device *dev = acpi_desc->dev;
1788 struct nd_cmd_ars_status *ars_status;
1789
1790 if (acpi_desc->ars_status && acpi_desc->ars_status_size >= max_ars) {
1791 memset(acpi_desc->ars_status, 0, acpi_desc->ars_status_size);
1792 return 0;
1793 }
1794
1795 if (acpi_desc->ars_status)
1796 devm_kfree(dev, acpi_desc->ars_status);
1797 acpi_desc->ars_status = NULL;
1798 ars_status = devm_kzalloc(dev, max_ars, GFP_KERNEL);
1799 if (!ars_status)
1800 return -ENOMEM;
1801 acpi_desc->ars_status = ars_status;
1802 acpi_desc->ars_status_size = max_ars;
1803 return 0;
1804}
1805
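/*
 * Fetch the latest scrub results for a range and hand any error records
 * to the nvdimm core.  The first call per range also caches the ARS
 * capabilities (max status size, clear-error unit).
 */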
1806static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc,
1807 struct nfit_spa *nfit_spa)
1808{
1809 struct acpi_nfit_system_address *spa = nfit_spa->spa;
1810 int rc;
1811
1812 if (!nfit_spa->max_ars) {
1813 struct nd_cmd_ars_cap ars_cap;
1814
1815 memset(&ars_cap, 0, sizeof(ars_cap));
1816 rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
1817 if (rc < 0)
1818 return rc;
1819 nfit_spa->max_ars = ars_cap.max_ars_out;
1820 nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
1821 /* check that the supported scrub types match the spa type */
1822 if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE &&
1823 ((ars_cap.status >> 16) & ND_ARS_VOLATILE) == 0)
1824 return -ENOTTY;
1825 else if (nfit_spa_type(spa) == NFIT_SPA_PM &&
1826 ((ars_cap.status >> 16) & ND_ARS_PERSISTENT) == 0)
1827 return -ENOTTY;
1828 }
1829
1830 if (ars_status_alloc(acpi_desc, nfit_spa->max_ars))
1831 return -ENOMEM;
1832
1833 rc = ars_get_status(acpi_desc);
1834 if (rc < 0 && rc != -ENOSPC)
1835 return rc;
1836
1837 if (ars_status_process_records(acpi_desc->nvdimm_bus,
1838 acpi_desc->ars_status))
1839 return -ENOMEM;
1840
1841 return 0;
1842}
1843
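/*
 * Run a directed scrub for a range that was not covered by the initial
 * platform scrub, dropping init_mutex while polling so new ranges can
 * still be appended behind us.
 */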
1844static void acpi_nfit_async_scrub(struct acpi_nfit_desc *acpi_desc,
1845 struct nfit_spa *nfit_spa)
1846{
1847 struct acpi_nfit_system_address *spa = nfit_spa->spa;
1848 unsigned int overflow_retry = scrub_overflow_abort;
1849 u64 init_ars_start = 0, init_ars_len = 0;
1850 struct device *dev = acpi_desc->dev;
1851 unsigned int tmo = scrub_timeout;
1852 int rc;
1853
1854 if (nfit_spa->ars_done || !nfit_spa->nd_region)
1855 return;
1856
1857 rc = ars_start(acpi_desc, nfit_spa);
1858 /*
1859 * If we timed out the initial scan we'll still be busy here,
1860 * and will wait another timeout before giving up permanently.
1861 */
1862 if (rc < 0 && rc != -EBUSY)
1863 return;
1864
1865 do {
1866 u64 ars_start, ars_len;
1867
1868 if (acpi_desc->cancel)
1869 break;
1870 rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
1871 if (rc == -ENOTTY)
1872 break;
1873 if (rc == -EBUSY && !tmo) {
1874 dev_warn(dev, "range %d ars timeout, aborting\n",
1875 spa->range_index);
1876 break;
1877 }
1878
1879 if (rc == -EBUSY) {
1880 /*
1881 * Note, entries may be appended to the list
1882 * while the lock is dropped, but the workqueue
1883 * being active prevents entries being deleted /
1884 * freed.
1885 */
1886 mutex_unlock(&acpi_desc->init_mutex);
1887 ssleep(1);
1888 tmo--;
1889 mutex_lock(&acpi_desc->init_mutex);
1890 continue;
1891 }
1892
1893 /* we got some results, but there are more pending... */
1894 if (rc == -ENOSPC && overflow_retry--) {
1895 if (!init_ars_len) {
1896 init_ars_len = acpi_desc->ars_status->length;
1897 init_ars_start = acpi_desc->ars_status->address;
1898 }
1899 rc = ars_continue(acpi_desc);
1900 }
1901
1902 if (rc < 0) {
1903 dev_warn(dev, "range %d ars continuation failed\n",
1904 spa->range_index);
1905 break;
1906 }
1907
1908 if (init_ars_len) {
1909 ars_start = init_ars_start;
1910 ars_len = init_ars_len;
1911 } else {
1912 ars_start = acpi_desc->ars_status->address;
1913 ars_len = acpi_desc->ars_status->length;
1914 }
1915 dev_dbg(dev, "spa range: %d ars from %#llx + %#llx complete\n",
1916 spa->range_index, ars_start, ars_len);
1917 /* notify the region about new poison entries */
1918 nvdimm_region_notify(nfit_spa->nd_region,
1919 NVDIMM_REVALIDATE_POISON);
1920 break;
1921 } while (1);
1922}
1923
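/* Workqueue entry point for the two-phase scrub described below. */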
1924static void acpi_nfit_scrub(struct work_struct *work)
1925{
1926 struct device *dev;
1927 u64 init_scrub_length = 0;
1928 struct nfit_spa *nfit_spa;
1929 u64 init_scrub_address = 0;
1930 bool init_ars_done = false;
1931 struct acpi_nfit_desc *acpi_desc;
1932 unsigned int tmo = scrub_timeout;
1933 unsigned int overflow_retry = scrub_overflow_abort;
1934
1935 acpi_desc = container_of(work, typeof(*acpi_desc), work);
1936 dev = acpi_desc->dev;
1937
1938 /*
1939 * We scrub in 2 phases. The first phase waits for any platform
1940 * firmware initiated scrubs to complete and then we go search for the
1941 * affected spa regions to mark them scanned. In the second phase we
1942 * initiate a directed scrub for every range that was not scrubbed in
1943 * phase 1.
1944 */
1945
1946 /* process platform firmware initiated scrubs */
1947 retry:
1948 mutex_lock(&acpi_desc->init_mutex);
1949 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
1950 struct nd_cmd_ars_status *ars_status;
1951 struct acpi_nfit_system_address *spa;
1952 u64 ars_start, ars_len;
1953 int rc;
1954
1955 if (acpi_desc->cancel)
1956 break;
1957
1958 if (nfit_spa->nd_region)
1959 continue;
1960
1961 if (init_ars_done) {
1962 /*
1963 * No need to re-query, we're now just
1964 * reconciling all the ranges covered by the
1965 * initial scrub
1966 */
1967 rc = 0;
1968 } else
1969 rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
1970
1971 if (rc == -ENOTTY) {
1972 /* no ars capability, just register spa and move on */
1973 acpi_nfit_register_region(acpi_desc, nfit_spa);
1974 continue;
1975 }
1976
1977 if (rc == -EBUSY && !tmo) {
1978 /* fallthrough to directed scrub in phase 2 */
1979 dev_warn(dev, "timeout awaiting ars results, continuing...\n");
1980 break;
1981 } else if (rc == -EBUSY) {
1982 mutex_unlock(&acpi_desc->init_mutex);
1983 ssleep(1);
1984 tmo--;
1985 goto retry;
1986 }
1987
1988 /* we got some results, but there are more pending... */
1989 if (rc == -ENOSPC && overflow_retry--) {
1990 ars_status = acpi_desc->ars_status;
1991 /*
1992 * Record the original scrub range, so that we
1993 * can recall all the ranges impacted by the
1994 * initial scrub.
1995 */
1996 if (!init_scrub_length) {
1997 init_scrub_length = ars_status->length;
1998 init_scrub_address = ars_status->address;
1999 }
2000 rc = ars_continue(acpi_desc);
2001 if (rc == 0) {
2002 mutex_unlock(&acpi_desc->init_mutex);
2003 goto retry;
2004 }
2005 }
2006
2007 if (rc < 0) {
2008 /*
2009 * Initial scrub failed, we'll give it one more
2010 * try below...
2011 */
2012 break;
2013 }
2014
2015 /* We got some final results, record completed ranges */
2016 ars_status = acpi_desc->ars_status;
2017 if (init_scrub_length) {
2018 ars_start = init_scrub_address;
2019 ars_len = init_scrub_length;
2020 } else {
2021 ars_start = ars_status->address;
2022 ars_len = ars_status->length;
2023 }
2024 spa = nfit_spa->spa;
2025
2026 if (!init_ars_done) {
2027 init_ars_done = true;
2028 dev_dbg(dev, "init scrub %#llx + %#llx complete\n",
2029 ars_start, ars_len);
2030 }
2031 if (ars_start <= spa->address && ars_start + ars_len
2032 >= spa->address + spa->length)
2033 acpi_nfit_register_region(acpi_desc, nfit_spa);
2034 }
2035
2036 /*
2037 * For all the ranges not covered by an initial scrub we still
2038 * want to see if there are errors, but it's ok to discover them
2039 * asynchronously.
2040 */
2041 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
2042 /*
2043 * Flag all the ranges that still need scrubbing, but
2044 * register them now to make data available.
2045 */
2046 if (nfit_spa->nd_region)
2047 nfit_spa->ars_done = 1;
2048 else
2049 acpi_nfit_register_region(acpi_desc, nfit_spa);
2050 }
2051
2052 list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
2053 acpi_nfit_async_scrub(acpi_desc, nfit_spa);
2054 mutex_unlock(&acpi_desc->init_mutex);
2055}
2056
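/*
 * BLK regions are registered immediately; PMEM and volatile regions are
 * deferred to the scrub worker so poison data is in place first.
 */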
2057static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
2058{
2059 struct nfit_spa *nfit_spa;
2060 int rc;
2061
2062 list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
2063 if (nfit_spa_type(nfit_spa->spa) == NFIT_SPA_DCR) {
2064 /* BLK regions don't need to wait for ars results */
2065 rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
2066 if (rc)
2067 return rc;
2068 }
2069
2070 queue_work(nfit_wq, &acpi_desc->work);
2071 return 0;
2072}
2073
2074static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
2075 struct nfit_table_prev *prev)
2076{
2077 struct device *dev = acpi_desc->dev;
2078
2079 if (!list_empty(&prev->spas) ||
2080 !list_empty(&prev->memdevs) ||
2081 !list_empty(&prev->dcrs) ||
2082 !list_empty(&prev->bdws) ||
2083 !list_empty(&prev->idts) ||
2084 !list_empty(&prev->flushes)) {
2085 dev_err(dev, "new nfit deletes entries (unsupported)\n");
2086 return -ENXIO;
2087 }
2088 return 0;
2089}
2090
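/*
 * Parse an NFIT (or _FIT) image and register the DIMMs and regions it
 * describes.  On re-entry from a hotplug notification the tables seen so
 * far are parked on 'prev' so add_table() can spot additions; deletions
 * are not supported.
 */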
2091int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
2092{
2093 struct device *dev = acpi_desc->dev;
2094 struct nfit_table_prev prev;
2095 const void *end;
2096 u8 *data;
2097 int rc;
2098
2099 mutex_lock(&acpi_desc->init_mutex);
2100
2101 INIT_LIST_HEAD(&prev.spas);
2102 INIT_LIST_HEAD(&prev.memdevs);
2103 INIT_LIST_HEAD(&prev.dcrs);
2104 INIT_LIST_HEAD(&prev.bdws);
2105 INIT_LIST_HEAD(&prev.idts);
2106 INIT_LIST_HEAD(&prev.flushes);
2107
2108 list_cut_position(&prev.spas, &acpi_desc->spas,
2109 acpi_desc->spas.prev);
2110 list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
2111 acpi_desc->memdevs.prev);
2112 list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
2113 acpi_desc->dcrs.prev);
2114 list_cut_position(&prev.bdws, &acpi_desc->bdws,
2115 acpi_desc->bdws.prev);
2116 list_cut_position(&prev.idts, &acpi_desc->idts,
2117 acpi_desc->idts.prev);
2118 list_cut_position(&prev.flushes, &acpi_desc->flushes,
2119 acpi_desc->flushes.prev);
2120
2121 data = (u8 *) acpi_desc->nfit;
2122 end = data + sz;
2123 while (!IS_ERR_OR_NULL(data))
2124 data = add_table(acpi_desc, &prev, data, end);
2125
2126 if (IS_ERR(data)) {
2127 dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__,
2128 PTR_ERR(data));
2129 rc = PTR_ERR(data);
2130 goto out_unlock;
2131 }
2132
2133 rc = acpi_nfit_check_deletions(acpi_desc, &prev);
2134 if (rc)
2135 goto out_unlock;
2136
2137 if (nfit_mem_init(acpi_desc) != 0) {
2138 rc = -ENOMEM;
2139 goto out_unlock;
2140 }
2141
2142 acpi_nfit_init_dsms(acpi_desc);
2143
2144 rc = acpi_nfit_register_dimms(acpi_desc);
2145 if (rc)
2146 goto out_unlock;
2147
2148 rc = acpi_nfit_register_regions(acpi_desc);
2149
2150 out_unlock:
2151 mutex_unlock(&acpi_desc->init_mutex);
2152 return rc;
2153}
2154EXPORT_SYMBOL_GPL(acpi_nfit_init);
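/*
 * Rough sketch of the expected calling convention for the exported
 * entry points, modeled on acpi_nfit_add() below; 'nfit_buf' and
 * 'nfit_size' are illustrative names for an NFIT image supplied by the
 * caller (e.g. a test harness), not symbols defined in this file:
 *
 *	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
 *	acpi_nfit_desc_init(acpi_desc, dev);
 *	acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, &acpi_desc->nd_desc);
 *	acpi_desc->nfit = nfit_buf;
 *	rc = acpi_nfit_init(acpi_desc, nfit_size);
 */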
2155
2156struct acpi_nfit_flush_work {
2157 struct work_struct work;
2158 struct completion cmp;
2159};
2160
2161static void flush_probe(struct work_struct *work)
2162{
2163 struct acpi_nfit_flush_work *flush;
2164
2165 flush = container_of(work, typeof(*flush), work);
2166 complete(&flush->cmp);
2167}
2168
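/*
 * Called by the nvdimm core before probing bus children: bounce the
 * device lock to let an in-flight add/notify finish, then queue a
 * marker on the single-threaded scrub workqueue and wait (interruptibly)
 * for it, which guarantees previously queued NFIT work has run.
 */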
2169static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
2170{
2171 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
2172 struct device *dev = acpi_desc->dev;
2173 struct acpi_nfit_flush_work flush;
2174
2175 /* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
2176 device_lock(dev);
2177 device_unlock(dev);
2178
2179 /*
2180 * Scrub work could take 10s of seconds, userspace may give up so we
2181 * need to be interruptible while waiting.
2182 */
2183 INIT_WORK_ONSTACK(&flush.work, flush_probe);
2184 init_completion(&flush.cmp);
2185 queue_work(nfit_wq, &flush.work);
2186 return wait_for_completion_interruptible(&flush.cmp);
2187}
2188
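/* Common descriptor setup shared by the add and notify paths. */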
2189void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
2190{
2191 struct nvdimm_bus_descriptor *nd_desc;
2192
2193 dev_set_drvdata(dev, acpi_desc);
2194 acpi_desc->dev = dev;
2195 acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
2196 nd_desc = &acpi_desc->nd_desc;
2197 nd_desc->provider_name = "ACPI.NFIT";
2198 nd_desc->ndctl = acpi_nfit_ctl;
2199 nd_desc->flush_probe = acpi_nfit_flush_probe;
2200 nd_desc->attr_groups = acpi_nfit_attribute_groups;
2201
2202 INIT_LIST_HEAD(&acpi_desc->spa_maps);
2203 INIT_LIST_HEAD(&acpi_desc->spas);
2204 INIT_LIST_HEAD(&acpi_desc->dcrs);
2205 INIT_LIST_HEAD(&acpi_desc->bdws);
2206 INIT_LIST_HEAD(&acpi_desc->idts);
2207 INIT_LIST_HEAD(&acpi_desc->flushes);
2208 INIT_LIST_HEAD(&acpi_desc->memdevs);
2209 INIT_LIST_HEAD(&acpi_desc->dimms);
2210 mutex_init(&acpi_desc->spa_map_mutex);
2211 mutex_init(&acpi_desc->init_mutex);
2212 INIT_WORK(&acpi_desc->work, acpi_nfit_scrub);
2213}
2214EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);
2215
2216static int acpi_nfit_add(struct acpi_device *adev)
2217{
2218 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
2219 struct acpi_nfit_desc *acpi_desc;
2220 struct device *dev = &adev->dev;
2221 struct acpi_table_header *tbl;
2222 acpi_status status = AE_OK;
2223 acpi_size sz;
2224 int rc;
2225
2226 status = acpi_get_table_with_size("NFIT", 0, &tbl, &sz);
2227 if (ACPI_FAILURE(status)) {
2228 /* This is ok, we could have an nvdimm hotplugged later */
2229 dev_dbg(dev, "failed to find NFIT at startup\n");
2230 return 0;
2231 }
2232
2233 acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
2234 if (!acpi_desc)
2235 return -ENOMEM;
2236 acpi_nfit_desc_init(acpi_desc, &adev->dev);
2237 acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, &acpi_desc->nd_desc);
2238 if (!acpi_desc->nvdimm_bus)
2239 return -ENOMEM;
2240
2241 /*
2242 * Save the acpi header for later and then skip it,
2243 * making nfit point to the first nfit table header.
2244 */
2245 acpi_desc->acpi_header = *tbl;
2246 acpi_desc->nfit = (void *) tbl + sizeof(struct acpi_table_nfit);
2247 sz -= sizeof(struct acpi_table_nfit);
2248
2249 /* Evaluate _FIT and override with that if present */
2250 status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
2251 if (ACPI_SUCCESS(status) && buf.length > 0) {
2252 union acpi_object *obj;
2253 /*
2254 * Adjust for the acpi_object header of the _FIT
2255 */
2256 obj = buf.pointer;
2257 if (obj->type == ACPI_TYPE_BUFFER) {
2258 acpi_desc->nfit =
2259 (struct acpi_nfit_header *)obj->buffer.pointer;
2260 sz = obj->buffer.length;
2261 } else
2262 dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n",
2263 __func__, (int) obj->type);
2264 }
2265
2266 rc = acpi_nfit_init(acpi_desc, sz);
2267 if (rc) {
2268 nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
2269 return rc;
2270 }
2271 return 0;
2272}
2273
2274static int acpi_nfit_remove(struct acpi_device *adev)
2275{
2276 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);
2277
2278 acpi_desc->cancel = 1;
2279 flush_workqueue(nfit_wq);
2280 nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
2281 return 0;
2282}
2283
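/*
 * _FIT change notification: re-evaluate _FIT and merge any new tables,
 * creating the descriptor and bus on the fly if the NFIT was absent at
 * boot.
 */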
2284static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
2285{
2286 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);
2287 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
2288 struct acpi_nfit_header *nfit_saved;
2289 union acpi_object *obj;
2290 struct device *dev = &adev->dev;
2291 acpi_status status;
2292 int ret;
2293
2294 dev_dbg(dev, "%s: event: %d\n", __func__, event);
2295
2296 device_lock(dev);
2297 if (!dev->driver) {
2298 /* dev->driver may be null if we're being removed */
2299 dev_dbg(dev, "%s: no driver found for dev\n", __func__);
2300 goto out_unlock;
2301 }
2302
2303 if (!acpi_desc) {
2304 acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
2305 if (!acpi_desc)
2306 goto out_unlock;
2307 acpi_nfit_desc_init(acpi_desc, &adev->dev);
2308 acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, &acpi_desc->nd_desc);
2309 if (!acpi_desc->nvdimm_bus)
2310 goto out_unlock;
2311 } else {
2312 /*
2313 * Finish previous registration before considering new
2314 * regions.
2315 */
2316 flush_workqueue(nfit_wq);
2317 }
2318
2319 /* Evaluate _FIT */
2320 status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
2321 if (ACPI_FAILURE(status)) {
2322 dev_err(dev, "failed to evaluate _FIT\n");
2323 goto out_unlock;
2324 }
2325
2326 nfit_saved = acpi_desc->nfit;
2327 obj = buf.pointer;
2328 if (obj->type == ACPI_TYPE_BUFFER) {
2329 acpi_desc->nfit =
2330 (struct acpi_nfit_header *)obj->buffer.pointer;
2331 ret = acpi_nfit_init(acpi_desc, obj->buffer.length);
2332 if (ret) {
2333 /* Merge failed, restore old nfit, and exit */
2334 acpi_desc->nfit = nfit_saved;
2335 dev_err(dev, "failed to merge updated NFIT\n");
2336 }
2337 } else {
2338 /* Bad _FIT, restore old nfit */
2339 dev_err(dev, "Invalid _FIT\n");
2340 }
2341 kfree(buf.pointer);
2342
2343 out_unlock:
2344 device_unlock(dev);
2345}
2346
2347static const struct acpi_device_id acpi_nfit_ids[] = {
2348 { "ACPI0012", 0 },
2349 { "", 0 },
2350};
2351MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);
2352
2353static struct acpi_driver acpi_nfit_driver = {
2354 .name = KBUILD_MODNAME,
2355 .ids = acpi_nfit_ids,
2356 .ops = {
2357 .add = acpi_nfit_add,
2358 .remove = acpi_nfit_remove,
2359 .notify = acpi_nfit_notify,
2360 },
2361};
2362
2363static __init int nfit_init(void)
2364{
2365 BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
2366 BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
2367 BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
2368 BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
2369 BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
2370 BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
2371 BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);
2372
2373 acpi_str_to_uuid(UUID_VOLATILE_MEMORY, nfit_uuid[NFIT_SPA_VOLATILE]);
2374 acpi_str_to_uuid(UUID_PERSISTENT_MEMORY, nfit_uuid[NFIT_SPA_PM]);
2375 acpi_str_to_uuid(UUID_CONTROL_REGION, nfit_uuid[NFIT_SPA_DCR]);
2376 acpi_str_to_uuid(UUID_DATA_REGION, nfit_uuid[NFIT_SPA_BDW]);
2377 acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_VDISK]);
2378 acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD, nfit_uuid[NFIT_SPA_VCD]);
2379 acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_PDISK]);
2380 acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]);
2381 acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]);
2382 acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]);
2383
2384 nfit_wq = create_singlethread_workqueue("nfit");
2385 if (!nfit_wq)
2386 return -ENOMEM;
2387
2388 return acpi_bus_register_driver(&acpi_nfit_driver);
2389}
2390
2391static __exit void nfit_exit(void)
2392{
2393 acpi_bus_unregister_driver(&acpi_nfit_driver);
2394 destroy_workqueue(nfit_wq);
2395}
2396
2397module_init(nfit_init);
2398module_exit(nfit_exit);
2399MODULE_LICENSE("GPL v2");
2400MODULE_AUTHOR("Intel Corporation");