/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/sysfs.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/pmem.h>
#include <linux/io.h>
#include <linux/nd.h>
#include <asm/cacheflush.h>
#include "nfit.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");

static unsigned int scrub_timeout = NFIT_ARS_TIMEOUT;
module_param(scrub_timeout, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_timeout, "Initial scrub timeout in seconds");

/* after three payloads of overflow, it's dead, Jim */
static unsigned int scrub_overflow_abort = 3;
module_param(scrub_overflow_abort, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_overflow_abort,
		"Number of times we overflow ARS results before abort");

static bool disable_vendor_specific;
module_param(disable_vendor_specific, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vendor_specific,
		"Limit commands to the publicly specified set");

static unsigned long override_dsm_mask;
module_param(override_dsm_mask, ulong, S_IRUGO);
MODULE_PARM_DESC(override_dsm_mask, "Bitmask of allowed NVDIMM DSM functions");

static int default_dsm_family = -1;
module_param(default_dsm_family, int, S_IRUGO);
MODULE_PARM_DESC(default_dsm_family,
		"Try this DSM type first when identifying NVDIMM family");

LIST_HEAD(acpi_descs);
DEFINE_MUTEX(acpi_desc_lock);

static struct workqueue_struct *nfit_wq;

struct nfit_table_prev {
	struct list_head spas;
	struct list_head memdevs;
	struct list_head dcrs;
	struct list_head bdws;
	struct list_head idts;
	struct list_head flushes;
};

static u8 nfit_uuid[NFIT_UUID_MAX][16];

const u8 *to_nfit_uuid(enum nfit_uuids id)
{
	return nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);

static struct acpi_nfit_desc *to_acpi_nfit_desc(
		struct nvdimm_bus_descriptor *nd_desc)
{
	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}

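/*
 * Translate a raw _DSM status word into a Linux errno.  As the masks
 * below assume, the low 16 bits of @status carry the command completion
 * status and the high 16 bits carry command-specific extended status.
 */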
static int xlat_bus_status(void *buf, unsigned int cmd, u32 status)
{
	struct nd_cmd_clear_error *clear_err;
	struct nd_cmd_ars_status *ars_status;
	u16 flags;

	switch (cmd) {
	case ND_CMD_ARS_CAP:
		if ((status & 0xffff) == NFIT_ARS_CAP_NONE)
			return -ENOTTY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;

		/* No supported scan types for this range */
		flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
		if ((status >> 16 & flags) == 0)
			return -ENOTTY;
		return 0;
	case ND_CMD_ARS_START:
		/* ARS is in progress */
		if ((status & 0xffff) == NFIT_ARS_START_BUSY)
			return -EBUSY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		return 0;
	case ND_CMD_ARS_STATUS:
		ars_status = buf;
		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		/* Check extended status (upper two bytes) */
		if (status == NFIT_ARS_STATUS_DONE)
			return 0;

		/* ARS is in progress */
		if (status == NFIT_ARS_STATUS_BUSY)
			return -EBUSY;

		/* No ARS performed for the current boot */
		if (status == NFIT_ARS_STATUS_NONE)
			return -EAGAIN;

		/*
		 * ARS interrupted, either we overflowed or some other
		 * agent wants the scan to stop.  If we didn't overflow
		 * then just continue with the returned results.
		 */
		if (status == NFIT_ARS_STATUS_INTR) {
			if (ars_status->out_length >= 40 && (ars_status->flags
						& NFIT_ARS_F_OVERFLOW))
				return -ENOSPC;
			return 0;
		}

		/* Unknown status */
		if (status >> 16)
			return -EIO;
		return 0;
	case ND_CMD_CLEAR_ERROR:
		clear_err = buf;
		if (status & 0xffff)
			return -EIO;
		if (!clear_err->cleared)
			return -EIO;
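		/* partial success: report the number of bytes actually cleared */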
		if (clear_err->length > clear_err->cleared)
			return clear_err->cleared;
		return 0;
	default:
		break;
	}

	/* all other non-zero status results in an error */
	if (status)
		return -EIO;
	return 0;
}

static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
		u32 status)
{
	if (!nvdimm)
		return xlat_bus_status(buf, cmd, status);
	if (status)
		return -EIO;
	return 0;
}

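/*
 * acpi_nfit_ctl() marshals a libnvdimm command into a _DSM evaluation.
 * An illustrative bus-scope call (a sketch, not code from this file)
 * for querying ARS capabilities of a range might look like:
 *
 *	struct nd_cmd_ars_cap cap = {
 *		.address = spa->address,
 *		.length = spa->length,
 *	};
 *	int cmd_rc, rc;
 *
 *	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_CAP,
 *			&cap, sizeof(cap), &cmd_rc);
 *
 * where a negative rc reports a transport/envelope failure and cmd_rc
 * carries the firmware status as translated by xlat_status().
 */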
int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
		unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	union acpi_object in_obj, in_buf, *out_obj;
	const struct nd_cmd_desc *desc = NULL;
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_pkg *call_pkg = NULL;
	const char *cmd_name, *dimm_name;
	unsigned long cmd_mask, dsm_mask;
	u32 offset, fw_status = 0;
	acpi_handle handle;
	unsigned int func;
	const u8 *uuid;
	int rc, i;

	func = cmd;
	if (cmd == ND_CMD_CALL) {
		call_pkg = buf;
		func = call_pkg->nd_command;
	}

	if (nvdimm) {
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_device *adev = nfit_mem->adev;

		if (!adev)
			return -ENOTTY;
		if (call_pkg && nfit_mem->family != call_pkg->nd_family)
			return -ENOTTY;

		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		cmd_mask = nvdimm_cmd_mask(nvdimm);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		uuid = to_nfit_uuid(nfit_mem->family);
		handle = adev->handle;
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		cmd_mask = nd_desc->cmd_mask;
		dsm_mask = cmd_mask;
		desc = nd_cmd_bus_desc(cmd);
		uuid = to_nfit_uuid(NFIT_DEV_BUS);
		handle = adev->handle;
		dimm_name = "bus";
	}

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
		return -ENOTTY;

	if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
		return -ENOTTY;

	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
				i, buf);

	if (call_pkg) {
		/* skip over package wrapper */
		in_buf.buffer.pointer = (void *) &call_pkg->nd_payload;
		in_buf.buffer.length = call_pkg->nd_size_in;
	}

	dev_dbg(dev, "%s:%s cmd: %d: func: %d input length: %d\n",
			__func__, dimm_name, cmd, func, in_buf.buffer.length);
	print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4,
			in_buf.buffer.pointer,
			min_t(u32, 256, in_buf.buffer.length), true);

	out_obj = acpi_evaluate_dsm(handle, uuid, 1, func, &in_obj);
	if (!out_obj) {
		dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name,
				cmd_name);
		return -EINVAL;
	}

	if (call_pkg) {
		call_pkg->nd_fw_size = out_obj->buffer.length;
		memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
			out_obj->buffer.pointer,
			min(call_pkg->nd_fw_size, call_pkg->nd_size_out));

		ACPI_FREE(out_obj);
		/*
		 * Need to support FW function w/o known size in advance.
		 * Caller can determine required size based upon nd_fw_size.
		 * If we return an error (like elsewhere) then caller wouldn't
		 * be able to rely upon data returned to make calculation.
		 */
		return 0;
	}

	if (out_obj->package.type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n",
				__func__, dimm_name, cmd_name, out_obj->type);
		rc = -EINVAL;
		goto out;
	}

	dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__, dimm_name,
			cmd_name, out_obj->buffer.length);
	print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
			out_obj->buffer.pointer,
			min_t(u32, 128, out_obj->buffer.length), true);

	for (i = 0, offset = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
				(u32 *) out_obj->buffer.pointer,
				out_obj->buffer.length - offset);

		if (offset + out_size > out_obj->buffer.length) {
			dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			break;
		}

		if (in_buf.buffer.length + offset + out_size > buf_len) {
			dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		memcpy(buf + in_buf.buffer.length + offset,
				out_obj->buffer.pointer + offset, out_size);
		offset += out_size;
	}

	/*
	 * Set fw_status for all the commands with a known format to be
	 * later interpreted by xlat_status().
	 */
	if (i >= 1 && ((cmd >= ND_CMD_ARS_CAP && cmd <= ND_CMD_CLEAR_ERROR)
			|| (cmd >= ND_CMD_SMART && cmd <= ND_CMD_VENDOR)))
		fw_status = *(u32 *) out_obj->buffer.pointer;

	if (offset + in_buf.buffer.length < buf_len) {
		if (i >= 1) {
			/*
			 * status valid, return the number of bytes left
			 * unfilled in the output buffer
			 */
			rc = buf_len - offset - in_buf.buffer.length;
			if (cmd_rc)
				*cmd_rc = xlat_status(nvdimm, buf, cmd,
						fw_status);
		} else {
			dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
					__func__, dimm_name, cmd_name, buf_len,
					offset);
			rc = -ENXIO;
		}
	} else {
		rc = 0;
		if (cmd_rc)
			*cmd_rc = xlat_status(nvdimm, buf, cmd, fw_status);
	}

 out:
	ACPI_FREE(out_obj);

	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_ctl);

static const char *spa_type_name(u16 type)
{
	static const char *to_name[] = {
		[NFIT_SPA_VOLATILE] = "volatile",
		[NFIT_SPA_PM] = "pmem",
		[NFIT_SPA_DCR] = "dimm-control-region",
		[NFIT_SPA_BDW] = "block-data-window",
		[NFIT_SPA_VDISK] = "volatile-disk",
		[NFIT_SPA_VCD] = "volatile-cd",
		[NFIT_SPA_PDISK] = "persistent-disk",
		[NFIT_SPA_PCD] = "persistent-cd",
	};

	if (type > NFIT_SPA_PCD)
		return "unknown";

	return to_name[type];
}

int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
	int i;

	for (i = 0; i < NFIT_UUID_MAX; i++)
		if (memcmp(to_nfit_uuid(i), spa->range_guid, 16) == 0)
			return i;
	return -1;
}

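/*
 * Each add_* helper below first checks the tables parsed from the
 * previous NFIT (@prev) so that entries that are unchanged across an
 * NFIT update are moved to the new list rather than reallocated.
 */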
static bool add_spa(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_system_address *spa)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	if (spa->header.length != sizeof(*spa))
		return false;

	list_for_each_entry(nfit_spa, &prev->spas, list) {
		if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
			return true;
		}
	}

	nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa),
			GFP_KERNEL);
	if (!nfit_spa)
		return false;
	INIT_LIST_HEAD(&nfit_spa->list);
	memcpy(nfit_spa->spa, spa, sizeof(*spa));
	list_add_tail(&nfit_spa->list, &acpi_desc->spas);
	dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__,
			spa->range_index,
			spa_type_name(nfit_spa_type(spa)));
	return true;
}

static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_memory_map *memdev)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_memdev *nfit_memdev;

	if (memdev->header.length != sizeof(*memdev))
		return false;

	list_for_each_entry(nfit_memdev, &prev->memdevs, list)
		if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) {
			list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
			return true;
		}

	nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev) + sizeof(*memdev),
			GFP_KERNEL);
	if (!nfit_memdev)
		return false;
	INIT_LIST_HEAD(&nfit_memdev->list);
	memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev));
	list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
	dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d flags: %#x\n",
			__func__, memdev->device_handle, memdev->range_index,
			memdev->region_index, memdev->flags);
	return true;
}

/*
 * An implementation may provide a truncated control region if no block windows
 * are defined.
 */
static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr)
{
	if (dcr->header.length < offsetof(struct acpi_nfit_control_region,
				window_size))
		return 0;
	if (dcr->windows)
		return sizeof(*dcr);
	return offsetof(struct acpi_nfit_control_region, window_size);
}

static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_control_region *dcr)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_dcr *nfit_dcr;

	if (!sizeof_dcr(dcr))
		return false;

	list_for_each_entry(nfit_dcr, &prev->dcrs, list)
		if (memcmp(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)) == 0) {
			list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
			return true;
		}

	nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr) + sizeof(*dcr),
			GFP_KERNEL);
	if (!nfit_dcr)
		return false;
	INIT_LIST_HEAD(&nfit_dcr->list);
	memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr));
	list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
	dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__,
			dcr->region_index, dcr->windows);
	return true;
}

static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_data_region *bdw)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_bdw *nfit_bdw;

	if (bdw->header.length != sizeof(*bdw))
		return false;
	list_for_each_entry(nfit_bdw, &prev->bdws, list)
		if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) {
			list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
			return true;
		}

	nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw) + sizeof(*bdw),
			GFP_KERNEL);
	if (!nfit_bdw)
		return false;
	INIT_LIST_HEAD(&nfit_bdw->list);
	memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw));
	list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
	dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__,
			bdw->region_index, bdw->windows);
	return true;
}

static size_t sizeof_idt(struct acpi_nfit_interleave *idt)
{
	if (idt->header.length < sizeof(*idt))
		return 0;
	return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1);
}

static bool add_idt(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_interleave *idt)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_idt *nfit_idt;

	if (!sizeof_idt(idt))
		return false;

	list_for_each_entry(nfit_idt, &prev->idts, list) {
		if (sizeof_idt(nfit_idt->idt) != sizeof_idt(idt))
			continue;

		if (memcmp(nfit_idt->idt, idt, sizeof_idt(idt)) == 0) {
			list_move_tail(&nfit_idt->list, &acpi_desc->idts);
			return true;
		}
	}

	nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt) + sizeof_idt(idt),
			GFP_KERNEL);
	if (!nfit_idt)
		return false;
	INIT_LIST_HEAD(&nfit_idt->list);
	memcpy(nfit_idt->idt, idt, sizeof_idt(idt));
	list_add_tail(&nfit_idt->list, &acpi_desc->idts);
	dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__,
			idt->interleave_index, idt->line_count);
	return true;
}

static size_t sizeof_flush(struct acpi_nfit_flush_address *flush)
{
	if (flush->header.length < sizeof(*flush))
		return 0;
	return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1);
}

static bool add_flush(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_flush_address *flush)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_flush *nfit_flush;

	if (!sizeof_flush(flush))
		return false;

	list_for_each_entry(nfit_flush, &prev->flushes, list) {
		if (sizeof_flush(nfit_flush->flush) != sizeof_flush(flush))
			continue;

		if (memcmp(nfit_flush->flush, flush,
					sizeof_flush(flush)) == 0) {
			list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
			return true;
		}
	}

	nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush)
			+ sizeof_flush(flush), GFP_KERNEL);
	if (!nfit_flush)
		return false;
	INIT_LIST_HEAD(&nfit_flush->list);
	memcpy(nfit_flush->flush, flush, sizeof_flush(flush));
	list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
	dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__,
			flush->device_handle, flush->hint_count);
	return true;
}

static void *add_table(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev, void *table, const void *end)
{
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_header *hdr;
	void *err = ERR_PTR(-ENOMEM);

	if (table >= end)
		return NULL;

	hdr = table;
	if (!hdr->length) {
		dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
				hdr->type);
		return NULL;
	}

	switch (hdr->type) {
	case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
		if (!add_spa(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_MEMORY_MAP:
		if (!add_memdev(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_CONTROL_REGION:
		if (!add_dcr(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_DATA_REGION:
		if (!add_bdw(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_INTERLEAVE:
		if (!add_idt(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
		if (!add_flush(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_SMBIOS:
		dev_dbg(dev, "%s: smbios\n", __func__);
		break;
	default:
		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
		break;
	}

	return table + hdr->length;
}

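/*
 * Find the SPA-BDW (block-data-window) range that maps the same device
 * handle and control-region index as this dimm's DCR.
 */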
static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem)
{
	u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
	u16 dcr = nfit_mem->dcr->region_index;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		u16 range_index = nfit_spa->spa->range_index;
		int type = nfit_spa_type(nfit_spa->spa);
		struct nfit_memdev *nfit_memdev;

		if (type != NFIT_SPA_BDW)
			continue;

		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			if (nfit_memdev->memdev->range_index != range_index)
				continue;
			if (nfit_memdev->memdev->device_handle != device_handle)
				continue;
			if (nfit_memdev->memdev->region_index != dcr)
				continue;

			nfit_mem->spa_bdw = nfit_spa->spa;
			return;
		}
	}

	dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
			nfit_mem->spa_dcr->range_index);
	nfit_mem->bdw = NULL;
}

static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
	struct nfit_memdev *nfit_memdev;
	struct nfit_bdw *nfit_bdw;
	struct nfit_idt *nfit_idt;
	u16 idt_idx, range_index;

	list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
		if (nfit_bdw->bdw->region_index != dcr)
			continue;
		nfit_mem->bdw = nfit_bdw->bdw;
		break;
	}

	if (!nfit_mem->bdw)
		return;

	nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);

	if (!nfit_mem->spa_bdw)
		return;

	range_index = nfit_mem->spa_bdw->range_index;
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		if (nfit_memdev->memdev->range_index != range_index ||
				nfit_memdev->memdev->region_index != dcr)
			continue;
		nfit_mem->memdev_bdw = nfit_memdev->memdev;
		idt_idx = nfit_memdev->memdev->interleave_index;
		list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
			if (nfit_idt->idt->interleave_index != idt_idx)
				continue;
			nfit_mem->idt_bdw = nfit_idt->idt;
			break;
		}
		break;
	}
}

static int __nfit_mem_init(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_mem *nfit_mem, *found;
	struct nfit_memdev *nfit_memdev;
	int type = spa ? nfit_spa_type(spa) : 0;

	switch (type) {
	case NFIT_SPA_DCR:
	case NFIT_SPA_PM:
		break;
	default:
		if (spa)
			return 0;
	}

	/*
	 * This loop runs in two modes: when a dimm is mapped the loop
	 * adds memdev associations to an existing dimm, or creates a
	 * dimm.  In the unmapped dimm case this loop sweeps for memdev
	 * instances with an invalid / zero range_index and adds those
	 * dimms without spa associations.
	 */
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct nfit_flush *nfit_flush;
		struct nfit_dcr *nfit_dcr;
		u32 device_handle;
		u16 dcr;

		if (spa && nfit_memdev->memdev->range_index != spa->range_index)
			continue;
		if (!spa && nfit_memdev->memdev->range_index)
			continue;
		found = NULL;
		dcr = nfit_memdev->memdev->region_index;
		device_handle = nfit_memdev->memdev->device_handle;
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
			if (__to_nfit_memdev(nfit_mem)->device_handle
					== device_handle) {
				found = nfit_mem;
				break;
			}

		if (found)
			nfit_mem = found;
		else {
			nfit_mem = devm_kzalloc(acpi_desc->dev,
					sizeof(*nfit_mem), GFP_KERNEL);
			if (!nfit_mem)
				return -ENOMEM;
			INIT_LIST_HEAD(&nfit_mem->list);
			nfit_mem->acpi_desc = acpi_desc;
			list_add(&nfit_mem->list, &acpi_desc->dimms);
		}

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != dcr)
				continue;
			/*
			 * Record the control region for the dimm.  For
			 * the ACPI 6.1 case, where there are separate
			 * control regions for the pmem vs blk
			 * interfaces, be sure to record the extended
			 * blk details.
			 */
			if (!nfit_mem->dcr)
				nfit_mem->dcr = nfit_dcr->dcr;
			else if (nfit_mem->dcr->windows == 0
					&& nfit_dcr->dcr->windows)
				nfit_mem->dcr = nfit_dcr->dcr;
			break;
		}

		list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
			struct acpi_nfit_flush_address *flush;
			u16 i;

			if (nfit_flush->flush->device_handle != device_handle)
				continue;
			nfit_mem->nfit_flush = nfit_flush;
			flush = nfit_flush->flush;
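			/* reserve one 8-byte register range per flush hint */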
			nfit_mem->flush_wpq = devm_kzalloc(acpi_desc->dev,
					flush->hint_count
					* sizeof(struct resource), GFP_KERNEL);
			if (!nfit_mem->flush_wpq)
				return -ENOMEM;
			for (i = 0; i < flush->hint_count; i++) {
				struct resource *res = &nfit_mem->flush_wpq[i];

				res->start = flush->hint_address[i];
				res->end = res->start + 8 - 1;
			}
			break;
		}

		if (dcr && !nfit_mem->dcr) {
			/* note: spa may be NULL in the unmapped-dimm sweep */
			dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
					spa ? spa->range_index : 0, dcr);
			return -ENODEV;
		}

		if (type == NFIT_SPA_DCR) {
			struct nfit_idt *nfit_idt;
			u16 idt_idx;

			/* multiple dimms may share a SPA when interleaved */
			nfit_mem->spa_dcr = spa;
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
			idt_idx = nfit_memdev->memdev->interleave_index;
			list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
				if (nfit_idt->idt->interleave_index != idt_idx)
					continue;
				nfit_mem->idt_dcr = nfit_idt->idt;
				break;
			}
			nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
		} else if (type == NFIT_SPA_PM) {
			/*
			 * A single dimm may belong to multiple SPA-PM
			 * ranges, record at least one in addition to
			 * any SPA-DCR range.
			 */
			nfit_mem->memdev_pmem = nfit_memdev->memdev;
		} else
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
	}

	return 0;
}

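/* list_sort() comparator: keep the dimm list ordered by device handle */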
static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
{
	struct nfit_mem *a = container_of(_a, typeof(*a), list);
	struct nfit_mem *b = container_of(_b, typeof(*b), list);
	u32 handleA, handleB;

	handleA = __to_nfit_memdev(a)->device_handle;
	handleB = __to_nfit_memdev(b)->device_handle;
	if (handleA < handleB)
		return -1;
	else if (handleA > handleB)
		return 1;
	return 0;
}

static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;
	int rc;

	/*
	 * For each SPA-DCR or SPA-PMEM address range find its
	 * corresponding MEMDEV(s).  From each MEMDEV find the
	 * corresponding DCR.  Then, if we're operating on a SPA-DCR,
	 * try to find a SPA-BDW and a corresponding BDW that references
	 * the DCR.  Throw it all into an nfit_mem object.  Note that
	 * BDWs are optional.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		rc = __nfit_mem_init(acpi_desc, nfit_spa->spa);
		if (rc)
			return rc;
	}

	/*
	 * If a DIMM has failed to be mapped into SPA there will be no
	 * SPA entries above.  Find and register all the unmapped DIMMs
	 * for reporting and recovery purposes.
	 */
	rc = __nfit_mem_init(acpi_desc, NULL);
	if (rc)
		return rc;

	list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);

	return 0;
}

static ssize_t revision_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
}
static DEVICE_ATTR_RO(revision);

static ssize_t hw_error_scrub_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->scrub_mode);
}

/*
 * The 'hw_error_scrub' attribute can have the following values written to it:
 * '0': Switch to the default mode where an exception will only insert
 *      the address of the memory error into the poison and badblocks lists.
 * '1': Enable a full scrub to happen if an exception for a memory error is
 *      received.
 */
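/* e.g. (illustrative path): echo 1 > /sys/bus/nd/devices/ndbus0/nfit/hw_error_scrub */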
static ssize_t hw_error_scrub_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc;
	long val;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		switch (val) {
		case HW_ERROR_SCRUB_ON:
			acpi_desc->scrub_mode = HW_ERROR_SCRUB_ON;
			break;
		case HW_ERROR_SCRUB_OFF:
			acpi_desc->scrub_mode = HW_ERROR_SCRUB_OFF;
			break;
		default:
			rc = -EINVAL;
			break;
		}
	}
	device_unlock(dev);
	if (rc)
		return rc;
	return size;
}
static DEVICE_ATTR_RW(hw_error_scrub);

/*
 * This shows the number of full Address Range Scrubs that have been
 * completed since driver load time.  Userspace can wait on this using
 * select/poll etc.  A '+' at the end indicates an ARS is in progress.
 */
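/* e.g. a read returns "3\n" after three scrubs, or "3+\n" while one is running */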
static ssize_t scrub_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc = -ENXIO;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
				(work_busy(&acpi_desc->work)) ? "+\n" : "\n");
	}
	device_unlock(dev);
	return rc;
}

static ssize_t scrub_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc;
	long val;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;
	if (val != 1)
		return -EINVAL;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		rc = acpi_nfit_ars_rescan(acpi_desc);
	}
	device_unlock(dev);
	if (rc)
		return rc;
	return size;
}
static DEVICE_ATTR_RW(scrub);

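/* ARS is usable only if the cap, start, and status commands are all present */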
static bool ars_supported(struct nvdimm_bus *nvdimm_bus)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START
		| 1 << ND_CMD_ARS_STATUS;

	return (nd_desc->cmd_mask & mask) == mask;
}

static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

	if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus))
		return 0;
	return a->mode;
}

static struct attribute *acpi_nfit_attributes[] = {
	&dev_attr_revision.attr,
	&dev_attr_scrub.attr,
	&dev_attr_hw_error_scrub.attr,
	NULL,
};

static struct attribute_group acpi_nfit_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_attributes,
	.is_visible = nfit_visible,
};

static const struct attribute_group *acpi_nfit_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&acpi_nfit_attribute_group,
	NULL,
};

static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return __to_nfit_memdev(nfit_mem);
}

static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return nfit_mem->dcr;
}

static ssize_t handle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->device_handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t phys_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id));
}
static DEVICE_ATTR_RO(vendor);

static ssize_t rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id));
}
static DEVICE_ATTR_RO(rev_id);

static ssize_t device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id));
}
static DEVICE_ATTR_RO(device);

static ssize_t subsystem_vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id));
}
static DEVICE_ATTR_RO(subsystem_vendor);

static ssize_t subsystem_rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n",
			be16_to_cpu(dcr->subsystem_revision_id));
}
static DEVICE_ATTR_RO(subsystem_rev_id);

static ssize_t subsystem_device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id));
}
static DEVICE_ATTR_RO(subsystem_device);

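/* a dimm may publish both a pmem and a blk (bdw) interface format */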
static int num_nvdimm_formats(struct nvdimm *nvdimm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	int formats = 0;

	if (nfit_mem->memdev_pmem)
		formats++;
	if (nfit_mem->memdev_bdw)
		formats++;
	return formats;
}

static ssize_t format_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code));
}
static DEVICE_ATTR_RO(format);

static ssize_t format1_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 handle;
	ssize_t rc = -ENXIO;
	struct nfit_mem *nfit_mem;
	struct nfit_memdev *nfit_memdev;
	struct acpi_nfit_desc *acpi_desc;
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	nfit_mem = nvdimm_provider_data(nvdimm);
	acpi_desc = nfit_mem->acpi_desc;
	handle = to_nfit_memdev(dev)->device_handle;

	/* assumes DIMMs have at most 2 published interface codes */
	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nfit_dcr *nfit_dcr;

		if (memdev->device_handle != handle)
			continue;

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != memdev->region_index)
				continue;
			if (nfit_dcr->dcr->code == dcr->code)
				continue;
			rc = sprintf(buf, "0x%04x\n",
					le16_to_cpu(nfit_dcr->dcr->code));
			break;
		}
		/* keep scanning memdevs until a second interface code is found */
		if (rc != -ENXIO)
			break;
	}
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
static DEVICE_ATTR_RO(format1);

static ssize_t formats_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm));
}
static DEVICE_ATTR_RO(formats);

static ssize_t serial_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number));
}
static DEVICE_ATTR_RO(serial);

static ssize_t family_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem->family < 0)
		return -ENXIO;
	return sprintf(buf, "%d\n", nfit_mem->family);
}
static DEVICE_ATTR_RO(family);

static ssize_t dsm_mask_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem->family < 0)
		return -ENXIO;
	return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask);
}
static DEVICE_ATTR_RO(dsm_mask);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u16 flags = to_nfit_memdev(dev)->flags;

	return sprintf(buf, "%s%s%s%s%s%s%s\n",
		flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
		flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
		flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
		flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
		flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "",
		flags & ACPI_NFIT_MEM_MAP_FAILED ? "map_fail " : "",
		flags & ACPI_NFIT_MEM_HEALTH_ENABLED ? "smart_notify " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID)
		return sprintf(buf, "%04x-%02x-%04x-%08x\n",
				be16_to_cpu(dcr->vendor_id),
				dcr->manufacturing_location,
				be16_to_cpu(dcr->manufacturing_date),
				be32_to_cpu(dcr->serial_number));
	else
		return sprintf(buf, "%04x-%08x\n",
				be16_to_cpu(dcr->vendor_id),
				be32_to_cpu(dcr->serial_number));
}
static DEVICE_ATTR_RO(id);

static struct attribute *acpi_nfit_dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_rev_id.attr,
	&dev_attr_subsystem_vendor.attr,
	&dev_attr_subsystem_device.attr,
	&dev_attr_subsystem_rev_id.attr,
	&dev_attr_format.attr,
	&dev_attr_formats.attr,
	&dev_attr_format1.attr,
	&dev_attr_serial.attr,
	&dev_attr_flags.attr,
	&dev_attr_id.attr,
	&dev_attr_family.attr,
	&dev_attr_dsm_mask.attr,
	NULL,
};

static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!to_nfit_dcr(dev)) {
		/* Without a dcr only the memdev attributes can be surfaced */
		if (a == &dev_attr_handle.attr || a == &dev_attr_phys_id.attr
				|| a == &dev_attr_flags.attr
				|| a == &dev_attr_family.attr
				|| a == &dev_attr_dsm_mask.attr)
			return a->mode;
		return 0;
	}

	if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1)
		return 0;
	return a->mode;
}

static struct attribute_group acpi_nfit_dimm_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_dimm_attributes,
	.is_visible = acpi_nfit_dimm_attr_visible,
};

static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
	&nvdimm_attribute_group,
	&nd_device_attribute_group,
	&acpi_nfit_dimm_attribute_group,
	NULL,
};

static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
		u32 device_handle)
{
	struct nfit_mem *nfit_mem;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
		if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
			return nfit_mem->nvdimm;

	return NULL;
}

void __acpi_nvdimm_notify(struct device *dev, u32 event)
{
	struct nfit_mem *nfit_mem;
	struct acpi_nfit_desc *acpi_desc;

	dev_dbg(dev->parent, "%s: %s: event: %d\n", dev_name(dev), __func__,
			event);

	if (event != NFIT_NOTIFY_DIMM_HEALTH) {
		dev_dbg(dev->parent, "%s: unknown event: %d\n", dev_name(dev),
				event);
		return;
	}

	acpi_desc = dev_get_drvdata(dev->parent);
	if (!acpi_desc)
		return;

	/*
	 * If we successfully retrieved acpi_desc, then we know nfit_mem data
	 * is still valid.
	 */
	nfit_mem = dev_get_drvdata(dev);
	if (nfit_mem && nfit_mem->flags_attr)
		sysfs_notify_dirent(nfit_mem->flags_attr);
}
EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify);

static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data)
{
	struct acpi_device *adev = data;
	struct device *dev = &adev->dev;

	device_lock(dev->parent);
	__acpi_nvdimm_notify(dev, event);
	device_unlock(dev->parent);
}

static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, u32 device_handle)
{
	struct acpi_device *adev, *adev_dimm;
	struct device *dev = acpi_desc->dev;
	unsigned long dsm_mask;
	const u8 *uuid;
	int i;
	int family = -1;

	/* nfit test assumes 1:1 relationship between commands and dsms */
	nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
	nfit_mem->family = NVDIMM_FAMILY_INTEL;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return 0;

	adev_dimm = acpi_find_child_device(adev, device_handle, false);
	nfit_mem->adev = adev_dimm;
	if (!adev_dimm) {
		dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
				device_handle);
		return force_enable_dimms ? 0 : -ENODEV;
	}

	if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm->handle,
			ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify, adev_dimm))) {
		dev_err(dev, "%s: notification registration failed\n",
				dev_name(&adev_dimm->dev));
		return -ENXIO;
	}

Dan Williams31eca762016-04-28 16:23:43 -07001429 /*
stuart hayese02fb722016-05-26 11:38:41 -05001430 * Until standardization materializes we need to consider 4
Dan Williamsa7225592016-07-19 12:32:39 -07001431	 * different command sets.  Note that checking for function 0 (bit 0)
1432 * tells us if any commands are reachable through this uuid.
Dan Williams31eca762016-04-28 16:23:43 -07001433 */
stuart hayese02fb722016-05-26 11:38:41 -05001434 for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_MSFT; i++)
Dan Williamsa7225592016-07-19 12:32:39 -07001435 if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
Linda Knippersba650cf2017-03-07 16:35:13 -05001436 if (family < 0 || i == default_dsm_family)
1437 family = i;
Dan Williams31eca762016-04-28 16:23:43 -07001438
1439 /* limit the supported commands to those that are publicly documented */
Linda Knippersba650cf2017-03-07 16:35:13 -05001440 nfit_mem->family = family;
Linda Knippers095ab4b2017-03-07 16:35:12 -05001441 if (override_dsm_mask && !disable_vendor_specific)
1442 dsm_mask = override_dsm_mask;
1443 else if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
Dan Williams31eca762016-04-28 16:23:43 -07001444 dsm_mask = 0x3fe;
Dan Williams87554092016-04-28 18:01:20 -07001445 if (disable_vendor_specific)
1446 dsm_mask &= ~(1 << ND_CMD_VENDOR);
stuart hayese02fb722016-05-26 11:38:41 -05001447 } else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) {
Dan Williams31eca762016-04-28 16:23:43 -07001448 dsm_mask = 0x1c3c76;
stuart hayese02fb722016-05-26 11:38:41 -05001449 } else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) {
Dan Williams31eca762016-04-28 16:23:43 -07001450 dsm_mask = 0x1fe;
Dan Williams87554092016-04-28 18:01:20 -07001451 if (disable_vendor_specific)
1452 dsm_mask &= ~(1 << 8);
stuart hayese02fb722016-05-26 11:38:41 -05001453 } else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) {
1454 dsm_mask = 0xffffffff;
Dan Williams87554092016-04-28 18:01:20 -07001455 } else {
Dan Williamsa7225592016-07-19 12:32:39 -07001456 dev_dbg(dev, "unknown dimm command family\n");
Dan Williams31eca762016-04-28 16:23:43 -07001457 nfit_mem->family = -1;
Dan Williamsa7225592016-07-19 12:32:39 -07001458 /* DSMs are optional, continue loading the driver... */
1459 return 0;
Dan Williams31eca762016-04-28 16:23:43 -07001460 }
1461
1462 uuid = to_nfit_uuid(nfit_mem->family);
1463 for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
Dan Williams62232e452015-06-08 14:27:06 -04001464 if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i))
1465 set_bit(i, &nfit_mem->dsm_mask);
1466
Linda Knippers60e95f42015-07-22 16:17:22 -04001467 return 0;
Dan Williams62232e452015-06-08 14:27:06 -04001468}
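
/*
 * Worked example of the probe above, with invented firmware behavior:
 * if a DIMM answers function 0 only for the NVDIMM_FAMILY_INTEL uuid,
 * family resolves to NVDIMM_FAMILY_INTEL and dsm_mask starts at 0x3fe
 * (functions 1-9). The for_each_set_bit() loop in acpi_nfit_add_dimm()
 * then queries each function individually, so a firmware implementing
 * only functions 4-6 leaves nfit_mem->dsm_mask == 0x70. Values are
 * illustrative, not from real hardware.
 */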
1469
Dan Williamsba9c8dd2016-08-22 19:28:37 -07001470static void shutdown_dimm_notify(void *data)
1471{
1472 struct acpi_nfit_desc *acpi_desc = data;
1473 struct nfit_mem *nfit_mem;
1474
1475 mutex_lock(&acpi_desc->init_mutex);
1476 /*
1477 * Clear out the nfit_mem->flags_attr and shut down dimm event
1478 * notifications.
1479 */
1480 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
Dan Williams231bf112016-08-22 19:23:25 -07001481 struct acpi_device *adev_dimm = nfit_mem->adev;
1482
Dan Williamsba9c8dd2016-08-22 19:28:37 -07001483 if (nfit_mem->flags_attr) {
1484 sysfs_put(nfit_mem->flags_attr);
1485 nfit_mem->flags_attr = NULL;
1486 }
Dan Williams231bf112016-08-22 19:23:25 -07001487 if (adev_dimm)
1488 acpi_remove_notify_handler(adev_dimm->handle,
1489 ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify);
Dan Williamsba9c8dd2016-08-22 19:28:37 -07001490 }
1491 mutex_unlock(&acpi_desc->init_mutex);
1492}
1493
Dan Williamse6dfb2d2015-04-25 03:56:17 -04001494static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
1495{
1496 struct nfit_mem *nfit_mem;
Dan Williamsba9c8dd2016-08-22 19:28:37 -07001497 int dimm_count = 0, rc;
1498 struct nvdimm *nvdimm;
Dan Williamse6dfb2d2015-04-25 03:56:17 -04001499
1500 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
Dan Williamse5ae3b22016-06-07 17:00:04 -07001501 struct acpi_nfit_flush_address *flush;
Dan Williams31eca762016-04-28 16:23:43 -07001502 unsigned long flags = 0, cmd_mask;
Dan Williamscaa603a2017-04-14 10:27:11 -07001503 struct nfit_memdev *nfit_memdev;
Dan Williamse6dfb2d2015-04-25 03:56:17 -04001504 u32 device_handle;
Dan Williams58138822015-06-23 20:08:34 -04001505 u16 mem_flags;
Dan Williamse6dfb2d2015-04-25 03:56:17 -04001506
1507 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
1508 nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
1509 if (nvdimm) {
Vishal Verma20985162015-10-27 16:58:27 -06001510 dimm_count++;
Dan Williamse6dfb2d2015-04-25 03:56:17 -04001511 continue;
1512 }
1513
1514 if (nfit_mem->bdw && nfit_mem->memdev_pmem)
Dan Williams8f078b32017-05-04 14:01:24 -07001515 set_bit(NDD_ALIASING, &flags);
Dan Williamse6dfb2d2015-04-25 03:56:17 -04001516
Dan Williamscaa603a2017-04-14 10:27:11 -07001517 /* collate flags across all memdevs for this dimm */
1518 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
1519 struct acpi_nfit_memory_map *dimm_memdev;
1520
1521 dimm_memdev = __to_nfit_memdev(nfit_mem);
1522 if (dimm_memdev->device_handle
1523 != nfit_memdev->memdev->device_handle)
1524 continue;
1525 dimm_memdev->flags |= nfit_memdev->memdev->flags;
1526 }
1527
Dan Williams58138822015-06-23 20:08:34 -04001528 mem_flags = __to_nfit_memdev(nfit_mem)->flags;
Bob Mooreca321d12015-10-19 10:24:52 +08001529 if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
Dan Williams8f078b32017-05-04 14:01:24 -07001530 set_bit(NDD_UNARMED, &flags);
Dan Williams58138822015-06-23 20:08:34 -04001531
Dan Williams62232e452015-06-08 14:27:06 -04001532 rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
1533 if (rc)
1534 continue;
1535
Dan Williamse3654ec2016-04-28 16:17:07 -07001536 /*
Dan Williams31eca762016-04-28 16:23:43 -07001537 * TODO: provide translation for non-NVDIMM_FAMILY_INTEL
1538 * devices (i.e. from nd_cmd to acpi_dsm) to standardize the
1539 * userspace interface.
Dan Williamse3654ec2016-04-28 16:17:07 -07001540 */
Dan Williams31eca762016-04-28 16:23:43 -07001541 cmd_mask = 1UL << ND_CMD_CALL;
1542 if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
1543 cmd_mask |= nfit_mem->dsm_mask;
1544
Dan Williamse5ae3b22016-06-07 17:00:04 -07001545 flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush
1546 : NULL;
Dan Williamse6dfb2d2015-04-25 03:56:17 -04001547 nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
Dan Williams62232e452015-06-08 14:27:06 -04001548 acpi_nfit_dimm_attribute_groups,
Dan Williamse5ae3b22016-06-07 17:00:04 -07001549 flags, cmd_mask, flush ? flush->hint_count : 0,
1550 nfit_mem->flush_wpq);
Dan Williamse6dfb2d2015-04-25 03:56:17 -04001551 if (!nvdimm)
1552 return -ENOMEM;
1553
1554 nfit_mem->nvdimm = nvdimm;
Dan Williams4d88a972015-05-31 14:41:48 -04001555 dimm_count++;
Dan Williams58138822015-06-23 20:08:34 -04001556
1557 if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
1558 continue;
1559
Dan Williams14999342017-04-13 19:46:36 -07001560 dev_info(acpi_desc->dev, "%s flags:%s%s%s%s%s\n",
Dan Williams58138822015-06-23 20:08:34 -04001561 nvdimm_name(nvdimm),
Toshi Kani402bae52015-08-26 10:20:23 -06001562 mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
1563 mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
1564 mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
Dan Williams14999342017-04-13 19:46:36 -07001565 mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "",
1566 mem_flags & ACPI_NFIT_MEM_MAP_FAILED ? " map_fail" : "");
Dan Williams58138822015-06-23 20:08:34 -04001567
Dan Williamse6dfb2d2015-04-25 03:56:17 -04001568 }
1569
Dan Williamsba9c8dd2016-08-22 19:28:37 -07001570 rc = nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
1571 if (rc)
1572 return rc;
1573
1574 /*
1575 * Now that dimms are successfully registered, and async registration
1576 * is flushed, attempt to enable event notification.
1577 */
1578 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
1579 struct kernfs_node *nfit_kernfs;
1580
1581 nvdimm = nfit_mem->nvdimm;
1582 nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit");
1583 if (nfit_kernfs)
1584 nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs,
1585 "flags");
1586 sysfs_put(nfit_kernfs);
1587 if (!nfit_mem->flags_attr)
1588 dev_warn(acpi_desc->dev, "%s: notifications disabled\n",
1589 nvdimm_name(nvdimm));
1590 }
1591
1592 return devm_add_action_or_reset(acpi_desc->dev, shutdown_dimm_notify,
1593 acpi_desc);
Dan Williamse6dfb2d2015-04-25 03:56:17 -04001594}
1595
Dan Williams62232e452015-06-08 14:27:06 -04001596static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
1597{
1598 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
1599 const u8 *uuid = to_nfit_uuid(NFIT_DEV_BUS);
1600 struct acpi_device *adev;
1601 int i;
1602
Dan Williamse3654ec2016-04-28 16:17:07 -07001603 nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
Dan Williams62232e452015-06-08 14:27:06 -04001604 adev = to_acpi_dev(acpi_desc);
1605 if (!adev)
1606 return;
1607
Dan Williamsd4f32362016-03-03 16:08:54 -08001608 for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
Dan Williams62232e452015-06-08 14:27:06 -04001609 if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i))
Dan Williamse3654ec2016-04-28 16:17:07 -07001610 set_bit(i, &nd_desc->cmd_mask);
Dan Williams62232e452015-06-08 14:27:06 -04001611}
1612
Dan Williams1f7df6f2015-06-09 20:13:14 -04001613static ssize_t range_index_show(struct device *dev,
1614 struct device_attribute *attr, char *buf)
1615{
1616 struct nd_region *nd_region = to_nd_region(dev);
1617 struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);
1618
1619 return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
1620}
1621static DEVICE_ATTR_RO(range_index);
1622
1623static struct attribute *acpi_nfit_region_attributes[] = {
1624 &dev_attr_range_index.attr,
1625 NULL,
1626};
1627
1628static struct attribute_group acpi_nfit_region_attribute_group = {
1629 .name = "nfit",
1630 .attrs = acpi_nfit_region_attributes,
1631};
1632
1633static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
1634 &nd_region_attribute_group,
1635 &nd_mapping_attribute_group,
Dan Williams3d880022015-05-31 15:02:11 -04001636 &nd_device_attribute_group,
Toshi Kani74ae66c2015-06-19 12:18:34 -06001637 &nd_numa_attribute_group,
Dan Williams1f7df6f2015-06-09 20:13:14 -04001638 &acpi_nfit_region_attribute_group,
1639 NULL,
1640};
1641
Dan Williamseaf96152015-05-01 13:11:27 -04001642/* enough info to uniquely specify an interleave set */
1643struct nfit_set_info {
1644 struct nfit_set_info_map {
1645 u64 region_offset;
1646 u32 serial_number;
1647 u32 pad;
1648 } mapping[0];
1649};
1650
1651static size_t sizeof_nfit_set_info(int num_mappings)
1652{
1653 return sizeof(struct nfit_set_info)
1654 + num_mappings * sizeof(struct nfit_set_info_map);
1655}
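
/*
 * Sizing sketch for the zero-length-array layout above: each
 * nfit_set_info_map is 16 bytes (u64 + u32 + u32 with no padding) and
 * sizeof(struct nfit_set_info) is 0, so a two-way interleave set
 * allocates sizeof_nfit_set_info(2) == 32 bytes.
 */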
1656
Dan Williams86ef58a2017-02-28 18:32:48 -08001657static int cmp_map_compat(const void *m0, const void *m1)
Dan Williamseaf96152015-05-01 13:11:27 -04001658{
1659 const struct nfit_set_info_map *map0 = m0;
1660 const struct nfit_set_info_map *map1 = m1;
1661
1662 return memcmp(&map0->region_offset, &map1->region_offset,
1663 sizeof(u64));
1664}
1665
Dan Williams86ef58a2017-02-28 18:32:48 -08001666static int cmp_map(const void *m0, const void *m1)
1667{
1668 const struct nfit_set_info_map *map0 = m0;
1669 const struct nfit_set_info_map *map1 = m1;
1670
Dan Williamsb03b99a2017-03-27 21:53:38 -07001671 if (map0->region_offset < map1->region_offset)
1672 return -1;
1673 else if (map0->region_offset > map1->region_offset)
1674 return 1;
1675 return 0;
Dan Williams86ef58a2017-02-28 18:32:48 -08001676}
1677
Dan Williamseaf96152015-05-01 13:11:27 -04001678/* Retrieve the nth entry referencing this spa */
1679static struct acpi_nfit_memory_map *memdev_from_spa(
1680 struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
1681{
1682 struct nfit_memdev *nfit_memdev;
1683
1684 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
1685 if (nfit_memdev->memdev->range_index == range_index)
1686 if (n-- == 0)
1687 return nfit_memdev->memdev;
1688 return NULL;
1689}
1690
1691static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
1692 struct nd_region_desc *ndr_desc,
1693 struct acpi_nfit_system_address *spa)
1694{
1695 int i, spa_type = nfit_spa_type(spa);
1696 struct device *dev = acpi_desc->dev;
1697 struct nd_interleave_set *nd_set;
1698 u16 nr = ndr_desc->num_mappings;
1699 struct nfit_set_info *info;
1700
1701 if (spa_type == NFIT_SPA_PM || spa_type == NFIT_SPA_VOLATILE)
1702 /* pass */;
1703 else
1704 return 0;
1705
1706 nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
1707 if (!nd_set)
1708 return -ENOMEM;
1709
1710 info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
1711 if (!info)
1712 return -ENOMEM;
1713 for (i = 0; i < nr; i++) {
Dan Williams44c462e2016-09-19 16:38:50 -07001714 struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
Dan Williamseaf96152015-05-01 13:11:27 -04001715 struct nfit_set_info_map *map = &info->mapping[i];
Dan Williams44c462e2016-09-19 16:38:50 -07001716 struct nvdimm *nvdimm = mapping->nvdimm;
Dan Williamseaf96152015-05-01 13:11:27 -04001717 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1718 struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
1719 spa->range_index, i);
1720
1721 if (!memdev || !nfit_mem->dcr) {
1722 dev_err(dev, "%s: failed to find DCR\n", __func__);
1723 return -ENODEV;
1724 }
1725
1726 map->region_offset = memdev->region_offset;
1727 map->serial_number = nfit_mem->dcr->serial_number;
1728 }
1729
1730 sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
1731 cmp_map, NULL);
1732 nd_set->cookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
Dan Williams86ef58a2017-02-28 18:32:48 -08001733
1734 /* support namespaces created with the wrong sort order */
1735 sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
1736 cmp_map_compat, NULL);
1737 nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
1738
Dan Williamseaf96152015-05-01 13:11:27 -04001739 ndr_desc->nd_set = nd_set;
1740 devm_kfree(dev, info);
1741
1742 return 0;
1743}
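
/*
 * Sketch of why the cookie above is stable across boots, with invented
 * values for a hypothetical 2-dimm set:
 *
 *	{ .region_offset = 0x1000, .serial_number = 0x22 }
 *	{ .region_offset = 0x0,    .serial_number = 0x11 }
 *
 * cmp_map() sorts numerically on region_offset, so the fletcher64 sum
 * sees the 0x0 entry first no matter how the memdevs were ordered in
 * the NFIT. cmp_map_compat() reproduces the old memcmp()-based (byte
 * order dependent) sort, and the resulting altcookie lets namespaces
 * labeled before the sort fix continue to validate.
 */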
1744
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001745static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
1746{
1747 struct acpi_nfit_interleave *idt = mmio->idt;
1748 u32 sub_line_offset, line_index, line_offset;
1749 u64 line_no, table_skip_count, table_offset;
1750
1751 line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
1752 table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
1753 line_offset = idt->line_offset[line_index]
1754 * mmio->line_size;
1755 table_offset = table_skip_count * mmio->table_size;
1756
1757 return mmio->base_offset + line_offset + table_offset + sub_line_offset;
1758}
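
/*
 * Worked example of the interleave math above, using invented
 * geometry: line_size = 256, num_lines = 2, table_size = 1024,
 * idt->line_offset[] = { 0, 2 }, offset = 1280:
 *
 *	line_no = 1280 / 256 = 5,  sub_line_offset = 0
 *	table_skip_count = 5 / 2 = 2,  line_index = 1
 *	line_offset = 2 * 256 = 512
 *	table_offset = 2 * 1024 = 2048
 *
 * so the translated offset is base_offset + 512 + 2048 + 0.
 */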
1759
Ross Zwislerde4a1962015-08-20 16:27:38 -06001760static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001761{
1762 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
1763 u64 offset = nfit_blk->stat_offset + mmio->size * bw;
Ross Zwisler68202c92016-07-29 14:59:12 -06001764 const u32 STATUS_MASK = 0x80000037;
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001765
1766 if (mmio->num_lines)
1767 offset = to_interleave_offset(offset, mmio);
1768
Ross Zwisler68202c92016-07-29 14:59:12 -06001769 return readl(mmio->addr.base + offset) & STATUS_MASK;
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001770}
1771
1772static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
1773 resource_size_t dpa, unsigned int len, unsigned int write)
1774{
1775 u64 cmd, offset;
1776 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
1777
1778 enum {
1779 BCW_OFFSET_MASK = (1ULL << 48)-1,
1780 BCW_LEN_SHIFT = 48,
1781 BCW_LEN_MASK = (1ULL << 8) - 1,
1782 BCW_CMD_SHIFT = 56,
1783 };
1784
1785 cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
1786 len = len >> L1_CACHE_SHIFT;
1787 cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
1788 cmd |= ((u64) write) << BCW_CMD_SHIFT;
1789
1790 offset = nfit_blk->cmd_offset + mmio->size * bw;
1791 if (mmio->num_lines)
1792 offset = to_interleave_offset(offset, mmio);
1793
Ross Zwisler67a3e8f2015-08-27 13:14:20 -06001794 writeq(cmd, mmio->addr.base + offset);
Dan Williamsf284a4f2016-07-07 19:44:50 -07001795 nvdimm_flush(nfit_blk->nd_region);
Ross Zwislerf0f2c072015-07-10 11:06:14 -06001796
Dan Williamsaef25332016-02-12 17:01:11 -08001797 if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
Ross Zwisler67a3e8f2015-08-27 13:14:20 -06001798 readq(mmio->addr.base + offset);
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001799}
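
/*
 * Field layout of the block control word built above, assuming
 * 64-byte cache lines (L1_CACHE_SHIFT == 6):
 *
 *	bits  0..47: starting DPA in cache-line units
 *	bits 48..55: transfer length in cache-line units
 *	bit      56: 1 = write, 0 = read
 *
 * e.g. a 256-byte read at dpa 0x2000 encodes as
 * (0x2000 >> 6) | (4ULL << 48) == 0x4000000000080. Illustrative only.
 */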
1800
1801static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
1802 resource_size_t dpa, void *iobuf, size_t len, int rw,
1803 unsigned int lane)
1804{
1805 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
1806 unsigned int copied = 0;
1807 u64 base_offset;
1808 int rc;
1809
1810 base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
1811 + lane * mmio->size;
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001812 write_blk_ctl(nfit_blk, lane, dpa, len, rw);
1813 while (len) {
1814 unsigned int c;
1815 u64 offset;
1816
1817 if (mmio->num_lines) {
1818 u32 line_offset;
1819
1820 offset = to_interleave_offset(base_offset + copied,
1821 mmio);
1822 div_u64_rem(offset, mmio->line_size, &line_offset);
1823 c = min_t(size_t, len, mmio->line_size - line_offset);
1824 } else {
1825 offset = base_offset + nfit_blk->bdw_offset;
1826 c = len;
1827 }
1828
1829 if (rw)
Ross Zwisler67a3e8f2015-08-27 13:14:20 -06001830 memcpy_to_pmem(mmio->addr.aperture + offset,
Ross Zwislerc2ad2952015-07-10 11:06:13 -06001831 iobuf + copied, c);
Ross Zwisler67a3e8f2015-08-27 13:14:20 -06001832 else {
Dan Williamsaef25332016-02-12 17:01:11 -08001833 if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
Ross Zwisler67a3e8f2015-08-27 13:14:20 -06001834 mmio_flush_range((void __force *)
1835 mmio->addr.aperture + offset, c);
1836
Ross Zwislerc2ad2952015-07-10 11:06:13 -06001837 memcpy_from_pmem(iobuf + copied,
Ross Zwisler67a3e8f2015-08-27 13:14:20 -06001838 mmio->addr.aperture + offset, c);
1839 }
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001840
1841 copied += c;
1842 len -= c;
1843 }
Ross Zwislerc2ad2952015-07-10 11:06:13 -06001844
1845 if (rw)
Dan Williamsf284a4f2016-07-07 19:44:50 -07001846 nvdimm_flush(nfit_blk->nd_region);
Ross Zwislerc2ad2952015-07-10 11:06:13 -06001847
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001848 rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
1849 return rc;
1850}
1851
1852static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
1853 resource_size_t dpa, void *iobuf, u64 len, int rw)
1854{
1855 struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
1856 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
1857 struct nd_region *nd_region = nfit_blk->nd_region;
1858 unsigned int lane, copied = 0;
1859 int rc = 0;
1860
1861 lane = nd_region_acquire_lane(nd_region);
1862 while (len) {
1863 u64 c = min(len, mmio->size);
1864
1865 rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
1866 iobuf + copied, c, rw, lane);
1867 if (rc)
1868 break;
1869
1870 copied += c;
1871 len -= c;
1872 }
1873 nd_region_release_lane(nd_region, lane);
1874
1875 return rc;
1876}
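
/*
 * Chunking sketch for the loop above, with an invented 8K aperture
 * (mmio->size == 0x2000): a 20K request is issued as three
 * single-aperture transfers of 8K, 8K and 4K, all under one lane, so
 * the control word programmed by write_blk_ctl() always matches the
 * window the data is copied through.
 */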
1877
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001878static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
1879 struct acpi_nfit_interleave *idt, u16 interleave_ways)
1880{
1881 if (idt) {
1882 mmio->num_lines = idt->line_count;
1883 mmio->line_size = idt->line_size;
1884 if (interleave_ways == 0)
1885 return -ENXIO;
1886 mmio->table_size = mmio->num_lines * interleave_ways
1887 * mmio->line_size;
1888 }
1889
1890 return 0;
1891}
1892
Ross Zwislerf0f2c072015-07-10 11:06:14 -06001893static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
1894 struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
1895{
1896 struct nd_cmd_dimm_flags flags;
1897 int rc;
1898
1899 memset(&flags, 0, sizeof(flags));
1900 rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
Dan Williamsaef25332016-02-12 17:01:11 -08001901 sizeof(flags), NULL);
Ross Zwislerf0f2c072015-07-10 11:06:14 -06001902
1903 if (rc >= 0 && flags.status == 0)
1904 nfit_blk->dimm_flags = flags.flags;
1905 else if (rc == -ENOTTY) {
1906 /* fall back to a conservative default */
Dan Williamsaef25332016-02-12 17:01:11 -08001907 nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH;
Ross Zwislerf0f2c072015-07-10 11:06:14 -06001908 rc = 0;
1909 } else
1910 rc = -ENXIO;
1911
1912 return rc;
1913}
1914
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001915static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
1916 struct device *dev)
1917{
1918 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001919 struct nd_blk_region *ndbr = to_nd_blk_region(dev);
1920 struct nfit_blk_mmio *mmio;
1921 struct nfit_blk *nfit_blk;
1922 struct nfit_mem *nfit_mem;
1923 struct nvdimm *nvdimm;
1924 int rc;
1925
1926 nvdimm = nd_blk_region_to_dimm(ndbr);
1927 nfit_mem = nvdimm_provider_data(nvdimm);
1928 if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
1929 dev_dbg(dev, "%s: missing%s%s%s\n", __func__,
1930 nfit_mem ? "" : " nfit_mem",
Dan Williams193ccca2015-06-30 16:09:39 -04001931 (nfit_mem && nfit_mem->dcr) ? "" : " dcr",
1932 (nfit_mem && nfit_mem->bdw) ? "" : " bdw");
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001933 return -ENXIO;
1934 }
1935
1936 nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL);
1937 if (!nfit_blk)
1938 return -ENOMEM;
1939 nd_blk_region_set_provider_data(ndbr, nfit_blk);
1940 nfit_blk->nd_region = to_nd_region(dev);
1941
1942 /* map block aperture memory */
1943 nfit_blk->bdw_offset = nfit_mem->bdw->offset;
1944 mmio = &nfit_blk->mmio[BDW];
Dan Williams29b9aa02016-06-06 17:42:38 -07001945 mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address,
1946 nfit_mem->spa_bdw->length, ARCH_MEMREMAP_PMEM);
Ross Zwisler67a3e8f2015-08-27 13:14:20 -06001947 if (!mmio->addr.base) {
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001948 dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
1949 nvdimm_name(nvdimm));
1950 return -ENOMEM;
1951 }
1952 mmio->size = nfit_mem->bdw->size;
1953 mmio->base_offset = nfit_mem->memdev_bdw->region_offset;
1954 mmio->idt = nfit_mem->idt_bdw;
1955 mmio->spa = nfit_mem->spa_bdw;
1956 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw,
1957 nfit_mem->memdev_bdw->interleave_ways);
1958 if (rc) {
1959 dev_dbg(dev, "%s: %s failed to init bdw interleave\n",
1960 __func__, nvdimm_name(nvdimm));
1961 return rc;
1962 }
1963
1964 /* map block control memory */
1965 nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
1966 nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
1967 mmio = &nfit_blk->mmio[DCR];
Dan Williams29b9aa02016-06-06 17:42:38 -07001968 mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address,
1969 nfit_mem->spa_dcr->length);
Ross Zwisler67a3e8f2015-08-27 13:14:20 -06001970 if (!mmio->addr.base) {
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001971 dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,
1972 nvdimm_name(nvdimm));
1973 return -ENOMEM;
1974 }
1975 mmio->size = nfit_mem->dcr->window_size;
1976 mmio->base_offset = nfit_mem->memdev_dcr->region_offset;
1977 mmio->idt = nfit_mem->idt_dcr;
1978 mmio->spa = nfit_mem->spa_dcr;
1979 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr,
1980 nfit_mem->memdev_dcr->interleave_ways);
1981 if (rc) {
1982 dev_dbg(dev, "%s: %s failed to init dcr interleave\n",
1983 __func__, nvdimm_name(nvdimm));
1984 return rc;
1985 }
1986
Ross Zwislerf0f2c072015-07-10 11:06:14 -06001987 rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
1988 if (rc < 0) {
1989		dev_dbg(dev, "%s: %s failed to get DIMM flags\n",
1990 __func__, nvdimm_name(nvdimm));
1991 return rc;
1992 }
1993
Dan Williamsf284a4f2016-07-07 19:44:50 -07001994 if (nvdimm_has_flush(nfit_blk->nd_region) < 0)
Ross Zwislerc2ad2952015-07-10 11:06:13 -06001995 dev_warn(dev, "unable to guarantee persistence of writes\n");
1996
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001997 if (mmio->line_size == 0)
1998 return 0;
1999
2000 if ((u32) nfit_blk->cmd_offset % mmio->line_size
2001 + 8 > mmio->line_size) {
2002 dev_dbg(dev, "cmd_offset crosses interleave boundary\n");
2003 return -ENXIO;
2004 } else if ((u32) nfit_blk->stat_offset % mmio->line_size
2005 + 8 > mmio->line_size) {
2006 dev_dbg(dev, "stat_offset crosses interleave boundary\n");
2007 return -ENXIO;
2008 }
2009
2010 return 0;
2011}
2012
Dan Williamsaef25332016-02-12 17:01:11 -08002013static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
Dan Williams1cf03c02016-02-17 13:01:23 -08002014 struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
Vishal Verma0caeef62015-12-24 19:21:43 -07002015{
Dan Williamsaef25332016-02-12 17:01:11 -08002016 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
Dan Williams1cf03c02016-02-17 13:01:23 -08002017 struct acpi_nfit_system_address *spa = nfit_spa->spa;
Dan Williamsaef25332016-02-12 17:01:11 -08002018 int cmd_rc, rc;
2019
Dan Williams1cf03c02016-02-17 13:01:23 -08002020 cmd->address = spa->address;
2021 cmd->length = spa->length;
Dan Williamsaef25332016-02-12 17:01:11 -08002022 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
2023 sizeof(*cmd), &cmd_rc);
2024 if (rc < 0)
2025 return rc;
Dan Williams1cf03c02016-02-17 13:01:23 -08002026 return cmd_rc;
Vishal Verma0caeef62015-12-24 19:21:43 -07002027}
2028
Dan Williams1cf03c02016-02-17 13:01:23 -08002029static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa)
Vishal Verma0caeef62015-12-24 19:21:43 -07002030{
2031 int rc;
Dan Williams1cf03c02016-02-17 13:01:23 -08002032 int cmd_rc;
2033 struct nd_cmd_ars_start ars_start;
2034 struct acpi_nfit_system_address *spa = nfit_spa->spa;
2035 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
Vishal Verma0caeef62015-12-24 19:21:43 -07002036
Dan Williams1cf03c02016-02-17 13:01:23 -08002037 memset(&ars_start, 0, sizeof(ars_start));
2038 ars_start.address = spa->address;
2039 ars_start.length = spa->length;
2040 if (nfit_spa_type(spa) == NFIT_SPA_PM)
2041 ars_start.type = ND_ARS_PERSISTENT;
2042 else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
2043 ars_start.type = ND_ARS_VOLATILE;
2044 else
2045 return -ENOTTY;
Vishal Verma0caeef62015-12-24 19:21:43 -07002046
Dan Williams1cf03c02016-02-17 13:01:23 -08002047 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
2048 sizeof(ars_start), &cmd_rc);
Dan Williamsaef25332016-02-12 17:01:11 -08002049
Dan Williams1cf03c02016-02-17 13:01:23 -08002050 if (rc < 0)
2051 return rc;
2052 return cmd_rc;
Vishal Verma0caeef62015-12-24 19:21:43 -07002053}
2054
Dan Williams1cf03c02016-02-17 13:01:23 -08002055static int ars_continue(struct acpi_nfit_desc *acpi_desc)
Vishal Verma0caeef62015-12-24 19:21:43 -07002056{
Dan Williamsaef25332016-02-12 17:01:11 -08002057 int rc, cmd_rc;
Dan Williams1cf03c02016-02-17 13:01:23 -08002058 struct nd_cmd_ars_start ars_start;
2059 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2060 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
Vishal Verma0caeef62015-12-24 19:21:43 -07002061
Dan Williams1cf03c02016-02-17 13:01:23 -08002062 memset(&ars_start, 0, sizeof(ars_start));
2063 ars_start.address = ars_status->restart_address;
2064 ars_start.length = ars_status->restart_length;
2065 ars_start.type = ars_status->type;
2066 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
2067 sizeof(ars_start), &cmd_rc);
2068 if (rc < 0)
2069 return rc;
2070 return cmd_rc;
2071}
Dan Williamsaef25332016-02-12 17:01:11 -08002072
Dan Williams1cf03c02016-02-17 13:01:23 -08002073static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
2074{
2075 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2076 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2077 int rc, cmd_rc;
Dan Williamsaef25332016-02-12 17:01:11 -08002078
Dan Williams1cf03c02016-02-17 13:01:23 -08002079 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status,
2080 acpi_desc->ars_status_size, &cmd_rc);
2081 if (rc < 0)
2082 return rc;
2083 return cmd_rc;
Vishal Verma0caeef62015-12-24 19:21:43 -07002084}
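
/*
 * The helpers above implement the ARS handshake from the ACPI DSM
 * spec: ars_get_cap() sizes the status buffer, ars_start() kicks off
 * a scrub, ars_get_status() polls for results, and ars_continue()
 * restarts after an output overflow. A hedged sketch of the calling
 * pattern (the real state machines live in acpi_nfit_query_poison()
 * and acpi_nfit_scrub() below):
 *
 *	rc = ars_start(acpi_desc, nfit_spa);
 *	do
 *		rc = ars_get_status(acpi_desc);
 *	while (rc == -EBUSY);
 *	if (rc == -ENOSPC)
 *		rc = ars_continue(acpi_desc);
 */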
2085
Dan Williams82aa37c2016-12-06 12:45:24 -08002086static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc,
Dan Williams1cf03c02016-02-17 13:01:23 -08002087 struct nd_cmd_ars_status *ars_status)
Vishal Verma0caeef62015-12-24 19:21:43 -07002088{
Dan Williams82aa37c2016-12-06 12:45:24 -08002089 struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus;
Vishal Verma0caeef62015-12-24 19:21:43 -07002090 int rc;
2091 u32 i;
2092
Dan Williams82aa37c2016-12-06 12:45:24 -08002093 /*
2094	 * The first record starts at a 44-byte offset from the start of the
2095 * payload.
2096 */
2097 if (ars_status->out_length < 44)
2098 return 0;
Vishal Verma0caeef62015-12-24 19:21:43 -07002099 for (i = 0; i < ars_status->num_records; i++) {
Dan Williams82aa37c2016-12-06 12:45:24 -08002100 /* only process full records */
2101 if (ars_status->out_length
2102 < 44 + sizeof(struct nd_ars_record) * (i + 1))
2103 break;
Vishal Verma0caeef62015-12-24 19:21:43 -07002104 rc = nvdimm_bus_add_poison(nvdimm_bus,
2105 ars_status->records[i].err_address,
2106 ars_status->records[i].length);
2107 if (rc)
2108 return rc;
2109 }
Dan Williams82aa37c2016-12-06 12:45:24 -08002110 if (i < ars_status->num_records)
2111 dev_warn(acpi_desc->dev, "detected truncated ars results\n");
Vishal Verma0caeef62015-12-24 19:21:43 -07002112
2113 return 0;
2114}
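
/*
 * Record layout assumed by the 44-byte checks above, per the DSM
 * spec's Query ARS Status output: a 44-byte header precedes the
 * packed array of struct nd_ars_record entries (24 bytes each in the
 * UAPI). Any record that does not fit entirely inside out_length is
 * treated as truncation and skipped rather than parsed.
 */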
2115
Toshi Kaniaf1996e2016-03-09 12:47:06 -07002116static void acpi_nfit_remove_resource(void *data)
2117{
2118 struct resource *res = data;
2119
2120 remove_resource(res);
2121}
2122
2123static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
2124 struct nd_region_desc *ndr_desc)
2125{
2126 struct resource *res, *nd_res = ndr_desc->res;
2127 int is_pmem, ret;
2128
2129 /* No operation if the region is already registered as PMEM */
2130 is_pmem = region_intersects(nd_res->start, resource_size(nd_res),
2131 IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY);
2132 if (is_pmem == REGION_INTERSECTS)
2133 return 0;
2134
2135 res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL);
2136 if (!res)
2137 return -ENOMEM;
2138
2139 res->name = "Persistent Memory";
2140 res->start = nd_res->start;
2141 res->end = nd_res->end;
2142 res->flags = IORESOURCE_MEM;
2143 res->desc = IORES_DESC_PERSISTENT_MEMORY;
2144
2145 ret = insert_resource(&iomem_resource, res);
2146 if (ret)
2147 return ret;
2148
Sajjan, Vikas Cd932dd22016-07-04 10:02:51 +05302149 ret = devm_add_action_or_reset(acpi_desc->dev,
2150 acpi_nfit_remove_resource,
2151 res);
2152 if (ret)
Toshi Kaniaf1996e2016-03-09 12:47:06 -07002153 return ret;
Toshi Kaniaf1996e2016-03-09 12:47:06 -07002154
2155 return 0;
2156}
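
/*
 * The devm pattern above pairs insert_resource(), which claims the
 * range in the iomem tree immediately, with devm_add_action_or_reset(),
 * which guarantees remove_resource() runs on driver detach or, if
 * registering the action itself fails, before the error return. The
 * same pairing backs shutdown_dimm_notify() and acpi_nfit_unregister()
 * elsewhere in this file.
 */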
2157
Dan Williams1f7df6f2015-06-09 20:13:14 -04002158static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
Dan Williams44c462e2016-09-19 16:38:50 -07002159 struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc,
Dan Williams1f7df6f2015-06-09 20:13:14 -04002160 struct acpi_nfit_memory_map *memdev,
Dan Williams1cf03c02016-02-17 13:01:23 -08002161 struct nfit_spa *nfit_spa)
Dan Williams1f7df6f2015-06-09 20:13:14 -04002162{
2163 struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
2164 memdev->device_handle);
Dan Williams1cf03c02016-02-17 13:01:23 -08002165 struct acpi_nfit_system_address *spa = nfit_spa->spa;
Ross Zwisler047fc8a2015-06-25 04:21:02 -04002166 struct nd_blk_region_desc *ndbr_desc;
Dan Williams1f7df6f2015-06-09 20:13:14 -04002167 struct nfit_mem *nfit_mem;
2168 int blk_valid = 0;
2169
2170 if (!nvdimm) {
2171 dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
2172 spa->range_index, memdev->device_handle);
2173 return -ENODEV;
2174 }
2175
Dan Williams44c462e2016-09-19 16:38:50 -07002176 mapping->nvdimm = nvdimm;
Dan Williams1f7df6f2015-06-09 20:13:14 -04002177 switch (nfit_spa_type(spa)) {
2178 case NFIT_SPA_PM:
2179 case NFIT_SPA_VOLATILE:
Dan Williams44c462e2016-09-19 16:38:50 -07002180 mapping->start = memdev->address;
2181 mapping->size = memdev->region_size;
Dan Williams1f7df6f2015-06-09 20:13:14 -04002182 break;
2183 case NFIT_SPA_DCR:
2184 nfit_mem = nvdimm_provider_data(nvdimm);
2185 if (!nfit_mem || !nfit_mem->bdw) {
2186 dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
2187 spa->range_index, nvdimm_name(nvdimm));
2188 } else {
Dan Williams44c462e2016-09-19 16:38:50 -07002189 mapping->size = nfit_mem->bdw->capacity;
2190 mapping->start = nfit_mem->bdw->start_address;
Vishal Verma5212e112015-06-25 04:20:32 -04002191 ndr_desc->num_lanes = nfit_mem->bdw->windows;
Dan Williams1f7df6f2015-06-09 20:13:14 -04002192 blk_valid = 1;
2193 }
2194
Dan Williams44c462e2016-09-19 16:38:50 -07002195 ndr_desc->mapping = mapping;
Dan Williams1f7df6f2015-06-09 20:13:14 -04002196 ndr_desc->num_mappings = blk_valid;
Ross Zwisler047fc8a2015-06-25 04:21:02 -04002197 ndbr_desc = to_blk_region_desc(ndr_desc);
2198 ndbr_desc->enable = acpi_nfit_blk_region_enable;
Dan Williams6bc75612015-06-17 17:23:32 -04002199 ndbr_desc->do_io = acpi_desc->blk_do_io;
Dan Williams1cf03c02016-02-17 13:01:23 -08002200 nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
2201 ndr_desc);
2202 if (!nfit_spa->nd_region)
Dan Williams1f7df6f2015-06-09 20:13:14 -04002203 return -ENOMEM;
2204 break;
2205 }
2206
2207 return 0;
2208}
2209
Lee, Chun-Yic2f32ac2016-07-15 12:05:35 +08002210static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa)
2211{
2212 return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
2213 nfit_spa_type(spa) == NFIT_SPA_VCD ||
2214 nfit_spa_type(spa) == NFIT_SPA_PDISK ||
2215 nfit_spa_type(spa) == NFIT_SPA_PCD);
2216}
2217
Dan Williams1f7df6f2015-06-09 20:13:14 -04002218static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
2219 struct nfit_spa *nfit_spa)
2220{
Dan Williams44c462e2016-09-19 16:38:50 -07002221 static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS];
Dan Williams1f7df6f2015-06-09 20:13:14 -04002222 struct acpi_nfit_system_address *spa = nfit_spa->spa;
Ross Zwisler047fc8a2015-06-25 04:21:02 -04002223 struct nd_blk_region_desc ndbr_desc;
2224 struct nd_region_desc *ndr_desc;
Dan Williams1f7df6f2015-06-09 20:13:14 -04002225 struct nfit_memdev *nfit_memdev;
Dan Williams1f7df6f2015-06-09 20:13:14 -04002226 struct nvdimm_bus *nvdimm_bus;
2227 struct resource res;
Dan Williamseaf96152015-05-01 13:11:27 -04002228 int count = 0, rc;
Dan Williams1f7df6f2015-06-09 20:13:14 -04002229
Dan Williams1cf03c02016-02-17 13:01:23 -08002230 if (nfit_spa->nd_region)
Vishal Verma20985162015-10-27 16:58:27 -06002231 return 0;
2232
Lee, Chun-Yic2f32ac2016-07-15 12:05:35 +08002233 if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) {
Dan Williams1f7df6f2015-06-09 20:13:14 -04002234 dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n",
2235 __func__);
2236 return 0;
2237 }
2238
2239 memset(&res, 0, sizeof(res));
Dan Williams44c462e2016-09-19 16:38:50 -07002240 memset(&mappings, 0, sizeof(mappings));
Ross Zwisler047fc8a2015-06-25 04:21:02 -04002241 memset(&ndbr_desc, 0, sizeof(ndbr_desc));
Dan Williams1f7df6f2015-06-09 20:13:14 -04002242 res.start = spa->address;
2243 res.end = res.start + spa->length - 1;
Ross Zwisler047fc8a2015-06-25 04:21:02 -04002244 ndr_desc = &ndbr_desc.ndr_desc;
2245 ndr_desc->res = &res;
2246 ndr_desc->provider_data = nfit_spa;
2247 ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
Toshi Kani41d7a6d2015-06-19 12:18:33 -06002248 if (spa->flags & ACPI_NFIT_PROXIMITY_VALID)
2249 ndr_desc->numa_node = acpi_map_pxm_to_online_node(
2250 spa->proximity_domain);
2251 else
2252 ndr_desc->numa_node = NUMA_NO_NODE;
2253
Dan Williams1f7df6f2015-06-09 20:13:14 -04002254 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
2255 struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
Dan Williams44c462e2016-09-19 16:38:50 -07002256 struct nd_mapping_desc *mapping;
Dan Williams1f7df6f2015-06-09 20:13:14 -04002257
2258 if (memdev->range_index != spa->range_index)
2259 continue;
2260 if (count >= ND_MAX_MAPPINGS) {
2261 dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
2262 spa->range_index, ND_MAX_MAPPINGS);
2263 return -ENXIO;
2264 }
Dan Williams44c462e2016-09-19 16:38:50 -07002265 mapping = &mappings[count++];
2266 rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc,
Dan Williams1cf03c02016-02-17 13:01:23 -08002267 memdev, nfit_spa);
Dan Williams1f7df6f2015-06-09 20:13:14 -04002268 if (rc)
Dan Williams1cf03c02016-02-17 13:01:23 -08002269 goto out;
Dan Williams1f7df6f2015-06-09 20:13:14 -04002270 }
2271
Dan Williams44c462e2016-09-19 16:38:50 -07002272 ndr_desc->mapping = mappings;
Ross Zwisler047fc8a2015-06-25 04:21:02 -04002273 ndr_desc->num_mappings = count;
2274 rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
Dan Williamseaf96152015-05-01 13:11:27 -04002275 if (rc)
Dan Williams1cf03c02016-02-17 13:01:23 -08002276 goto out;
Dan Williamseaf96152015-05-01 13:11:27 -04002277
Dan Williams1f7df6f2015-06-09 20:13:14 -04002278 nvdimm_bus = acpi_desc->nvdimm_bus;
2279 if (nfit_spa_type(spa) == NFIT_SPA_PM) {
Toshi Kaniaf1996e2016-03-09 12:47:06 -07002280 rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc);
Dan Williams48901162016-03-09 17:15:43 -08002281 if (rc) {
Toshi Kaniaf1996e2016-03-09 12:47:06 -07002282 dev_warn(acpi_desc->dev,
2283 "failed to insert pmem resource to iomem: %d\n",
2284 rc);
Dan Williams48901162016-03-09 17:15:43 -08002285 goto out;
Vishal Verma0caeef62015-12-24 19:21:43 -07002286 }
Dan Williams48901162016-03-09 17:15:43 -08002287
Dan Williams1cf03c02016-02-17 13:01:23 -08002288 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
2289 ndr_desc);
2290 if (!nfit_spa->nd_region)
2291 rc = -ENOMEM;
Dan Williams1f7df6f2015-06-09 20:13:14 -04002292 } else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) {
Dan Williams1cf03c02016-02-17 13:01:23 -08002293 nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
2294 ndr_desc);
2295 if (!nfit_spa->nd_region)
2296 rc = -ENOMEM;
Lee, Chun-Yic2f32ac2016-07-15 12:05:35 +08002297 } else if (nfit_spa_is_virtual(spa)) {
2298 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
2299 ndr_desc);
2300 if (!nfit_spa->nd_region)
2301 rc = -ENOMEM;
Dan Williams1f7df6f2015-06-09 20:13:14 -04002302 }
Vishal Verma20985162015-10-27 16:58:27 -06002303
Dan Williams1cf03c02016-02-17 13:01:23 -08002304 out:
2305 if (rc)
2306 dev_err(acpi_desc->dev, "failed to register spa range %d\n",
2307 nfit_spa->spa->range_index);
2308 return rc;
2309}
2310
2311static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc,
2312 u32 max_ars)
2313{
2314 struct device *dev = acpi_desc->dev;
2315 struct nd_cmd_ars_status *ars_status;
2316
2317 if (acpi_desc->ars_status && acpi_desc->ars_status_size >= max_ars) {
2318 memset(acpi_desc->ars_status, 0, acpi_desc->ars_status_size);
2319 return 0;
2320 }
2321
2322 if (acpi_desc->ars_status)
2323 devm_kfree(dev, acpi_desc->ars_status);
2324 acpi_desc->ars_status = NULL;
2325 ars_status = devm_kzalloc(dev, max_ars, GFP_KERNEL);
2326 if (!ars_status)
2327 return -ENOMEM;
2328 acpi_desc->ars_status = ars_status;
2329 acpi_desc->ars_status_size = max_ars;
Dan Williams1f7df6f2015-06-09 20:13:14 -04002330 return 0;
2331}
2332
Dan Williams1cf03c02016-02-17 13:01:23 -08002333static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc,
2334 struct nfit_spa *nfit_spa)
2335{
2336 struct acpi_nfit_system_address *spa = nfit_spa->spa;
2337 int rc;
2338
2339 if (!nfit_spa->max_ars) {
2340 struct nd_cmd_ars_cap ars_cap;
2341
2342 memset(&ars_cap, 0, sizeof(ars_cap));
2343 rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
2344 if (rc < 0)
2345 return rc;
2346 nfit_spa->max_ars = ars_cap.max_ars_out;
2347 nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
2348 /* check that the supported scrub types match the spa type */
2349 if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE &&
2350 ((ars_cap.status >> 16) & ND_ARS_VOLATILE) == 0)
2351 return -ENOTTY;
2352 else if (nfit_spa_type(spa) == NFIT_SPA_PM &&
2353 ((ars_cap.status >> 16) & ND_ARS_PERSISTENT) == 0)
2354 return -ENOTTY;
2355 }
2356
2357 if (ars_status_alloc(acpi_desc, nfit_spa->max_ars))
2358 return -ENOMEM;
2359
2360 rc = ars_get_status(acpi_desc);
2361 if (rc < 0 && rc != -ENOSPC)
2362 return rc;
2363
Dan Williams82aa37c2016-12-06 12:45:24 -08002364 if (ars_status_process_records(acpi_desc, acpi_desc->ars_status))
Dan Williams1cf03c02016-02-17 13:01:23 -08002365 return -ENOMEM;
2366
2367 return 0;
2368}
2369
2370static void acpi_nfit_async_scrub(struct acpi_nfit_desc *acpi_desc,
2371 struct nfit_spa *nfit_spa)
2372{
2373 struct acpi_nfit_system_address *spa = nfit_spa->spa;
2374 unsigned int overflow_retry = scrub_overflow_abort;
2375 u64 init_ars_start = 0, init_ars_len = 0;
2376 struct device *dev = acpi_desc->dev;
2377 unsigned int tmo = scrub_timeout;
2378 int rc;
2379
Vishal Verma37b137f2016-07-23 21:51:42 -07002380 if (!nfit_spa->ars_required || !nfit_spa->nd_region)
Dan Williams1cf03c02016-02-17 13:01:23 -08002381 return;
2382
2383 rc = ars_start(acpi_desc, nfit_spa);
2384 /*
2385 * If we timed out the initial scan we'll still be busy here,
2386 * and will wait another timeout before giving up permanently.
2387 */
2388 if (rc < 0 && rc != -EBUSY)
2389 return;
2390
2391 do {
2392 u64 ars_start, ars_len;
2393
2394 if (acpi_desc->cancel)
2395 break;
2396 rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
2397 if (rc == -ENOTTY)
2398 break;
2399 if (rc == -EBUSY && !tmo) {
2400 dev_warn(dev, "range %d ars timeout, aborting\n",
2401 spa->range_index);
2402 break;
2403 }
2404
2405 if (rc == -EBUSY) {
2406 /*
2407			 * Note: entries may be appended to the list
2408			 * while the lock is dropped, but the workqueue
2409			 * being active prevents entries from being
2410			 * deleted / freed.
2411 */
2412 mutex_unlock(&acpi_desc->init_mutex);
2413 ssleep(1);
2414 tmo--;
2415 mutex_lock(&acpi_desc->init_mutex);
2416 continue;
2417 }
2418
2419 /* we got some results, but there are more pending... */
2420 if (rc == -ENOSPC && overflow_retry--) {
2421 if (!init_ars_len) {
2422 init_ars_len = acpi_desc->ars_status->length;
2423 init_ars_start = acpi_desc->ars_status->address;
2424 }
2425 rc = ars_continue(acpi_desc);
2426 }
2427
2428 if (rc < 0) {
2429 dev_warn(dev, "range %d ars continuation failed\n",
2430 spa->range_index);
2431 break;
2432 }
2433
2434 if (init_ars_len) {
2435 ars_start = init_ars_start;
2436 ars_len = init_ars_len;
2437 } else {
2438 ars_start = acpi_desc->ars_status->address;
2439 ars_len = acpi_desc->ars_status->length;
2440 }
2441 dev_dbg(dev, "spa range: %d ars from %#llx + %#llx complete\n",
2442 spa->range_index, ars_start, ars_len);
2443 /* notify the region about new poison entries */
2444 nvdimm_region_notify(nfit_spa->nd_region,
2445 NVDIMM_REVALIDATE_POISON);
2446 break;
2447 } while (1);
2448}
2449
2450static void acpi_nfit_scrub(struct work_struct *work)
2451{
2452 struct device *dev;
2453 u64 init_scrub_length = 0;
2454 struct nfit_spa *nfit_spa;
2455 u64 init_scrub_address = 0;
2456 bool init_ars_done = false;
2457 struct acpi_nfit_desc *acpi_desc;
2458 unsigned int tmo = scrub_timeout;
2459 unsigned int overflow_retry = scrub_overflow_abort;
2460
2461 acpi_desc = container_of(work, typeof(*acpi_desc), work);
2462 dev = acpi_desc->dev;
2463
2464 /*
2465 * We scrub in 2 phases. The first phase waits for any platform
2466 * firmware initiated scrubs to complete and then we go search for the
2467 * affected spa regions to mark them scanned. In the second phase we
2468 * initiate a directed scrub for every range that was not scrubbed in
Vishal Verma37b137f2016-07-23 21:51:42 -07002469 * phase 1. If we're called for a 'rescan', we harmlessly pass through
2470 * the first phase, but really only care about running phase 2, where
2471 * regions can be notified of new poison.
Dan Williams1cf03c02016-02-17 13:01:23 -08002472 */
2473
2474 /* process platform firmware initiated scrubs */
2475 retry:
2476 mutex_lock(&acpi_desc->init_mutex);
2477 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
2478 struct nd_cmd_ars_status *ars_status;
2479 struct acpi_nfit_system_address *spa;
2480 u64 ars_start, ars_len;
2481 int rc;
2482
2483 if (acpi_desc->cancel)
2484 break;
2485
2486 if (nfit_spa->nd_region)
2487 continue;
2488
2489 if (init_ars_done) {
2490 /*
2491 * No need to re-query, we're now just
2492 * reconciling all the ranges covered by the
2493 * initial scrub
2494 */
2495 rc = 0;
2496 } else
2497 rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
2498
2499 if (rc == -ENOTTY) {
2500 /* no ars capability, just register spa and move on */
2501 acpi_nfit_register_region(acpi_desc, nfit_spa);
2502 continue;
2503 }
2504
2505 if (rc == -EBUSY && !tmo) {
2506 /* fallthrough to directed scrub in phase 2 */
2507 dev_warn(dev, "timeout awaiting ars results, continuing...\n");
2508 break;
2509 } else if (rc == -EBUSY) {
2510 mutex_unlock(&acpi_desc->init_mutex);
2511 ssleep(1);
2512 tmo--;
2513 goto retry;
2514 }
2515
2516 /* we got some results, but there are more pending... */
2517 if (rc == -ENOSPC && overflow_retry--) {
2518 ars_status = acpi_desc->ars_status;
2519 /*
2520 * Record the original scrub range, so that we
2521 * can recall all the ranges impacted by the
2522 * initial scrub.
2523 */
2524 if (!init_scrub_length) {
2525 init_scrub_length = ars_status->length;
2526 init_scrub_address = ars_status->address;
2527 }
2528 rc = ars_continue(acpi_desc);
2529 if (rc == 0) {
2530 mutex_unlock(&acpi_desc->init_mutex);
2531 goto retry;
2532 }
2533 }
2534
2535 if (rc < 0) {
2536 /*
2537 * Initial scrub failed, we'll give it one more
2538 * try below...
2539 */
2540 break;
2541 }
2542
2543 /* We got some final results, record completed ranges */
2544 ars_status = acpi_desc->ars_status;
2545 if (init_scrub_length) {
2546 ars_start = init_scrub_address;
2547			ars_len = init_scrub_length;
2548 } else {
2549 ars_start = ars_status->address;
2550 ars_len = ars_status->length;
2551 }
2552 spa = nfit_spa->spa;
2553
2554 if (!init_ars_done) {
2555 init_ars_done = true;
2556 dev_dbg(dev, "init scrub %#llx + %#llx complete\n",
2557 ars_start, ars_len);
2558 }
2559 if (ars_start <= spa->address && ars_start + ars_len
2560 >= spa->address + spa->length)
2561 acpi_nfit_register_region(acpi_desc, nfit_spa);
2562 }
2563
2564 /*
2565 * For all the ranges not covered by an initial scrub we still
2566 * want to see if there are errors, but it's ok to discover them
2567 * asynchronously.
2568 */
2569 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
2570 /*
2571 * Flag all the ranges that still need scrubbing, but
2572 * register them now to make data available.
2573 */
Vishal Verma37b137f2016-07-23 21:51:42 -07002574 if (!nfit_spa->nd_region) {
2575 nfit_spa->ars_required = 1;
Dan Williams1cf03c02016-02-17 13:01:23 -08002576 acpi_nfit_register_region(acpi_desc, nfit_spa);
Vishal Verma37b137f2016-07-23 21:51:42 -07002577 }
Dan Williams1cf03c02016-02-17 13:01:23 -08002578 }
Dan Williams9ccaed42017-04-13 22:48:46 -07002579 acpi_desc->init_complete = 1;
Dan Williams1cf03c02016-02-17 13:01:23 -08002580
2581 list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
2582 acpi_nfit_async_scrub(acpi_desc, nfit_spa);
Vishal Verma37b137f2016-07-23 21:51:42 -07002583 acpi_desc->scrub_count++;
2584 if (acpi_desc->scrub_count_state)
2585 sysfs_notify_dirent(acpi_desc->scrub_count_state);
Dan Williams1cf03c02016-02-17 13:01:23 -08002586 mutex_unlock(&acpi_desc->init_mutex);
2587}
2588
Dan Williams1f7df6f2015-06-09 20:13:14 -04002589static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
2590{
2591 struct nfit_spa *nfit_spa;
Dan Williams1cf03c02016-02-17 13:01:23 -08002592 int rc;
Dan Williams1f7df6f2015-06-09 20:13:14 -04002593
Dan Williams1cf03c02016-02-17 13:01:23 -08002594 list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
2595 if (nfit_spa_type(nfit_spa->spa) == NFIT_SPA_DCR) {
2596 /* BLK regions don't need to wait for ars results */
2597 rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
2598 if (rc)
2599 return rc;
2600 }
Dan Williams1f7df6f2015-06-09 20:13:14 -04002601
Dan Williamsfbabd822017-04-18 09:56:31 -07002602 if (!acpi_desc->cancel)
2603 queue_work(nfit_wq, &acpi_desc->work);
Dan Williams1f7df6f2015-06-09 20:13:14 -04002604 return 0;
2605}
2606
Vishal Verma20985162015-10-27 16:58:27 -06002607static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
2608 struct nfit_table_prev *prev)
2609{
2610 struct device *dev = acpi_desc->dev;
2611
2612 if (!list_empty(&prev->spas) ||
2613 !list_empty(&prev->memdevs) ||
2614 !list_empty(&prev->dcrs) ||
2615 !list_empty(&prev->bdws) ||
2616 !list_empty(&prev->idts) ||
2617 !list_empty(&prev->flushes)) {
2618 dev_err(dev, "new nfit deletes entries (unsupported)\n");
2619 return -ENXIO;
2620 }
2621 return 0;
2622}
2623
Vishal Verma37b137f2016-07-23 21:51:42 -07002624static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc)
2625{
2626 struct device *dev = acpi_desc->dev;
2627 struct kernfs_node *nfit;
2628 struct device *bus_dev;
2629
2630 if (!ars_supported(acpi_desc->nvdimm_bus))
2631 return 0;
2632
2633 bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
2634 nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit");
2635 if (!nfit) {
2636 dev_err(dev, "sysfs_get_dirent 'nfit' failed\n");
2637 return -ENODEV;
2638 }
2639 acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub");
2640 sysfs_put(nfit);
2641 if (!acpi_desc->scrub_count_state) {
2642 dev_err(dev, "sysfs_get_dirent 'scrub' failed\n");
2643 return -ENODEV;
2644 }
2645
2646 return 0;
2647}
2648
Dan Williamsfbabd822017-04-18 09:56:31 -07002649static void acpi_nfit_unregister(void *data)
Dan Williams58cd71b2016-07-21 18:05:36 -07002650{
2651 struct acpi_nfit_desc *acpi_desc = data;
2652
Dan Williams58cd71b2016-07-21 18:05:36 -07002653 nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
Dan Williams58cd71b2016-07-21 18:05:36 -07002654}
2655
Dan Williamse7a11b42016-07-14 16:19:55 -07002656int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz)
Dan Williamsb94d5232015-05-19 22:54:31 -04002657{
2658 struct device *dev = acpi_desc->dev;
Vishal Verma20985162015-10-27 16:58:27 -06002659 struct nfit_table_prev prev;
Dan Williamsb94d5232015-05-19 22:54:31 -04002660 const void *end;
Dan Williams1f7df6f2015-06-09 20:13:14 -04002661 int rc;
Dan Williamsb94d5232015-05-19 22:54:31 -04002662
Dan Williams58cd71b2016-07-21 18:05:36 -07002663 if (!acpi_desc->nvdimm_bus) {
Vishal Verma37b137f2016-07-23 21:51:42 -07002664 acpi_nfit_init_dsms(acpi_desc);
2665
Dan Williams58cd71b2016-07-21 18:05:36 -07002666 acpi_desc->nvdimm_bus = nvdimm_bus_register(dev,
2667 &acpi_desc->nd_desc);
2668 if (!acpi_desc->nvdimm_bus)
2669 return -ENOMEM;
Vishal Verma37b137f2016-07-23 21:51:42 -07002670
Dan Williamsfbabd822017-04-18 09:56:31 -07002671 rc = devm_add_action_or_reset(dev, acpi_nfit_unregister,
Dan Williams58cd71b2016-07-21 18:05:36 -07002672 acpi_desc);
2673 if (rc)
2674 return rc;
Vishal Verma37b137f2016-07-23 21:51:42 -07002675
2676 rc = acpi_nfit_desc_init_scrub_attr(acpi_desc);
2677 if (rc)
2678 return rc;
Vishal Verma6839a6d2016-07-23 21:51:21 -07002679
2680 /* register this acpi_desc for mce notifications */
2681 mutex_lock(&acpi_desc_lock);
2682 list_add_tail(&acpi_desc->list, &acpi_descs);
2683 mutex_unlock(&acpi_desc_lock);
Dan Williams58cd71b2016-07-21 18:05:36 -07002684 }
2685
Vishal Verma20985162015-10-27 16:58:27 -06002686 mutex_lock(&acpi_desc->init_mutex);
2687
2688 INIT_LIST_HEAD(&prev.spas);
2689 INIT_LIST_HEAD(&prev.memdevs);
2690 INIT_LIST_HEAD(&prev.dcrs);
2691 INIT_LIST_HEAD(&prev.bdws);
2692 INIT_LIST_HEAD(&prev.idts);
2693 INIT_LIST_HEAD(&prev.flushes);
2694
2695 list_cut_position(&prev.spas, &acpi_desc->spas,
2696 acpi_desc->spas.prev);
2697 list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
2698 acpi_desc->memdevs.prev);
2699 list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
2700 acpi_desc->dcrs.prev);
2701 list_cut_position(&prev.bdws, &acpi_desc->bdws,
2702 acpi_desc->bdws.prev);
2703 list_cut_position(&prev.idts, &acpi_desc->idts,
2704 acpi_desc->idts.prev);
2705 list_cut_position(&prev.flushes, &acpi_desc->flushes,
2706 acpi_desc->flushes.prev);
2707
Vishal Verma20985162015-10-27 16:58:27 -06002708 end = data + sz;
Vishal Verma20985162015-10-27 16:58:27 -06002709 while (!IS_ERR_OR_NULL(data))
2710 data = add_table(acpi_desc, &prev, data, end);
2711
2712 if (IS_ERR(data)) {
2713 dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__,
2714 PTR_ERR(data));
2715 rc = PTR_ERR(data);
2716 goto out_unlock;
2717 }
2718
2719 rc = acpi_nfit_check_deletions(acpi_desc, &prev);
2720 if (rc)
2721 goto out_unlock;
2722
Dan Williams81ed4e32016-06-10 18:20:53 -07002723 rc = nfit_mem_init(acpi_desc);
2724 if (rc)
Vishal Verma20985162015-10-27 16:58:27 -06002725 goto out_unlock;
Vishal Verma20985162015-10-27 16:58:27 -06002726
2727 rc = acpi_nfit_register_dimms(acpi_desc);
2728 if (rc)
2729 goto out_unlock;
2730
2731 rc = acpi_nfit_register_regions(acpi_desc);
2732
2733 out_unlock:
2734 mutex_unlock(&acpi_desc->init_mutex);
2735 return rc;
2736}
2737EXPORT_SYMBOL_GPL(acpi_nfit_init);
2738
Dan Williams7ae0fa432016-02-19 12:16:34 -08002739struct acpi_nfit_flush_work {
2740 struct work_struct work;
2741 struct completion cmp;
2742};
2743
2744static void flush_probe(struct work_struct *work)
2745{
2746 struct acpi_nfit_flush_work *flush;
2747
2748 flush = container_of(work, typeof(*flush), work);
2749 complete(&flush->cmp);
2750}
2751
2752static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
2753{
2754 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
2755 struct device *dev = acpi_desc->dev;
2756 struct acpi_nfit_flush_work flush;
Dan Williamse4714862017-02-02 10:31:00 -08002757 int rc;
Dan Williams7ae0fa432016-02-19 12:16:34 -08002758
2759 /* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
2760 device_lock(dev);
2761 device_unlock(dev);
2762
Dan Williams9ccaed42017-04-13 22:48:46 -07002763 /* bounce the init_mutex to make init_complete valid */
2764 mutex_lock(&acpi_desc->init_mutex);
Dan Williamsfbabd822017-04-18 09:56:31 -07002765 if (acpi_desc->cancel || acpi_desc->init_complete) {
2766 mutex_unlock(&acpi_desc->init_mutex);
Dan Williams9ccaed42017-04-13 22:48:46 -07002767 return 0;
Dan Williamsfbabd822017-04-18 09:56:31 -07002768 }
Dan Williams9ccaed42017-04-13 22:48:46 -07002769
Dan Williams7ae0fa432016-02-19 12:16:34 -08002770 /*
2771	 * Scrub work could take tens of seconds; userspace may give up, so we
2772 * need to be interruptible while waiting.
2773 */
2774 INIT_WORK_ONSTACK(&flush.work, flush_probe);
2775	init_completion(&flush.cmp);
2776 queue_work(nfit_wq, &flush.work);
Dan Williamsfbabd822017-04-18 09:56:31 -07002777 mutex_unlock(&acpi_desc->init_mutex);
Dan Williamse4714862017-02-02 10:31:00 -08002778
2779 rc = wait_for_completion_interruptible(&flush.cmp);
2780 cancel_work_sync(&flush.work);
2781 return rc;
Dan Williams7ae0fa432016-02-19 12:16:34 -08002782}
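
/*
 * Sketch of the flush pattern above: queueing a no-op work item on
 * nfit_wq and waiting for it means every scrub work item queued
 * earlier has already run (nfit_wq is a single-threaded workqueue in
 * this driver, so work items execute in order). The wait is
 * interruptible so a user giving up on probe does not hang, and
 * cancel_work_sync() makes the on-stack work safe to unwind on
 * -ERESTARTSYS.
 */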
2783
Dan Williams87bf5722016-02-22 21:50:31 -08002784static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
2785 struct nvdimm *nvdimm, unsigned int cmd)
2786{
2787 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
2788
2789 if (nvdimm)
2790 return 0;
2791 if (cmd != ND_CMD_ARS_START)
2792 return 0;
2793
2794 /*
2795 * The kernel and userspace may race to initiate a scrub, but
2796 * the scrub thread is prepared to lose that initial race. It
2797 * just needs guarantees that any ars it initiates are not
2798	 * interrupted by any intervening start requests from userspace.
2799 */
2800 if (work_busy(&acpi_desc->work))
2801 return -EBUSY;
2802
2803 return 0;
2804}
2805
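/*
 * Kick off a fresh address range scrub of every persistent memory SPA
 * range. Returns -EBUSY while a scrub is already in flight, and quietly
 * does nothing once teardown has set ->cancel.
 */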
int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	if (work_busy(&acpi_desc->work))
		return -EBUSY;

	mutex_lock(&acpi_desc->init_mutex);
	if (acpi_desc->cancel) {
		mutex_unlock(&acpi_desc->init_mutex);
		return 0;
	}

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		struct acpi_nfit_system_address *spa = nfit_spa->spa;

		if (nfit_spa_type(spa) != NFIT_SPA_PM)
			continue;

		nfit_spa->ars_required = 1;
	}
	queue_work(nfit_wq, &acpi_desc->work);
	dev_dbg(dev, "%s: ars_scan triggered\n", __func__);
	mutex_unlock(&acpi_desc->init_mutex);

	return 0;
}

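/*
 * One-time construction of an acpi_nfit_desc: publish the nvdimm bus
 * descriptor callbacks and initialize the lists that NFIT table parsing
 * will populate. Must run before the descriptor is handed to
 * acpi_nfit_init() or the scrub workqueue.
 */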
void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc;

	dev_set_drvdata(dev, acpi_desc);
	acpi_desc->dev = dev;
	acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->provider_name = "ACPI.NFIT";
	nd_desc->module = THIS_MODULE;
	nd_desc->ndctl = acpi_nfit_ctl;
	nd_desc->flush_probe = acpi_nfit_flush_probe;
	nd_desc->clear_to_send = acpi_nfit_clear_to_send;
	nd_desc->attr_groups = acpi_nfit_attribute_groups;

	INIT_LIST_HEAD(&acpi_desc->spas);
	INIT_LIST_HEAD(&acpi_desc->dcrs);
	INIT_LIST_HEAD(&acpi_desc->bdws);
	INIT_LIST_HEAD(&acpi_desc->idts);
	INIT_LIST_HEAD(&acpi_desc->flushes);
	INIT_LIST_HEAD(&acpi_desc->memdevs);
	INIT_LIST_HEAD(&acpi_desc->dimms);
	INIT_LIST_HEAD(&acpi_desc->list);
	mutex_init(&acpi_desc->init_mutex);
	INIT_WORK(&acpi_desc->work, acpi_nfit_scrub);
}
EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);

static void acpi_nfit_put_table(void *table)
{
	acpi_put_table(table);
}

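/*
 * Teardown path, registered as a devm action in acpi_nfit_add(): delist
 * the descriptor, mark it cancelled so new scrub requests bail out, and
 * drain nfit_wq so no work is left in flight after unbind.
 */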
void acpi_nfit_shutdown(void *data)
{
	struct acpi_nfit_desc *acpi_desc = data;
	struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);

	/*
	 * Destruct under acpi_desc_lock so that nfit_handle_mce does not
	 * race teardown
	 */
	mutex_lock(&acpi_desc_lock);
	list_del(&acpi_desc->list);
	mutex_unlock(&acpi_desc_lock);

	mutex_lock(&acpi_desc->init_mutex);
	acpi_desc->cancel = 1;
	mutex_unlock(&acpi_desc->init_mutex);

	/*
	 * Bounce the nvdimm bus lock to make sure any in-flight
	 * acpi_nfit_ars_rescan() submissions have had a chance to
	 * either submit or see ->cancel set.
	 */
	device_lock(bus_dev);
	device_unlock(bus_dev);

	flush_workqueue(nfit_wq);
}
EXPORT_SYMBOL_GPL(acpi_nfit_shutdown);

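/*
 * Driver probe: locate the static NFIT, prefer the dynamic _FIT image
 * when the platform provides one, and register cleanup actions via devm
 * so unbind and error exits share the acpi_nfit_shutdown() path.
 */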
static int acpi_nfit_add(struct acpi_device *adev)
{
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &adev->dev;
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	acpi_size sz;
	int rc = 0;

	status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl);
	if (ACPI_FAILURE(status)) {
		/* This is ok, we could have an nvdimm hotplugged later */
		dev_dbg(dev, "failed to find NFIT at startup\n");
		return 0;
	}

	rc = devm_add_action_or_reset(dev, acpi_nfit_put_table, tbl);
	if (rc)
		return rc;
	sz = tbl->length;

	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return -ENOMEM;
	acpi_nfit_desc_init(acpi_desc, &adev->dev);

	/* Save the acpi header for exporting the revision via sysfs */
	acpi_desc->acpi_header = *tbl;

	/* Evaluate _FIT and override with that if present */
	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
	if (ACPI_SUCCESS(status) && buf.length > 0) {
		union acpi_object *obj = buf.pointer;

		if (obj->type == ACPI_TYPE_BUFFER)
			rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
					obj->buffer.length);
		else
			dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n",
				__func__, (int) obj->type);
		kfree(buf.pointer);
	} else
		/* skip over the lead-in header table */
		rc = acpi_nfit_init(acpi_desc, (void *) tbl
				+ sizeof(struct acpi_table_nfit),
				sz - sizeof(struct acpi_table_nfit));

	if (rc)
		return rc;
	return devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc);
}

static int acpi_nfit_remove(struct acpi_device *adev)
{
	/* see acpi_nfit_shutdown, registered as a devm action above */
	return 0;
}

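/*
 * Common body of the ACPI notify handler, exported so external callers
 * that already hold the device lock can re-trigger a _FIT evaluation.
 * On NFIT_NOTIFY_UPDATE the updated table payload is merged into the
 * existing descriptor state.
 */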
void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	acpi_status status;
	int ret;

	dev_dbg(dev, "%s: event: %d\n", __func__, event);

	if (event != NFIT_NOTIFY_UPDATE)
		return;

	if (!dev->driver) {
		/* dev->driver may be null if we're being removed */
		dev_dbg(dev, "%s: no driver found for dev\n", __func__);
		return;
	}

	if (!acpi_desc) {
		acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
		if (!acpi_desc)
			return;
		acpi_nfit_desc_init(acpi_desc, dev);
	} else {
		/*
		 * Finish previous registration before considering new
		 * regions.
		 */
		flush_workqueue(nfit_wq);
	}

	/* Evaluate _FIT */
	status = acpi_evaluate_object(handle, "_FIT", NULL, &buf);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "failed to evaluate _FIT\n");
		return;
	}

	obj = buf.pointer;
	if (obj->type == ACPI_TYPE_BUFFER) {
		ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
				obj->buffer.length);
		if (ret)
			dev_err(dev, "failed to merge updated NFIT\n");
	} else
		dev_err(dev, "Invalid _FIT\n");
	kfree(buf.pointer);
}
EXPORT_SYMBOL_GPL(__acpi_nfit_notify);

static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
{
	device_lock(&adev->dev);
	__acpi_nfit_notify(&adev->dev, adev->handle, event);
	device_unlock(&adev->dev);
}

static const struct acpi_device_id acpi_nfit_ids[] = {
	{ "ACPI0012", 0 },
	{ "", 0 },
};
MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);

static struct acpi_driver acpi_nfit_driver = {
	.name = KBUILD_MODNAME,
	.ids = acpi_nfit_ids,
	.ops = {
		.add = acpi_nfit_add,
		.remove = acpi_nfit_remove,
		.notify = acpi_nfit_notify,
	},
};

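/*
 * The BUILD_BUG_ON() checks below pin the compiled sizes of the ACPICA
 * NFIT sub-table structures to the lengths fixed by the ACPI
 * specification, so a mismatched header cannot silently mis-parse
 * firmware tables.
 */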
static __init int nfit_init(void)
{
	BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);

	acpi_str_to_uuid(UUID_VOLATILE_MEMORY, nfit_uuid[NFIT_SPA_VOLATILE]);
	acpi_str_to_uuid(UUID_PERSISTENT_MEMORY, nfit_uuid[NFIT_SPA_PM]);
	acpi_str_to_uuid(UUID_CONTROL_REGION, nfit_uuid[NFIT_SPA_DCR]);
	acpi_str_to_uuid(UUID_DATA_REGION, nfit_uuid[NFIT_SPA_BDW]);
	acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_VDISK]);
	acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD, nfit_uuid[NFIT_SPA_VCD]);
	acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_PDISK]);
	acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]);
	acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]);
	acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]);
	acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE1, nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
	acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE2, nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
	acpi_str_to_uuid(UUID_NFIT_DIMM_N_MSFT, nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);

	nfit_wq = create_singlethread_workqueue("nfit");
	if (!nfit_wq)
		return -ENOMEM;

	nfit_mce_register();

	return acpi_bus_register_driver(&acpi_nfit_driver);
}

static __exit void nfit_exit(void)
{
	nfit_mce_unregister();
	acpi_bus_unregister_driver(&acpi_nfit_driver);
	destroy_workqueue(nfit_wq);
	WARN_ON(!list_empty(&acpi_descs));
}

module_init(nfit_init);
module_exit(nfit_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");