/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/pmem.h>
#include <linux/io.h>
#include <linux/nd.h>
#include <asm/cacheflush.h>
#include "nfit.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");

static unsigned int scrub_timeout = NFIT_ARS_TIMEOUT;
module_param(scrub_timeout, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_timeout, "Initial scrub timeout in seconds");

/* after three payloads of overflow, it's dead jim */
static unsigned int scrub_overflow_abort = 3;
module_param(scrub_overflow_abort, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_overflow_abort,
		"Number of times we overflow ARS results before abort");

static bool disable_vendor_specific;
module_param(disable_vendor_specific, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vendor_specific,
		"Limit commands to the publicly specified set\n");

static struct workqueue_struct *nfit_wq;

struct nfit_table_prev {
	struct list_head spas;
	struct list_head memdevs;
	struct list_head dcrs;
	struct list_head bdws;
	struct list_head idts;
	struct list_head flushes;
};

static u8 nfit_uuid[NFIT_UUID_MAX][16];

const u8 *to_nfit_uuid(enum nfit_uuids id)
{
	return nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);

static struct acpi_nfit_desc *to_acpi_nfit_desc(
		struct nvdimm_bus_descriptor *nd_desc)
{
	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}

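/*
 * Translate the firmware status word returned in a command payload into
 * an errno for the caller: 0 on success, -EBUSY while an ARS is still in
 * flight, -EAGAIN when no ARS has run this boot, -ENOSPC when results
 * overflowed, and -EIO/-ENOTTY for failed or unsupported requests.
 */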
static int xlat_status(void *buf, unsigned int cmd)
{
	struct nd_cmd_clear_error *clear_err;
	struct nd_cmd_ars_status *ars_status;
	struct nd_cmd_ars_start *ars_start;
	struct nd_cmd_ars_cap *ars_cap;
	u16 flags;

	switch (cmd) {
	case ND_CMD_ARS_CAP:
		ars_cap = buf;
		if ((ars_cap->status & 0xffff) == NFIT_ARS_CAP_NONE)
			return -ENOTTY;

		/* Command failed */
		if (ars_cap->status & 0xffff)
			return -EIO;

		/* No supported scan types for this range */
		flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
		if ((ars_cap->status >> 16 & flags) == 0)
			return -ENOTTY;
		break;
	case ND_CMD_ARS_START:
		ars_start = buf;
		/* ARS is in progress */
		if ((ars_start->status & 0xffff) == NFIT_ARS_START_BUSY)
			return -EBUSY;

		/* Command failed */
		if (ars_start->status & 0xffff)
			return -EIO;
		break;
	case ND_CMD_ARS_STATUS:
		ars_status = buf;
		/* Command failed */
		if (ars_status->status & 0xffff)
			return -EIO;
		/* Check extended status (Upper two bytes) */
		if (ars_status->status == NFIT_ARS_STATUS_DONE)
			return 0;

		/* ARS is in progress */
		if (ars_status->status == NFIT_ARS_STATUS_BUSY)
			return -EBUSY;

		/* No ARS performed for the current boot */
		if (ars_status->status == NFIT_ARS_STATUS_NONE)
			return -EAGAIN;

		/*
		 * ARS interrupted, either we overflowed or some other
		 * agent wants the scan to stop. If we didn't overflow
		 * then just continue with the returned results.
		 */
		if (ars_status->status == NFIT_ARS_STATUS_INTR) {
			if (ars_status->flags & NFIT_ARS_F_OVERFLOW)
				return -ENOSPC;
			return 0;
		}

		/* Unknown status */
		if (ars_status->status >> 16)
			return -EIO;
		break;
	case ND_CMD_CLEAR_ERROR:
		clear_err = buf;
		if (clear_err->status & 0xffff)
			return -EIO;
		if (!clear_err->cleared)
			return -EIO;
		if (clear_err->length > clear_err->cleared)
			return clear_err->cleared;
		break;
	default:
		break;
	}

	return 0;
}

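/*
 * Entry point for all libnvdimm bus and DIMM commands: marshal the
 * nd_cmd payload into a _DSM input package, evaluate it against the bus
 * or DIMM ACPI handle, and unpack the returned buffer back into 'buf'.
 * ND_CMD_CALL passes a caller-built nd_cmd_pkg envelope through as-is.
 */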
static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int buf_len, int *cmd_rc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	union acpi_object in_obj, in_buf, *out_obj;
	const struct nd_cmd_desc *desc = NULL;
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_pkg *call_pkg = NULL;
	const char *cmd_name, *dimm_name;
	unsigned long cmd_mask, dsm_mask;
	acpi_handle handle;
	unsigned int func;
	const u8 *uuid;
	u32 offset;
	int rc, i;

	func = cmd;
	if (cmd == ND_CMD_CALL) {
		call_pkg = buf;
		func = call_pkg->nd_command;
	}

	if (nvdimm) {
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_device *adev = nfit_mem->adev;

		if (!adev)
			return -ENOTTY;
		if (call_pkg && nfit_mem->family != call_pkg->nd_family)
			return -ENOTTY;

		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		cmd_mask = nvdimm_cmd_mask(nvdimm);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		uuid = to_nfit_uuid(nfit_mem->family);
		handle = adev->handle;
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		cmd_mask = nd_desc->cmd_mask;
		dsm_mask = cmd_mask;
		desc = nd_cmd_bus_desc(cmd);
		uuid = to_nfit_uuid(NFIT_DEV_BUS);
		handle = adev->handle;
		dimm_name = "bus";
	}

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
		return -ENOTTY;

	if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
		return -ENOTTY;

	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
				i, buf);

	if (call_pkg) {
		/* skip over package wrapper */
		in_buf.buffer.pointer = (void *) &call_pkg->nd_payload;
		in_buf.buffer.length = call_pkg->nd_size_in;
	}

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %d: func: %d input length: %d\n",
				__func__, dimm_name, cmd, func,
				in_buf.buffer.length);
		print_hex_dump_debug("nvdimm in  ", DUMP_PREFIX_OFFSET, 4, 4,
				in_buf.buffer.pointer,
				min_t(u32, 256, in_buf.buffer.length), true);
	}

	out_obj = acpi_evaluate_dsm(handle, uuid, 1, func, &in_obj);
	if (!out_obj) {
		dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name,
				cmd_name);
		return -EINVAL;
	}

	if (call_pkg) {
		call_pkg->nd_fw_size = out_obj->buffer.length;
		memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
			out_obj->buffer.pointer,
			min(call_pkg->nd_fw_size, call_pkg->nd_size_out));

		ACPI_FREE(out_obj);
		/*
		 * Need to support FW function w/o known size in advance.
		 * Caller can determine required size based upon nd_fw_size.
		 * If we return an error (like elsewhere) then caller wouldn't
		 * be able to rely upon data returned to make calculation.
		 */
		return 0;
	}

	if (out_obj->package.type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n",
				__func__, dimm_name, cmd_name, out_obj->type);
		rc = -EINVAL;
		goto out;
	}

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__,
				dimm_name, cmd_name, out_obj->buffer.length);
		print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
				4, out_obj->buffer.pointer, min_t(u32, 128,
					out_obj->buffer.length), true);
	}

	for (i = 0, offset = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
				(u32 *) out_obj->buffer.pointer);

		if (offset + out_size > out_obj->buffer.length) {
			dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			break;
		}

		if (in_buf.buffer.length + offset + out_size > buf_len) {
			dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		memcpy(buf + in_buf.buffer.length + offset,
				out_obj->buffer.pointer + offset, out_size);
		offset += out_size;
	}
	if (offset + in_buf.buffer.length < buf_len) {
		if (i >= 1) {
			/*
			 * status valid, return the number of bytes left
			 * unfilled in the output buffer
			 */
			rc = buf_len - offset - in_buf.buffer.length;
			if (cmd_rc)
				*cmd_rc = xlat_status(buf, cmd);
		} else {
			dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
					__func__, dimm_name, cmd_name, buf_len,
					offset);
			rc = -ENXIO;
		}
	} else {
		rc = 0;
		if (cmd_rc)
			*cmd_rc = xlat_status(buf, cmd);
	}

 out:
	ACPI_FREE(out_obj);

	return rc;
}

static const char *spa_type_name(u16 type)
{
	static const char *to_name[] = {
		[NFIT_SPA_VOLATILE] = "volatile",
		[NFIT_SPA_PM] = "pmem",
		[NFIT_SPA_DCR] = "dimm-control-region",
		[NFIT_SPA_BDW] = "block-data-window",
		[NFIT_SPA_VDISK] = "volatile-disk",
		[NFIT_SPA_VCD] = "volatile-cd",
		[NFIT_SPA_PDISK] = "persistent-disk",
		[NFIT_SPA_PCD] = "persistent-cd",

	};

	if (type > NFIT_SPA_PCD)
		return "unknown";

	return to_name[type];
}

static int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
	int i;

	for (i = 0; i < NFIT_UUID_MAX; i++)
		if (memcmp(to_nfit_uuid(i), spa->range_guid, 16) == 0)
			return i;
	return -1;
}

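/*
 * The add_* helpers below either re-use a table carried over from the
 * previous NFIT (moving it from @prev back onto the acpi_desc lists) or
 * allocate a new wrapper for a table seen for the first time.
 */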
static bool add_spa(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_system_address *spa)
{
	size_t length = min_t(size_t, sizeof(*spa), spa->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &prev->spas, list) {
		if (memcmp(nfit_spa->spa, spa, length) == 0) {
			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
			return true;
		}
	}

	nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa), GFP_KERNEL);
	if (!nfit_spa)
		return false;
	INIT_LIST_HEAD(&nfit_spa->list);
	nfit_spa->spa = spa;
	list_add_tail(&nfit_spa->list, &acpi_desc->spas);
	dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__,
			spa->range_index,
			spa_type_name(nfit_spa_type(spa)));
	return true;
}

static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_memory_map *memdev)
{
	size_t length = min_t(size_t, sizeof(*memdev), memdev->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &prev->memdevs, list)
		if (memcmp(nfit_memdev->memdev, memdev, length) == 0) {
			list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
			return true;
		}

	nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev), GFP_KERNEL);
	if (!nfit_memdev)
		return false;
	INIT_LIST_HEAD(&nfit_memdev->list);
	nfit_memdev->memdev = memdev;
	list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
	dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d\n",
			__func__, memdev->device_handle, memdev->range_index,
			memdev->region_index);
	return true;
}

static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_control_region *dcr)
{
	size_t length = min_t(size_t, sizeof(*dcr), dcr->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_dcr *nfit_dcr;

	list_for_each_entry(nfit_dcr, &prev->dcrs, list)
		if (memcmp(nfit_dcr->dcr, dcr, length) == 0) {
			list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
			return true;
		}

	nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr), GFP_KERNEL);
	if (!nfit_dcr)
		return false;
	INIT_LIST_HEAD(&nfit_dcr->list);
	nfit_dcr->dcr = dcr;
	list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
	dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__,
			dcr->region_index, dcr->windows);
	return true;
}

static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_data_region *bdw)
{
	size_t length = min_t(size_t, sizeof(*bdw), bdw->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_bdw *nfit_bdw;

	list_for_each_entry(nfit_bdw, &prev->bdws, list)
		if (memcmp(nfit_bdw->bdw, bdw, length) == 0) {
			list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
			return true;
		}

	nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw), GFP_KERNEL);
	if (!nfit_bdw)
		return false;
	INIT_LIST_HEAD(&nfit_bdw->list);
	nfit_bdw->bdw = bdw;
	list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
	dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__,
			bdw->region_index, bdw->windows);
	return true;
}

static bool add_idt(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_interleave *idt)
{
	size_t length = min_t(size_t, sizeof(*idt), idt->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_idt *nfit_idt;

	list_for_each_entry(nfit_idt, &prev->idts, list)
		if (memcmp(nfit_idt->idt, idt, length) == 0) {
			list_move_tail(&nfit_idt->list, &acpi_desc->idts);
			return true;
		}

	nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt), GFP_KERNEL);
	if (!nfit_idt)
		return false;
	INIT_LIST_HEAD(&nfit_idt->list);
	nfit_idt->idt = idt;
	list_add_tail(&nfit_idt->list, &acpi_desc->idts);
	dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__,
			idt->interleave_index, idt->line_count);
	return true;
}

static bool add_flush(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_flush_address *flush)
{
	size_t length = min_t(size_t, sizeof(*flush), flush->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_flush *nfit_flush;

	list_for_each_entry(nfit_flush, &prev->flushes, list)
		if (memcmp(nfit_flush->flush, flush, length) == 0) {
			list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
			return true;
		}

	nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush), GFP_KERNEL);
	if (!nfit_flush)
		return false;
	INIT_LIST_HEAD(&nfit_flush->list);
	nfit_flush->flush = flush;
	list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
	dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__,
			flush->device_handle, flush->hint_count);
	return true;
}

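/*
 * Dispatch a single NFIT sub-table to its add_* helper.  Returns the
 * address of the next table, NULL at the end of the NFIT (or on a
 * zero-length table), or an ERR_PTR() on allocation failure.
 */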
static void *add_table(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev, void *table, const void *end)
{
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_header *hdr;
	void *err = ERR_PTR(-ENOMEM);

	if (table >= end)
		return NULL;

	hdr = table;
	if (!hdr->length) {
		dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
			hdr->type);
		return NULL;
	}

	switch (hdr->type) {
	case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
		if (!add_spa(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_MEMORY_MAP:
		if (!add_memdev(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_CONTROL_REGION:
		if (!add_dcr(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_DATA_REGION:
		if (!add_bdw(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_INTERLEAVE:
		if (!add_idt(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
		if (!add_flush(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_SMBIOS:
		dev_dbg(dev, "%s: smbios\n", __func__);
		break;
	default:
		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
		break;
	}

	return table + hdr->length;
}

static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem)
{
	u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
	u16 dcr = nfit_mem->dcr->region_index;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		u16 range_index = nfit_spa->spa->range_index;
		int type = nfit_spa_type(nfit_spa->spa);
		struct nfit_memdev *nfit_memdev;

		if (type != NFIT_SPA_BDW)
			continue;

		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			if (nfit_memdev->memdev->range_index != range_index)
				continue;
			if (nfit_memdev->memdev->device_handle != device_handle)
				continue;
			if (nfit_memdev->memdev->region_index != dcr)
				continue;

			nfit_mem->spa_bdw = nfit_spa->spa;
			return;
		}
	}

	dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
			nfit_mem->spa_dcr->range_index);
	nfit_mem->bdw = NULL;
}

static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
	struct nfit_memdev *nfit_memdev;
	struct nfit_flush *nfit_flush;
	struct nfit_bdw *nfit_bdw;
	struct nfit_idt *nfit_idt;
	u16 idt_idx, range_index;

	list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
		if (nfit_bdw->bdw->region_index != dcr)
			continue;
		nfit_mem->bdw = nfit_bdw->bdw;
		break;
	}

	if (!nfit_mem->bdw)
		return;

	nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);

	if (!nfit_mem->spa_bdw)
		return;

	range_index = nfit_mem->spa_bdw->range_index;
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		if (nfit_memdev->memdev->range_index != range_index ||
				nfit_memdev->memdev->region_index != dcr)
			continue;
		nfit_mem->memdev_bdw = nfit_memdev->memdev;
		idt_idx = nfit_memdev->memdev->interleave_index;
		list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
			if (nfit_idt->idt->interleave_index != idt_idx)
				continue;
			nfit_mem->idt_bdw = nfit_idt->idt;
			break;
		}

		list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
			if (nfit_flush->flush->device_handle !=
					nfit_memdev->memdev->device_handle)
				continue;
			nfit_mem->nfit_flush = nfit_flush;
			break;
		}
		break;
	}
}

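/*
 * For each memdev referencing this SPA-DCR or SPA-PM range, find or
 * allocate the corresponding nfit_mem (keyed by device handle) and
 * attach the matching control region, interleave table, and
 * block-data-window resources.
 */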
static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_mem *nfit_mem, *found;
	struct nfit_memdev *nfit_memdev;
	int type = nfit_spa_type(spa);

	switch (type) {
	case NFIT_SPA_DCR:
	case NFIT_SPA_PM:
		break;
	default:
		return 0;
	}

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct nfit_dcr *nfit_dcr;
		u32 device_handle;
		u16 dcr;

		if (nfit_memdev->memdev->range_index != spa->range_index)
			continue;
		found = NULL;
		dcr = nfit_memdev->memdev->region_index;
		device_handle = nfit_memdev->memdev->device_handle;
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
			if (__to_nfit_memdev(nfit_mem)->device_handle
					== device_handle) {
				found = nfit_mem;
				break;
			}

		if (found)
			nfit_mem = found;
		else {
			nfit_mem = devm_kzalloc(acpi_desc->dev,
					sizeof(*nfit_mem), GFP_KERNEL);
			if (!nfit_mem)
				return -ENOMEM;
			INIT_LIST_HEAD(&nfit_mem->list);
			nfit_mem->acpi_desc = acpi_desc;
			list_add(&nfit_mem->list, &acpi_desc->dimms);
		}

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != dcr)
				continue;
			/*
			 * Record the control region for the dimm. For
			 * the ACPI 6.1 case, where there are separate
			 * control regions for the pmem vs blk
			 * interfaces, be sure to record the extended
			 * blk details.
			 */
			if (!nfit_mem->dcr)
				nfit_mem->dcr = nfit_dcr->dcr;
			else if (nfit_mem->dcr->windows == 0
					&& nfit_dcr->dcr->windows)
				nfit_mem->dcr = nfit_dcr->dcr;
			break;
		}

		if (dcr && !nfit_mem->dcr) {
			dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
					spa->range_index, dcr);
			return -ENODEV;
		}

		if (type == NFIT_SPA_DCR) {
			struct nfit_idt *nfit_idt;
			u16 idt_idx;

			/* multiple dimms may share a SPA when interleaved */
			nfit_mem->spa_dcr = spa;
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
			idt_idx = nfit_memdev->memdev->interleave_index;
			list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
				if (nfit_idt->idt->interleave_index != idt_idx)
					continue;
				nfit_mem->idt_dcr = nfit_idt->idt;
				break;
			}
			nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
		} else {
			/*
			 * A single dimm may belong to multiple SPA-PM
			 * ranges, record at least one in addition to
			 * any SPA-DCR range.
			 */
			nfit_mem->memdev_pmem = nfit_memdev->memdev;
		}
	}

	return 0;
}

static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
{
	struct nfit_mem *a = container_of(_a, typeof(*a), list);
	struct nfit_mem *b = container_of(_b, typeof(*b), list);
	u32 handleA, handleB;

	handleA = __to_nfit_memdev(a)->device_handle;
	handleB = __to_nfit_memdev(b)->device_handle;
	if (handleA < handleB)
		return -1;
	else if (handleA > handleB)
		return 1;
	return 0;
}

static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;

	/*
	 * For each SPA-DCR or SPA-PMEM address range find its
	 * corresponding MEMDEV(s). From each MEMDEV find the
	 * corresponding DCR. Then, if we're operating on a SPA-DCR,
	 * try to find a SPA-BDW and a corresponding BDW that references
	 * the DCR. Throw it all into an nfit_mem object. Note, that
	 * BDWs are optional.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		int rc;

		rc = nfit_mem_dcr_init(acpi_desc, nfit_spa->spa);
		if (rc)
			return rc;
	}

	list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);

	return 0;
}

static ssize_t revision_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
}
static DEVICE_ATTR_RO(revision);

static struct attribute *acpi_nfit_attributes[] = {
	&dev_attr_revision.attr,
	NULL,
};

static struct attribute_group acpi_nfit_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_attributes,
};

static const struct attribute_group *acpi_nfit_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&acpi_nfit_attribute_group,
	NULL,
};

static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return __to_nfit_memdev(nfit_mem);
}

static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return nfit_mem->dcr;
}

static ssize_t handle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->device_handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t phys_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id));
}
static DEVICE_ATTR_RO(vendor);

static ssize_t rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id));
}
static DEVICE_ATTR_RO(rev_id);

static ssize_t device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id));
}
static DEVICE_ATTR_RO(device);

static ssize_t subsystem_vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id));
}
static DEVICE_ATTR_RO(subsystem_vendor);

static ssize_t subsystem_rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n",
			be16_to_cpu(dcr->subsystem_revision_id));
}
static DEVICE_ATTR_RO(subsystem_rev_id);

static ssize_t subsystem_device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id));
}
static DEVICE_ATTR_RO(subsystem_device);

static int num_nvdimm_formats(struct nvdimm *nvdimm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	int formats = 0;

	if (nfit_mem->memdev_pmem)
		formats++;
	if (nfit_mem->memdev_bdw)
		formats++;
	return formats;
}

static ssize_t format_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->code));
}
static DEVICE_ATTR_RO(format);

static ssize_t format1_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 handle;
	ssize_t rc = -ENXIO;
	struct nfit_mem *nfit_mem;
	struct nfit_memdev *nfit_memdev;
	struct acpi_nfit_desc *acpi_desc;
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	nfit_mem = nvdimm_provider_data(nvdimm);
	acpi_desc = nfit_mem->acpi_desc;
	handle = to_nfit_memdev(dev)->device_handle;

	/* assumes DIMMs have at most 2 published interface codes */
	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nfit_dcr *nfit_dcr;

		if (memdev->device_handle != handle)
			continue;

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != memdev->region_index)
				continue;
			if (nfit_dcr->dcr->code == dcr->code)
				continue;
			rc = sprintf(buf, "%#x\n",
					be16_to_cpu(nfit_dcr->dcr->code));
			break;
		}
		if (rc != -ENXIO)
			break;
	}
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
static DEVICE_ATTR_RO(format1);

static ssize_t formats_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm));
}
static DEVICE_ATTR_RO(formats);

static ssize_t serial_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number));
}
static DEVICE_ATTR_RO(serial);

static ssize_t family_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem->family < 0)
		return -ENXIO;
	return sprintf(buf, "%d\n", nfit_mem->family);
}
static DEVICE_ATTR_RO(family);

static ssize_t dsm_mask_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem->family < 0)
		return -ENXIO;
	return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask);
}
static DEVICE_ATTR_RO(dsm_mask);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u16 flags = to_nfit_memdev(dev)->flags;

	return sprintf(buf, "%s%s%s%s%s\n",
		flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
		flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
		flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
		flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
		flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID)
		return sprintf(buf, "%04x-%02x-%04x-%08x\n",
				be16_to_cpu(dcr->vendor_id),
				dcr->manufacturing_location,
				be16_to_cpu(dcr->manufacturing_date),
				be32_to_cpu(dcr->serial_number));
	else
		return sprintf(buf, "%04x-%08x\n",
				be16_to_cpu(dcr->vendor_id),
				be32_to_cpu(dcr->serial_number));
}
static DEVICE_ATTR_RO(id);

static struct attribute *acpi_nfit_dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_rev_id.attr,
	&dev_attr_subsystem_vendor.attr,
	&dev_attr_subsystem_device.attr,
	&dev_attr_subsystem_rev_id.attr,
	&dev_attr_format.attr,
	&dev_attr_formats.attr,
	&dev_attr_format1.attr,
	&dev_attr_serial.attr,
	&dev_attr_flags.attr,
	&dev_attr_id.attr,
	&dev_attr_family.attr,
	&dev_attr_dsm_mask.attr,
	NULL,
};

static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!to_nfit_dcr(dev))
		return 0;
	if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1)
		return 0;
	return a->mode;
}

static struct attribute_group acpi_nfit_dimm_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_dimm_attributes,
	.is_visible = acpi_nfit_dimm_attr_visible,
};

static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
	&nvdimm_attribute_group,
	&nd_device_attribute_group,
	&acpi_nfit_dimm_attribute_group,
	NULL,
};

static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
		u32 device_handle)
{
	struct nfit_mem *nfit_mem;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
		if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
			return nfit_mem->nvdimm;

	return NULL;
}

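/*
 * Probe which _DSM command family (Intel, HPE1, HPE2, or MSFT) the
 * dimm's ACPI companion device answers to, then record the subset of
 * functions it actually implements in nfit_mem->dsm_mask.
 */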
static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, u32 device_handle)
{
	struct acpi_device *adev, *adev_dimm;
	struct device *dev = acpi_desc->dev;
	unsigned long dsm_mask;
	const u8 *uuid;
	int i;

	/* nfit test assumes 1:1 relationship between commands and dsms */
	nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
	nfit_mem->family = NVDIMM_FAMILY_INTEL;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return 0;

	adev_dimm = acpi_find_child_device(adev, device_handle, false);
	nfit_mem->adev = adev_dimm;
	if (!adev_dimm) {
		dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
				device_handle);
		return force_enable_dimms ? 0 : -ENODEV;
	}

	/*
	 * Until standardization materializes we need to consider 4
	 * different command sets. Note, that checking for function0 (bit0)
	 * tells us if any commands are reachable through this uuid.
	 */
	for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_MSFT; i++)
		if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
			break;

	/* limit the supported commands to those that are publicly documented */
	nfit_mem->family = i;
	if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
		dsm_mask = 0x3fe;
		if (disable_vendor_specific)
			dsm_mask &= ~(1 << ND_CMD_VENDOR);
	} else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) {
		dsm_mask = 0x1c3c76;
	} else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) {
		dsm_mask = 0x1fe;
		if (disable_vendor_specific)
			dsm_mask &= ~(1 << 8);
	} else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) {
		dsm_mask = 0xffffffff;
	} else {
		dev_err(dev, "unknown dimm command family\n");
		nfit_mem->family = -1;
		return force_enable_dimms ? 0 : -ENODEV;
	}

	uuid = to_nfit_uuid(nfit_mem->family);
	for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
		if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nfit_mem->dsm_mask);

	return 0;
}

static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_mem *nfit_mem;
	int dimm_count = 0;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		unsigned long flags = 0, cmd_mask;
		struct nvdimm *nvdimm;
		u32 device_handle;
		u16 mem_flags;
		int rc;

		device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
		nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
		if (nvdimm) {
			dimm_count++;
			continue;
		}

		if (nfit_mem->bdw && nfit_mem->memdev_pmem)
			flags |= NDD_ALIASING;

		mem_flags = __to_nfit_memdev(nfit_mem)->flags;
		if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
			flags |= NDD_UNARMED;

		rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
		if (rc)
			continue;

		/*
		 * TODO: provide translation for non-NVDIMM_FAMILY_INTEL
		 * devices (i.e. from nd_cmd to acpi_dsm) to standardize the
		 * userspace interface.
		 */
		cmd_mask = 1UL << ND_CMD_CALL;
		if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
			cmd_mask |= nfit_mem->dsm_mask;

		nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
				acpi_nfit_dimm_attribute_groups,
				flags, cmd_mask);
		if (!nvdimm)
			return -ENOMEM;

		nfit_mem->nvdimm = nvdimm;
		dimm_count++;

		if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
			continue;

		dev_info(acpi_desc->dev, "%s flags:%s%s%s%s\n",
				nvdimm_name(nvdimm),
			mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
			mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
			mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
			mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "");

	}

	return nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
}

static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	const u8 *uuid = to_nfit_uuid(NFIT_DEV_BUS);
	struct acpi_device *adev;
	int i;

	nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return;

	for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
		if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nd_desc->cmd_mask);
}

static ssize_t range_index_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);

	return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
}
static DEVICE_ATTR_RO(range_index);

static struct attribute *acpi_nfit_region_attributes[] = {
	&dev_attr_range_index.attr,
	NULL,
};

static struct attribute_group acpi_nfit_region_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_region_attributes,
};

static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
	&nd_region_attribute_group,
	&nd_mapping_attribute_group,
	&nd_device_attribute_group,
	&nd_numa_attribute_group,
	&acpi_nfit_region_attribute_group,
	NULL,
};

/* enough info to uniquely specify an interleave set */
struct nfit_set_info {
	struct nfit_set_info_map {
		u64 region_offset;
		u32 serial_number;
		u32 pad;
	} mapping[0];
};

static size_t sizeof_nfit_set_info(int num_mappings)
{
	return sizeof(struct nfit_set_info)
		+ num_mappings * sizeof(struct nfit_set_info_map);
}

static int cmp_map(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	return memcmp(&map0->region_offset, &map1->region_offset,
			sizeof(u64));
}

/* Retrieve the nth entry referencing this spa */
static struct acpi_nfit_memory_map *memdev_from_spa(
		struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
{
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
		if (nfit_memdev->memdev->range_index == range_index)
			if (n-- == 0)
				return nfit_memdev->memdev;
	return NULL;
}

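/*
 * Generate the interleave-set cookie for a PM/volatile range: collect
 * the (region_offset, serial_number) pair for each mapping, sort by
 * region offset, and fold the result through a Fletcher64 checksum.
 */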
static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc,
		struct acpi_nfit_system_address *spa)
{
	int i, spa_type = nfit_spa_type(spa);
	struct device *dev = acpi_desc->dev;
	struct nd_interleave_set *nd_set;
	u16 nr = ndr_desc->num_mappings;
	struct nfit_set_info *info;

	if (spa_type == NFIT_SPA_PM || spa_type == NFIT_SPA_VOLATILE)
		/* pass */;
	else
		return 0;

	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set)
		return -ENOMEM;

	info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nfit_set_info_map *map = &info->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
				spa->range_index, i);

		if (!memdev || !nfit_mem->dcr) {
			dev_err(dev, "%s: failed to find DCR\n", __func__);
			return -ENODEV;
		}

		map->region_offset = memdev->region_offset;
		map->serial_number = nfit_mem->dcr->serial_number;
	}

	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map, NULL);
	nd_set->cookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
	ndr_desc->nd_set = nd_set;
	devm_kfree(dev, info);

	return 0;
}

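/*
 * Translate a linear offset within a BLK aperture or control region
 * into the interleaved offset: pick the line_offset[] entry for this
 * line and skip ahead one table_size for every complete pass through
 * the interleave table.
 */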
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001363static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
1364{
1365 struct acpi_nfit_interleave *idt = mmio->idt;
1366 u32 sub_line_offset, line_index, line_offset;
1367 u64 line_no, table_skip_count, table_offset;
1368
1369 line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
1370 table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
1371 line_offset = idt->line_offset[line_index]
1372 * mmio->line_size;
1373 table_offset = table_skip_count * mmio->table_size;
1374
1375 return mmio->base_offset + line_offset + table_offset + sub_line_offset;
1376}
1377
Ross Zwislerc2ad2952015-07-10 11:06:13 -06001378static void wmb_blk(struct nfit_blk *nfit_blk)
1379{
1380
1381 if (nfit_blk->nvdimm_flush) {
1382 /*
1383 * The first wmb() is needed to 'sfence' all previous writes
1384 * such that they are architecturally visible for the platform
1385 * buffer flush. Note that we've already arranged for pmem
1386 * writes to avoid the cache via arch_memcpy_to_pmem(). The
1387 * final wmb() ensures ordering for the NVDIMM flush write.
1388 */
1389 wmb();
1390 writeq(1, nfit_blk->nvdimm_flush);
1391 wmb();
1392 } else
1393 wmb_pmem();
1394}
1395
Ross Zwislerde4a1962015-08-20 16:27:38 -06001396static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001397{
1398 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
1399 u64 offset = nfit_blk->stat_offset + mmio->size * bw;
1400
1401 if (mmio->num_lines)
1402 offset = to_interleave_offset(offset, mmio);
1403
Linus Torvalds12f03ee2015-09-08 14:35:59 -07001404 return readl(mmio->addr.base + offset);
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001405}
1406
1407static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
1408 resource_size_t dpa, unsigned int len, unsigned int write)
1409{
1410 u64 cmd, offset;
1411 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
1412
1413 enum {
1414 BCW_OFFSET_MASK = (1ULL << 48)-1,
1415 BCW_LEN_SHIFT = 48,
1416 BCW_LEN_MASK = (1ULL << 8) - 1,
1417 BCW_CMD_SHIFT = 56,
1418 };
1419
1420 cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
1421 len = len >> L1_CACHE_SHIFT;
1422 cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
1423 cmd |= ((u64) write) << BCW_CMD_SHIFT;
1424
1425 offset = nfit_blk->cmd_offset + mmio->size * bw;
1426 if (mmio->num_lines)
1427 offset = to_interleave_offset(offset, mmio);
1428
Ross Zwisler67a3e8f2015-08-27 13:14:20 -06001429 writeq(cmd, mmio->addr.base + offset);
Ross Zwislerc2ad2952015-07-10 11:06:13 -06001430 wmb_blk(nfit_blk);
Ross Zwislerf0f2c072015-07-10 11:06:14 -06001431
Dan Williamsaef25332016-02-12 17:01:11 -08001432 if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
Ross Zwisler67a3e8f2015-08-27 13:14:20 -06001433 readq(mmio->addr.base + offset);
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001434}
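/*
 * Illustrative sketch (not part of the driver): how the 64-bit block
 * command word built above is laid out, assuming a 64-byte cache line
 * (L1_CACHE_SHIFT == 6): bits 0-47 hold the DPA in cache-line units,
 * bits 48-55 the transfer length in cache-line units, and bit 56 the
 * write flag.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t example_pack_bcw(uint64_t dpa, unsigned int len, int write)
{
	const unsigned int cache_shift = 6;	/* assumed L1_CACHE_SHIFT */
	uint64_t cmd;

	cmd = (dpa >> cache_shift) & ((1ULL << 48) - 1);
	cmd |= (uint64_t)((len >> cache_shift) & 0xff) << 48;
	cmd |= (uint64_t)(write ? 1 : 0) << 56;
	return cmd;
}

int main(void)
{
	/* 256-byte write at DPA 0x1000 -> prints 0x104000000000040 */
	printf("%#llx\n",
			(unsigned long long)example_pack_bcw(0x1000, 256, 1));
	return 0;
}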
1435
1436static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
1437 resource_size_t dpa, void *iobuf, size_t len, int rw,
1438 unsigned int lane)
1439{
1440 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
1441 unsigned int copied = 0;
1442 u64 base_offset;
1443 int rc;
1444
1445 base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
1446 + lane * mmio->size;
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001447 write_blk_ctl(nfit_blk, lane, dpa, len, rw);
1448 while (len) {
1449 unsigned int c;
1450 u64 offset;
1451
1452 if (mmio->num_lines) {
1453 u32 line_offset;
1454
1455 offset = to_interleave_offset(base_offset + copied,
1456 mmio);
1457 div_u64_rem(offset, mmio->line_size, &line_offset);
1458 c = min_t(size_t, len, mmio->line_size - line_offset);
1459 } else {
1460 offset = base_offset + nfit_blk->bdw_offset;
1461 c = len;
1462 }
1463
1464 if (rw)
Ross Zwisler67a3e8f2015-08-27 13:14:20 -06001465 memcpy_to_pmem(mmio->addr.aperture + offset,
Ross Zwislerc2ad2952015-07-10 11:06:13 -06001466 iobuf + copied, c);
Ross Zwisler67a3e8f2015-08-27 13:14:20 -06001467 else {
Dan Williamsaef25332016-02-12 17:01:11 -08001468 if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
Ross Zwisler67a3e8f2015-08-27 13:14:20 -06001469 mmio_flush_range((void __force *)
1470 mmio->addr.aperture + offset, c);
1471
Ross Zwislerc2ad2952015-07-10 11:06:13 -06001472 memcpy_from_pmem(iobuf + copied,
Ross Zwisler67a3e8f2015-08-27 13:14:20 -06001473 mmio->addr.aperture + offset, c);
1474 }
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001475
1476 copied += c;
1477 len -= c;
1478 }
Ross Zwislerc2ad2952015-07-10 11:06:13 -06001479
1480 if (rw)
1481 wmb_blk(nfit_blk);
1482
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001483 rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
1484 return rc;
1485}
1486
1487static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
1488 resource_size_t dpa, void *iobuf, u64 len, int rw)
1489{
1490 struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
1491 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
1492 struct nd_region *nd_region = nfit_blk->nd_region;
1493 unsigned int lane, copied = 0;
1494 int rc = 0;
1495
1496 lane = nd_region_acquire_lane(nd_region);
1497 while (len) {
1498 u64 c = min(len, mmio->size);
1499
1500 rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
1501 iobuf + copied, c, rw, lane);
1502 if (rc)
1503 break;
1504
1505 copied += c;
1506 len -= c;
1507 }
1508 nd_region_release_lane(nd_region, lane);
1509
1510 return rc;
1511}
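/*
 * Note (added for clarity): acpi_nfit_blk_region_do_io() holds a single
 * lane for the whole request and splits it into aperture-sized
 * (mmio->size) chunks for acpi_nfit_blk_single_io(), so callers on
 * other lanes can make progress in parallel.
 */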
1512
1513static void nfit_spa_mapping_release(struct kref *kref)
1514{
1515 struct nfit_spa_mapping *spa_map = to_spa_map(kref);
1516 struct acpi_nfit_system_address *spa = spa_map->spa;
1517 struct acpi_nfit_desc *acpi_desc = spa_map->acpi_desc;
1518
1519 WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
1520 dev_dbg(acpi_desc->dev, "%s: SPA%d\n", __func__, spa->range_index);
Ross Zwisler67a3e8f2015-08-27 13:14:20 -06001521 if (spa_map->type == SPA_MAP_APERTURE)
1522 memunmap((void __force *)spa_map->addr.aperture);
1523 else
1524 iounmap(spa_map->addr.base);
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001525 release_mem_region(spa->address, spa->length);
1526 list_del(&spa_map->list);
1527 kfree(spa_map);
1528}
1529
1530static struct nfit_spa_mapping *find_spa_mapping(
1531 struct acpi_nfit_desc *acpi_desc,
1532 struct acpi_nfit_system_address *spa)
1533{
1534 struct nfit_spa_mapping *spa_map;
1535
1536 WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
1537 list_for_each_entry(spa_map, &acpi_desc->spa_maps, list)
1538 if (spa_map->spa == spa)
1539 return spa_map;
1540
1541 return NULL;
1542}
1543
1544static void nfit_spa_unmap(struct acpi_nfit_desc *acpi_desc,
1545 struct acpi_nfit_system_address *spa)
1546{
1547 struct nfit_spa_mapping *spa_map;
1548
1549 mutex_lock(&acpi_desc->spa_map_mutex);
1550 spa_map = find_spa_mapping(acpi_desc, spa);
1551
1552 if (spa_map)
1553 kref_put(&spa_map->kref, nfit_spa_mapping_release);
1554 mutex_unlock(&acpi_desc->spa_map_mutex);
1555}
1556
1557static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
Ross Zwislerc2ad2952015-07-10 11:06:13 -06001558 struct acpi_nfit_system_address *spa, enum spa_map_type type)
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001559{
1560 resource_size_t start = spa->address;
1561 resource_size_t n = spa->length;
1562 struct nfit_spa_mapping *spa_map;
1563 struct resource *res;
1564
1565 WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
1566
1567 spa_map = find_spa_mapping(acpi_desc, spa);
1568 if (spa_map) {
1569 kref_get(&spa_map->kref);
Ross Zwisler67a3e8f2015-08-27 13:14:20 -06001570 return spa_map->addr.base;
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001571 }
1572
1573 spa_map = kzalloc(sizeof(*spa_map), GFP_KERNEL);
1574 if (!spa_map)
1575 return NULL;
1576
1577 INIT_LIST_HEAD(&spa_map->list);
1578 spa_map->spa = spa;
1579 kref_init(&spa_map->kref);
1580 spa_map->acpi_desc = acpi_desc;
1581
1582 res = request_mem_region(start, n, dev_name(acpi_desc->dev));
1583 if (!res)
1584 goto err_mem;
1585
Ross Zwisler67a3e8f2015-08-27 13:14:20 -06001586 spa_map->type = type;
1587 if (type == SPA_MAP_APERTURE)
1588 spa_map->addr.aperture = (void __pmem *)memremap(start, n,
1589 ARCH_MEMREMAP_PMEM);
1590 else
1591 spa_map->addr.base = ioremap_nocache(start, n);
Ross Zwislerc2ad2952015-07-10 11:06:13 -06001592
Ross Zwisler67a3e8f2015-08-27 13:14:20 -06001593
1594 if (!spa_map->addr.base)
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001595 goto err_map;
1596
1597 list_add_tail(&spa_map->list, &acpi_desc->spa_maps);
Ross Zwisler67a3e8f2015-08-27 13:14:20 -06001598 return spa_map->addr.base;
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001599
1600 err_map:
1601 release_mem_region(start, n);
1602 err_mem:
1603 kfree(spa_map);
1604 return NULL;
1605}
1606
1607/**
1608 * nfit_spa_map - interleave-aware managed-mappings of acpi_nfit_system_address ranges
1609 * @nvdimm_bus: NFIT-bus that provided the spa table entry
1610 * @nfit_spa: spa table to map
Ross Zwislerc2ad2952015-07-10 11:06:13 -06001611 * @type: aperture or control region
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001612 *
1613 * In the case where block-data-window apertures and
1614 * dimm-control-regions are interleaved they will end up sharing a
1615 * single request_mem_region() + ioremap() for the address range. In
1616 * the style of devm nfit_spa_map() mappings are automatically dropped
1617 * when all region devices referencing the same mapping are disabled /
1618 * unbound.
1619 */
1620static void __iomem *nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
Ross Zwislerc2ad2952015-07-10 11:06:13 -06001621 struct acpi_nfit_system_address *spa, enum spa_map_type type)
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001622{
1623 void __iomem *iomem;
1624
1625 mutex_lock(&acpi_desc->spa_map_mutex);
Ross Zwislerc2ad2952015-07-10 11:06:13 -06001626 iomem = __nfit_spa_map(acpi_desc, spa, type);
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001627 mutex_unlock(&acpi_desc->spa_map_mutex);
1628
1629 return iomem;
1630}
1631
1632static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
1633 struct acpi_nfit_interleave *idt, u16 interleave_ways)
1634{
1635 if (idt) {
1636 mmio->num_lines = idt->line_count;
1637 mmio->line_size = idt->line_size;
1638 if (interleave_ways == 0)
1639 return -ENXIO;
1640 mmio->table_size = mmio->num_lines * interleave_ways
1641 * mmio->line_size;
1642 }
1643
1644 return 0;
1645}
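/*
 * Note (added for clarity): mmio->table_size covers one full interleave
 * cycle (num_lines lines from each of the interleave_ways DIMMs). When
 * no interleave table is present, num_lines and line_size remain zero
 * and the aperture is accessed linearly.
 */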
1646
Ross Zwislerf0f2c072015-07-10 11:06:14 -06001647static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
1648 struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
1649{
1650 struct nd_cmd_dimm_flags flags;
1651 int rc;
1652
1653 memset(&flags, 0, sizeof(flags));
1654 rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
Dan Williamsaef25332016-02-12 17:01:11 -08001655 sizeof(flags), NULL);
Ross Zwislerf0f2c072015-07-10 11:06:14 -06001656
1657 if (rc >= 0 && flags.status == 0)
1658 nfit_blk->dimm_flags = flags.flags;
1659 else if (rc == -ENOTTY) {
1660 /* fall back to a conservative default */
Dan Williamsaef25332016-02-12 17:01:11 -08001661 nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH;
Ross Zwislerf0f2c072015-07-10 11:06:14 -06001662 rc = 0;
1663 } else
1664 rc = -ENXIO;
1665
1666 return rc;
1667}
1668
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001669static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
1670 struct device *dev)
1671{
1672 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
1673 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
1674 struct nd_blk_region *ndbr = to_nd_blk_region(dev);
Ross Zwislerc2ad2952015-07-10 11:06:13 -06001675 struct nfit_flush *nfit_flush;
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001676 struct nfit_blk_mmio *mmio;
1677 struct nfit_blk *nfit_blk;
1678 struct nfit_mem *nfit_mem;
1679 struct nvdimm *nvdimm;
1680 int rc;
1681
1682 nvdimm = nd_blk_region_to_dimm(ndbr);
1683 nfit_mem = nvdimm_provider_data(nvdimm);
1684 if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
1685 dev_dbg(dev, "%s: missing%s%s%s\n", __func__,
1686 nfit_mem ? "" : " nfit_mem",
Dan Williams193ccca2015-06-30 16:09:39 -04001687 (nfit_mem && nfit_mem->dcr) ? "" : " dcr",
1688 (nfit_mem && nfit_mem->bdw) ? "" : " bdw");
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001689 return -ENXIO;
1690 }
1691
1692 nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL);
1693 if (!nfit_blk)
1694 return -ENOMEM;
1695 nd_blk_region_set_provider_data(ndbr, nfit_blk);
1696 nfit_blk->nd_region = to_nd_region(dev);
1697
1698 /* map block aperture memory */
1699 nfit_blk->bdw_offset = nfit_mem->bdw->offset;
1700 mmio = &nfit_blk->mmio[BDW];
Ross Zwisler67a3e8f2015-08-27 13:14:20 -06001701 mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw,
Ross Zwislerc2ad2952015-07-10 11:06:13 -06001702 SPA_MAP_APERTURE);
Ross Zwisler67a3e8f2015-08-27 13:14:20 -06001703 if (!mmio->addr.base) {
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001704 dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
1705 nvdimm_name(nvdimm));
1706 return -ENOMEM;
1707 }
1708 mmio->size = nfit_mem->bdw->size;
1709 mmio->base_offset = nfit_mem->memdev_bdw->region_offset;
1710 mmio->idt = nfit_mem->idt_bdw;
1711 mmio->spa = nfit_mem->spa_bdw;
1712 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw,
1713 nfit_mem->memdev_bdw->interleave_ways);
1714 if (rc) {
1715 dev_dbg(dev, "%s: %s failed to init bdw interleave\n",
1716 __func__, nvdimm_name(nvdimm));
1717 return rc;
1718 }
1719
1720 /* map block control memory */
1721 nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
1722 nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
1723 mmio = &nfit_blk->mmio[DCR];
Ross Zwisler67a3e8f2015-08-27 13:14:20 -06001724 mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr,
Ross Zwislerc2ad2952015-07-10 11:06:13 -06001725 SPA_MAP_CONTROL);
Ross Zwisler67a3e8f2015-08-27 13:14:20 -06001726 if (!mmio->addr.base) {
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001727 dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,
1728 nvdimm_name(nvdimm));
1729 return -ENOMEM;
1730 }
1731 mmio->size = nfit_mem->dcr->window_size;
1732 mmio->base_offset = nfit_mem->memdev_dcr->region_offset;
1733 mmio->idt = nfit_mem->idt_dcr;
1734 mmio->spa = nfit_mem->spa_dcr;
1735 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr,
1736 nfit_mem->memdev_dcr->interleave_ways);
1737 if (rc) {
1738 dev_dbg(dev, "%s: %s failed to init dcr interleave\n",
1739 __func__, nvdimm_name(nvdimm));
1740 return rc;
1741 }
1742
Ross Zwislerf0f2c072015-07-10 11:06:14 -06001743 rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
1744 if (rc < 0) {
1745 dev_dbg(dev, "%s: %s failed to get DIMM flags\n",
1746 __func__, nvdimm_name(nvdimm));
1747 return rc;
1748 }
1749
Ross Zwislerc2ad2952015-07-10 11:06:13 -06001750 nfit_flush = nfit_mem->nfit_flush;
1751 if (nfit_flush && nfit_flush->flush->hint_count != 0) {
1752 nfit_blk->nvdimm_flush = devm_ioremap_nocache(dev,
1753 nfit_flush->flush->hint_address[0], 8);
1754 if (!nfit_blk->nvdimm_flush)
1755 return -ENOMEM;
1756 }
1757
Dan Williams96601ad2015-08-24 18:29:38 -04001758 if (!arch_has_wmb_pmem() && !nfit_blk->nvdimm_flush)
Ross Zwislerc2ad2952015-07-10 11:06:13 -06001759 dev_warn(dev, "unable to guarantee persistence of writes\n");
1760
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001761 if (mmio->line_size == 0)
1762 return 0;
1763
1764 if ((u32) nfit_blk->cmd_offset % mmio->line_size
1765 + 8 > mmio->line_size) {
1766 dev_dbg(dev, "cmd_offset crosses interleave boundary\n");
1767 return -ENXIO;
1768 } else if ((u32) nfit_blk->stat_offset % mmio->line_size
1769 + 8 > mmio->line_size) {
1770 dev_dbg(dev, "stat_offset crosses interleave boundary\n");
1771 return -ENXIO;
1772 }
1773
1774 return 0;
1775}
1776
1777static void acpi_nfit_blk_region_disable(struct nvdimm_bus *nvdimm_bus,
1778 struct device *dev)
1779{
1780 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
1781 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
1782 struct nd_blk_region *ndbr = to_nd_blk_region(dev);
1783 struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
1784 int i;
1785
1786 if (!nfit_blk)
1787 return; /* never enabled */
1788
1789 /* auto-free BLK spa mappings */
1790 for (i = 0; i < 2; i++) {
1791 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[i];
1792
Ross Zwisler67a3e8f2015-08-27 13:14:20 -06001793 if (mmio->addr.base)
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001794 nfit_spa_unmap(acpi_desc, mmio->spa);
1795 }
1796 nd_blk_region_set_provider_data(ndbr, NULL);
1797 /* devm will free nfit_blk */
1798}
1799
Dan Williamsaef25332016-02-12 17:01:11 -08001800static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
Dan Williams1cf03c02016-02-17 13:01:23 -08001801 struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
Vishal Verma0caeef62015-12-24 19:21:43 -07001802{
Dan Williamsaef25332016-02-12 17:01:11 -08001803 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
Dan Williams1cf03c02016-02-17 13:01:23 -08001804 struct acpi_nfit_system_address *spa = nfit_spa->spa;
Dan Williamsaef25332016-02-12 17:01:11 -08001805 int cmd_rc, rc;
1806
Dan Williams1cf03c02016-02-17 13:01:23 -08001807 cmd->address = spa->address;
1808 cmd->length = spa->length;
Dan Williamsaef25332016-02-12 17:01:11 -08001809 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
1810 sizeof(*cmd), &cmd_rc);
1811 if (rc < 0)
1812 return rc;
Dan Williams1cf03c02016-02-17 13:01:23 -08001813 return cmd_rc;
Vishal Verma0caeef62015-12-24 19:21:43 -07001814}
1815
Dan Williams1cf03c02016-02-17 13:01:23 -08001816static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa)
Vishal Verma0caeef62015-12-24 19:21:43 -07001817{
1818 int rc;
Dan Williams1cf03c02016-02-17 13:01:23 -08001819 int cmd_rc;
1820 struct nd_cmd_ars_start ars_start;
1821 struct acpi_nfit_system_address *spa = nfit_spa->spa;
1822 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
Vishal Verma0caeef62015-12-24 19:21:43 -07001823
Dan Williams1cf03c02016-02-17 13:01:23 -08001824 memset(&ars_start, 0, sizeof(ars_start));
1825 ars_start.address = spa->address;
1826 ars_start.length = spa->length;
1827 if (nfit_spa_type(spa) == NFIT_SPA_PM)
1828 ars_start.type = ND_ARS_PERSISTENT;
1829 else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
1830 ars_start.type = ND_ARS_VOLATILE;
1831 else
1832 return -ENOTTY;
Vishal Verma0caeef62015-12-24 19:21:43 -07001833
Dan Williams1cf03c02016-02-17 13:01:23 -08001834 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
1835 sizeof(ars_start), &cmd_rc);
Dan Williamsaef25332016-02-12 17:01:11 -08001836
Dan Williams1cf03c02016-02-17 13:01:23 -08001837 if (rc < 0)
1838 return rc;
1839 return cmd_rc;
Vishal Verma0caeef62015-12-24 19:21:43 -07001840}
1841
Dan Williams1cf03c02016-02-17 13:01:23 -08001842static int ars_continue(struct acpi_nfit_desc *acpi_desc)
Vishal Verma0caeef62015-12-24 19:21:43 -07001843{
Dan Williamsaef25332016-02-12 17:01:11 -08001844 int rc, cmd_rc;
Dan Williams1cf03c02016-02-17 13:01:23 -08001845 struct nd_cmd_ars_start ars_start;
1846 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
1847 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
Vishal Verma0caeef62015-12-24 19:21:43 -07001848
Dan Williams1cf03c02016-02-17 13:01:23 -08001849 memset(&ars_start, 0, sizeof(ars_start));
1850 ars_start.address = ars_status->restart_address;
1851 ars_start.length = ars_status->restart_length;
1852 ars_start.type = ars_status->type;
1853 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
1854 sizeof(ars_start), &cmd_rc);
1855 if (rc < 0)
1856 return rc;
1857 return cmd_rc;
1858}
Dan Williamsaef25332016-02-12 17:01:11 -08001859
Dan Williams1cf03c02016-02-17 13:01:23 -08001860static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
1861{
1862 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
1863 struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
1864 int rc, cmd_rc;
Dan Williamsaef25332016-02-12 17:01:11 -08001865
Dan Williams1cf03c02016-02-17 13:01:23 -08001866 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status,
1867 acpi_desc->ars_status_size, &cmd_rc);
1868 if (rc < 0)
1869 return rc;
1870 return cmd_rc;
Vishal Verma0caeef62015-12-24 19:21:43 -07001871}
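/*
 * Note (added for clarity): the helpers above implement the ARS
 * (Address Range Scrub) command sequence used by the scrub worker
 * below: ars_get_cap() sizes the status buffer and reports which SPA
 * types are scrubbable, ars_start() kicks off a scrub of one range,
 * ars_get_status() retrieves results (-EBUSY while the scrub is still
 * in flight, -ENOSPC when the output buffer overflowed), and
 * ars_continue() restarts from the reported restart address to collect
 * the remaining records.
 */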
1872
1873static int ars_status_process_records(struct nvdimm_bus *nvdimm_bus,
Dan Williams1cf03c02016-02-17 13:01:23 -08001874 struct nd_cmd_ars_status *ars_status)
Vishal Verma0caeef62015-12-24 19:21:43 -07001875{
1876 int rc;
1877 u32 i;
1878
Vishal Verma0caeef62015-12-24 19:21:43 -07001879 for (i = 0; i < ars_status->num_records; i++) {
1880 rc = nvdimm_bus_add_poison(nvdimm_bus,
1881 ars_status->records[i].err_address,
1882 ars_status->records[i].length);
1883 if (rc)
1884 return rc;
1885 }
1886
1887 return 0;
1888}
1889
Toshi Kaniaf1996e2016-03-09 12:47:06 -07001890static void acpi_nfit_remove_resource(void *data)
1891{
1892 struct resource *res = data;
1893
1894 remove_resource(res);
1895}
1896
1897static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
1898 struct nd_region_desc *ndr_desc)
1899{
1900 struct resource *res, *nd_res = ndr_desc->res;
1901 int is_pmem, ret;
1902
1903 /* No operation if the region is already registered as PMEM */
1904 is_pmem = region_intersects(nd_res->start, resource_size(nd_res),
1905 IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY);
1906 if (is_pmem == REGION_INTERSECTS)
1907 return 0;
1908
1909 res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL);
1910 if (!res)
1911 return -ENOMEM;
1912
1913 res->name = "Persistent Memory";
1914 res->start = nd_res->start;
1915 res->end = nd_res->end;
1916 res->flags = IORESOURCE_MEM;
1917 res->desc = IORES_DESC_PERSISTENT_MEMORY;
1918
1919 ret = insert_resource(&iomem_resource, res);
1920 if (ret)
1921 return ret;
1922
1923 ret = devm_add_action(acpi_desc->dev, acpi_nfit_remove_resource, res);
1924 if (ret) {
1925 remove_resource(res);
1926 return ret;
1927 }
1928
1929 return 0;
1930}
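/*
 * Note (added for clarity): acpi_nfit_insert_resource() publishes a
 * pmem range in the iomem resource tree (visible in /proc/iomem as
 * "Persistent Memory") unless firmware already described it, and hooks
 * acpi_nfit_remove_resource() as a devm action so the resource is
 * withdrawn automatically when the device is unbound.
 */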
1931
Dan Williams1f7df6f2015-06-09 20:13:14 -04001932static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
1933 struct nd_mapping *nd_mapping, struct nd_region_desc *ndr_desc,
1934 struct acpi_nfit_memory_map *memdev,
Dan Williams1cf03c02016-02-17 13:01:23 -08001935 struct nfit_spa *nfit_spa)
Dan Williams1f7df6f2015-06-09 20:13:14 -04001936{
1937 struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
1938 memdev->device_handle);
Dan Williams1cf03c02016-02-17 13:01:23 -08001939 struct acpi_nfit_system_address *spa = nfit_spa->spa;
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001940 struct nd_blk_region_desc *ndbr_desc;
Dan Williams1f7df6f2015-06-09 20:13:14 -04001941 struct nfit_mem *nfit_mem;
1942 int blk_valid = 0;
1943
1944 if (!nvdimm) {
1945 dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
1946 spa->range_index, memdev->device_handle);
1947 return -ENODEV;
1948 }
1949
1950 nd_mapping->nvdimm = nvdimm;
1951 switch (nfit_spa_type(spa)) {
1952 case NFIT_SPA_PM:
1953 case NFIT_SPA_VOLATILE:
1954 nd_mapping->start = memdev->address;
1955 nd_mapping->size = memdev->region_size;
1956 break;
1957 case NFIT_SPA_DCR:
1958 nfit_mem = nvdimm_provider_data(nvdimm);
1959 if (!nfit_mem || !nfit_mem->bdw) {
1960 dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
1961 spa->range_index, nvdimm_name(nvdimm));
1962 } else {
1963 nd_mapping->size = nfit_mem->bdw->capacity;
1964 nd_mapping->start = nfit_mem->bdw->start_address;
Vishal Verma5212e112015-06-25 04:20:32 -04001965 ndr_desc->num_lanes = nfit_mem->bdw->windows;
Dan Williams1f7df6f2015-06-09 20:13:14 -04001966 blk_valid = 1;
1967 }
1968
1969 ndr_desc->nd_mapping = nd_mapping;
1970 ndr_desc->num_mappings = blk_valid;
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001971 ndbr_desc = to_blk_region_desc(ndr_desc);
1972 ndbr_desc->enable = acpi_nfit_blk_region_enable;
1973 ndbr_desc->disable = acpi_nfit_blk_region_disable;
Dan Williams6bc75612015-06-17 17:23:32 -04001974 ndbr_desc->do_io = acpi_desc->blk_do_io;
Dan Williams1cf03c02016-02-17 13:01:23 -08001975 nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
1976 ndr_desc);
1977 if (!nfit_spa->nd_region)
Dan Williams1f7df6f2015-06-09 20:13:14 -04001978 return -ENOMEM;
1979 break;
1980 }
1981
1982 return 0;
1983}
1984
1985static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
1986 struct nfit_spa *nfit_spa)
1987{
1988 static struct nd_mapping nd_mappings[ND_MAX_MAPPINGS];
1989 struct acpi_nfit_system_address *spa = nfit_spa->spa;
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001990 struct nd_blk_region_desc ndbr_desc;
1991 struct nd_region_desc *ndr_desc;
Dan Williams1f7df6f2015-06-09 20:13:14 -04001992 struct nfit_memdev *nfit_memdev;
Dan Williams1f7df6f2015-06-09 20:13:14 -04001993 struct nvdimm_bus *nvdimm_bus;
1994 struct resource res;
Dan Williamseaf96152015-05-01 13:11:27 -04001995 int count = 0, rc;
Dan Williams1f7df6f2015-06-09 20:13:14 -04001996
Dan Williams1cf03c02016-02-17 13:01:23 -08001997 if (nfit_spa->nd_region)
Vishal Verma20985162015-10-27 16:58:27 -06001998 return 0;
1999
Dan Williams1f7df6f2015-06-09 20:13:14 -04002000 if (spa->range_index == 0) {
2001 dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n",
2002 __func__);
2003 return 0;
2004 }
2005
2006 memset(&res, 0, sizeof(res));
2007 memset(&nd_mappings, 0, sizeof(nd_mappings));
Ross Zwisler047fc8a2015-06-25 04:21:02 -04002008 memset(&ndbr_desc, 0, sizeof(ndbr_desc));
Dan Williams1f7df6f2015-06-09 20:13:14 -04002009 res.start = spa->address;
2010 res.end = res.start + spa->length - 1;
Ross Zwisler047fc8a2015-06-25 04:21:02 -04002011 ndr_desc = &ndbr_desc.ndr_desc;
2012 ndr_desc->res = &res;
2013 ndr_desc->provider_data = nfit_spa;
2014 ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
Toshi Kani41d7a6d2015-06-19 12:18:33 -06002015 if (spa->flags & ACPI_NFIT_PROXIMITY_VALID)
2016 ndr_desc->numa_node = acpi_map_pxm_to_online_node(
2017 spa->proximity_domain);
2018 else
2019 ndr_desc->numa_node = NUMA_NO_NODE;
2020
Dan Williams1f7df6f2015-06-09 20:13:14 -04002021 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
2022 struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
2023 struct nd_mapping *nd_mapping;
Dan Williams1f7df6f2015-06-09 20:13:14 -04002024
2025 if (memdev->range_index != spa->range_index)
2026 continue;
2027 if (count >= ND_MAX_MAPPINGS) {
2028 dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
2029 spa->range_index, ND_MAX_MAPPINGS);
2030 return -ENXIO;
2031 }
2032 nd_mapping = &nd_mappings[count++];
Ross Zwisler047fc8a2015-06-25 04:21:02 -04002033 rc = acpi_nfit_init_mapping(acpi_desc, nd_mapping, ndr_desc,
Dan Williams1cf03c02016-02-17 13:01:23 -08002034 memdev, nfit_spa);
Dan Williams1f7df6f2015-06-09 20:13:14 -04002035 if (rc)
Dan Williams1cf03c02016-02-17 13:01:23 -08002036 goto out;
Dan Williams1f7df6f2015-06-09 20:13:14 -04002037 }
2038
Ross Zwisler047fc8a2015-06-25 04:21:02 -04002039 ndr_desc->nd_mapping = nd_mappings;
2040 ndr_desc->num_mappings = count;
2041 rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
Dan Williamseaf96152015-05-01 13:11:27 -04002042 if (rc)
Dan Williams1cf03c02016-02-17 13:01:23 -08002043 goto out;
Dan Williamseaf96152015-05-01 13:11:27 -04002044
Dan Williams1f7df6f2015-06-09 20:13:14 -04002045 nvdimm_bus = acpi_desc->nvdimm_bus;
2046 if (nfit_spa_type(spa) == NFIT_SPA_PM) {
Toshi Kaniaf1996e2016-03-09 12:47:06 -07002047 rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc);
Dan Williams48901162016-03-09 17:15:43 -08002048 if (rc) {
Toshi Kaniaf1996e2016-03-09 12:47:06 -07002049 dev_warn(acpi_desc->dev,
2050 "failed to insert pmem resource to iomem: %d\n",
2051 rc);
Dan Williams48901162016-03-09 17:15:43 -08002052 goto out;
Vishal Verma0caeef62015-12-24 19:21:43 -07002053 }
Dan Williams48901162016-03-09 17:15:43 -08002054
Dan Williams1cf03c02016-02-17 13:01:23 -08002055 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
2056 ndr_desc);
2057 if (!nfit_spa->nd_region)
2058 rc = -ENOMEM;
Dan Williams1f7df6f2015-06-09 20:13:14 -04002059 } else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) {
Dan Williams1cf03c02016-02-17 13:01:23 -08002060 nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
2061 ndr_desc);
2062 if (!nfit_spa->nd_region)
2063 rc = -ENOMEM;
Dan Williams1f7df6f2015-06-09 20:13:14 -04002064 }
Vishal Verma20985162015-10-27 16:58:27 -06002065
Dan Williams1cf03c02016-02-17 13:01:23 -08002066 out:
2067 if (rc)
2068 dev_err(acpi_desc->dev, "failed to register spa range %d\n",
2069 nfit_spa->spa->range_index);
2070 return rc;
2071}
2072
2073static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc,
2074 u32 max_ars)
2075{
2076 struct device *dev = acpi_desc->dev;
2077 struct nd_cmd_ars_status *ars_status;
2078
2079 if (acpi_desc->ars_status && acpi_desc->ars_status_size >= max_ars) {
2080 memset(acpi_desc->ars_status, 0, acpi_desc->ars_status_size);
2081 return 0;
2082 }
2083
2084 if (acpi_desc->ars_status)
2085 devm_kfree(dev, acpi_desc->ars_status);
2086 acpi_desc->ars_status = NULL;
2087 ars_status = devm_kzalloc(dev, max_ars, GFP_KERNEL);
2088 if (!ars_status)
2089 return -ENOMEM;
2090 acpi_desc->ars_status = ars_status;
2091 acpi_desc->ars_status_size = max_ars;
Dan Williams1f7df6f2015-06-09 20:13:14 -04002092 return 0;
2093}
2094
Dan Williams1cf03c02016-02-17 13:01:23 -08002095static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc,
2096 struct nfit_spa *nfit_spa)
2097{
2098 struct acpi_nfit_system_address *spa = nfit_spa->spa;
2099 int rc;
2100
2101 if (!nfit_spa->max_ars) {
2102 struct nd_cmd_ars_cap ars_cap;
2103
2104 memset(&ars_cap, 0, sizeof(ars_cap));
2105 rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
2106 if (rc < 0)
2107 return rc;
2108 nfit_spa->max_ars = ars_cap.max_ars_out;
2109 nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
2110 /* check that the supported scrub types match the spa type */
2111 if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE &&
2112 ((ars_cap.status >> 16) & ND_ARS_VOLATILE) == 0)
2113 return -ENOTTY;
2114 else if (nfit_spa_type(spa) == NFIT_SPA_PM &&
2115 ((ars_cap.status >> 16) & ND_ARS_PERSISTENT) == 0)
2116 return -ENOTTY;
2117 }
2118
2119 if (ars_status_alloc(acpi_desc, nfit_spa->max_ars))
2120 return -ENOMEM;
2121
2122 rc = ars_get_status(acpi_desc);
2123 if (rc < 0 && rc != -ENOSPC)
2124 return rc;
2125
2126 if (ars_status_process_records(acpi_desc->nvdimm_bus,
2127 acpi_desc->ars_status))
2128 return -ENOMEM;
2129
2130 return 0;
2131}
2132
2133static void acpi_nfit_async_scrub(struct acpi_nfit_desc *acpi_desc,
2134 struct nfit_spa *nfit_spa)
2135{
2136 struct acpi_nfit_system_address *spa = nfit_spa->spa;
2137 unsigned int overflow_retry = scrub_overflow_abort;
2138 u64 init_ars_start = 0, init_ars_len = 0;
2139 struct device *dev = acpi_desc->dev;
2140 unsigned int tmo = scrub_timeout;
2141 int rc;
2142
2143 if (nfit_spa->ars_done || !nfit_spa->nd_region)
2144 return;
2145
2146 rc = ars_start(acpi_desc, nfit_spa);
2147 /*
2148 * If we timed out the initial scan we'll still be busy here,
2149 * and will wait another timeout before giving up permanently.
2150 */
2151 if (rc < 0 && rc != -EBUSY)
2152 return;
2153
2154 do {
2155 u64 ars_start, ars_len;
2156
2157 if (acpi_desc->cancel)
2158 break;
2159 rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
2160 if (rc == -ENOTTY)
2161 break;
2162 if (rc == -EBUSY && !tmo) {
2163 dev_warn(dev, "range %d ars timeout, aborting\n",
2164 spa->range_index);
2165 break;
2166 }
2167
2168 if (rc == -EBUSY) {
2169 /*
2170 * Note, entries may be appended to the list
2171 * while the lock is dropped, but the workqueue
2172 * being active prevents entries being deleted /
2173 * freed.
2174 */
2175 mutex_unlock(&acpi_desc->init_mutex);
2176 ssleep(1);
2177 tmo--;
2178 mutex_lock(&acpi_desc->init_mutex);
2179 continue;
2180 }
2181
2182 /* we got some results, but there are more pending... */
2183 if (rc == -ENOSPC && overflow_retry--) {
2184 if (!init_ars_len) {
2185 init_ars_len = acpi_desc->ars_status->length;
2186 init_ars_start = acpi_desc->ars_status->address;
2187 }
2188 rc = ars_continue(acpi_desc);
2189 }
2190
2191 if (rc < 0) {
2192 dev_warn(dev, "range %d ars continuation failed\n",
2193 spa->range_index);
2194 break;
2195 }
2196
2197 if (init_ars_len) {
2198 ars_start = init_ars_start;
2199 ars_len = init_ars_len;
2200 } else {
2201 ars_start = acpi_desc->ars_status->address;
2202 ars_len = acpi_desc->ars_status->length;
2203 }
2204 dev_dbg(dev, "spa range: %d ars from %#llx + %#llx complete\n",
2205 spa->range_index, ars_start, ars_len);
2206 /* notify the region about new poison entries */
2207 nvdimm_region_notify(nfit_spa->nd_region,
2208 NVDIMM_REVALIDATE_POISON);
2209 break;
2210 } while (1);
2211}
2212
2213static void acpi_nfit_scrub(struct work_struct *work)
2214{
2215 struct device *dev;
2216 u64 init_scrub_length = 0;
2217 struct nfit_spa *nfit_spa;
2218 u64 init_scrub_address = 0;
2219 bool init_ars_done = false;
2220 struct acpi_nfit_desc *acpi_desc;
2221 unsigned int tmo = scrub_timeout;
2222 unsigned int overflow_retry = scrub_overflow_abort;
2223
2224 acpi_desc = container_of(work, typeof(*acpi_desc), work);
2225 dev = acpi_desc->dev;
2226
2227 /*
2228 * We scrub in 2 phases. The first phase waits for any platform
2229 * firmware initiated scrubs to complete and then searches for the
2230 * affected spa regions to mark them scanned. In the second phase we
2231 * initiate a directed scrub for every range that was not scrubbed in
2232 * phase 1.
2233 */
2234
2235 /* process platform firmware initiated scrubs */
2236 retry:
2237 mutex_lock(&acpi_desc->init_mutex);
2238 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
2239 struct nd_cmd_ars_status *ars_status;
2240 struct acpi_nfit_system_address *spa;
2241 u64 ars_start, ars_len;
2242 int rc;
2243
2244 if (acpi_desc->cancel)
2245 break;
2246
2247 if (nfit_spa->nd_region)
2248 continue;
2249
2250 if (init_ars_done) {
2251 /*
2252 * No need to re-query, we're now just
2253 * reconciling all the ranges covered by the
2254 * initial scrub
2255 */
2256 rc = 0;
2257 } else
2258 rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
2259
2260 if (rc == -ENOTTY) {
2261 /* no ars capability, just register spa and move on */
2262 acpi_nfit_register_region(acpi_desc, nfit_spa);
2263 continue;
2264 }
2265
2266 if (rc == -EBUSY && !tmo) {
2267 /* fallthrough to directed scrub in phase 2 */
2268 dev_warn(dev, "timeout awaiting ars results, continuing...\n");
2269 break;
2270 } else if (rc == -EBUSY) {
2271 mutex_unlock(&acpi_desc->init_mutex);
2272 ssleep(1);
2273 tmo--;
2274 goto retry;
2275 }
2276
2277 /* we got some results, but there are more pending... */
2278 if (rc == -ENOSPC && overflow_retry--) {
2279 ars_status = acpi_desc->ars_status;
2280 /*
2281 * Record the original scrub range, so that we
2282 * can recall all the ranges impacted by the
2283 * initial scrub.
2284 */
2285 if (!init_scrub_length) {
2286 init_scrub_length = ars_status->length;
2287 init_scrub_address = ars_status->address;
2288 }
2289 rc = ars_continue(acpi_desc);
2290 if (rc == 0) {
2291 mutex_unlock(&acpi_desc->init_mutex);
2292 goto retry;
2293 }
2294 }
2295
2296 if (rc < 0) {
2297 /*
2298 * Initial scrub failed, we'll give it one more
2299 * try below...
2300 */
2301 break;
2302 }
2303
2304 /* We got some final results, record completed ranges */
2305 ars_status = acpi_desc->ars_status;
2306 if (init_scrub_length) {
2307 ars_start = init_scrub_address;
2308 ars_len = ars_start + init_scrub_length;
2309 } else {
2310 ars_start = ars_status->address;
2311 ars_len = ars_status->length;
2312 }
2313 spa = nfit_spa->spa;
2314
2315 if (!init_ars_done) {
2316 init_ars_done = true;
2317 dev_dbg(dev, "init scrub %#llx + %#llx complete\n",
2318 ars_start, ars_len);
2319 }
2320 if (ars_start <= spa->address && ars_start + ars_len
2321 >= spa->address + spa->length)
2322 acpi_nfit_register_region(acpi_desc, nfit_spa);
2323 }
2324
2325 /*
2326 * For all the ranges not covered by an initial scrub we still
2327 * want to see if there are errors, but it's ok to discover them
2328 * asynchronously.
2329 */
2330 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
2331 /*
2332 * Flag all the ranges that still need scrubbing, but
2333 * register them now to make data available.
2334 */
2335 if (nfit_spa->nd_region)
2336 nfit_spa->ars_done = 1;
2337 else
2338 acpi_nfit_register_region(acpi_desc, nfit_spa);
2339 }
2340
2341 list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
2342 acpi_nfit_async_scrub(acpi_desc, nfit_spa);
2343 mutex_unlock(&acpi_desc->init_mutex);
2344}
2345
Dan Williams1f7df6f2015-06-09 20:13:14 -04002346static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
2347{
2348 struct nfit_spa *nfit_spa;
Dan Williams1cf03c02016-02-17 13:01:23 -08002349 int rc;
Dan Williams1f7df6f2015-06-09 20:13:14 -04002350
Dan Williams1cf03c02016-02-17 13:01:23 -08002351 list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
2352 if (nfit_spa_type(nfit_spa->spa) == NFIT_SPA_DCR) {
2353 /* BLK regions don't need to wait for ars results */
2354 rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
2355 if (rc)
2356 return rc;
2357 }
Dan Williams1f7df6f2015-06-09 20:13:14 -04002358
Dan Williams1cf03c02016-02-17 13:01:23 -08002359 queue_work(nfit_wq, &acpi_desc->work);
Dan Williams1f7df6f2015-06-09 20:13:14 -04002360 return 0;
2361}
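/*
 * Note (added for clarity): BLK (DCR) ranges are registered immediately
 * since scrub results do not gate their availability; PMEM and volatile
 * ranges are registered from the acpi_nfit_scrub() worker once any
 * firmware-initiated scrub has been reconciled.
 */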
2362
Vishal Verma20985162015-10-27 16:58:27 -06002363static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
2364 struct nfit_table_prev *prev)
2365{
2366 struct device *dev = acpi_desc->dev;
2367
2368 if (!list_empty(&prev->spas) ||
2369 !list_empty(&prev->memdevs) ||
2370 !list_empty(&prev->dcrs) ||
2371 !list_empty(&prev->bdws) ||
2372 !list_empty(&prev->idts) ||
2373 !list_empty(&prev->flushes)) {
2374 dev_err(dev, "new nfit deletes entries (unsupported)\n");
2375 return -ENXIO;
2376 }
2377 return 0;
2378}
2379
Dan Williams6bc75612015-06-17 17:23:32 -04002380int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
Dan Williamsb94d5232015-05-19 22:54:31 -04002381{
2382 struct device *dev = acpi_desc->dev;
Vishal Verma20985162015-10-27 16:58:27 -06002383 struct nfit_table_prev prev;
Dan Williamsb94d5232015-05-19 22:54:31 -04002384 const void *end;
2385 u8 *data;
Dan Williams1f7df6f2015-06-09 20:13:14 -04002386 int rc;
Dan Williamsb94d5232015-05-19 22:54:31 -04002387
Vishal Verma20985162015-10-27 16:58:27 -06002388 mutex_lock(&acpi_desc->init_mutex);
2389
2390 INIT_LIST_HEAD(&prev.spas);
2391 INIT_LIST_HEAD(&prev.memdevs);
2392 INIT_LIST_HEAD(&prev.dcrs);
2393 INIT_LIST_HEAD(&prev.bdws);
2394 INIT_LIST_HEAD(&prev.idts);
2395 INIT_LIST_HEAD(&prev.flushes);
2396
2397 list_cut_position(&prev.spas, &acpi_desc->spas,
2398 acpi_desc->spas.prev);
2399 list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
2400 acpi_desc->memdevs.prev);
2401 list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
2402 acpi_desc->dcrs.prev);
2403 list_cut_position(&prev.bdws, &acpi_desc->bdws,
2404 acpi_desc->bdws.prev);
2405 list_cut_position(&prev.idts, &acpi_desc->idts,
2406 acpi_desc->idts.prev);
2407 list_cut_position(&prev.flushes, &acpi_desc->flushes,
2408 acpi_desc->flushes.prev);
2409
2410 data = (u8 *) acpi_desc->nfit;
2411 end = data + sz;
Vishal Verma20985162015-10-27 16:58:27 -06002412 while (!IS_ERR_OR_NULL(data))
2413 data = add_table(acpi_desc, &prev, data, end);
2414
2415 if (IS_ERR(data)) {
2416 dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__,
2417 PTR_ERR(data));
2418 rc = PTR_ERR(data);
2419 goto out_unlock;
2420 }
2421
2422 rc = acpi_nfit_check_deletions(acpi_desc, &prev);
2423 if (rc)
2424 goto out_unlock;
2425
2426 if (nfit_mem_init(acpi_desc) != 0) {
2427 rc = -ENOMEM;
2428 goto out_unlock;
2429 }
2430
2431 acpi_nfit_init_dsms(acpi_desc);
2432
2433 rc = acpi_nfit_register_dimms(acpi_desc);
2434 if (rc)
2435 goto out_unlock;
2436
2437 rc = acpi_nfit_register_regions(acpi_desc);
2438
2439 out_unlock:
2440 mutex_unlock(&acpi_desc->init_mutex);
2441 return rc;
2442}
2443EXPORT_SYMBOL_GPL(acpi_nfit_init);
2444
Dan Williams7ae0fa432016-02-19 12:16:34 -08002445struct acpi_nfit_flush_work {
2446 struct work_struct work;
2447 struct completion cmp;
2448};
2449
2450static void flush_probe(struct work_struct *work)
2451{
2452 struct acpi_nfit_flush_work *flush;
2453
2454 flush = container_of(work, typeof(*flush), work);
2455 complete(&flush->cmp);
2456}
2457
2458static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
2459{
2460 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
2461 struct device *dev = acpi_desc->dev;
2462 struct acpi_nfit_flush_work flush;
2463
2464 /* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
2465 device_lock(dev);
2466 device_unlock(dev);
2467
2468 /*
2469 * Scrub work could take 10s of seconds; userspace may give up, so we
2470 * need to be interruptible while waiting.
2471 */
2472 INIT_WORK_ONSTACK(&flush.work, flush_probe);
2473 init_completion(&flush.cmp);
2474 queue_work(nfit_wq, &flush.work);
2475 return wait_for_completion_interruptible(&flush.cmp);
2476}
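/*
 * Note (added for clarity): rather than flushing the workqueue (which
 * could block uninterruptibly behind a long scrub),
 * acpi_nfit_flush_probe() queues an empty work item behind any pending
 * scrub work and waits on its completion, so the wait can be
 * interrupted by a signal.
 */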
2477
Dan Williams87bf5722016-02-22 21:50:31 -08002478static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
2479 struct nvdimm *nvdimm, unsigned int cmd)
2480{
2481 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
2482
2483 if (nvdimm)
2484 return 0;
2485 if (cmd != ND_CMD_ARS_START)
2486 return 0;
2487
2488 /*
2489 * The kernel and userspace may race to initiate a scrub, but
2490 * the scrub thread is prepared to lose that initial race. It
2491 * just needs guarantees that any ars it initiates are not
2492 * interrupted by any intervening start requests from userspace.
2493 */
2494 if (work_busy(&acpi_desc->work))
2495 return -EBUSY;
2496
2497 return 0;
2498}
2499
Dan Williamsa61fe6f2016-02-19 12:29:32 -08002500void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
Vishal Verma20985162015-10-27 16:58:27 -06002501{
2502 struct nvdimm_bus_descriptor *nd_desc;
Vishal Verma20985162015-10-27 16:58:27 -06002503
2504 dev_set_drvdata(dev, acpi_desc);
2505 acpi_desc->dev = dev;
2506 acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
2507 nd_desc = &acpi_desc->nd_desc;
2508 nd_desc->provider_name = "ACPI.NFIT";
2509 nd_desc->ndctl = acpi_nfit_ctl;
Dan Williams7ae0fa432016-02-19 12:16:34 -08002510 nd_desc->flush_probe = acpi_nfit_flush_probe;
Dan Williams87bf5722016-02-22 21:50:31 -08002511 nd_desc->clear_to_send = acpi_nfit_clear_to_send;
Vishal Verma20985162015-10-27 16:58:27 -06002512 nd_desc->attr_groups = acpi_nfit_attribute_groups;
2513
Ross Zwisler047fc8a2015-06-25 04:21:02 -04002514 INIT_LIST_HEAD(&acpi_desc->spa_maps);
Dan Williamsb94d5232015-05-19 22:54:31 -04002515 INIT_LIST_HEAD(&acpi_desc->spas);
2516 INIT_LIST_HEAD(&acpi_desc->dcrs);
2517 INIT_LIST_HEAD(&acpi_desc->bdws);
Ross Zwisler047fc8a2015-06-25 04:21:02 -04002518 INIT_LIST_HEAD(&acpi_desc->idts);
Ross Zwislerc2ad2952015-07-10 11:06:13 -06002519 INIT_LIST_HEAD(&acpi_desc->flushes);
Dan Williamsb94d5232015-05-19 22:54:31 -04002520 INIT_LIST_HEAD(&acpi_desc->memdevs);
2521 INIT_LIST_HEAD(&acpi_desc->dimms);
Ross Zwisler047fc8a2015-06-25 04:21:02 -04002522 mutex_init(&acpi_desc->spa_map_mutex);
Vishal Verma20985162015-10-27 16:58:27 -06002523 mutex_init(&acpi_desc->init_mutex);
Dan Williams1cf03c02016-02-17 13:01:23 -08002524 INIT_WORK(&acpi_desc->work, acpi_nfit_scrub);
Dan Williamsb94d5232015-05-19 22:54:31 -04002525}
Dan Williamsa61fe6f2016-02-19 12:29:32 -08002526EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);
Dan Williamsb94d5232015-05-19 22:54:31 -04002527
2528static int acpi_nfit_add(struct acpi_device *adev)
2529{
Vishal Verma20985162015-10-27 16:58:27 -06002530 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
Dan Williamsb94d5232015-05-19 22:54:31 -04002531 struct acpi_nfit_desc *acpi_desc;
2532 struct device *dev = &adev->dev;
2533 struct acpi_table_header *tbl;
2534 acpi_status status = AE_OK;
2535 acpi_size sz;
2536 int rc;
2537
Lee, Chun-Yi82595422016-01-21 20:32:10 +08002538 status = acpi_get_table_with_size(ACPI_SIG_NFIT, 0, &tbl, &sz);
Dan Williamsb94d5232015-05-19 22:54:31 -04002539 if (ACPI_FAILURE(status)) {
Vishal Verma20985162015-10-27 16:58:27 -06002540 /* This is ok, we could have an nvdimm hotplugged later */
2541 dev_dbg(dev, "failed to find NFIT at startup\n");
2542 return 0;
Dan Williamsb94d5232015-05-19 22:54:31 -04002543 }
2544
Dan Williamsa61fe6f2016-02-19 12:29:32 -08002545 acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
2546 if (!acpi_desc)
2547 return -ENOMEM;
2548 acpi_nfit_desc_init(acpi_desc, &adev->dev);
2549 acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, &acpi_desc->nd_desc);
2550 if (!acpi_desc->nvdimm_bus)
2551 return -ENOMEM;
Dan Williamsb94d5232015-05-19 22:54:31 -04002552
Linda Knippers6b577c92015-11-20 19:05:49 -05002553 /*
2554 * Save the acpi header for later and then skip it,
2555 * making nfit point to the first nfit table header.
2556 */
2557 acpi_desc->acpi_header = *tbl;
2558 acpi_desc->nfit = (void *) tbl + sizeof(struct acpi_table_nfit);
2559 sz -= sizeof(struct acpi_table_nfit);
Dan Williamsb94d5232015-05-19 22:54:31 -04002560
Vishal Verma20985162015-10-27 16:58:27 -06002561 /* Evaluate _FIT and override with that if present */
2562 status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
2563 if (ACPI_SUCCESS(status) && buf.length > 0) {
Linda Knippers6b577c92015-11-20 19:05:49 -05002564 union acpi_object *obj;
2565 /*
2566 * Adjust for the acpi_object header of the _FIT
2567 */
2568 obj = buf.pointer;
2569 if (obj->type == ACPI_TYPE_BUFFER) {
2570 acpi_desc->nfit =
2571 (struct acpi_nfit_header *)obj->buffer.pointer;
2572 sz = obj->buffer.length;
2573 } else
2574 dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n",
2575 __func__, (int) obj->type);
Vishal Verma20985162015-10-27 16:58:27 -06002576 }
Dan Williamsb94d5232015-05-19 22:54:31 -04002577
2578 rc = acpi_nfit_init(acpi_desc, sz);
2579 if (rc) {
2580 nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
2581 return rc;
2582 }
2583 return 0;
2584}
2585
2586static int acpi_nfit_remove(struct acpi_device *adev)
2587{
2588 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);
2589
Dan Williams7ae0fa432016-02-19 12:16:34 -08002590 acpi_desc->cancel = 1;
2591 flush_workqueue(nfit_wq);
Dan Williamsb94d5232015-05-19 22:54:31 -04002592 nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
2593 return 0;
2594}
2595
Vishal Verma20985162015-10-27 16:58:27 -06002596static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
2597{
2598 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);
2599 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
Linda Knippers6b577c92015-11-20 19:05:49 -05002600 struct acpi_nfit_header *nfit_saved;
2601 union acpi_object *obj;
Vishal Verma20985162015-10-27 16:58:27 -06002602 struct device *dev = &adev->dev;
2603 acpi_status status;
2604 int ret;
2605
2606 dev_dbg(dev, "%s: event: %d\n", __func__, event);
2607
2608 device_lock(dev);
2609 if (!dev->driver) {
2610 /* dev->driver may be null if we're being removed */
2611 dev_dbg(dev, "%s: no driver found for dev\n", __func__);
Alexey Khoroshilovd91e8922015-12-11 23:24:10 +03002612 goto out_unlock;
Vishal Verma20985162015-10-27 16:58:27 -06002613 }
2614
2615 if (!acpi_desc) {
Dan Williamsa61fe6f2016-02-19 12:29:32 -08002616 acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
2617 if (!acpi_desc)
Vishal Verma20985162015-10-27 16:58:27 -06002618 goto out_unlock;
Dan Williamsa61fe6f2016-02-19 12:29:32 -08002619 acpi_nfit_desc_init(acpi_desc, &adev->dev);
2620 acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, &acpi_desc->nd_desc);
2621 if (!acpi_desc->nvdimm_bus)
2622 goto out_unlock;
Dan Williams7ae0fa432016-02-19 12:16:34 -08002623 } else {
2624 /*
2625 * Finish previous registration before considering new
2626 * regions.
2627 */
2628 flush_workqueue(nfit_wq);
Vishal Verma20985162015-10-27 16:58:27 -06002629 }
2630
2631 /* Evaluate _FIT */
2632 status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
2633 if (ACPI_FAILURE(status)) {
2634 dev_err(dev, "failed to evaluate _FIT\n");
2635 goto out_unlock;
2636 }
2637
2638 nfit_saved = acpi_desc->nfit;
Linda Knippers6b577c92015-11-20 19:05:49 -05002639 obj = buf.pointer;
2640 if (obj->type == ACPI_TYPE_BUFFER) {
2641 acpi_desc->nfit =
2642 (struct acpi_nfit_header *)obj->buffer.pointer;
2643 ret = acpi_nfit_init(acpi_desc, obj->buffer.length);
2644 if (ret) {
2645 /* Merge failed, restore old nfit, and exit */
2646 acpi_desc->nfit = nfit_saved;
2647 dev_err(dev, "failed to merge updated NFIT\n");
2648 }
2649 } else {
2650 /* Bad _FIT, restore old nfit */
2651 dev_err(dev, "Invalid _FIT\n");
Vishal Verma20985162015-10-27 16:58:27 -06002652 }
2653 kfree(buf.pointer);
2654
2655 out_unlock:
2656 device_unlock(dev);
2657}
2658
Dan Williamsb94d5232015-05-19 22:54:31 -04002659static const struct acpi_device_id acpi_nfit_ids[] = {
2660 { "ACPI0012", 0 },
2661 { "", 0 },
2662};
2663MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);
2664
2665static struct acpi_driver acpi_nfit_driver = {
2666 .name = KBUILD_MODNAME,
2667 .ids = acpi_nfit_ids,
2668 .ops = {
2669 .add = acpi_nfit_add,
2670 .remove = acpi_nfit_remove,
Vishal Verma20985162015-10-27 16:58:27 -06002671 .notify = acpi_nfit_notify,
Dan Williamsb94d5232015-05-19 22:54:31 -04002672 },
2673};
2674
2675static __init int nfit_init(void)
2676{
2677 BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
2678 BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
2679 BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
2680 BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
2681 BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
2682 BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
2683 BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);
2684
2685 acpi_str_to_uuid(UUID_VOLATILE_MEMORY, nfit_uuid[NFIT_SPA_VOLATILE]);
2686 acpi_str_to_uuid(UUID_PERSISTENT_MEMORY, nfit_uuid[NFIT_SPA_PM]);
2687 acpi_str_to_uuid(UUID_CONTROL_REGION, nfit_uuid[NFIT_SPA_DCR]);
2688 acpi_str_to_uuid(UUID_DATA_REGION, nfit_uuid[NFIT_SPA_BDW]);
2689 acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_VDISK]);
2690 acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD, nfit_uuid[NFIT_SPA_VCD]);
2691 acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_PDISK]);
2692 acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]);
2693 acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]);
2694 acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]);
Dan Williams31eca762016-04-28 16:23:43 -07002695 acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE1, nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
2696 acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE2, nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
stuart hayese02fb722016-05-26 11:38:41 -05002697 acpi_str_to_uuid(UUID_NFIT_DIMM_N_MSFT, nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);
Dan Williamsb94d5232015-05-19 22:54:31 -04002698
Dan Williams7ae0fa432016-02-19 12:16:34 -08002699 nfit_wq = create_singlethread_workqueue("nfit");
2700 if (!nfit_wq)
2701 return -ENOMEM;
2702
Dan Williamsb94d5232015-05-19 22:54:31 -04002703 return acpi_bus_register_driver(&acpi_nfit_driver);
2704}
2705
2706static __exit void nfit_exit(void)
2707{
2708 acpi_bus_unregister_driver(&acpi_nfit_driver);
Dan Williams7ae0fa432016-02-19 12:16:34 -08002709 destroy_workqueue(nfit_wq);
Dan Williamsb94d5232015-05-19 22:54:31 -04002710}
2711
2712module_init(nfit_init);
2713module_exit(nfit_exit);
2714MODULE_LICENSE("GPL v2");
2715MODULE_AUTHOR("Intel Corporation");