/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/pmem.h>
#include <linux/io.h>
#include <linux/nd.h>
#include <asm/cacheflush.h>
#include "nfit.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");

static unsigned int scrub_timeout = NFIT_ARS_TIMEOUT;
module_param(scrub_timeout, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_timeout, "Initial scrub timeout in seconds");

/* after three payloads of overflow, it's dead jim */
static unsigned int scrub_overflow_abort = 3;
module_param(scrub_overflow_abort, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_overflow_abort,
		"Number of times we overflow ARS results before abort");

static bool disable_vendor_specific;
module_param(disable_vendor_specific, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vendor_specific,
		"Limit commands to the publicly specified set");

static struct workqueue_struct *nfit_wq;

struct nfit_table_prev {
	struct list_head spas;
	struct list_head memdevs;
	struct list_head dcrs;
	struct list_head bdws;
	struct list_head idts;
	struct list_head flushes;
};

static u8 nfit_uuid[NFIT_UUID_MAX][16];

const u8 *to_nfit_uuid(enum nfit_uuids id)
{
	return nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);

static struct acpi_nfit_desc *to_acpi_nfit_desc(
		struct nvdimm_bus_descriptor *nd_desc)
{
	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}

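/*
 * Translate the ACPI command status returned in the output payload into
 * a Linux errno for the caller.  The low 16 bits of 'status' hold the
 * command completion status, the upper 16 bits hold command-specific
 * extended status.
 */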
static int xlat_status(void *buf, unsigned int cmd)
{
	struct nd_cmd_clear_error *clear_err;
	struct nd_cmd_ars_status *ars_status;
	struct nd_cmd_ars_start *ars_start;
	struct nd_cmd_ars_cap *ars_cap;
	u16 flags;

	switch (cmd) {
	case ND_CMD_ARS_CAP:
		ars_cap = buf;
		if ((ars_cap->status & 0xffff) == NFIT_ARS_CAP_NONE)
			return -ENOTTY;

		/* Command failed */
		if (ars_cap->status & 0xffff)
			return -EIO;

		/* No supported scan types for this range */
		flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
		if ((ars_cap->status >> 16 & flags) == 0)
			return -ENOTTY;
		break;
	case ND_CMD_ARS_START:
		ars_start = buf;
		/* ARS is in progress */
		if ((ars_start->status & 0xffff) == NFIT_ARS_START_BUSY)
			return -EBUSY;

		/* Command failed */
		if (ars_start->status & 0xffff)
			return -EIO;
		break;
	case ND_CMD_ARS_STATUS:
		ars_status = buf;
		/* Command failed */
		if (ars_status->status & 0xffff)
			return -EIO;
		/* Check extended status (Upper two bytes) */
		if (ars_status->status == NFIT_ARS_STATUS_DONE)
			return 0;

		/* ARS is in progress */
		if (ars_status->status == NFIT_ARS_STATUS_BUSY)
			return -EBUSY;

		/* No ARS performed for the current boot */
		if (ars_status->status == NFIT_ARS_STATUS_NONE)
			return -EAGAIN;

		/*
		 * ARS interrupted, either we overflowed or some other
		 * agent wants the scan to stop.  If we didn't overflow
		 * then just continue with the returned results.
		 */
		if (ars_status->status == NFIT_ARS_STATUS_INTR) {
			if (ars_status->flags & NFIT_ARS_F_OVERFLOW)
				return -ENOSPC;
			return 0;
		}

		/* Unknown status */
		if (ars_status->status >> 16)
			return -EIO;
		break;
	case ND_CMD_CLEAR_ERROR:
		clear_err = buf;
		if (clear_err->status & 0xffff)
			return -EIO;
		if (!clear_err->cleared)
			return -EIO;
		if (clear_err->length > clear_err->cleared)
			return clear_err->cleared;
		break;
	default:
		break;
	}

	return 0;
}

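/*
 * Single entry point for all bus- and dimm-scoped commands: validate the
 * command against the advertised cmd/dsm masks, marshal the input payload
 * into an ACPI buffer object, evaluate the _DSM, and unpack the result
 * back into the caller's buffer.  ND_CMD_CALL passes a raw nd_cmd_pkg
 * envelope through to the firmware.
 */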
static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int buf_len, int *cmd_rc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	union acpi_object in_obj, in_buf, *out_obj;
	const struct nd_cmd_desc *desc = NULL;
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_pkg *call_pkg = NULL;
	const char *cmd_name, *dimm_name;
	unsigned long cmd_mask, dsm_mask;
	acpi_handle handle;
	unsigned int func;
	const u8 *uuid;
	u32 offset;
	int rc, i;

	func = cmd;
	if (cmd == ND_CMD_CALL) {
		call_pkg = buf;
		func = call_pkg->nd_command;
	}

	if (nvdimm) {
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_device *adev = nfit_mem->adev;

		if (!adev)
			return -ENOTTY;
		if (call_pkg && nfit_mem->family != call_pkg->nd_family)
			return -ENOTTY;

		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		cmd_mask = nvdimm_cmd_mask(nvdimm);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		uuid = to_nfit_uuid(nfit_mem->family);
		handle = adev->handle;
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		cmd_mask = nd_desc->cmd_mask;
		dsm_mask = cmd_mask;
		desc = nd_cmd_bus_desc(cmd);
		uuid = to_nfit_uuid(NFIT_DEV_BUS);
		handle = adev->handle;
		dimm_name = "bus";
	}

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
		return -ENOTTY;

	if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
		return -ENOTTY;

	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
				i, buf);

	if (call_pkg) {
		/* skip over package wrapper */
		in_buf.buffer.pointer = (void *) &call_pkg->nd_payload;
		in_buf.buffer.length = call_pkg->nd_size_in;
	}

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %d: func: %d input length: %d\n",
				__func__, dimm_name, cmd, func,
				in_buf.buffer.length);
		print_hex_dump_debug("nvdimm in  ", DUMP_PREFIX_OFFSET, 4, 4,
			in_buf.buffer.pointer,
			min_t(u32, 256, in_buf.buffer.length), true);
	}

	out_obj = acpi_evaluate_dsm(handle, uuid, 1, func, &in_obj);
	if (!out_obj) {
		dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name,
				cmd_name);
		return -EINVAL;
	}

	if (call_pkg) {
		call_pkg->nd_fw_size = out_obj->buffer.length;
		memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
			out_obj->buffer.pointer,
			min(call_pkg->nd_fw_size, call_pkg->nd_size_out));

		ACPI_FREE(out_obj);
		/*
		 * Need to support FW function w/o known size in advance.
		 * Caller can determine required size based upon nd_fw_size.
		 * If we return an error (like elsewhere) then caller wouldn't
		 * be able to rely upon data returned to make calculation.
		 */
		return 0;
	}

	if (out_obj->package.type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n",
				__func__, dimm_name, cmd_name, out_obj->type);
		rc = -EINVAL;
		goto out;
	}

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__,
				dimm_name, cmd_name, out_obj->buffer.length);
		print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
				4, out_obj->buffer.pointer, min_t(u32, 128,
					out_obj->buffer.length), true);
	}

	for (i = 0, offset = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
				(u32 *) out_obj->buffer.pointer);

		if (offset + out_size > out_obj->buffer.length) {
			dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			break;
		}

		if (in_buf.buffer.length + offset + out_size > buf_len) {
			dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		memcpy(buf + in_buf.buffer.length + offset,
				out_obj->buffer.pointer + offset, out_size);
		offset += out_size;
	}
	if (offset + in_buf.buffer.length < buf_len) {
		if (i >= 1) {
			/*
			 * status valid, return the number of bytes left
			 * unfilled in the output buffer
			 */
			rc = buf_len - offset - in_buf.buffer.length;
			if (cmd_rc)
				*cmd_rc = xlat_status(buf, cmd);
		} else {
			dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
					__func__, dimm_name, cmd_name, buf_len,
					offset);
			rc = -ENXIO;
		}
	} else {
		rc = 0;
		if (cmd_rc)
			*cmd_rc = xlat_status(buf, cmd);
	}

 out:
	ACPI_FREE(out_obj);

	return rc;
}

static const char *spa_type_name(u16 type)
{
	static const char *to_name[] = {
		[NFIT_SPA_VOLATILE] = "volatile",
		[NFIT_SPA_PM] = "pmem",
		[NFIT_SPA_DCR] = "dimm-control-region",
		[NFIT_SPA_BDW] = "block-data-window",
		[NFIT_SPA_VDISK] = "volatile-disk",
		[NFIT_SPA_VCD] = "volatile-cd",
		[NFIT_SPA_PDISK] = "persistent-disk",
		[NFIT_SPA_PCD] = "persistent-cd",
	};

	if (type > NFIT_SPA_PCD)
		return "unknown";

	return to_name[type];
}

static int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
	int i;

	for (i = 0; i < NFIT_UUID_MAX; i++)
		if (memcmp(to_nfit_uuid(i), spa->range_guid, 16) == 0)
			return i;
	return -1;
}

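/*
 * The following add_* helpers parse one NFIT sub-table each.  On NFIT
 * re-evaluation a table that is byte-identical to one from the previous
 * scan is moved from the 'prev' list back onto the active list instead
 * of being re-allocated.
 */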
static bool add_spa(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_system_address *spa)
{
	size_t length = min_t(size_t, sizeof(*spa), spa->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &prev->spas, list) {
		if (memcmp(nfit_spa->spa, spa, length) == 0) {
			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
			return true;
		}
	}

	nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa), GFP_KERNEL);
	if (!nfit_spa)
		return false;
	INIT_LIST_HEAD(&nfit_spa->list);
	nfit_spa->spa = spa;
	list_add_tail(&nfit_spa->list, &acpi_desc->spas);
	dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__,
			spa->range_index,
			spa_type_name(nfit_spa_type(spa)));
	return true;
}

static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_memory_map *memdev)
{
	size_t length = min_t(size_t, sizeof(*memdev), memdev->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &prev->memdevs, list)
		if (memcmp(nfit_memdev->memdev, memdev, length) == 0) {
			list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
			return true;
		}

	nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev), GFP_KERNEL);
	if (!nfit_memdev)
		return false;
	INIT_LIST_HEAD(&nfit_memdev->list);
	nfit_memdev->memdev = memdev;
	list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
	dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d\n",
			__func__, memdev->device_handle, memdev->range_index,
			memdev->region_index);
	return true;
}

static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_control_region *dcr)
{
	size_t length = min_t(size_t, sizeof(*dcr), dcr->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_dcr *nfit_dcr;

	list_for_each_entry(nfit_dcr, &prev->dcrs, list)
		if (memcmp(nfit_dcr->dcr, dcr, length) == 0) {
			list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
			return true;
		}

	nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr), GFP_KERNEL);
	if (!nfit_dcr)
		return false;
	INIT_LIST_HEAD(&nfit_dcr->list);
	nfit_dcr->dcr = dcr;
	list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
	dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__,
			dcr->region_index, dcr->windows);
	return true;
}

static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_data_region *bdw)
{
	size_t length = min_t(size_t, sizeof(*bdw), bdw->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_bdw *nfit_bdw;

	list_for_each_entry(nfit_bdw, &prev->bdws, list)
		if (memcmp(nfit_bdw->bdw, bdw, length) == 0) {
			list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
			return true;
		}

	nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw), GFP_KERNEL);
	if (!nfit_bdw)
		return false;
	INIT_LIST_HEAD(&nfit_bdw->list);
	nfit_bdw->bdw = bdw;
	list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
	dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__,
			bdw->region_index, bdw->windows);
	return true;
}

static bool add_idt(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_interleave *idt)
{
	size_t length = min_t(size_t, sizeof(*idt), idt->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_idt *nfit_idt;

	list_for_each_entry(nfit_idt, &prev->idts, list)
		if (memcmp(nfit_idt->idt, idt, length) == 0) {
			list_move_tail(&nfit_idt->list, &acpi_desc->idts);
			return true;
		}

	nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt), GFP_KERNEL);
	if (!nfit_idt)
		return false;
	INIT_LIST_HEAD(&nfit_idt->list);
	nfit_idt->idt = idt;
	list_add_tail(&nfit_idt->list, &acpi_desc->idts);
	dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__,
			idt->interleave_index, idt->line_count);
	return true;
}

static bool add_flush(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_flush_address *flush)
{
	size_t length = min_t(size_t, sizeof(*flush), flush->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_flush *nfit_flush;

	list_for_each_entry(nfit_flush, &prev->flushes, list)
		if (memcmp(nfit_flush->flush, flush, length) == 0) {
			list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
			return true;
		}

	nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush), GFP_KERNEL);
	if (!nfit_flush)
		return false;
	INIT_LIST_HEAD(&nfit_flush->list);
	nfit_flush->flush = flush;
	list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
	dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__,
			flush->device_handle, flush->hint_count);
	return true;
}

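/*
 * Dispatch one NFIT sub-table to its add_* handler.  Returns the address
 * of the next table, NULL at end-of-table (or on a malformed zero-length
 * header), or ERR_PTR(-ENOMEM) if a handler failed to allocate.
 */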
static void *add_table(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev, void *table, const void *end)
{
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_header *hdr;
	void *err = ERR_PTR(-ENOMEM);

	if (table >= end)
		return NULL;

	hdr = table;
	if (!hdr->length) {
		dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
				hdr->type);
		return NULL;
	}

	switch (hdr->type) {
	case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
		if (!add_spa(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_MEMORY_MAP:
		if (!add_memdev(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_CONTROL_REGION:
		if (!add_dcr(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_DATA_REGION:
		if (!add_bdw(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_INTERLEAVE:
		if (!add_idt(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
		if (!add_flush(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_SMBIOS:
		dev_dbg(dev, "%s: smbios\n", __func__);
		break;
	default:
		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
		break;
	}

	return table + hdr->length;
}

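/*
 * Find the SPA-BDW range whose memdev references both this dimm's
 * device-handle and its control-region index.
 */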
static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem)
{
	u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
	u16 dcr = nfit_mem->dcr->region_index;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		u16 range_index = nfit_spa->spa->range_index;
		int type = nfit_spa_type(nfit_spa->spa);
		struct nfit_memdev *nfit_memdev;

		if (type != NFIT_SPA_BDW)
			continue;

		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			if (nfit_memdev->memdev->range_index != range_index)
				continue;
			if (nfit_memdev->memdev->device_handle != device_handle)
				continue;
			if (nfit_memdev->memdev->region_index != dcr)
				continue;

			nfit_mem->spa_bdw = nfit_spa->spa;
			return;
		}
	}

	dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
			nfit_mem->spa_dcr->range_index);
	nfit_mem->bdw = NULL;
}

static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
	struct nfit_memdev *nfit_memdev;
	struct nfit_flush *nfit_flush;
	struct nfit_bdw *nfit_bdw;
	struct nfit_idt *nfit_idt;
	u16 idt_idx, range_index;

	list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
		if (nfit_bdw->bdw->region_index != dcr)
			continue;
		nfit_mem->bdw = nfit_bdw->bdw;
		break;
	}

	if (!nfit_mem->bdw)
		return;

	nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);

	if (!nfit_mem->spa_bdw)
		return;

	range_index = nfit_mem->spa_bdw->range_index;
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		if (nfit_memdev->memdev->range_index != range_index ||
				nfit_memdev->memdev->region_index != dcr)
			continue;
		nfit_mem->memdev_bdw = nfit_memdev->memdev;
		idt_idx = nfit_memdev->memdev->interleave_index;
		list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
			if (nfit_idt->idt->interleave_index != idt_idx)
				continue;
			nfit_mem->idt_bdw = nfit_idt->idt;
			break;
		}

		list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
			if (nfit_flush->flush->device_handle !=
					nfit_memdev->memdev->device_handle)
				continue;
			nfit_mem->nfit_flush = nfit_flush;
			break;
		}
		break;
	}
}

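/*
 * For each memdev in the given address range, find (or allocate) the
 * nfit_mem object for the referenced dimm and link it to its control
 * region, interleave, and block-data-window descriptors.
 */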
static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_mem *nfit_mem, *found;
	struct nfit_memdev *nfit_memdev;
	int type = nfit_spa_type(spa);

	switch (type) {
	case NFIT_SPA_DCR:
	case NFIT_SPA_PM:
		break;
	default:
		return 0;
	}

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct nfit_dcr *nfit_dcr;
		u32 device_handle;
		u16 dcr;

		if (nfit_memdev->memdev->range_index != spa->range_index)
			continue;
		found = NULL;
		dcr = nfit_memdev->memdev->region_index;
		device_handle = nfit_memdev->memdev->device_handle;
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
			if (__to_nfit_memdev(nfit_mem)->device_handle
					== device_handle) {
				found = nfit_mem;
				break;
			}

		if (found)
			nfit_mem = found;
		else {
			nfit_mem = devm_kzalloc(acpi_desc->dev,
					sizeof(*nfit_mem), GFP_KERNEL);
			if (!nfit_mem)
				return -ENOMEM;
			INIT_LIST_HEAD(&nfit_mem->list);
			nfit_mem->acpi_desc = acpi_desc;
			list_add(&nfit_mem->list, &acpi_desc->dimms);
		}

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != dcr)
				continue;
			/*
			 * Record the control region for the dimm.  For
			 * the ACPI 6.1 case, where there are separate
			 * control regions for the pmem vs blk
			 * interfaces, be sure to record the extended
			 * blk details.
			 */
			if (!nfit_mem->dcr)
				nfit_mem->dcr = nfit_dcr->dcr;
			else if (nfit_mem->dcr->windows == 0
					&& nfit_dcr->dcr->windows)
				nfit_mem->dcr = nfit_dcr->dcr;
			break;
		}

		if (dcr && !nfit_mem->dcr) {
			dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
					spa->range_index, dcr);
			return -ENODEV;
		}

		if (type == NFIT_SPA_DCR) {
			struct nfit_idt *nfit_idt;
			u16 idt_idx;

			/* multiple dimms may share a SPA when interleaved */
			nfit_mem->spa_dcr = spa;
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
			idt_idx = nfit_memdev->memdev->interleave_index;
			list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
				if (nfit_idt->idt->interleave_index != idt_idx)
					continue;
				nfit_mem->idt_dcr = nfit_idt->idt;
				break;
			}
			nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
		} else {
			/*
			 * A single dimm may belong to multiple SPA-PM
			 * ranges, record at least one in addition to
			 * any SPA-DCR range.
			 */
			nfit_mem->memdev_pmem = nfit_memdev->memdev;
		}
	}

	return 0;
}

static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
{
	struct nfit_mem *a = container_of(_a, typeof(*a), list);
	struct nfit_mem *b = container_of(_b, typeof(*b), list);
	u32 handleA, handleB;

	handleA = __to_nfit_memdev(a)->device_handle;
	handleB = __to_nfit_memdev(b)->device_handle;
	if (handleA < handleB)
		return -1;
	else if (handleA > handleB)
		return 1;
	return 0;
}

static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;

	/*
	 * For each SPA-DCR or SPA-PMEM address range find its
	 * corresponding MEMDEV(s).  From each MEMDEV find the
	 * corresponding DCR.  Then, if we're operating on a SPA-DCR,
	 * try to find a SPA-BDW and a corresponding BDW that references
	 * the DCR.  Throw it all into an nfit_mem object.  Note, that
	 * BDWs are optional.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		int rc;

		rc = nfit_mem_dcr_init(acpi_desc, nfit_spa->spa);
		if (rc)
			return rc;
	}

	list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);

	return 0;
}

static ssize_t revision_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
}
static DEVICE_ATTR_RO(revision);

static struct attribute *acpi_nfit_attributes[] = {
	&dev_attr_revision.attr,
	NULL,
};

static struct attribute_group acpi_nfit_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_attributes,
};

static const struct attribute_group *acpi_nfit_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&acpi_nfit_attribute_group,
	NULL,
};

static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return __to_nfit_memdev(nfit_mem);
}

static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return nfit_mem->dcr;
}

static ssize_t handle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->device_handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t phys_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->vendor_id);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->revision_id);
}
static DEVICE_ATTR_RO(rev_id);

static ssize_t device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->device_id);
}
static DEVICE_ATTR_RO(device);

static int num_nvdimm_formats(struct nvdimm *nvdimm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	int formats = 0;

	if (nfit_mem->memdev_pmem)
		formats++;
	if (nfit_mem->memdev_bdw)
		formats++;
	return formats;
}

static ssize_t format_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->code);
}
static DEVICE_ATTR_RO(format);

static ssize_t format1_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 handle;
	ssize_t rc = -ENXIO;
	struct nfit_mem *nfit_mem;
	struct nfit_memdev *nfit_memdev;
	struct acpi_nfit_desc *acpi_desc;
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	nfit_mem = nvdimm_provider_data(nvdimm);
	acpi_desc = nfit_mem->acpi_desc;
	handle = to_nfit_memdev(dev)->device_handle;

	/* assumes DIMMs have at most 2 published interface codes */
	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nfit_dcr *nfit_dcr;

		if (memdev->device_handle != handle)
			continue;

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != memdev->region_index)
				continue;
			if (nfit_dcr->dcr->code == dcr->code)
				continue;
			rc = sprintf(buf, "%#x\n", nfit_dcr->dcr->code);
			break;
		}
		/* rc moved off the -ENXIO sentinel, second format found */
		if (rc != -ENXIO)
			break;
	}
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
static DEVICE_ATTR_RO(format1);

static ssize_t formats_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm));
}
static DEVICE_ATTR_RO(formats);

static ssize_t serial_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->serial_number);
}
static DEVICE_ATTR_RO(serial);

static ssize_t family_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem->family < 0)
		return -ENXIO;
	return sprintf(buf, "%d\n", nfit_mem->family);
}
static DEVICE_ATTR_RO(family);

static ssize_t dsm_mask_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem->family < 0)
		return -ENXIO;
	return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask);
}
static DEVICE_ATTR_RO(dsm_mask);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u16 flags = to_nfit_memdev(dev)->flags;

	return sprintf(buf, "%s%s%s%s%s\n",
		flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
		flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
		flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
		flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
		flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "");
}
static DEVICE_ATTR_RO(flags);

static struct attribute *acpi_nfit_dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_format.attr,
	&dev_attr_formats.attr,
	&dev_attr_format1.attr,
	&dev_attr_serial.attr,
	&dev_attr_rev_id.attr,
	&dev_attr_flags.attr,
	&dev_attr_family.attr,
	&dev_attr_dsm_mask.attr,
	NULL,
};

static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!to_nfit_dcr(dev))
		return 0;
	if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1)
		return 0;
	return a->mode;
}

static struct attribute_group acpi_nfit_dimm_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_dimm_attributes,
	.is_visible = acpi_nfit_dimm_attr_visible,
};

static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
	&nvdimm_attribute_group,
	&nd_device_attribute_group,
	&acpi_nfit_dimm_attribute_group,
	NULL,
};

static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
		u32 device_handle)
{
	struct nfit_mem *nfit_mem;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
		if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
			return nfit_mem->nvdimm;

	return NULL;
}

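/*
 * Determine which _DSM command family (Intel, HPE1, or HPE2) the dimm
 * object implements, then probe each function in that family's allowed
 * mask to build up nfit_mem->dsm_mask.
 */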
static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, u32 device_handle)
{
	struct acpi_device *adev, *adev_dimm;
	struct device *dev = acpi_desc->dev;
	unsigned long dsm_mask;
	const u8 *uuid;
	int i;

	/* nfit test assumes 1:1 relationship between commands and dsms */
	nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
	nfit_mem->family = NVDIMM_FAMILY_INTEL;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return 0;

	adev_dimm = acpi_find_child_device(adev, device_handle, false);
	nfit_mem->adev = adev_dimm;
	if (!adev_dimm) {
		dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
				device_handle);
		return force_enable_dimms ? 0 : -ENODEV;
	}

	/*
	 * Until standardization materializes we need to consider up to 3
	 * different command sets.  Note, that checking for function0 (bit0)
	 * tells us if any commands are reachable through this uuid.
	 */
	for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_HPE2; i++)
		if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
			break;

	/* limit the supported commands to those that are publicly documented */
	nfit_mem->family = i;
	if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
		dsm_mask = 0x3fe;
		if (disable_vendor_specific)
			dsm_mask &= ~(1 << ND_CMD_VENDOR);
	} else if (nfit_mem->family == NVDIMM_FAMILY_HPE1)
		dsm_mask = 0x1c3c76;
	else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) {
		dsm_mask = 0x1fe;
		if (disable_vendor_specific)
			dsm_mask &= ~(1 << 8);
	} else {
		dev_err(dev, "unknown dimm command family\n");
		nfit_mem->family = -1;
		return force_enable_dimms ? 0 : -ENODEV;
	}

	uuid = to_nfit_uuid(nfit_mem->family);
	for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
		if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nfit_mem->dsm_mask);

	return 0;
}

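/*
 * Walk the nfit_mem list and register an nvdimm device for each entry
 * that does not already have one, translating NFIT memdev flags into
 * NDD_* flags along the way.
 */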
static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_mem *nfit_mem;
	int dimm_count = 0;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		unsigned long flags = 0, cmd_mask;
		struct nvdimm *nvdimm;
		u32 device_handle;
		u16 mem_flags;
		int rc;

		device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
		nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
		if (nvdimm) {
			dimm_count++;
			continue;
		}

		if (nfit_mem->bdw && nfit_mem->memdev_pmem)
			flags |= NDD_ALIASING;

		mem_flags = __to_nfit_memdev(nfit_mem)->flags;
		if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
			flags |= NDD_UNARMED;

		rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
		if (rc)
			continue;

		/*
		 * TODO: provide translation for non-NVDIMM_FAMILY_INTEL
		 * devices (i.e. from nd_cmd to acpi_dsm) to standardize the
		 * userspace interface.
		 */
		cmd_mask = 1UL << ND_CMD_CALL;
		if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
			cmd_mask |= nfit_mem->dsm_mask;

		nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
				acpi_nfit_dimm_attribute_groups,
				flags, cmd_mask);
		if (!nvdimm)
			return -ENOMEM;

		nfit_mem->nvdimm = nvdimm;
		dimm_count++;

		if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
			continue;

		dev_info(acpi_desc->dev, "%s flags:%s%s%s%s\n",
				nvdimm_name(nvdimm),
		  mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
		  mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
		  mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
		  mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "");
	}

	return nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
}

static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	const u8 *uuid = to_nfit_uuid(NFIT_DEV_BUS);
	struct acpi_device *adev;
	int i;

	nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return;

	for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
		if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nd_desc->cmd_mask);
}

static ssize_t range_index_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);

	return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
}
static DEVICE_ATTR_RO(range_index);

static struct attribute *acpi_nfit_region_attributes[] = {
	&dev_attr_range_index.attr,
	NULL,
};

static struct attribute_group acpi_nfit_region_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_region_attributes,
};

static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
	&nd_region_attribute_group,
	&nd_mapping_attribute_group,
	&nd_device_attribute_group,
	&nd_numa_attribute_group,
	&acpi_nfit_region_attribute_group,
	NULL,
};

/* enough info to uniquely specify an interleave set */
struct nfit_set_info {
	struct nfit_set_info_map {
		u64 region_offset;
		u32 serial_number;
		u32 pad;
	} mapping[0];
};

static size_t sizeof_nfit_set_info(int num_mappings)
{
	return sizeof(struct nfit_set_info)
		+ num_mappings * sizeof(struct nfit_set_info_map);
}

static int cmp_map(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	return memcmp(&map0->region_offset, &map1->region_offset,
			sizeof(u64));
}

/* Retrieve the nth entry referencing this spa */
static struct acpi_nfit_memory_map *memdev_from_spa(
		struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
{
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
		if (nfit_memdev->memdev->range_index == range_index)
			if (n-- == 0)
				return nfit_memdev->memdev;
	return NULL;
}

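/*
 * Compute the interleave-set cookie for a persistent or volatile region:
 * a fletcher64 checksum over the per-dimm {region_offset, serial_number}
 * tuples, sorted by region offset so the cookie does not depend on
 * enumeration order.
 */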
static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc,
		struct acpi_nfit_system_address *spa)
{
	int i, spa_type = nfit_spa_type(spa);
	struct device *dev = acpi_desc->dev;
	struct nd_interleave_set *nd_set;
	u16 nr = ndr_desc->num_mappings;
	struct nfit_set_info *info;

	if (spa_type == NFIT_SPA_PM || spa_type == NFIT_SPA_VOLATILE)
		/* pass */;
	else
		return 0;

	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set)
		return -ENOMEM;

	info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nfit_set_info_map *map = &info->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
				spa->range_index, i);

		if (!memdev || !nfit_mem->dcr) {
			dev_err(dev, "%s: failed to find DCR\n", __func__);
			return -ENODEV;
		}

		map->region_offset = memdev->region_offset;
		map->serial_number = nfit_mem->dcr->serial_number;
	}

	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map, NULL);
	nd_set->cookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
	ndr_desc->nd_set = nd_set;
	devm_kfree(dev, info);

	return 0;
}

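/*
 * Translate a linear offset within a block window into the interleaved
 * system-physical-address offset.  Worked example with hypothetical
 * numbers: for line_size = 256, num_lines = 2, table_size = 8192, an
 * offset of 600 decomposes into line_no = 2 and sub_line_offset = 88,
 * which yields line_index = 0 after skipping table_skip_count = 1
 * tables, i.e. base_offset + line_offset[0] * 256 + 8192 + 88.
 */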
static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
{
	struct acpi_nfit_interleave *idt = mmio->idt;
	u32 sub_line_offset, line_index, line_offset;
	u64 line_no, table_skip_count, table_offset;

	line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
	table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
	line_offset = idt->line_offset[line_index]
		* mmio->line_size;
	table_offset = table_skip_count * mmio->table_size;

	return mmio->base_offset + line_offset + table_offset + sub_line_offset;
}

static void wmb_blk(struct nfit_blk *nfit_blk)
{
	if (nfit_blk->nvdimm_flush) {
		/*
		 * The first wmb() is needed to 'sfence' all previous writes
		 * such that they are architecturally visible for the platform
		 * buffer flush.  Note that we've already arranged for pmem
		 * writes to avoid the cache via arch_memcpy_to_pmem().  The
		 * final wmb() ensures ordering for the NVDIMM flush write.
		 */
		wmb();
		writeq(1, nfit_blk->nvdimm_flush);
		wmb();
	} else
		wmb_pmem();
}

static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
	u64 offset = nfit_blk->stat_offset + mmio->size * bw;

	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	return readl(mmio->addr.base + offset);
}

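/*
 * Program the block-command-window register for one transfer: bits 0-47
 * hold the dpa in cacheline units, bits 48-55 the length in cachelines,
 * and bit 56 the write flag.  A read-back is issued when the
 * NFIT_BLK_DCR_LATCH dimm flag is set so the command write is latched
 * before the aperture is touched.
 */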
static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
		resource_size_t dpa, unsigned int len, unsigned int write)
{
	u64 cmd, offset;
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];

	enum {
		BCW_OFFSET_MASK = (1ULL << 48)-1,
		BCW_LEN_SHIFT = 48,
		BCW_LEN_MASK = (1ULL << 8) - 1,
		BCW_CMD_SHIFT = 56,
	};

	cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
	len = len >> L1_CACHE_SHIFT;
	cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
	cmd |= ((u64) write) << BCW_CMD_SHIFT;

	offset = nfit_blk->cmd_offset + mmio->size * bw;
	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	writeq(cmd, mmio->addr.base + offset);
	wmb_blk(nfit_blk);

	if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
		readq(mmio->addr.base + offset);
}

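/*
 * Perform one lane's worth of I/O against the block aperture, walking
 * the transfer interleave-line by interleave-line when the aperture is
 * interleaved, then check the status register for the result.
 */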
static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
		resource_size_t dpa, void *iobuf, size_t len, int rw,
		unsigned int lane)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	unsigned int copied = 0;
	u64 base_offset;
	int rc;

	base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
		+ lane * mmio->size;
	write_blk_ctl(nfit_blk, lane, dpa, len, rw);
	while (len) {
		unsigned int c;
		u64 offset;

		if (mmio->num_lines) {
			u32 line_offset;

			offset = to_interleave_offset(base_offset + copied,
					mmio);
			div_u64_rem(offset, mmio->line_size, &line_offset);
			c = min_t(size_t, len, mmio->line_size - line_offset);
		} else {
			offset = base_offset + nfit_blk->bdw_offset;
			c = len;
		}

		if (rw)
			memcpy_to_pmem(mmio->addr.aperture + offset,
					iobuf + copied, c);
		else {
			if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
				mmio_flush_range((void __force *)
					mmio->addr.aperture + offset, c);

			memcpy_from_pmem(iobuf + copied,
					mmio->addr.aperture + offset, c);
		}

		copied += c;
		len -= c;
	}

	if (rw)
		wmb_blk(nfit_blk);

	rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
	return rc;
}

static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
		resource_size_t dpa, void *iobuf, u64 len, int rw)
{
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	struct nd_region *nd_region = nfit_blk->nd_region;
	unsigned int lane, copied = 0;
	int rc = 0;

	lane = nd_region_acquire_lane(nd_region);
	while (len) {
		u64 c = min(len, mmio->size);

		rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
				iobuf + copied, c, rw, lane);
		if (rc)
			break;

		copied += c;
		len -= c;
	}
	nd_region_release_lane(nd_region, lane);

	return rc;
}

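/*
 * SPA range mappings are shared and reference counted.  The release
 * callback below expects to run with spa_map_mutex held (kref_put is
 * called under the lock in nfit_spa_unmap).
 */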
1460static void nfit_spa_mapping_release(struct kref *kref)
1461{
1462 struct nfit_spa_mapping *spa_map = to_spa_map(kref);
1463 struct acpi_nfit_system_address *spa = spa_map->spa;
1464 struct acpi_nfit_desc *acpi_desc = spa_map->acpi_desc;
1465
1466 WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
1467 dev_dbg(acpi_desc->dev, "%s: SPA%d\n", __func__, spa->range_index);
Ross Zwisler67a3e8f2015-08-27 13:14:20 -06001468 if (spa_map->type == SPA_MAP_APERTURE)
1469 memunmap((void __force *)spa_map->addr.aperture);
1470 else
1471 iounmap(spa_map->addr.base);
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001472 release_mem_region(spa->address, spa->length);
1473 list_del(&spa_map->list);
1474 kfree(spa_map);
1475}
1476
1477static struct nfit_spa_mapping *find_spa_mapping(
1478 struct acpi_nfit_desc *acpi_desc,
1479 struct acpi_nfit_system_address *spa)
1480{
1481 struct nfit_spa_mapping *spa_map;
1482
1483 WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
1484 list_for_each_entry(spa_map, &acpi_desc->spa_maps, list)
1485 if (spa_map->spa == spa)
1486 return spa_map;
1487
1488 return NULL;
1489}
1490
1491static void nfit_spa_unmap(struct acpi_nfit_desc *acpi_desc,
1492 struct acpi_nfit_system_address *spa)
1493{
1494 struct nfit_spa_mapping *spa_map;
1495
1496 mutex_lock(&acpi_desc->spa_map_mutex);
1497 spa_map = find_spa_mapping(acpi_desc, spa);
1498
1499 if (spa_map)
1500 kref_put(&spa_map->kref, nfit_spa_mapping_release);
1501 mutex_unlock(&acpi_desc->spa_map_mutex);
1502}
1503
1504static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
Ross Zwislerc2ad2952015-07-10 11:06:13 -06001505 struct acpi_nfit_system_address *spa, enum spa_map_type type)
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001506{
1507 resource_size_t start = spa->address;
1508 resource_size_t n = spa->length;
1509 struct nfit_spa_mapping *spa_map;
1510 struct resource *res;
1511
1512 WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
1513
1514 spa_map = find_spa_mapping(acpi_desc, spa);
1515 if (spa_map) {
1516 kref_get(&spa_map->kref);
Ross Zwisler67a3e8f2015-08-27 13:14:20 -06001517 return spa_map->addr.base;
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001518 }
1519
1520 spa_map = kzalloc(sizeof(*spa_map), GFP_KERNEL);
1521 if (!spa_map)
1522 return NULL;
1523
1524 INIT_LIST_HEAD(&spa_map->list);
1525 spa_map->spa = spa;
1526 kref_init(&spa_map->kref);
1527 spa_map->acpi_desc = acpi_desc;
1528
1529 res = request_mem_region(start, n, dev_name(acpi_desc->dev));
1530 if (!res)
1531 goto err_mem;
1532
Ross Zwisler67a3e8f2015-08-27 13:14:20 -06001533 spa_map->type = type;
1534 if (type == SPA_MAP_APERTURE)
1535 spa_map->addr.aperture = (void __pmem *)memremap(start, n,
1536 ARCH_MEMREMAP_PMEM);
1537 else
1538 spa_map->addr.base = ioremap_nocache(start, n);
Ross Zwislerc2ad2952015-07-10 11:06:13 -06001539
Ross Zwisler67a3e8f2015-08-27 13:14:20 -06001540
1541 if (!spa_map->addr.base)
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001542 goto err_map;
1543
1544 list_add_tail(&spa_map->list, &acpi_desc->spa_maps);
Ross Zwisler67a3e8f2015-08-27 13:14:20 -06001545 return spa_map->addr.base;
Ross Zwisler047fc8a2015-06-25 04:21:02 -04001546
1547 err_map:
1548 release_mem_region(start, n);
1549 err_mem:
1550 kfree(spa_map);
1551 return NULL;
1552}
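
/*
 * Lifetime of the mappings managed above: __nfit_spa_map() either
 * takes a reference on an existing mapping of the same SPA table
 * entry or creates a new one, so a control region and a data window
 * that share a physical range share one request_mem_region() plus
 * mapping. nfit_spa_unmap() drops the reference, and
 * nfit_spa_mapping_release() tears the mapping down only when the
 * last BLK region referencing it is disabled.
 */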

/**
 * nfit_spa_map - interleave-aware managed-mappings of acpi_nfit_system_address ranges
 * @acpi_desc: NFIT-bus descriptor that provided the spa table entry
 * @spa: spa table entry to map
 * @type: aperture or control region
 *
 * In the case where block-data-window apertures and
 * dimm-control-regions are interleaved they will end up sharing a
 * single request_mem_region() + ioremap() for the address range. In
 * the style of devm nfit_spa_map() mappings are automatically dropped
 * when all region devices referencing the same mapping are disabled /
 * unbound.
 */
static void __iomem *nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
                struct acpi_nfit_system_address *spa, enum spa_map_type type)
{
        void __iomem *iomem;

        mutex_lock(&acpi_desc->spa_map_mutex);
        iomem = __nfit_spa_map(acpi_desc, spa, type);
        mutex_unlock(&acpi_desc->spa_map_mutex);

        return iomem;
}

static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
                struct acpi_nfit_interleave *idt, u16 interleave_ways)
{
        if (idt) {
                mmio->num_lines = idt->line_count;
                mmio->line_size = idt->line_size;
                if (interleave_ways == 0)
                        return -ENXIO;
                mmio->table_size = mmio->num_lines * interleave_ways
                        * mmio->line_size;
        }

        return 0;
}
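
/*
 * Example interleave geometry with hypothetical numbers: an IDT with
 * line_count = 2 and line_size = 256 across interleave_ways = 4 gives
 * table_size = 2 * 4 * 256 = 2048 bytes, i.e. the span of aperture
 * address space covered by one pass through the interleave table.
 */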

static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
                struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
{
        struct nd_cmd_dimm_flags flags;
        int rc;

        memset(&flags, 0, sizeof(flags));
        rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
                        sizeof(flags), NULL);

        if (rc >= 0 && flags.status == 0)
                nfit_blk->dimm_flags = flags.flags;
        else if (rc == -ENOTTY) {
                /* fall back to a conservative default */
                nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH;
                rc = 0;
        } else
                rc = -ENXIO;

        return rc;
}
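
/*
 * When the DIMM does not implement the flags command (-ENOTTY), the
 * fallback above assumes the worst case: latch each control-window
 * command with a read-back (NFIT_BLK_DCR_LATCH) and flush the
 * aperture before reads (NFIT_BLK_READ_FLUSH). Any other command
 * failure is fatal for BLK operation.
 */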

static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
                struct device *dev)
{
        struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
        struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
        struct nd_blk_region *ndbr = to_nd_blk_region(dev);
        struct nfit_flush *nfit_flush;
        struct nfit_blk_mmio *mmio;
        struct nfit_blk *nfit_blk;
        struct nfit_mem *nfit_mem;
        struct nvdimm *nvdimm;
        int rc;

        nvdimm = nd_blk_region_to_dimm(ndbr);
        nfit_mem = nvdimm_provider_data(nvdimm);
        if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
                dev_dbg(dev, "%s: missing%s%s%s\n", __func__,
                                nfit_mem ? "" : " nfit_mem",
                                (nfit_mem && nfit_mem->dcr) ? "" : " dcr",
                                (nfit_mem && nfit_mem->bdw) ? "" : " bdw");
                return -ENXIO;
        }

        nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL);
        if (!nfit_blk)
                return -ENOMEM;
        nd_blk_region_set_provider_data(ndbr, nfit_blk);
        nfit_blk->nd_region = to_nd_region(dev);

        /* map block aperture memory */
        nfit_blk->bdw_offset = nfit_mem->bdw->offset;
        mmio = &nfit_blk->mmio[BDW];
        mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw,
                        SPA_MAP_APERTURE);
        if (!mmio->addr.base) {
                dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
                                nvdimm_name(nvdimm));
                return -ENOMEM;
        }
        mmio->size = nfit_mem->bdw->size;
        mmio->base_offset = nfit_mem->memdev_bdw->region_offset;
        mmio->idt = nfit_mem->idt_bdw;
        mmio->spa = nfit_mem->spa_bdw;
        rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw,
                        nfit_mem->memdev_bdw->interleave_ways);
        if (rc) {
                dev_dbg(dev, "%s: %s failed to init bdw interleave\n",
                                __func__, nvdimm_name(nvdimm));
                return rc;
        }

        /* map block control memory */
        nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
        nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
        mmio = &nfit_blk->mmio[DCR];
        mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr,
                        SPA_MAP_CONTROL);
        if (!mmio->addr.base) {
                dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,
                                nvdimm_name(nvdimm));
                return -ENOMEM;
        }
        mmio->size = nfit_mem->dcr->window_size;
        mmio->base_offset = nfit_mem->memdev_dcr->region_offset;
        mmio->idt = nfit_mem->idt_dcr;
        mmio->spa = nfit_mem->spa_dcr;
        rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr,
                        nfit_mem->memdev_dcr->interleave_ways);
        if (rc) {
                dev_dbg(dev, "%s: %s failed to init dcr interleave\n",
                                __func__, nvdimm_name(nvdimm));
                return rc;
        }

        rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
        if (rc < 0) {
                dev_dbg(dev, "%s: %s failed get DIMM flags\n",
                                __func__, nvdimm_name(nvdimm));
                return rc;
        }

        nfit_flush = nfit_mem->nfit_flush;
        if (nfit_flush && nfit_flush->flush->hint_count != 0) {
                nfit_blk->nvdimm_flush = devm_ioremap_nocache(dev,
                                nfit_flush->flush->hint_address[0], 8);
                if (!nfit_blk->nvdimm_flush)
                        return -ENOMEM;
        }

        if (!arch_has_wmb_pmem() && !nfit_blk->nvdimm_flush)
                dev_warn(dev, "unable to guarantee persistence of writes\n");

        if (mmio->line_size == 0)
                return 0;

        if ((u32) nfit_blk->cmd_offset % mmio->line_size
                        + 8 > mmio->line_size) {
                dev_dbg(dev, "cmd_offset crosses interleave boundary\n");
                return -ENXIO;
        } else if ((u32) nfit_blk->stat_offset % mmio->line_size
                        + 8 > mmio->line_size) {
                dev_dbg(dev, "stat_offset crosses interleave boundary\n");
                return -ENXIO;
        }

        return 0;
}
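
/*
 * Enable order above: map the BDW aperture, map the DCR command and
 * status registers, query the DIMM flags, then optionally map the
 * first flush hint. The final checks reject geometries where the
 * 8-byte command or status register would straddle an interleave
 * line, since writeq() / readq() could not then reach a contiguous
 * register.
 */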

static void acpi_nfit_blk_region_disable(struct nvdimm_bus *nvdimm_bus,
                struct device *dev)
{
        struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
        struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
        struct nd_blk_region *ndbr = to_nd_blk_region(dev);
        struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
        int i;

        if (!nfit_blk)
                return; /* never enabled */

        /* auto-free BLK spa mappings */
        for (i = 0; i < 2; i++) {
                struct nfit_blk_mmio *mmio = &nfit_blk->mmio[i];

                if (mmio->addr.base)
                        nfit_spa_unmap(acpi_desc, mmio->spa);
        }
        nd_blk_region_set_provider_data(ndbr, NULL);
        /* devm will free nfit_blk */
}

static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
                struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
{
        struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
        struct acpi_nfit_system_address *spa = nfit_spa->spa;
        int cmd_rc, rc;

        cmd->address = spa->address;
        cmd->length = spa->length;
        rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
                        sizeof(*cmd), &cmd_rc);
        if (rc < 0)
                return rc;
        return cmd_rc;
}

static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa)
{
        int rc;
        int cmd_rc;
        struct nd_cmd_ars_start ars_start;
        struct acpi_nfit_system_address *spa = nfit_spa->spa;
        struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

        memset(&ars_start, 0, sizeof(ars_start));
        ars_start.address = spa->address;
        ars_start.length = spa->length;
        if (nfit_spa_type(spa) == NFIT_SPA_PM)
                ars_start.type = ND_ARS_PERSISTENT;
        else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
                ars_start.type = ND_ARS_VOLATILE;
        else
                return -ENOTTY;

        rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
                        sizeof(ars_start), &cmd_rc);

        if (rc < 0)
                return rc;
        return cmd_rc;
}

static int ars_continue(struct acpi_nfit_desc *acpi_desc)
{
        int rc, cmd_rc;
        struct nd_cmd_ars_start ars_start;
        struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
        struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;

        memset(&ars_start, 0, sizeof(ars_start));
        ars_start.address = ars_status->restart_address;
        ars_start.length = ars_status->restart_length;
        ars_start.type = ars_status->type;
        rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
                        sizeof(ars_start), &cmd_rc);
        if (rc < 0)
                return rc;
        return cmd_rc;
}

static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
{
        struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
        struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
        int rc, cmd_rc;

        rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status,
                        acpi_desc->ars_status_size, &cmd_rc);
        if (rc < 0)
                return rc;
        return cmd_rc;
}
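
/*
 * All four ARS helpers above share a two-level status convention: the
 * ndctl callback's return value reports transport failures, while
 * cmd_rc carries the translated firmware status (e.g. -EBUSY for a
 * scrub still in progress, -ENOSPC for a truncated record list), so
 * callers check rc first and then propagate cmd_rc.
 */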

static int ars_status_process_records(struct nvdimm_bus *nvdimm_bus,
                struct nd_cmd_ars_status *ars_status)
{
        int rc;
        u32 i;

        for (i = 0; i < ars_status->num_records; i++) {
                rc = nvdimm_bus_add_poison(nvdimm_bus,
                                ars_status->records[i].err_address,
                                ars_status->records[i].length);
                if (rc)
                        return rc;
        }

        return 0;
}

static void acpi_nfit_remove_resource(void *data)
{
        struct resource *res = data;

        remove_resource(res);
}

static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
                struct nd_region_desc *ndr_desc)
{
        struct resource *res, *nd_res = ndr_desc->res;
        int is_pmem, ret;

        /* No operation if the region is already registered as PMEM */
        is_pmem = region_intersects(nd_res->start, resource_size(nd_res),
                        IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY);
        if (is_pmem == REGION_INTERSECTS)
                return 0;

        res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL);
        if (!res)
                return -ENOMEM;

        res->name = "Persistent Memory";
        res->start = nd_res->start;
        res->end = nd_res->end;
        res->flags = IORESOURCE_MEM;
        res->desc = IORES_DESC_PERSISTENT_MEMORY;

        ret = insert_resource(&iomem_resource, res);
        if (ret)
                return ret;

        ret = devm_add_action(acpi_desc->dev, acpi_nfit_remove_resource, res);
        if (ret) {
                remove_resource(res);
                return ret;
        }

        return 0;
}
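
/*
 * acpi_nfit_insert_resource() pairs insert_resource() with a
 * devm_add_action() cleanup so the "Persistent Memory" entry drops
 * out of the iomem tree automatically when the ACPI device is
 * unbound; the explicit remove_resource() in the error path covers
 * the case where registering that cleanup action itself fails.
 */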

static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
                struct nd_mapping *nd_mapping, struct nd_region_desc *ndr_desc,
                struct acpi_nfit_memory_map *memdev,
                struct nfit_spa *nfit_spa)
{
        struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
                        memdev->device_handle);
        struct acpi_nfit_system_address *spa = nfit_spa->spa;
        struct nd_blk_region_desc *ndbr_desc;
        struct nfit_mem *nfit_mem;
        int blk_valid = 0;

        if (!nvdimm) {
                dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
                                spa->range_index, memdev->device_handle);
                return -ENODEV;
        }

        nd_mapping->nvdimm = nvdimm;
        switch (nfit_spa_type(spa)) {
        case NFIT_SPA_PM:
        case NFIT_SPA_VOLATILE:
                nd_mapping->start = memdev->address;
                nd_mapping->size = memdev->region_size;
                break;
        case NFIT_SPA_DCR:
                nfit_mem = nvdimm_provider_data(nvdimm);
                if (!nfit_mem || !nfit_mem->bdw) {
                        dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
                                        spa->range_index, nvdimm_name(nvdimm));
                } else {
                        nd_mapping->size = nfit_mem->bdw->capacity;
                        nd_mapping->start = nfit_mem->bdw->start_address;
                        ndr_desc->num_lanes = nfit_mem->bdw->windows;
                        blk_valid = 1;
                }

                ndr_desc->nd_mapping = nd_mapping;
                ndr_desc->num_mappings = blk_valid;
                ndbr_desc = to_blk_region_desc(ndr_desc);
                ndbr_desc->enable = acpi_nfit_blk_region_enable;
                ndbr_desc->disable = acpi_nfit_blk_region_disable;
                ndbr_desc->do_io = acpi_desc->blk_do_io;
                nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
                                ndr_desc);
                if (!nfit_spa->nd_region)
                        return -ENOMEM;
                break;
        }

        return 0;
}

static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
                struct nfit_spa *nfit_spa)
{
        static struct nd_mapping nd_mappings[ND_MAX_MAPPINGS];
        struct acpi_nfit_system_address *spa = nfit_spa->spa;
        struct nd_blk_region_desc ndbr_desc;
        struct nd_region_desc *ndr_desc;
        struct nfit_memdev *nfit_memdev;
        struct nvdimm_bus *nvdimm_bus;
        struct resource res;
        int count = 0, rc;

        if (nfit_spa->nd_region)
                return 0;

        if (spa->range_index == 0) {
                dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n",
                                __func__);
                return 0;
        }

        memset(&res, 0, sizeof(res));
        memset(&nd_mappings, 0, sizeof(nd_mappings));
        memset(&ndbr_desc, 0, sizeof(ndbr_desc));
        res.start = spa->address;
        res.end = res.start + spa->length - 1;
        ndr_desc = &ndbr_desc.ndr_desc;
        ndr_desc->res = &res;
        ndr_desc->provider_data = nfit_spa;
        ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
        if (spa->flags & ACPI_NFIT_PROXIMITY_VALID)
                ndr_desc->numa_node = acpi_map_pxm_to_online_node(
                                spa->proximity_domain);
        else
                ndr_desc->numa_node = NUMA_NO_NODE;

        list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
                struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
                struct nd_mapping *nd_mapping;

                if (memdev->range_index != spa->range_index)
                        continue;
                if (count >= ND_MAX_MAPPINGS) {
                        dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
                                        spa->range_index, ND_MAX_MAPPINGS);
                        return -ENXIO;
                }
                nd_mapping = &nd_mappings[count++];
                rc = acpi_nfit_init_mapping(acpi_desc, nd_mapping, ndr_desc,
                                memdev, nfit_spa);
                if (rc)
                        goto out;
        }

        ndr_desc->nd_mapping = nd_mappings;
        ndr_desc->num_mappings = count;
        rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
        if (rc)
                goto out;

        nvdimm_bus = acpi_desc->nvdimm_bus;
        if (nfit_spa_type(spa) == NFIT_SPA_PM) {
                rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc);
                if (rc) {
                        dev_warn(acpi_desc->dev,
                                        "failed to insert pmem resource to iomem: %d\n",
                                        rc);
                        goto out;
                }

                nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
                                ndr_desc);
                if (!nfit_spa->nd_region)
                        rc = -ENOMEM;
        } else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) {
                nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
                                ndr_desc);
                if (!nfit_spa->nd_region)
                        rc = -ENOMEM;
        }

 out:
        if (rc)
                dev_err(acpi_desc->dev, "failed to register spa range %d\n",
                                nfit_spa->spa->range_index);
        return rc;
}

static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc,
                u32 max_ars)
{
        struct device *dev = acpi_desc->dev;
        struct nd_cmd_ars_status *ars_status;

        if (acpi_desc->ars_status && acpi_desc->ars_status_size >= max_ars) {
                memset(acpi_desc->ars_status, 0, acpi_desc->ars_status_size);
                return 0;
        }

        if (acpi_desc->ars_status)
                devm_kfree(dev, acpi_desc->ars_status);
        acpi_desc->ars_status = NULL;
        ars_status = devm_kzalloc(dev, max_ars, GFP_KERNEL);
        if (!ars_status)
                return -ENOMEM;
        acpi_desc->ars_status = ars_status;
        acpi_desc->ars_status_size = max_ars;
        return 0;
}

static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc,
                struct nfit_spa *nfit_spa)
{
        struct acpi_nfit_system_address *spa = nfit_spa->spa;
        int rc;

        if (!nfit_spa->max_ars) {
                struct nd_cmd_ars_cap ars_cap;

                memset(&ars_cap, 0, sizeof(ars_cap));
                rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
                if (rc < 0)
                        return rc;
                nfit_spa->max_ars = ars_cap.max_ars_out;
                nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
                /* check that the supported scrub types match the spa type */
                if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE &&
                                ((ars_cap.status >> 16) & ND_ARS_VOLATILE) == 0)
                        return -ENOTTY;
                else if (nfit_spa_type(spa) == NFIT_SPA_PM &&
                                ((ars_cap.status >> 16) & ND_ARS_PERSISTENT) == 0)
                        return -ENOTTY;
        }

        if (ars_status_alloc(acpi_desc, nfit_spa->max_ars))
                return -ENOMEM;

        rc = ars_get_status(acpi_desc);
        if (rc < 0 && rc != -ENOSPC)
                return rc;

        if (ars_status_process_records(acpi_desc->nvdimm_bus,
                        acpi_desc->ars_status))
                return -ENOMEM;

        return 0;
}
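
/*
 * acpi_nfit_query_poison() caches the range's ARS capabilities on
 * first use (max_ars sizes the reusable status buffer), then fetches
 * the current status and feeds every returned record into the bad
 * block list. -ENOSPC is deliberately not fatal here: a truncated
 * record list is still processed, and the caller decides whether to
 * continue the scrub from the reported restart address.
 */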

static void acpi_nfit_async_scrub(struct acpi_nfit_desc *acpi_desc,
                struct nfit_spa *nfit_spa)
{
        struct acpi_nfit_system_address *spa = nfit_spa->spa;
        unsigned int overflow_retry = scrub_overflow_abort;
        u64 init_ars_start = 0, init_ars_len = 0;
        struct device *dev = acpi_desc->dev;
        unsigned int tmo = scrub_timeout;
        int rc;

        if (nfit_spa->ars_done || !nfit_spa->nd_region)
                return;

        rc = ars_start(acpi_desc, nfit_spa);
        /*
         * If we timed out the initial scan we'll still be busy here,
         * and will wait another timeout before giving up permanently.
         */
        if (rc < 0 && rc != -EBUSY)
                return;

        do {
                u64 ars_start, ars_len;

                if (acpi_desc->cancel)
                        break;
                rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
                if (rc == -ENOTTY)
                        break;
                if (rc == -EBUSY && !tmo) {
                        dev_warn(dev, "range %d ars timeout, aborting\n",
                                        spa->range_index);
                        break;
                }

                if (rc == -EBUSY) {
                        /*
                         * Note, entries may be appended to the list
                         * while the lock is dropped, but the workqueue
                         * being active prevents entries being deleted /
                         * freed.
                         */
                        mutex_unlock(&acpi_desc->init_mutex);
                        ssleep(1);
                        tmo--;
                        mutex_lock(&acpi_desc->init_mutex);
                        continue;
                }

                /* we got some results, but there are more pending... */
                if (rc == -ENOSPC && overflow_retry--) {
                        if (!init_ars_len) {
                                init_ars_len = acpi_desc->ars_status->length;
                                init_ars_start = acpi_desc->ars_status->address;
                        }
                        rc = ars_continue(acpi_desc);
                }

                if (rc < 0) {
                        dev_warn(dev, "range %d ars continuation failed\n",
                                        spa->range_index);
                        break;
                }

                if (init_ars_len) {
                        ars_start = init_ars_start;
                        ars_len = init_ars_len;
                } else {
                        ars_start = acpi_desc->ars_status->address;
                        ars_len = acpi_desc->ars_status->length;
                }
                dev_dbg(dev, "spa range: %d ars from %#llx + %#llx complete\n",
                                spa->range_index, ars_start, ars_len);
                /* notify the region about new poison entries */
                nvdimm_region_notify(nfit_spa->nd_region,
                                NVDIMM_REVALIDATE_POISON);
                break;
        } while (1);
}

static void acpi_nfit_scrub(struct work_struct *work)
{
        struct device *dev;
        u64 init_scrub_length = 0;
        struct nfit_spa *nfit_spa;
        u64 init_scrub_address = 0;
        bool init_ars_done = false;
        struct acpi_nfit_desc *acpi_desc;
        unsigned int tmo = scrub_timeout;
        unsigned int overflow_retry = scrub_overflow_abort;

        acpi_desc = container_of(work, typeof(*acpi_desc), work);
        dev = acpi_desc->dev;

        /*
         * We scrub in 2 phases. The first phase waits for any platform
         * firmware initiated scrubs to complete and then we go search for the
         * affected spa regions to mark them scanned. In the second phase we
         * initiate a directed scrub for every range that was not scrubbed in
         * phase 1.
         */

        /* process platform firmware initiated scrubs */
 retry:
        mutex_lock(&acpi_desc->init_mutex);
        list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
                struct nd_cmd_ars_status *ars_status;
                struct acpi_nfit_system_address *spa;
                u64 ars_start, ars_len;
                int rc;

                if (acpi_desc->cancel)
                        break;

                if (nfit_spa->nd_region)
                        continue;

                if (init_ars_done) {
                        /*
                         * No need to re-query, we're now just
                         * reconciling all the ranges covered by the
                         * initial scrub
                         */
                        rc = 0;
                } else
                        rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);

                if (rc == -ENOTTY) {
                        /* no ars capability, just register spa and move on */
                        acpi_nfit_register_region(acpi_desc, nfit_spa);
                        continue;
                }

                if (rc == -EBUSY && !tmo) {
                        /* fallthrough to directed scrub in phase 2 */
                        dev_warn(dev, "timeout awaiting ars results, continuing...\n");
                        break;
                } else if (rc == -EBUSY) {
                        mutex_unlock(&acpi_desc->init_mutex);
                        ssleep(1);
                        tmo--;
                        goto retry;
                }

                /* we got some results, but there are more pending... */
                if (rc == -ENOSPC && overflow_retry--) {
                        ars_status = acpi_desc->ars_status;
                        /*
                         * Record the original scrub range, so that we
                         * can recall all the ranges impacted by the
                         * initial scrub.
                         */
                        if (!init_scrub_length) {
                                init_scrub_length = ars_status->length;
                                init_scrub_address = ars_status->address;
                        }
                        rc = ars_continue(acpi_desc);
                        if (rc == 0) {
                                mutex_unlock(&acpi_desc->init_mutex);
                                goto retry;
                        }
                }

                if (rc < 0) {
                        /*
                         * Initial scrub failed, we'll give it one more
                         * try below...
                         */
                        break;
                }

                /* We got some final results, record completed ranges */
                ars_status = acpi_desc->ars_status;
                if (init_scrub_length) {
                        ars_start = init_scrub_address;
                        ars_len = ars_start + init_scrub_length;
                } else {
                        ars_start = ars_status->address;
                        ars_len = ars_status->length;
                }
                spa = nfit_spa->spa;

                if (!init_ars_done) {
                        init_ars_done = true;
                        dev_dbg(dev, "init scrub %#llx + %#llx complete\n",
                                        ars_start, ars_len);
                }
                if (ars_start <= spa->address && ars_start + ars_len
                                >= spa->address + spa->length)
                        acpi_nfit_register_region(acpi_desc, nfit_spa);
        }

        /*
         * For all the ranges not covered by an initial scrub we still
         * want to see if there are errors, but it's ok to discover them
         * asynchronously.
         */
        list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
                /*
                 * Flag all the ranges that still need scrubbing, but
                 * register them now to make data available.
                 */
                if (nfit_spa->nd_region)
                        nfit_spa->ars_done = 1;
                else
                        acpi_nfit_register_region(acpi_desc, nfit_spa);
        }

        list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
                acpi_nfit_async_scrub(acpi_desc, nfit_spa);
        mutex_unlock(&acpi_desc->init_mutex);
}
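
/*
 * Both loops above are bounded by the scrub_timeout and
 * scrub_overflow_abort module parameters: the former caps how many
 * one-second -EBUSY polls are tolerated, the latter how many -ENOSPC
 * continuations are attempted before the scrub is abandoned for the
 * range.
 */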

static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
{
        struct nfit_spa *nfit_spa;
        int rc;

        list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
                if (nfit_spa_type(nfit_spa->spa) == NFIT_SPA_DCR) {
                        /* BLK regions don't need to wait for ars results */
                        rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
                        if (rc)
                                return rc;
                }

        queue_work(nfit_wq, &acpi_desc->work);
        return 0;
}

static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
                struct nfit_table_prev *prev)
{
        struct device *dev = acpi_desc->dev;

        if (!list_empty(&prev->spas) ||
                        !list_empty(&prev->memdevs) ||
                        !list_empty(&prev->dcrs) ||
                        !list_empty(&prev->bdws) ||
                        !list_empty(&prev->idts) ||
                        !list_empty(&prev->flushes)) {
                dev_err(dev, "new nfit deletes entries (unsupported)\n");
                return -ENXIO;
        }
        return 0;
}

int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
{
        struct device *dev = acpi_desc->dev;
        struct nfit_table_prev prev;
        const void *end;
        u8 *data;
        int rc;

        mutex_lock(&acpi_desc->init_mutex);

        INIT_LIST_HEAD(&prev.spas);
        INIT_LIST_HEAD(&prev.memdevs);
        INIT_LIST_HEAD(&prev.dcrs);
        INIT_LIST_HEAD(&prev.bdws);
        INIT_LIST_HEAD(&prev.idts);
        INIT_LIST_HEAD(&prev.flushes);

        list_cut_position(&prev.spas, &acpi_desc->spas,
                        acpi_desc->spas.prev);
        list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
                        acpi_desc->memdevs.prev);
        list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
                        acpi_desc->dcrs.prev);
        list_cut_position(&prev.bdws, &acpi_desc->bdws,
                        acpi_desc->bdws.prev);
        list_cut_position(&prev.idts, &acpi_desc->idts,
                        acpi_desc->idts.prev);
        list_cut_position(&prev.flushes, &acpi_desc->flushes,
                        acpi_desc->flushes.prev);

        data = (u8 *) acpi_desc->nfit;
        end = data + sz;
        while (!IS_ERR_OR_NULL(data))
                data = add_table(acpi_desc, &prev, data, end);

        if (IS_ERR(data)) {
                dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__,
                                PTR_ERR(data));
                rc = PTR_ERR(data);
                goto out_unlock;
        }

        rc = acpi_nfit_check_deletions(acpi_desc, &prev);
        if (rc)
                goto out_unlock;

        if (nfit_mem_init(acpi_desc) != 0) {
                rc = -ENOMEM;
                goto out_unlock;
        }

        acpi_nfit_init_dsms(acpi_desc);

        rc = acpi_nfit_register_dimms(acpi_desc);
        if (rc)
                goto out_unlock;

        rc = acpi_nfit_register_regions(acpi_desc);

 out_unlock:
        mutex_unlock(&acpi_desc->init_mutex);
        return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_init);
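
/*
 * The list_cut_position() calls above implement NFIT update support:
 * previously parsed tables are moved to the "prev" lists, add_table()
 * re-adopts any entry that still appears in the new NFIT, and
 * acpi_nfit_check_deletions() rejects the update if anything was left
 * behind, since deleting live entries is unsupported.
 */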

struct acpi_nfit_flush_work {
        struct work_struct work;
        struct completion cmp;
};

static void flush_probe(struct work_struct *work)
{
        struct acpi_nfit_flush_work *flush;

        flush = container_of(work, typeof(*flush), work);
        complete(&flush->cmp);
}

static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
{
        struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
        struct device *dev = acpi_desc->dev;
        struct acpi_nfit_flush_work flush;

        /* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
        device_lock(dev);
        device_unlock(dev);

        /*
         * Scrub work could take 10s of seconds, userspace may give up so we
         * need to be interruptible while waiting.
         */
        INIT_WORK_ONSTACK(&flush.work, flush_probe);
        COMPLETION_INITIALIZER_ONSTACK(flush.cmp);
        queue_work(nfit_wq, &flush.work);
        return wait_for_completion_interruptible(&flush.cmp);
}
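
/*
 * flush_probe() is a barrier rather than real work: since nfit_wq is
 * a single-threaded workqueue, queueing this no-op item and waiting
 * for its completion guarantees that any previously queued scrub /
 * registration work has finished before a probe of an nvdimm bus
 * device proceeds.
 */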

static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
                struct nvdimm *nvdimm, unsigned int cmd)
{
        struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);

        if (nvdimm)
                return 0;
        if (cmd != ND_CMD_ARS_START)
                return 0;

        /*
         * The kernel and userspace may race to initiate a scrub, but
         * the scrub thread is prepared to lose that initial race. It
         * just needs guarantees that any ars it initiates are not
         * interrupted by any intervening start requests from userspace.
         */
        if (work_busy(&acpi_desc->work))
                return -EBUSY;

        return 0;
}

void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
{
        struct nvdimm_bus_descriptor *nd_desc;

        dev_set_drvdata(dev, acpi_desc);
        acpi_desc->dev = dev;
        acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
        nd_desc = &acpi_desc->nd_desc;
        nd_desc->provider_name = "ACPI.NFIT";
        nd_desc->ndctl = acpi_nfit_ctl;
        nd_desc->flush_probe = acpi_nfit_flush_probe;
        nd_desc->clear_to_send = acpi_nfit_clear_to_send;
        nd_desc->attr_groups = acpi_nfit_attribute_groups;

        INIT_LIST_HEAD(&acpi_desc->spa_maps);
        INIT_LIST_HEAD(&acpi_desc->spas);
        INIT_LIST_HEAD(&acpi_desc->dcrs);
        INIT_LIST_HEAD(&acpi_desc->bdws);
        INIT_LIST_HEAD(&acpi_desc->idts);
        INIT_LIST_HEAD(&acpi_desc->flushes);
        INIT_LIST_HEAD(&acpi_desc->memdevs);
        INIT_LIST_HEAD(&acpi_desc->dimms);
        mutex_init(&acpi_desc->spa_map_mutex);
        mutex_init(&acpi_desc->init_mutex);
        INIT_WORK(&acpi_desc->work, acpi_nfit_scrub);
}
EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);

static int acpi_nfit_add(struct acpi_device *adev)
{
        struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
        struct acpi_nfit_desc *acpi_desc;
        struct device *dev = &adev->dev;
        struct acpi_table_header *tbl;
        acpi_status status = AE_OK;
        acpi_size sz;
        int rc;

        status = acpi_get_table_with_size(ACPI_SIG_NFIT, 0, &tbl, &sz);
        if (ACPI_FAILURE(status)) {
                /* This is ok, we could have an nvdimm hotplugged later */
                dev_dbg(dev, "failed to find NFIT at startup\n");
                return 0;
        }

        acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
        if (!acpi_desc)
                return -ENOMEM;
        acpi_nfit_desc_init(acpi_desc, &adev->dev);
        acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, &acpi_desc->nd_desc);
        if (!acpi_desc->nvdimm_bus)
                return -ENOMEM;

        /*
         * Save the acpi header for later and then skip it,
         * making nfit point to the first nfit table header.
         */
        acpi_desc->acpi_header = *tbl;
        acpi_desc->nfit = (void *) tbl + sizeof(struct acpi_table_nfit);
        sz -= sizeof(struct acpi_table_nfit);

        /* Evaluate _FIT and override with that if present */
        status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
        if (ACPI_SUCCESS(status) && buf.length > 0) {
                union acpi_object *obj;
                /*
                 * Adjust for the acpi_object header of the _FIT
                 */
                obj = buf.pointer;
                if (obj->type == ACPI_TYPE_BUFFER) {
                        acpi_desc->nfit =
                                (struct acpi_nfit_header *)obj->buffer.pointer;
                        sz = obj->buffer.length;
                } else
                        dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n",
                                        __func__, (int) obj->type);
        }

        rc = acpi_nfit_init(acpi_desc, sz);
        if (rc) {
                nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
                return rc;
        }
        return 0;
}

static int acpi_nfit_remove(struct acpi_device *adev)
{
        struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);

        acpi_desc->cancel = 1;
        flush_workqueue(nfit_wq);
        nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
        return 0;
}

static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
{
        struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);
        struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
        struct acpi_nfit_header *nfit_saved;
        union acpi_object *obj;
        struct device *dev = &adev->dev;
        acpi_status status;
        int ret;

        dev_dbg(dev, "%s: event: %d\n", __func__, event);

        device_lock(dev);
        if (!dev->driver) {
                /* dev->driver may be null if we're being removed */
                dev_dbg(dev, "%s: no driver found for dev\n", __func__);
                goto out_unlock;
        }

        if (!acpi_desc) {
                acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
                if (!acpi_desc)
                        goto out_unlock;
                acpi_nfit_desc_init(acpi_desc, &adev->dev);
                acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, &acpi_desc->nd_desc);
                if (!acpi_desc->nvdimm_bus)
                        goto out_unlock;
        } else {
                /*
                 * Finish previous registration before considering new
                 * regions.
                 */
                flush_workqueue(nfit_wq);
        }

        /* Evaluate _FIT */
        status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
        if (ACPI_FAILURE(status)) {
                dev_err(dev, "failed to evaluate _FIT\n");
                goto out_unlock;
        }

        nfit_saved = acpi_desc->nfit;
        obj = buf.pointer;
        if (obj->type == ACPI_TYPE_BUFFER) {
                acpi_desc->nfit =
                        (struct acpi_nfit_header *)obj->buffer.pointer;
                ret = acpi_nfit_init(acpi_desc, obj->buffer.length);
                if (ret) {
                        /* Merge failed, restore old nfit, and exit */
                        acpi_desc->nfit = nfit_saved;
                        dev_err(dev, "failed to merge updated NFIT\n");
                }
        } else {
                /* Bad _FIT, restore old nfit */
                dev_err(dev, "Invalid _FIT\n");
        }
        kfree(buf.pointer);

 out_unlock:
        device_unlock(dev);
}

static const struct acpi_device_id acpi_nfit_ids[] = {
        { "ACPI0012", 0 },
        { "", 0 },
};
MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);

static struct acpi_driver acpi_nfit_driver = {
        .name = KBUILD_MODNAME,
        .ids = acpi_nfit_ids,
        .ops = {
                .add = acpi_nfit_add,
                .remove = acpi_nfit_remove,
                .notify = acpi_nfit_notify,
        },
};

static __init int nfit_init(void)
{
        BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
        BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
        BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
        BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
        BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
        BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
        BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);

        acpi_str_to_uuid(UUID_VOLATILE_MEMORY, nfit_uuid[NFIT_SPA_VOLATILE]);
        acpi_str_to_uuid(UUID_PERSISTENT_MEMORY, nfit_uuid[NFIT_SPA_PM]);
        acpi_str_to_uuid(UUID_CONTROL_REGION, nfit_uuid[NFIT_SPA_DCR]);
        acpi_str_to_uuid(UUID_DATA_REGION, nfit_uuid[NFIT_SPA_BDW]);
        acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_VDISK]);
        acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD, nfit_uuid[NFIT_SPA_VCD]);
        acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_PDISK]);
        acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]);
        acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]);
        acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]);
        acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE1, nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
        acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE2, nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);

        nfit_wq = create_singlethread_workqueue("nfit");
        if (!nfit_wq)
                return -ENOMEM;

        return acpi_bus_register_driver(&acpi_nfit_driver);
}

static __exit void nfit_exit(void)
{
        acpi_bus_unregister_driver(&acpi_nfit_driver);
        destroy_workqueue(nfit_wq);
}

module_init(nfit_init);
module_exit(nfit_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");