/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/ndctl.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include "nfit.h"

static u8 nfit_uuid[NFIT_UUID_MAX][16];

static const u8 *to_nfit_uuid(enum nfit_uuids id)
{
	return nfit_uuid[id];
}

static struct acpi_nfit_desc *to_acpi_nfit_desc(
		struct nvdimm_bus_descriptor *nd_desc)
{
	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}

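/*
 * acpi_nfit_ctl - marshal an nd_cmd_* payload into a _DSM evaluation.
 *
 * For dimm commands the _DSM is evaluated against the dimm's companion
 * ACPI device; for bus commands it is evaluated against the root NFIT
 * device. The input fields in @buf are packed into an ACPI buffer object,
 * the _DSM output buffer is copied back field by field, and, when a status
 * field is present, any unfilled space left in @buf is returned.
 */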
static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int buf_len)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	const struct nd_cmd_desc *desc = NULL;
	union acpi_object in_obj, in_buf, *out_obj;
	struct device *dev = acpi_desc->dev;
	const char *cmd_name, *dimm_name;
	unsigned long dsm_mask;
	acpi_handle handle;
	const u8 *uuid;
	u32 offset;
	int rc, i;

	if (nvdimm) {
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_device *adev = nfit_mem->adev;

		if (!adev)
			return -ENOTTY;
		dimm_name = dev_name(&adev->dev);
		cmd_name = nvdimm_cmd_name(cmd);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		uuid = to_nfit_uuid(NFIT_DEV_DIMM);
		handle = adev->handle;
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		dsm_mask = nd_desc->dsm_mask;
		desc = nd_cmd_bus_desc(cmd);
		uuid = to_nfit_uuid(NFIT_DEV_BUS);
		handle = adev->handle;
		dimm_name = "bus";
	}

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
		return -ENOTTY;

	if (!test_bit(cmd, &dsm_mask))
		return -ENOTTY;

	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
				i, buf);

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %s input length: %d\n", __func__,
				dimm_name, cmd_name, in_buf.buffer.length);
		print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
				4, in_buf.buffer.pointer, min_t(u32, 128,
					in_buf.buffer.length), true);
	}

	out_obj = acpi_evaluate_dsm(handle, uuid, 1, cmd, &in_obj);
	if (!out_obj) {
		dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name,
				cmd_name);
		return -EINVAL;
	}

	if (out_obj->package.type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n",
				__func__, dimm_name, cmd_name, out_obj->type);
		rc = -EINVAL;
		goto out;
	}

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__,
				dimm_name, cmd_name, out_obj->buffer.length);
		print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
				4, out_obj->buffer.pointer, min_t(u32, 128,
					out_obj->buffer.length), true);
	}

	for (i = 0, offset = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
				(u32 *) out_obj->buffer.pointer);

		if (offset + out_size > out_obj->buffer.length) {
			dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			break;
		}

		if (in_buf.buffer.length + offset + out_size > buf_len) {
			dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		memcpy(buf + in_buf.buffer.length + offset,
				out_obj->buffer.pointer + offset, out_size);
		offset += out_size;
	}
	if (offset + in_buf.buffer.length < buf_len) {
		if (i >= 1) {
			/*
			 * status valid, return the number of bytes left
			 * unfilled in the output buffer
			 */
			rc = buf_len - offset - in_buf.buffer.length;
		} else {
			dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
					__func__, dimm_name, cmd_name, buf_len,
					offset);
			rc = -ENXIO;
		}
	} else
		rc = 0;

 out:
	ACPI_FREE(out_obj);

	return rc;
}

static const char *spa_type_name(u16 type)
{
	static const char *to_name[] = {
		[NFIT_SPA_VOLATILE] = "volatile",
		[NFIT_SPA_PM] = "pmem",
		[NFIT_SPA_DCR] = "dimm-control-region",
		[NFIT_SPA_BDW] = "block-data-window",
		[NFIT_SPA_VDISK] = "volatile-disk",
		[NFIT_SPA_VCD] = "volatile-cd",
		[NFIT_SPA_PDISK] = "persistent-disk",
		[NFIT_SPA_PCD] = "persistent-cd",

	};

	if (type > NFIT_SPA_PCD)
		return "unknown";

	return to_name[type];
}

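/*
 * nfit_spa_type - map a SPA range GUID to its NFIT_SPA_* index, or -1 if
 * the GUID is not one of the ranges initialized in nfit_init().
 */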
static int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
	int i;

	for (i = 0; i < NFIT_UUID_MAX; i++)
		if (memcmp(to_nfit_uuid(i), spa->range_guid, 16) == 0)
			return i;
	return -1;
}

static bool add_spa(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa),
			GFP_KERNEL);

	if (!nfit_spa)
		return false;
	INIT_LIST_HEAD(&nfit_spa->list);
	nfit_spa->spa = spa;
	list_add_tail(&nfit_spa->list, &acpi_desc->spas);
	dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__,
			spa->range_index,
			spa_type_name(nfit_spa_type(spa)));
	return true;
}

static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_memory_map *memdev)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_memdev *nfit_memdev = devm_kzalloc(dev,
			sizeof(*nfit_memdev), GFP_KERNEL);

	if (!nfit_memdev)
		return false;
	INIT_LIST_HEAD(&nfit_memdev->list);
	nfit_memdev->memdev = memdev;
	list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
	dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d\n",
			__func__, memdev->device_handle, memdev->range_index,
			memdev->region_index);
	return true;
}

static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_control_region *dcr)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_dcr *nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr),
			GFP_KERNEL);

	if (!nfit_dcr)
		return false;
	INIT_LIST_HEAD(&nfit_dcr->list);
	nfit_dcr->dcr = dcr;
	list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
	dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__,
			dcr->region_index, dcr->windows);
	return true;
}

static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_data_region *bdw)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_bdw *nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw),
			GFP_KERNEL);

	if (!nfit_bdw)
		return false;
	INIT_LIST_HEAD(&nfit_bdw->list);
	nfit_bdw->bdw = bdw;
	list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
	dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__,
			bdw->region_index, bdw->windows);
	return true;
}

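/*
 * add_table - dispatch one NFIT sub-table to the matching add_* helper and
 * return a pointer to the next sub-table (NULL at the end of the NFIT,
 * ERR_PTR(-ENOMEM) if an allocation failed).
 */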
static void *add_table(struct acpi_nfit_desc *acpi_desc, void *table,
		const void *end)
{
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_header *hdr;
	void *err = ERR_PTR(-ENOMEM);

	if (table >= end)
		return NULL;

	hdr = table;
	switch (hdr->type) {
	case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
		if (!add_spa(acpi_desc, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_MEMORY_MAP:
		if (!add_memdev(acpi_desc, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_CONTROL_REGION:
		if (!add_dcr(acpi_desc, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_DATA_REGION:
		if (!add_bdw(acpi_desc, table))
			return err;
		break;
	/* TODO */
	case ACPI_NFIT_TYPE_INTERLEAVE:
		dev_dbg(dev, "%s: idt\n", __func__);
		break;
	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
		dev_dbg(dev, "%s: flush\n", __func__);
		break;
	case ACPI_NFIT_TYPE_SMBIOS:
		dev_dbg(dev, "%s: smbios\n", __func__);
		break;
	default:
		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
		break;
	}

	return table + hdr->length;
}

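/*
 * nfit_mem_find_spa_bdw - locate the SPA-BDW range that pairs with this
 * dimm's control region by matching range index, device handle, and DCR
 * index across the memdev list; clear nfit_mem->bdw if no match exists.
 */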
static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem)
{
	u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
	u16 dcr = nfit_mem->dcr->region_index;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		u16 range_index = nfit_spa->spa->range_index;
		int type = nfit_spa_type(nfit_spa->spa);
		struct nfit_memdev *nfit_memdev;

		if (type != NFIT_SPA_BDW)
			continue;

		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			if (nfit_memdev->memdev->range_index != range_index)
				continue;
			if (nfit_memdev->memdev->device_handle != device_handle)
				continue;
			if (nfit_memdev->memdev->region_index != dcr)
				continue;

			nfit_mem->spa_bdw = nfit_spa->spa;
			return;
		}
	}

	dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
			nfit_mem->spa_dcr->range_index);
	nfit_mem->bdw = NULL;
}

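/*
 * nfit_mem_add - resolve the DCR (and, optionally, the BDW and SPA-BDW)
 * for a new nfit_mem entry and add it to the acpi_desc->dimms list.
 */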
static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
	struct nfit_dcr *nfit_dcr;
	struct nfit_bdw *nfit_bdw;

	list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
		if (nfit_dcr->dcr->region_index != dcr)
			continue;
		nfit_mem->dcr = nfit_dcr->dcr;
		break;
	}

	if (!nfit_mem->dcr) {
		dev_dbg(acpi_desc->dev, "SPA %d missing:%s%s\n",
				spa->range_index, __to_nfit_memdev(nfit_mem)
				? "" : " MEMDEV", nfit_mem->dcr ? "" : " DCR");
		return -ENODEV;
	}

	/*
	 * We've found enough to create an nvdimm, optionally
	 * find an associated BDW
	 */
	list_add(&nfit_mem->list, &acpi_desc->dimms);

	list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
		if (nfit_bdw->bdw->region_index != dcr)
			continue;
		nfit_mem->bdw = nfit_bdw->bdw;
		break;
	}

	if (!nfit_mem->bdw)
		return 0;

	nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);
	return 0;
}

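/*
 * nfit_mem_dcr_init - for each memdev that references @spa, find or create
 * the nfit_mem entry for the backing dimm and record the SPA-DCR or SPA-PM
 * association; ranges of other types are ignored.
 */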
static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_mem *nfit_mem, *found;
	struct nfit_memdev *nfit_memdev;
	int type = nfit_spa_type(spa);
	u16 dcr;

	switch (type) {
	case NFIT_SPA_DCR:
	case NFIT_SPA_PM:
		break;
	default:
		return 0;
	}

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		int rc;

		if (nfit_memdev->memdev->range_index != spa->range_index)
			continue;
		found = NULL;
		dcr = nfit_memdev->memdev->region_index;
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
			if (__to_nfit_memdev(nfit_mem)->region_index == dcr) {
				found = nfit_mem;
				break;
			}

		if (found)
			nfit_mem = found;
		else {
			nfit_mem = devm_kzalloc(acpi_desc->dev,
					sizeof(*nfit_mem), GFP_KERNEL);
			if (!nfit_mem)
				return -ENOMEM;
			INIT_LIST_HEAD(&nfit_mem->list);
		}

		if (type == NFIT_SPA_DCR) {
			/* multiple dimms may share a SPA when interleaved */
			nfit_mem->spa_dcr = spa;
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
		} else {
			/*
			 * A single dimm may belong to multiple SPA-PM
			 * ranges, record at least one in addition to
			 * any SPA-DCR range.
			 */
			nfit_mem->memdev_pmem = nfit_memdev->memdev;
		}

		if (found)
			continue;

		rc = nfit_mem_add(acpi_desc, nfit_mem, spa);
		if (rc)
			return rc;
	}

	return 0;
}

static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
{
	struct nfit_mem *a = container_of(_a, typeof(*a), list);
	struct nfit_mem *b = container_of(_b, typeof(*b), list);
	u32 handleA, handleB;

	handleA = __to_nfit_memdev(a)->device_handle;
	handleB = __to_nfit_memdev(b)->device_handle;
	if (handleA < handleB)
		return -1;
	else if (handleA > handleB)
		return 1;
	return 0;
}

static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;

	/*
	 * For each SPA-DCR or SPA-PMEM address range find its
	 * corresponding MEMDEV(s). From each MEMDEV find the
	 * corresponding DCR. Then, if we're operating on a SPA-DCR,
	 * try to find a SPA-BDW and a corresponding BDW that references
	 * the DCR. Throw it all into an nfit_mem object. Note, that
	 * BDWs are optional.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		int rc;

		rc = nfit_mem_dcr_init(acpi_desc, nfit_spa->spa);
		if (rc)
			return rc;
	}

	list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);

	return 0;
}

static ssize_t revision_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->nfit->header.revision);
}
static DEVICE_ATTR_RO(revision);

static struct attribute *acpi_nfit_attributes[] = {
	&dev_attr_revision.attr,
	NULL,
};

static struct attribute_group acpi_nfit_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_attributes,
};

static const struct attribute_group *acpi_nfit_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&acpi_nfit_attribute_group,
	NULL,
};

static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return __to_nfit_memdev(nfit_mem);
}

static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return nfit_mem->dcr;
}

static ssize_t handle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->device_handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t phys_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->vendor_id);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->revision_id);
}
static DEVICE_ATTR_RO(rev_id);

static ssize_t device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->device_id);
}
static DEVICE_ATTR_RO(device);

static ssize_t format_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->code);
}
static DEVICE_ATTR_RO(format);

static ssize_t serial_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->serial_number);
}
static DEVICE_ATTR_RO(serial);

static struct attribute *acpi_nfit_dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_format.attr,
	&dev_attr_serial.attr,
	&dev_attr_rev_id.attr,
	NULL,
};

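/*
 * Only expose the nfit attribute group when the dimm has a control region
 * backing the vendor/device/format/serial fields.
 */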
static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);

	if (to_nfit_dcr(dev))
		return a->mode;
	else
		return 0;
}

static struct attribute_group acpi_nfit_dimm_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_dimm_attributes,
	.is_visible = acpi_nfit_dimm_attr_visible,
};

static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
	&nvdimm_attribute_group,
	&acpi_nfit_dimm_attribute_group,
	NULL,
};

static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
		u32 device_handle)
{
	struct nfit_mem *nfit_mem;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
		if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
			return nfit_mem->nvdimm;

	return NULL;
}

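/*
 * acpi_nfit_add_dimm - find the dimm's companion ACPI device by its NFIT
 * device handle (_ADR), check its _STA, and probe which _DSM functions
 * (ND_CMD_SMART..ND_CMD_VENDOR) the firmware implements so they can be
 * recorded in nfit_mem->dsm_mask.
 */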
static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, u32 device_handle)
{
	struct acpi_device *adev, *adev_dimm;
	struct device *dev = acpi_desc->dev;
	const u8 *uuid = to_nfit_uuid(NFIT_DEV_DIMM);
	unsigned long long sta;
	int i, rc = -ENODEV;
	acpi_status status;

	nfit_mem->dsm_mask = acpi_desc->dimm_dsm_force_en;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return 0;

	adev_dimm = acpi_find_child_device(adev, device_handle, false);
	nfit_mem->adev = adev_dimm;
	if (!adev_dimm) {
		dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
				device_handle);
		return -ENODEV;
	}

	status = acpi_evaluate_integer(adev_dimm->handle, "_STA", NULL, &sta);
	if (status == AE_NOT_FOUND) {
		dev_dbg(dev, "%s missing _STA, assuming enabled...\n",
				dev_name(&adev_dimm->dev));
		rc = 0;
	} else if (ACPI_FAILURE(status))
		dev_err(dev, "%s failed to retrieve _STA, disabling...\n",
				dev_name(&adev_dimm->dev));
	else if ((sta & ACPI_STA_DEVICE_ENABLED) == 0)
		dev_info(dev, "%s disabled by firmware\n",
				dev_name(&adev_dimm->dev));
	else
		rc = 0;

	for (i = ND_CMD_SMART; i <= ND_CMD_VENDOR; i++)
		if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nfit_mem->dsm_mask);

	return rc;
}

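/*
 * acpi_nfit_register_dimms - create an nvdimm device for each nfit_mem
 * entry, skipping duplicate control regions, setting NDD_ALIASING when a
 * dimm has both a block data window and a pmem mapping, and wiring up the
 * per-dimm _DSM mask discovered by acpi_nfit_add_dimm().
 */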
static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_mem *nfit_mem;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct nvdimm *nvdimm;
		unsigned long flags = 0;
		u32 device_handle;
		int rc;

		device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
		nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
		if (nvdimm) {
			/*
			 * If for some reason we find multiple DCRs the
			 * first one wins
			 */
			dev_err(acpi_desc->dev, "duplicate DCR detected: %s\n",
					nvdimm_name(nvdimm));
			continue;
		}

		if (nfit_mem->bdw && nfit_mem->memdev_pmem)
			flags |= NDD_ALIASING;

		rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
		if (rc)
			continue;

		nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
				acpi_nfit_dimm_attribute_groups,
				flags, &nfit_mem->dsm_mask);
		if (!nvdimm)
			return -ENOMEM;

		nfit_mem->nvdimm = nvdimm;
	}

	return 0;
}

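/*
 * acpi_nfit_init_dsms - probe which bus-scope _DSM functions
 * (ND_CMD_ARS_CAP..ND_CMD_ARS_STATUS) the root NFIT device supports and
 * record them in the bus descriptor's dsm_mask.
 */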
static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	const u8 *uuid = to_nfit_uuid(NFIT_DEV_BUS);
	struct acpi_device *adev;
	int i;

	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return;

	for (i = ND_CMD_ARS_CAP; i <= ND_CMD_ARS_STATUS; i++)
		if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nd_desc->dsm_mask);
}

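/*
 * acpi_nfit_init - walk the NFIT sub-tables, build the spa/dcr/bdw/memdev
 * lists, collate them into nfit_mem entries, then discover bus _DSMs and
 * register the dimms with libnvdimm.
 */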
static int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
{
	struct device *dev = acpi_desc->dev;
	const void *end;
	u8 *data;

	INIT_LIST_HEAD(&acpi_desc->spas);
	INIT_LIST_HEAD(&acpi_desc->dcrs);
	INIT_LIST_HEAD(&acpi_desc->bdws);
	INIT_LIST_HEAD(&acpi_desc->memdevs);
	INIT_LIST_HEAD(&acpi_desc->dimms);

	data = (u8 *) acpi_desc->nfit;
	end = data + sz;
	data += sizeof(struct acpi_table_nfit);
	while (!IS_ERR_OR_NULL(data))
		data = add_table(acpi_desc, data, end);

	if (IS_ERR(data)) {
		dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__,
				PTR_ERR(data));
		return PTR_ERR(data);
	}

	if (nfit_mem_init(acpi_desc) != 0)
		return -ENOMEM;

	acpi_nfit_init_dsms(acpi_desc);

	return acpi_nfit_register_dimms(acpi_desc);
}

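/*
 * acpi_nfit_add - driver .add callback for the root NFIT device (ACPI0012):
 * locate the NFIT, register an nvdimm bus for it, and hand the table off to
 * acpi_nfit_init() for parsing.
 */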
static int acpi_nfit_add(struct acpi_device *adev)
{
	struct nvdimm_bus_descriptor *nd_desc;
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &adev->dev;
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	acpi_size sz;
	int rc;

	status = acpi_get_table_with_size("NFIT", 0, &tbl, &sz);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "failed to find NFIT\n");
		return -ENXIO;
	}

	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return -ENOMEM;

	dev_set_drvdata(dev, acpi_desc);
	acpi_desc->dev = dev;
	acpi_desc->nfit = (struct acpi_table_nfit *) tbl;
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->provider_name = "ACPI.NFIT";
	nd_desc->ndctl = acpi_nfit_ctl;
	nd_desc->attr_groups = acpi_nfit_attribute_groups;

	acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, nd_desc);
	if (!acpi_desc->nvdimm_bus)
		return -ENXIO;

	rc = acpi_nfit_init(acpi_desc, sz);
	if (rc) {
		nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
		return rc;
	}
	return 0;
}

static int acpi_nfit_remove(struct acpi_device *adev)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);

	nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
	return 0;
}

static const struct acpi_device_id acpi_nfit_ids[] = {
	{ "ACPI0012", 0 },
	{ "", 0 },
};
MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);

static struct acpi_driver acpi_nfit_driver = {
	.name = KBUILD_MODNAME,
	.ids = acpi_nfit_ids,
	.ops = {
		.add = acpi_nfit_add,
		.remove = acpi_nfit_remove,
	},
};

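/*
 * nfit_init - sanity check the ACPICA NFIT structure sizes, convert the
 * well-known NFIT GUID strings to binary form for range/type matching,
 * and register the ACPI driver.
 */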
static __init int nfit_init(void)
{
	BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);

	acpi_str_to_uuid(UUID_VOLATILE_MEMORY, nfit_uuid[NFIT_SPA_VOLATILE]);
	acpi_str_to_uuid(UUID_PERSISTENT_MEMORY, nfit_uuid[NFIT_SPA_PM]);
	acpi_str_to_uuid(UUID_CONTROL_REGION, nfit_uuid[NFIT_SPA_DCR]);
	acpi_str_to_uuid(UUID_DATA_REGION, nfit_uuid[NFIT_SPA_BDW]);
	acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_VDISK]);
	acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD, nfit_uuid[NFIT_SPA_VCD]);
	acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_PDISK]);
	acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]);
	acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]);
	acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]);

	return acpi_bus_register_driver(&acpi_nfit_driver);
}

static __exit void nfit_exit(void)
{
	acpi_bus_unregister_driver(&acpi_nfit_driver);
}

module_init(nfit_init);
module_exit(nfit_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");