/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/io.h>
#include "nfit.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <asm-generic/io-64-nonatomic-hi-lo.h>

static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");
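
/*
 * Illustrative usage note (not part of the original source): assuming this
 * file is built as nfit.ko, the override can be set at load time with
 * "modprobe nfit force_enable_dimms=1" or toggled afterwards through
 * /sys/module/nfit/parameters/force_enable_dimms, since the parameter is
 * declared with S_IRUGO|S_IWUSR permissions above.
 */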

static u8 nfit_uuid[NFIT_UUID_MAX][16];

const u8 *to_nfit_uuid(enum nfit_uuids id)
{
	return nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);

static struct acpi_nfit_desc *to_acpi_nfit_desc(
		struct nvdimm_bus_descriptor *nd_desc)
{
	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}

static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int buf_len)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	const struct nd_cmd_desc *desc = NULL;
	union acpi_object in_obj, in_buf, *out_obj;
	struct device *dev = acpi_desc->dev;
	const char *cmd_name, *dimm_name;
	unsigned long dsm_mask;
	acpi_handle handle;
	const u8 *uuid;
	u32 offset;
	int rc, i;

	if (nvdimm) {
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_device *adev = nfit_mem->adev;

		if (!adev)
			return -ENOTTY;
		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		uuid = to_nfit_uuid(NFIT_DEV_DIMM);
		handle = adev->handle;
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		dsm_mask = nd_desc->dsm_mask;
		desc = nd_cmd_bus_desc(cmd);
		uuid = to_nfit_uuid(NFIT_DEV_BUS);
		handle = adev->handle;
		dimm_name = "bus";
	}

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
		return -ENOTTY;

	if (!test_bit(cmd, &dsm_mask))
		return -ENOTTY;

	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
				i, buf);

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %s input length: %d\n", __func__,
				dimm_name, cmd_name, in_buf.buffer.length);
		print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
				4, in_buf.buffer.pointer, min_t(u32, 128,
					in_buf.buffer.length), true);
	}

	out_obj = acpi_evaluate_dsm(handle, uuid, 1, cmd, &in_obj);
	if (!out_obj) {
		dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name,
				cmd_name);
		return -EINVAL;
	}

	if (out_obj->package.type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n",
				__func__, dimm_name, cmd_name, out_obj->type);
		rc = -EINVAL;
		goto out;
	}

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__,
				dimm_name, cmd_name, out_obj->buffer.length);
		print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
				4, out_obj->buffer.pointer, min_t(u32, 128,
					out_obj->buffer.length), true);
	}

	for (i = 0, offset = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
				(u32 *) out_obj->buffer.pointer);

		if (offset + out_size > out_obj->buffer.length) {
			dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			break;
		}

		if (in_buf.buffer.length + offset + out_size > buf_len) {
			dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		memcpy(buf + in_buf.buffer.length + offset,
				out_obj->buffer.pointer + offset, out_size);
		offset += out_size;
	}
	if (offset + in_buf.buffer.length < buf_len) {
		if (i >= 1) {
			/*
			 * status valid, return the number of bytes left
			 * unfilled in the output buffer
			 */
			rc = buf_len - offset - in_buf.buffer.length;
		} else {
			dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
					__func__, dimm_name, cmd_name, buf_len,
					offset);
			rc = -ENXIO;
		}
	} else
		rc = 0;

 out:
	ACPI_FREE(out_obj);

	return rc;
}
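
/*
 * Illustrative note (not part of the original source): the command buffer is
 * a single envelope with all input fields first and all output fields
 * appended after them. For a hypothetical command with an 8-byte input
 * payload and a 4-byte status output in a 16-byte buffer, the memcpy above
 * lands the status at buf + 8 and the function returns 16 - 8 - 4 = 4, the
 * number of output bytes left unfilled.
 */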

static const char *spa_type_name(u16 type)
{
	static const char *to_name[] = {
		[NFIT_SPA_VOLATILE] = "volatile",
		[NFIT_SPA_PM] = "pmem",
		[NFIT_SPA_DCR] = "dimm-control-region",
		[NFIT_SPA_BDW] = "block-data-window",
		[NFIT_SPA_VDISK] = "volatile-disk",
		[NFIT_SPA_VCD] = "volatile-cd",
		[NFIT_SPA_PDISK] = "persistent-disk",
		[NFIT_SPA_PCD] = "persistent-cd",
	};

	if (type > NFIT_SPA_PCD)
		return "unknown";

	return to_name[type];
}

static int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
	int i;

	for (i = 0; i < NFIT_UUID_MAX; i++)
		if (memcmp(to_nfit_uuid(i), spa->range_guid, 16) == 0)
			return i;
	return -1;
}

static bool add_spa(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa),
			GFP_KERNEL);

	if (!nfit_spa)
		return false;
	INIT_LIST_HEAD(&nfit_spa->list);
	nfit_spa->spa = spa;
	list_add_tail(&nfit_spa->list, &acpi_desc->spas);
	dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__,
			spa->range_index,
			spa_type_name(nfit_spa_type(spa)));
	return true;
}

static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_memory_map *memdev)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_memdev *nfit_memdev = devm_kzalloc(dev,
			sizeof(*nfit_memdev), GFP_KERNEL);

	if (!nfit_memdev)
		return false;
	INIT_LIST_HEAD(&nfit_memdev->list);
	nfit_memdev->memdev = memdev;
	list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
	dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d\n",
			__func__, memdev->device_handle, memdev->range_index,
			memdev->region_index);
	return true;
}

static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_control_region *dcr)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_dcr *nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr),
			GFP_KERNEL);

	if (!nfit_dcr)
		return false;
	INIT_LIST_HEAD(&nfit_dcr->list);
	nfit_dcr->dcr = dcr;
	list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
	dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__,
			dcr->region_index, dcr->windows);
	return true;
}

static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_data_region *bdw)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_bdw *nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw),
			GFP_KERNEL);

	if (!nfit_bdw)
		return false;
	INIT_LIST_HEAD(&nfit_bdw->list);
	nfit_bdw->bdw = bdw;
	list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
	dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__,
			bdw->region_index, bdw->windows);
	return true;
}

static bool add_idt(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_interleave *idt)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_idt *nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt),
			GFP_KERNEL);

	if (!nfit_idt)
		return false;
	INIT_LIST_HEAD(&nfit_idt->list);
	nfit_idt->idt = idt;
	list_add_tail(&nfit_idt->list, &acpi_desc->idts);
	dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__,
			idt->interleave_index, idt->line_count);
	return true;
}

static void *add_table(struct acpi_nfit_desc *acpi_desc, void *table,
		const void *end)
{
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_header *hdr;
	void *err = ERR_PTR(-ENOMEM);

	if (table >= end)
		return NULL;

	hdr = table;
	switch (hdr->type) {
	case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
		if (!add_spa(acpi_desc, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_MEMORY_MAP:
		if (!add_memdev(acpi_desc, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_CONTROL_REGION:
		if (!add_dcr(acpi_desc, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_DATA_REGION:
		if (!add_bdw(acpi_desc, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_INTERLEAVE:
		if (!add_idt(acpi_desc, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
		dev_dbg(dev, "%s: flush\n", __func__);
		break;
	case ACPI_NFIT_TYPE_SMBIOS:
		dev_dbg(dev, "%s: smbios\n", __func__);
		break;
	default:
		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
		break;
	}

	return table + hdr->length;
}

static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem)
{
	u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
	u16 dcr = nfit_mem->dcr->region_index;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		u16 range_index = nfit_spa->spa->range_index;
		int type = nfit_spa_type(nfit_spa->spa);
		struct nfit_memdev *nfit_memdev;

		if (type != NFIT_SPA_BDW)
			continue;

		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			if (nfit_memdev->memdev->range_index != range_index)
				continue;
			if (nfit_memdev->memdev->device_handle != device_handle)
				continue;
			if (nfit_memdev->memdev->region_index != dcr)
				continue;

			nfit_mem->spa_bdw = nfit_spa->spa;
			return;
		}
	}

	dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
			nfit_mem->spa_dcr->range_index);
	nfit_mem->bdw = NULL;
}

static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
	struct nfit_memdev *nfit_memdev;
	struct nfit_dcr *nfit_dcr;
	struct nfit_bdw *nfit_bdw;
	struct nfit_idt *nfit_idt;
	u16 idt_idx, range_index;

	list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
		if (nfit_dcr->dcr->region_index != dcr)
			continue;
		nfit_mem->dcr = nfit_dcr->dcr;
		break;
	}

	if (!nfit_mem->dcr) {
		dev_dbg(acpi_desc->dev, "SPA %d missing:%s%s\n",
				spa->range_index, __to_nfit_memdev(nfit_mem)
				? "" : " MEMDEV", nfit_mem->dcr ? "" : " DCR");
		return -ENODEV;
	}

	/*
	 * We've found enough to create an nvdimm, optionally
	 * find an associated BDW
	 */
	list_add(&nfit_mem->list, &acpi_desc->dimms);

	list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
		if (nfit_bdw->bdw->region_index != dcr)
			continue;
		nfit_mem->bdw = nfit_bdw->bdw;
		break;
	}

	if (!nfit_mem->bdw)
		return 0;

	nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);

	if (!nfit_mem->spa_bdw)
		return 0;

	range_index = nfit_mem->spa_bdw->range_index;
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		if (nfit_memdev->memdev->range_index != range_index ||
				nfit_memdev->memdev->region_index != dcr)
			continue;
		nfit_mem->memdev_bdw = nfit_memdev->memdev;
		idt_idx = nfit_memdev->memdev->interleave_index;
		list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
			if (nfit_idt->idt->interleave_index != idt_idx)
				continue;
			nfit_mem->idt_bdw = nfit_idt->idt;
			break;
		}
		break;
	}

	return 0;
}

static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_mem *nfit_mem, *found;
	struct nfit_memdev *nfit_memdev;
	int type = nfit_spa_type(spa);
	u16 dcr;

	switch (type) {
	case NFIT_SPA_DCR:
	case NFIT_SPA_PM:
		break;
	default:
		return 0;
	}

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		int rc;

		if (nfit_memdev->memdev->range_index != spa->range_index)
			continue;
		found = NULL;
		dcr = nfit_memdev->memdev->region_index;
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
			if (__to_nfit_memdev(nfit_mem)->region_index == dcr) {
				found = nfit_mem;
				break;
			}

		if (found)
			nfit_mem = found;
		else {
			nfit_mem = devm_kzalloc(acpi_desc->dev,
					sizeof(*nfit_mem), GFP_KERNEL);
			if (!nfit_mem)
				return -ENOMEM;
			INIT_LIST_HEAD(&nfit_mem->list);
		}

		if (type == NFIT_SPA_DCR) {
			struct nfit_idt *nfit_idt;
			u16 idt_idx;

			/* multiple dimms may share a SPA when interleaved */
			nfit_mem->spa_dcr = spa;
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
			idt_idx = nfit_memdev->memdev->interleave_index;
			list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
				if (nfit_idt->idt->interleave_index != idt_idx)
					continue;
				nfit_mem->idt_dcr = nfit_idt->idt;
				break;
			}
		} else {
			/*
			 * A single dimm may belong to multiple SPA-PM
			 * ranges, record at least one in addition to
			 * any SPA-DCR range.
			 */
			nfit_mem->memdev_pmem = nfit_memdev->memdev;
		}

		if (found)
			continue;

		rc = nfit_mem_add(acpi_desc, nfit_mem, spa);
		if (rc)
			return rc;
	}

	return 0;
}

static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
{
	struct nfit_mem *a = container_of(_a, typeof(*a), list);
	struct nfit_mem *b = container_of(_b, typeof(*b), list);
	u32 handleA, handleB;

	handleA = __to_nfit_memdev(a)->device_handle;
	handleB = __to_nfit_memdev(b)->device_handle;
	if (handleA < handleB)
		return -1;
	else if (handleA > handleB)
		return 1;
	return 0;
}

static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;

	/*
	 * For each SPA-DCR or SPA-PMEM address range find its
	 * corresponding MEMDEV(s).  From each MEMDEV find the
	 * corresponding DCR.  Then, if we're operating on a SPA-DCR,
	 * try to find a SPA-BDW and a corresponding BDW that references
	 * the DCR.  Throw it all into an nfit_mem object.  Note, that
	 * BDWs are optional.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		int rc;

		rc = nfit_mem_dcr_init(acpi_desc, nfit_spa->spa);
		if (rc)
			return rc;
	}

	list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);

	return 0;
}

static ssize_t revision_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->nfit->header.revision);
}
static DEVICE_ATTR_RO(revision);

static struct attribute *acpi_nfit_attributes[] = {
	&dev_attr_revision.attr,
	NULL,
};

static struct attribute_group acpi_nfit_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_attributes,
};

const struct attribute_group *acpi_nfit_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&acpi_nfit_attribute_group,
	NULL,
};
EXPORT_SYMBOL_GPL(acpi_nfit_attribute_groups);

static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return __to_nfit_memdev(nfit_mem);
}

static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return nfit_mem->dcr;
}

static ssize_t handle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->device_handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t phys_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->vendor_id);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->revision_id);
}
static DEVICE_ATTR_RO(rev_id);

static ssize_t device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->device_id);
}
static DEVICE_ATTR_RO(device);

static ssize_t format_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->code);
}
static DEVICE_ATTR_RO(format);

static ssize_t serial_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->serial_number);
}
static DEVICE_ATTR_RO(serial);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u16 flags = to_nfit_memdev(dev)->flags;

	return sprintf(buf, "%s%s%s%s%s\n",
		flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save " : "",
		flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore " : "",
		flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush " : "",
		flags & ACPI_NFIT_MEM_ARMED ? "arm " : "",
		flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart " : "");
}
static DEVICE_ATTR_RO(flags);
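
/*
 * Illustrative note (not part of the original source): the attribute reads
 * back as a space-separated list of the raised flags, e.g. a DIMM with
 * ACPI_NFIT_MEM_SAVE_FAILED and ACPI_NFIT_MEM_ARMED set reports "save arm ".
 */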

static struct attribute *acpi_nfit_dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_format.attr,
	&dev_attr_serial.attr,
	&dev_attr_rev_id.attr,
	&dev_attr_flags.attr,
	NULL,
};

static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);

	if (to_nfit_dcr(dev))
		return a->mode;
	else
		return 0;
}

static struct attribute_group acpi_nfit_dimm_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_dimm_attributes,
	.is_visible = acpi_nfit_dimm_attr_visible,
};

static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
	&nvdimm_attribute_group,
	&nd_device_attribute_group,
	&acpi_nfit_dimm_attribute_group,
	NULL,
};

static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
		u32 device_handle)
{
	struct nfit_mem *nfit_mem;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
		if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
			return nfit_mem->nvdimm;

	return NULL;
}

static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, u32 device_handle)
{
	struct acpi_device *adev, *adev_dimm;
	struct device *dev = acpi_desc->dev;
	const u8 *uuid = to_nfit_uuid(NFIT_DEV_DIMM);
	unsigned long long sta;
	int i, rc = -ENODEV;
	acpi_status status;

	nfit_mem->dsm_mask = acpi_desc->dimm_dsm_force_en;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return 0;

	adev_dimm = acpi_find_child_device(adev, device_handle, false);
	nfit_mem->adev = adev_dimm;
	if (!adev_dimm) {
		dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
				device_handle);
		return force_enable_dimms ? 0 : -ENODEV;
	}

	status = acpi_evaluate_integer(adev_dimm->handle, "_STA", NULL, &sta);
	if (status == AE_NOT_FOUND) {
		dev_dbg(dev, "%s missing _STA, assuming enabled...\n",
				dev_name(&adev_dimm->dev));
		rc = 0;
	} else if (ACPI_FAILURE(status))
		dev_err(dev, "%s failed to retrieve _STA, disabling...\n",
				dev_name(&adev_dimm->dev));
	else if ((sta & ACPI_STA_DEVICE_ENABLED) == 0)
		dev_info(dev, "%s disabled by firmware\n",
				dev_name(&adev_dimm->dev));
	else
		rc = 0;

	for (i = ND_CMD_SMART; i <= ND_CMD_VENDOR; i++)
		if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nfit_mem->dsm_mask);

	return force_enable_dimms ? 0 : rc;
}

static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_mem *nfit_mem;
	int dimm_count = 0;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct nvdimm *nvdimm;
		unsigned long flags = 0;
		u32 device_handle;
		u16 mem_flags;
		int rc;

		device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
		nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
		if (nvdimm) {
			/*
			 * If for some reason we find multiple DCRs the
			 * first one wins
			 */
			dev_err(acpi_desc->dev, "duplicate DCR detected: %s\n",
					nvdimm_name(nvdimm));
			continue;
		}

		if (nfit_mem->bdw && nfit_mem->memdev_pmem)
			flags |= NDD_ALIASING;

		mem_flags = __to_nfit_memdev(nfit_mem)->flags;
		if (mem_flags & ACPI_NFIT_MEM_ARMED)
			flags |= NDD_UNARMED;

		rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
		if (rc)
			continue;

		nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
				acpi_nfit_dimm_attribute_groups,
				flags, &nfit_mem->dsm_mask);
		if (!nvdimm)
			return -ENOMEM;

		nfit_mem->nvdimm = nvdimm;
		dimm_count++;

		if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
			continue;

		dev_info(acpi_desc->dev, "%s: failed: %s%s%s%s\n",
			nvdimm_name(nvdimm),
			mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save " : "",
			mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore " : "",
			mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush " : "",
			mem_flags & ACPI_NFIT_MEM_ARMED ? "arm " : "");
	}

	return nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
}

static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	const u8 *uuid = to_nfit_uuid(NFIT_DEV_BUS);
	struct acpi_device *adev;
	int i;

	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return;

	for (i = ND_CMD_ARS_CAP; i <= ND_CMD_ARS_STATUS; i++)
		if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nd_desc->dsm_mask);
}

static ssize_t range_index_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);

	return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
}
static DEVICE_ATTR_RO(range_index);

static struct attribute *acpi_nfit_region_attributes[] = {
	&dev_attr_range_index.attr,
	NULL,
};

static struct attribute_group acpi_nfit_region_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_region_attributes,
};

static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
	&nd_region_attribute_group,
	&nd_mapping_attribute_group,
	&nd_device_attribute_group,
	&nd_numa_attribute_group,
	&acpi_nfit_region_attribute_group,
	NULL,
};

/* enough info to uniquely specify an interleave set */
struct nfit_set_info {
	struct nfit_set_info_map {
		u64 region_offset;
		u32 serial_number;
		u32 pad;
	} mapping[0];
};

static size_t sizeof_nfit_set_info(int num_mappings)
{
	return sizeof(struct nfit_set_info)
		+ num_mappings * sizeof(struct nfit_set_info_map);
}

static int cmp_map(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	return memcmp(&map0->region_offset, &map1->region_offset,
			sizeof(u64));
}

/* Retrieve the nth entry referencing this spa */
static struct acpi_nfit_memory_map *memdev_from_spa(
		struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
{
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
		if (nfit_memdev->memdev->range_index == range_index)
			if (n-- == 0)
				return nfit_memdev->memdev;
	return NULL;
}

static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc,
		struct acpi_nfit_system_address *spa)
{
	int i, spa_type = nfit_spa_type(spa);
	struct device *dev = acpi_desc->dev;
	struct nd_interleave_set *nd_set;
	u16 nr = ndr_desc->num_mappings;
	struct nfit_set_info *info;

	if (spa_type == NFIT_SPA_PM || spa_type == NFIT_SPA_VOLATILE)
		/* pass */;
	else
		return 0;

	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set)
		return -ENOMEM;

	info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nfit_set_info_map *map = &info->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
				spa->range_index, i);

		if (!memdev || !nfit_mem->dcr) {
			dev_err(dev, "%s: failed to find DCR\n", __func__);
			return -ENODEV;
		}

		map->region_offset = memdev->region_offset;
		map->serial_number = nfit_mem->dcr->serial_number;
	}

	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map, NULL);
	nd_set->cookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
	ndr_desc->nd_set = nd_set;
	devm_kfree(dev, info);

	return 0;
}
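
/*
 * Illustrative note (not part of the original source): the interleave-set
 * cookie computed above is a Fletcher64 checksum over the sorted
 * (region_offset, serial_number) pairs, so the same collection of DIMMs
 * produces the same cookie regardless of the order in which the mappings
 * were enumerated.
 */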

static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
{
	struct acpi_nfit_interleave *idt = mmio->idt;
	u32 sub_line_offset, line_index, line_offset;
	u64 line_no, table_skip_count, table_offset;

	line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
	table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
	line_offset = idt->line_offset[line_index]
		* mmio->line_size;
	table_offset = table_skip_count * mmio->table_size;

	return mmio->base_offset + line_offset + table_offset + sub_line_offset;
}
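
/*
 * Worked example (illustrative values, not from the original source): with
 * line_size = 256, num_lines = 2 and an idt whose line_offset[] begins
 * { 0, 2 }, an aperture-relative offset of 300 yields line_no = 1,
 * sub_line_offset = 44, table_skip_count = 0, line_index = 1,
 * line_offset = 2 * 256 = 512 and table_offset = 0, so the translated
 * offset is base_offset + 512 + 0 + 44.
 */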

static u64 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
	u64 offset = nfit_blk->stat_offset + mmio->size * bw;

	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	return readq(mmio->base + offset);
}

static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
		resource_size_t dpa, unsigned int len, unsigned int write)
{
	u64 cmd, offset;
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];

	enum {
		BCW_OFFSET_MASK = (1ULL << 48)-1,
		BCW_LEN_SHIFT = 48,
		BCW_LEN_MASK = (1ULL << 8) - 1,
		BCW_CMD_SHIFT = 56,
	};

	cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
	len = len >> L1_CACHE_SHIFT;
	cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
	cmd |= ((u64) write) << BCW_CMD_SHIFT;

	offset = nfit_blk->cmd_offset + mmio->size * bw;
	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	writeq(cmd, mmio->base + offset);
	/* FIXME: conditionally perform read-back if mandated by firmware */
}
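
/*
 * Illustrative encoding (assumes 64-byte cache lines, i.e. L1_CACHE_SHIFT of
 * 6; not from the original source): a 4096-byte write to dpa 0x2000 produces
 * cmd = (0x2000 >> 6) | ((4096 >> 6) << 48) | (1ULL << 56)
 *     = 0x0140000000000080,
 * with bits 0-47 holding the DPA in cache-line units, bits 48-55 the length
 * in cache-line units, and bit 56 the write flag.
 */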

static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
		resource_size_t dpa, void *iobuf, size_t len, int rw,
		unsigned int lane)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	unsigned int copied = 0;
	u64 base_offset;
	int rc;

	base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
		+ lane * mmio->size;
	/* TODO: non-temporal access, flush hints, cache management etc... */
	write_blk_ctl(nfit_blk, lane, dpa, len, rw);
	while (len) {
		unsigned int c;
		u64 offset;

		if (mmio->num_lines) {
			u32 line_offset;

			offset = to_interleave_offset(base_offset + copied,
					mmio);
			div_u64_rem(offset, mmio->line_size, &line_offset);
			c = min_t(size_t, len, mmio->line_size - line_offset);
		} else {
			offset = base_offset + nfit_blk->bdw_offset;
			c = len;
		}

		if (rw)
			memcpy(mmio->aperture + offset, iobuf + copied, c);
		else
			memcpy(iobuf + copied, mmio->aperture + offset, c);

		copied += c;
		len -= c;
	}
	rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
	return rc;
}

static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
		resource_size_t dpa, void *iobuf, u64 len, int rw)
{
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	struct nd_region *nd_region = nfit_blk->nd_region;
	unsigned int lane, copied = 0;
	int rc = 0;

	lane = nd_region_acquire_lane(nd_region);
	while (len) {
		u64 c = min(len, mmio->size);

		rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
				iobuf + copied, c, rw, lane);
		if (rc)
			break;

		copied += c;
		len -= c;
	}
	nd_region_release_lane(nd_region, lane);

	return rc;
}
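
/*
 * Illustrative note (not part of the original source): each caller holds a
 * region "lane" for the duration of the transfer so that concurrent contexts
 * use distinct control/aperture windows. A hypothetical 16KiB request against
 * an 8KiB aperture (mmio->size) is split into two acpi_nfit_blk_single_io()
 * calls on the same lane.
 */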

static void nfit_spa_mapping_release(struct kref *kref)
{
	struct nfit_spa_mapping *spa_map = to_spa_map(kref);
	struct acpi_nfit_system_address *spa = spa_map->spa;
	struct acpi_nfit_desc *acpi_desc = spa_map->acpi_desc;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
	dev_dbg(acpi_desc->dev, "%s: SPA%d\n", __func__, spa->range_index);
	iounmap(spa_map->iomem);
	release_mem_region(spa->address, spa->length);
	list_del(&spa_map->list);
	kfree(spa_map);
}

static struct nfit_spa_mapping *find_spa_mapping(
		struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_spa_mapping *spa_map;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
	list_for_each_entry(spa_map, &acpi_desc->spa_maps, list)
		if (spa_map->spa == spa)
			return spa_map;

	return NULL;
}

static void nfit_spa_unmap(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_spa_mapping *spa_map;

	mutex_lock(&acpi_desc->spa_map_mutex);
	spa_map = find_spa_mapping(acpi_desc, spa);

	if (spa_map)
		kref_put(&spa_map->kref, nfit_spa_mapping_release);
	mutex_unlock(&acpi_desc->spa_map_mutex);
}

static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	resource_size_t start = spa->address;
	resource_size_t n = spa->length;
	struct nfit_spa_mapping *spa_map;
	struct resource *res;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));

	spa_map = find_spa_mapping(acpi_desc, spa);
	if (spa_map) {
		kref_get(&spa_map->kref);
		return spa_map->iomem;
	}

	spa_map = kzalloc(sizeof(*spa_map), GFP_KERNEL);
	if (!spa_map)
		return NULL;

	INIT_LIST_HEAD(&spa_map->list);
	spa_map->spa = spa;
	kref_init(&spa_map->kref);
	spa_map->acpi_desc = acpi_desc;

	res = request_mem_region(start, n, dev_name(acpi_desc->dev));
	if (!res)
		goto err_mem;

	/* TODO: cacheability based on the spa type */
	spa_map->iomem = ioremap_nocache(start, n);
	if (!spa_map->iomem)
		goto err_map;

	list_add_tail(&spa_map->list, &acpi_desc->spa_maps);
	return spa_map->iomem;

 err_map:
	release_mem_region(start, n);
 err_mem:
	kfree(spa_map);
	return NULL;
}

/**
 * nfit_spa_map - interleave-aware managed-mappings of acpi_nfit_system_address ranges
 * @acpi_desc: NFIT-bus descriptor that provided the spa table entry
 * @spa: spa table entry to map
 *
 * In the case where block-data-window apertures and
 * dimm-control-regions are interleaved they will end up sharing a
 * single request_mem_region() + ioremap() for the address range.  In
 * the style of devm nfit_spa_map() mappings are automatically dropped
 * when all region devices referencing the same mapping are disabled /
 * unbound.
 */
static void __iomem *nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	void __iomem *iomem;

	mutex_lock(&acpi_desc->spa_map_mutex);
	iomem = __nfit_spa_map(acpi_desc, spa);
	mutex_unlock(&acpi_desc->spa_map_mutex);

	return iomem;
}
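
/*
 * Illustrative usage note (not part of the original source): a successful
 * nfit_spa_map() is expected to be balanced by nfit_spa_unmap() on the same
 * spa so the kref taken above is eventually dropped;
 * acpi_nfit_blk_region_enable() and acpi_nfit_blk_region_disable() below
 * follow that pattern for the BDW and DCR ranges.
 */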

static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
		struct acpi_nfit_interleave *idt, u16 interleave_ways)
{
	if (idt) {
		mmio->num_lines = idt->line_count;
		mmio->line_size = idt->line_size;
		if (interleave_ways == 0)
			return -ENXIO;
		mmio->table_size = mmio->num_lines * interleave_ways
			* mmio->line_size;
	}

	return 0;
}
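
/*
 * Illustrative arithmetic (not from the original source): for a hypothetical
 * two-way interleave with line_count = 2 and line_size = 256, table_size
 * works out to 2 * 2 * 256 = 1024 bytes, i.e. the span covered by one full
 * pass over the interleave table.
 */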

static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
		struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
	struct nfit_blk_mmio *mmio;
	struct nfit_blk *nfit_blk;
	struct nfit_mem *nfit_mem;
	struct nvdimm *nvdimm;
	int rc;

	nvdimm = nd_blk_region_to_dimm(ndbr);
	nfit_mem = nvdimm_provider_data(nvdimm);
	if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
		dev_dbg(dev, "%s: missing%s%s%s\n", __func__,
				nfit_mem ? "" : " nfit_mem",
				(nfit_mem && nfit_mem->dcr) ? "" : " dcr",
				(nfit_mem && nfit_mem->bdw) ? "" : " bdw");
		return -ENXIO;
	}

	nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL);
	if (!nfit_blk)
		return -ENOMEM;
	nd_blk_region_set_provider_data(ndbr, nfit_blk);
	nfit_blk->nd_region = to_nd_region(dev);

	/* map block aperture memory */
	nfit_blk->bdw_offset = nfit_mem->bdw->offset;
	mmio = &nfit_blk->mmio[BDW];
	mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw);
	if (!mmio->base) {
		dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->bdw->size;
	mmio->base_offset = nfit_mem->memdev_bdw->region_offset;
	mmio->idt = nfit_mem->idt_bdw;
	mmio->spa = nfit_mem->spa_bdw;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw,
			nfit_mem->memdev_bdw->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s: %s failed to init bdw interleave\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	/* map block control memory */
	nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
	nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
	mmio = &nfit_blk->mmio[DCR];
	mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr);
	if (!mmio->base) {
		dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->dcr->window_size;
	mmio->base_offset = nfit_mem->memdev_dcr->region_offset;
	mmio->idt = nfit_mem->idt_dcr;
	mmio->spa = nfit_mem->spa_dcr;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr,
			nfit_mem->memdev_dcr->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s: %s failed to init dcr interleave\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	if (mmio->line_size == 0)
		return 0;

	if ((u32) nfit_blk->cmd_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "cmd_offset crosses interleave boundary\n");
		return -ENXIO;
	} else if ((u32) nfit_blk->stat_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "stat_offset crosses interleave boundary\n");
		return -ENXIO;
	}

	return 0;
}

static void acpi_nfit_blk_region_disable(struct nvdimm_bus *nvdimm_bus,
		struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	int i;

	if (!nfit_blk)
		return; /* never enabled */

	/* auto-free BLK spa mappings */
	for (i = 0; i < 2; i++) {
		struct nfit_blk_mmio *mmio = &nfit_blk->mmio[i];

		if (mmio->base)
			nfit_spa_unmap(acpi_desc, mmio->spa);
	}
	nd_blk_region_set_provider_data(ndbr, NULL);
	/* devm will free nfit_blk */
}

static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
		struct nd_mapping *nd_mapping, struct nd_region_desc *ndr_desc,
		struct acpi_nfit_memory_map *memdev,
		struct acpi_nfit_system_address *spa)
{
	struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
			memdev->device_handle);
	struct nd_blk_region_desc *ndbr_desc;
	struct nfit_mem *nfit_mem;
	int blk_valid = 0;

	if (!nvdimm) {
		dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
				spa->range_index, memdev->device_handle);
		return -ENODEV;
	}

	nd_mapping->nvdimm = nvdimm;
	switch (nfit_spa_type(spa)) {
	case NFIT_SPA_PM:
	case NFIT_SPA_VOLATILE:
		nd_mapping->start = memdev->address;
		nd_mapping->size = memdev->region_size;
		break;
	case NFIT_SPA_DCR:
		nfit_mem = nvdimm_provider_data(nvdimm);
		if (!nfit_mem || !nfit_mem->bdw) {
			dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
					spa->range_index, nvdimm_name(nvdimm));
		} else {
			nd_mapping->size = nfit_mem->bdw->capacity;
			nd_mapping->start = nfit_mem->bdw->start_address;
			ndr_desc->num_lanes = nfit_mem->bdw->windows;
			blk_valid = 1;
		}

		ndr_desc->nd_mapping = nd_mapping;
		ndr_desc->num_mappings = blk_valid;
		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr_desc->enable = acpi_nfit_blk_region_enable;
		ndbr_desc->disable = acpi_nfit_blk_region_disable;
		ndbr_desc->do_io = acpi_desc->blk_do_io;
		if (!nvdimm_blk_region_create(acpi_desc->nvdimm_bus, ndr_desc))
			return -ENOMEM;
		break;
	}

	return 0;
}

static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	static struct nd_mapping nd_mappings[ND_MAX_MAPPINGS];
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_blk_region_desc ndbr_desc;
	struct nd_region_desc *ndr_desc;
	struct nfit_memdev *nfit_memdev;
	struct nvdimm_bus *nvdimm_bus;
	struct resource res;
	int count = 0, rc;

	if (spa->range_index == 0) {
		dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n",
				__func__);
		return 0;
	}

	memset(&res, 0, sizeof(res));
	memset(&nd_mappings, 0, sizeof(nd_mappings));
	memset(&ndbr_desc, 0, sizeof(ndbr_desc));
	res.start = spa->address;
	res.end = res.start + spa->length - 1;
	ndr_desc = &ndbr_desc.ndr_desc;
	ndr_desc->res = &res;
	ndr_desc->provider_data = nfit_spa;
	ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
	if (spa->flags & ACPI_NFIT_PROXIMITY_VALID)
		ndr_desc->numa_node = acpi_map_pxm_to_online_node(
				spa->proximity_domain);
	else
		ndr_desc->numa_node = NUMA_NO_NODE;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nd_mapping *nd_mapping;

		if (memdev->range_index != spa->range_index)
			continue;
		if (count >= ND_MAX_MAPPINGS) {
			dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
					spa->range_index, ND_MAX_MAPPINGS);
			return -ENXIO;
		}
		nd_mapping = &nd_mappings[count++];
		rc = acpi_nfit_init_mapping(acpi_desc, nd_mapping, ndr_desc,
				memdev, spa);
		if (rc)
			return rc;
	}

	ndr_desc->nd_mapping = nd_mappings;
	ndr_desc->num_mappings = count;
	rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
	if (rc)
		return rc;

	nvdimm_bus = acpi_desc->nvdimm_bus;
	if (nfit_spa_type(spa) == NFIT_SPA_PM) {
		if (!nvdimm_pmem_region_create(nvdimm_bus, ndr_desc))
			return -ENOMEM;
	} else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) {
		if (!nvdimm_volatile_region_create(nvdimm_bus, ndr_desc))
			return -ENOMEM;
	}

	return 0;
}

static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		int rc = acpi_nfit_register_region(acpi_desc, nfit_spa);

		if (rc)
			return rc;
	}

	return 0;
}

int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
{
	struct device *dev = acpi_desc->dev;
	const void *end;
	u8 *data;
	int rc;

	INIT_LIST_HEAD(&acpi_desc->spa_maps);
	INIT_LIST_HEAD(&acpi_desc->spas);
	INIT_LIST_HEAD(&acpi_desc->dcrs);
	INIT_LIST_HEAD(&acpi_desc->bdws);
	INIT_LIST_HEAD(&acpi_desc->idts);
	INIT_LIST_HEAD(&acpi_desc->memdevs);
	INIT_LIST_HEAD(&acpi_desc->dimms);
	mutex_init(&acpi_desc->spa_map_mutex);

	data = (u8 *) acpi_desc->nfit;
	end = data + sz;
	data += sizeof(struct acpi_table_nfit);
	while (!IS_ERR_OR_NULL(data))
		data = add_table(acpi_desc, data, end);

	if (IS_ERR(data)) {
		dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__,
				PTR_ERR(data));
		return PTR_ERR(data);
	}

	if (nfit_mem_init(acpi_desc) != 0)
		return -ENOMEM;

	acpi_nfit_init_dsms(acpi_desc);

	rc = acpi_nfit_register_dimms(acpi_desc);
	if (rc)
		return rc;

	return acpi_nfit_register_regions(acpi_desc);
}
EXPORT_SYMBOL_GPL(acpi_nfit_init);

static int acpi_nfit_add(struct acpi_device *adev)
{
	struct nvdimm_bus_descriptor *nd_desc;
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &adev->dev;
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	acpi_size sz;
	int rc;

	status = acpi_get_table_with_size("NFIT", 0, &tbl, &sz);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "failed to find NFIT\n");
		return -ENXIO;
	}

	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return -ENOMEM;

	dev_set_drvdata(dev, acpi_desc);
	acpi_desc->dev = dev;
	acpi_desc->nfit = (struct acpi_table_nfit *) tbl;
	acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->provider_name = "ACPI.NFIT";
	nd_desc->ndctl = acpi_nfit_ctl;
	nd_desc->attr_groups = acpi_nfit_attribute_groups;

	acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, nd_desc);
	if (!acpi_desc->nvdimm_bus)
		return -ENXIO;

	rc = acpi_nfit_init(acpi_desc, sz);
	if (rc)
		nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
	return rc;
}

static int acpi_nfit_remove(struct acpi_device *adev)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);

	nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
	return 0;
}

static const struct acpi_device_id acpi_nfit_ids[] = {
	{ "ACPI0012", 0 },
	{ "", 0 },
};
MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);

static struct acpi_driver acpi_nfit_driver = {
	.name = KBUILD_MODNAME,
	.ids = acpi_nfit_ids,
	.ops = {
		.add = acpi_nfit_add,
		.remove = acpi_nfit_remove,
	},
};

static __init int nfit_init(void)
{
	BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);

	acpi_str_to_uuid(UUID_VOLATILE_MEMORY, nfit_uuid[NFIT_SPA_VOLATILE]);
	acpi_str_to_uuid(UUID_PERSISTENT_MEMORY, nfit_uuid[NFIT_SPA_PM]);
	acpi_str_to_uuid(UUID_CONTROL_REGION, nfit_uuid[NFIT_SPA_DCR]);
	acpi_str_to_uuid(UUID_DATA_REGION, nfit_uuid[NFIT_SPA_BDW]);
	acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_VDISK]);
	acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD, nfit_uuid[NFIT_SPA_VCD]);
	acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_PDISK]);
	acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]);
	acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]);
	acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]);

	return acpi_bus_register_driver(&acpi_nfit_driver);
}

static __exit void nfit_exit(void)
{
	acpi_bus_unregister_driver(&acpi_nfit_driver);
}

module_init(nfit_init);
module_exit(nfit_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");