/*
 * libnvdimm - Non-volatile-memory Devices Subsystem
 *
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#ifndef __LIBNVDIMM_H__
#define __LIBNVDIMM_H__
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/types.h>
#include <linux/uuid.h>

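/*
 * Flags and fixed limits shared between libnvdimm and its bus providers:
 * NDD_* dimm flags, ND_CMD_* command-envelope limits, ND_REGION_* region
 * flags, and DPA_* label-update hints.
 */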
enum {
	/* when a dimm supports both PMEM and BLK access a label is required */
	NDD_ALIASING = 0,
	/* unarmed memory devices may not persist writes */
	NDD_UNARMED = 1,
	/* locked memory devices should not be accessed */
	NDD_LOCKED = 2,

	/* need to set a limit somewhere, but yes, this is likely overkill */
	ND_IOCTL_MAX_BUFLEN = SZ_4M,
	ND_CMD_MAX_ELEM = 5,
	ND_CMD_MAX_ENVELOPE = 256,
	ND_MAX_MAPPINGS = 32,

	/* region flag indicating to direct-map persistent memory by default */
	ND_REGION_PAGEMAP = 0,

	/* mark newly adjusted resources as requiring a label update */
	DPA_RESOURCE_ADJUSTED = 1 << 0,
};

extern struct attribute_group nvdimm_bus_attribute_group;
extern struct attribute_group nvdimm_attribute_group;
extern struct attribute_group nd_device_attribute_group;
extern struct attribute_group nd_numa_attribute_group;
extern struct attribute_group nd_region_attribute_group;
extern struct attribute_group nd_mapping_attribute_group;

struct nvdimm;
struct nvdimm_bus_descriptor;
typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int buf_len, int *cmd_rc);

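/*
 * struct nvdimm_bus_descriptor - bus-provider parameters for nvdimm_bus_register()
 *
 * A bus provider (ACPI NFIT, for example) fills this in and hands it to
 * nvdimm_bus_register().  @ndctl routes management commands to the
 * provider, @cmd_mask advertises the bus-level commands it supports, and
 * @flush_probe / @clear_to_send let the provider gate command submission
 * against in-flight device probing.
 */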
struct nvdimm_bus_descriptor {
	const struct attribute_group **attr_groups;
	unsigned long bus_dsm_mask;
	unsigned long cmd_mask;
	struct module *module;
	char *provider_name;
	ndctl_fn ndctl;
	int (*flush_probe)(struct nvdimm_bus_descriptor *nd_desc);
	int (*clear_to_send)(struct nvdimm_bus_descriptor *nd_desc,
			struct nvdimm *nvdimm, unsigned int cmd);
};

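/*
 * struct nd_cmd_desc - fixed input/output envelope of an ND_CMD
 *
 * Consulted by nd_cmd_in_size() and nd_cmd_out_size() to validate and size
 * the payload fields of a command before it is passed to the bus
 * provider's ->ndctl() callback.
 */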
struct nd_cmd_desc {
	int in_num;
	int out_num;
	u32 in_sizes[ND_CMD_MAX_ELEM];
	int out_sizes[ND_CMD_MAX_ELEM];
};

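/*
 * struct nd_interleave_set - persistent identity of a region's dimm set
 *
 * The cookies are checksums over the interleave-set configuration so that
 * namespace labels can be validated against the dimms backing the region
 * across reboots and reconfiguration.
 */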
struct nd_interleave_set {
	/* v1.1 definition of the interleave-set-cookie algorithm */
	u64 cookie1;
	/* v1.2 definition of the interleave-set-cookie algorithm */
	u64 cookie2;
	/* compatibility with initial buggy Linux implementation */
	u64 altcookie;

	guid_t type_guid;
};

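/* the <dimm, start-DPA, length> tuple a dimm contributes to a region */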
struct nd_mapping_desc {
	struct nvdimm *nvdimm;
	u64 start;
	u64 size;
};

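/*
 * struct nd_region_desc - parameters for nvdimm_{pmem,blk,volatile}_region_create()
 *
 * @res covers the system-physical-address range of the region, @mapping /
 * @num_mappings describe the dimms backing it, and @flags carries
 * ND_REGION_* properties such as ND_REGION_PAGEMAP.
 */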
struct nd_region_desc {
	struct resource *res;
	struct nd_mapping_desc *mapping;
	u16 num_mappings;
	const struct attribute_group **attr_groups;
	struct nd_interleave_set *nd_set;
	void *provider_data;
	int num_lanes;
	int numa_node;
	unsigned long flags;
};

struct device;
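/*
 * Device-managed mappings of dimm resources; the mapping is released
 * automatically when @dev is unbound.  devm_nvdimm_ioremap() is simply
 * the flags == 0 case of devm_nvdimm_memremap().
 */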
void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags);
static inline void __iomem *devm_nvdimm_ioremap(struct device *dev,
		resource_size_t offset, size_t size)
{
	return (void __iomem *) devm_nvdimm_memremap(dev, offset, size, 0);
}

struct nvdimm_bus;
struct module;
struct device;
struct nd_blk_region;
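/*
 * struct nd_blk_region_desc - BLK (block-window aperture) region parameters
 *
 * ->enable() is invoked when the region is brought up, and ->do_io()
 * performs the aperture-mediated transfer of @len bytes at dimm-physical
 * address @dpa.
 */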
struct nd_blk_region_desc {
	int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev);
	int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
			void *iobuf, u64 len, int rw);
	struct nd_region_desc ndr_desc;
};

static inline struct nd_blk_region_desc *to_blk_region_desc(
		struct nd_region_desc *ndr_desc)
{
	return container_of(ndr_desc, struct nd_blk_region_desc, ndr_desc);
}

int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length);
void nvdimm_forget_poison(struct nvdimm_bus *nvdimm_bus,
		phys_addr_t start, unsigned int len);
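/*
 * Typical bus-provider flow (illustrative sketch only; error handling is
 * omitted and the names parent_dev, example_ndctl, dimm_data, mapping,
 * pmem_res, and ndr_desc are hypothetical):
 *
 *	static struct nvdimm_bus_descriptor nd_desc = {
 *		.provider_name = "example",
 *		.module = THIS_MODULE,
 *		.ndctl = example_ndctl,
 *	};
 *
 *	nvdimm_bus = nvdimm_bus_register(parent_dev, &nd_desc);
 *	nvdimm = nvdimm_create(nvdimm_bus, dimm_data, NULL, 0, 0, 0, NULL);
 *
 *	mapping.nvdimm = nvdimm;
 *	mapping.start = 0;
 *	mapping.size = SZ_1G;
 *	ndr_desc.res = &pmem_res;
 *	ndr_desc.mapping = &mapping;
 *	ndr_desc.num_mappings = 1;
 *	nd_region = nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc);
 *	...
 *	nvdimm_bus_unregister(nvdimm_bus);
 */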
struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
		struct nvdimm_bus_descriptor *nfit_desc);
void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus);
struct nvdimm_bus *to_nvdimm_bus(struct device *dev);
struct nvdimm *to_nvdimm(struct device *dev);
struct nd_region *to_nd_region(struct device *dev);
struct nd_blk_region *to_nd_blk_region(struct device *dev);
struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus);
struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus);
const char *nvdimm_name(struct nvdimm *nvdimm);
struct kobject *nvdimm_kobj(struct nvdimm *nvdimm);
unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm);
void *nvdimm_provider_data(struct nvdimm *nvdimm);
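/*
 * nvdimm_create() registers a dimm device on the bus.  @cmd_mask
 * advertises the dimm-level commands the provider implements, @flags
 * carries NDD_* properties, and @num_flush / @flush_wpq describe any
 * write-pending-queue flush-hint resources for the dimm.
 */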
struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
		const struct attribute_group **groups, unsigned long flags,
		unsigned long cmd_mask, int num_flush,
		struct resource *flush_wpq);
const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd);
const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd);
u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd,
		const struct nd_cmd_desc *desc, int idx, void *buf);
u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
		const struct nd_cmd_desc *desc, int idx, const u32 *in_field,
		const u32 *out_field, unsigned long remainder);
int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count);
struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc);
struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc);
struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc);
void *nd_region_provider_data(struct nd_region *nd_region);
void *nd_blk_region_provider_data(struct nd_blk_region *ndbr);
void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data);
struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr);
unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr);
unsigned int nd_region_acquire_lane(struct nd_region *nd_region);
void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane);
u64 nd_fletcher64(void *addr, size_t len, bool le);
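/*
 * nvdimm_flush() writes to any platform flush-hint (write-pending-queue)
 * addresses for the region, nvdimm_has_flush() reports whether such hints
 * are available, and nvdimm_has_cache() reports whether CPU-cache
 * management applies to the region.
 */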
void nvdimm_flush(struct nd_region *nd_region);
int nvdimm_has_flush(struct nd_region *nd_region);
int nvdimm_has_cache(struct nd_region *nd_region);
#endif /* __LIBNVDIMM_H__ */