/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * libnvdimm - Non-volatile-memory Devices Subsystem
 *
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#ifndef __LIBNVDIMM_H__
#define __LIBNVDIMM_H__
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/types.h>
#include <linux/uuid.h>
#include <linux/spinlock.h>
#include <linux/bio.h>

struct badrange_entry {
	u64 start;
	u64 length;
	struct list_head list;
};

struct badrange {
	struct list_head list;
	spinlock_t lock;
};

enum {
	/* when a dimm supports both PMEM and BLK access a label is required */
	NDD_ALIASING = 0,
	/* unarmed memory devices may not persist writes */
	NDD_UNARMED = 1,
	/* locked memory devices should not be accessed */
	NDD_LOCKED = 2,
	/* memory under security wipes should not be accessed */
	NDD_SECURITY_OVERWRITE = 3,
	/* tracking whether or not there is a pending device reference */
	NDD_WORK_PENDING = 4,
	/* ignore / filter NSLABEL_FLAG_LOCAL for this DIMM, i.e. no aliasing */
	NDD_NOBLK = 5,

	/* need to set a limit somewhere, but yes, this is likely overkill */
	ND_IOCTL_MAX_BUFLEN = SZ_4M,
	ND_CMD_MAX_ELEM = 5,
	ND_CMD_MAX_ENVELOPE = 256,
	ND_MAX_MAPPINGS = 32,

	/* region flag indicating to direct-map persistent memory by default */
	ND_REGION_PAGEMAP = 0,
	/*
	 * Platform ensures entire CPU store data path is flushed to pmem on
	 * system power loss.
	 */
	ND_REGION_PERSIST_CACHE = 1,
	/*
	 * Platform provides mechanisms to automatically flush outstanding
	 * write data from memory controller to pmem on system power loss.
	 * (ADR)
	 */
	ND_REGION_PERSIST_MEMCTRL = 2,

	/* Platform provides asynchronous flush mechanism */
	ND_REGION_ASYNC = 3,

	/* mark newly adjusted resources as requiring a label update */
	DPA_RESOURCE_ADJUSTED = 1 << 0,
};

struct nvdimm;
struct nvdimm_bus_descriptor;
typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int buf_len, int *cmd_rc);

struct device_node;
struct nvdimm_bus_descriptor {
	const struct attribute_group **attr_groups;
	unsigned long bus_dsm_mask;
	unsigned long cmd_mask;
	struct module *module;
	char *provider_name;
	struct device_node *of_node;
	ndctl_fn ndctl;
	int (*flush_probe)(struct nvdimm_bus_descriptor *nd_desc);
	int (*clear_to_send)(struct nvdimm_bus_descriptor *nd_desc,
			struct nvdimm *nvdimm, unsigned int cmd, void *data);
};

struct nd_cmd_desc {
	int in_num;
	int out_num;
	u32 in_sizes[ND_CMD_MAX_ELEM];
	int out_sizes[ND_CMD_MAX_ELEM];
};

struct nd_interleave_set {
	/* v1.1 definition of the interleave-set-cookie algorithm */
	u64 cookie1;
	/* v1.2 definition of the interleave-set-cookie algorithm */
	u64 cookie2;
	/* compatibility with initial buggy Linux implementation */
	u64 altcookie;

	guid_t type_guid;
};

struct nd_mapping_desc {
	struct nvdimm *nvdimm;
	u64 start;
	u64 size;
	int position;
};

struct nd_region;
struct nd_region_desc {
	struct resource *res;
	struct nd_mapping_desc *mapping;
	u16 num_mappings;
	const struct attribute_group **attr_groups;
	struct nd_interleave_set *nd_set;
	void *provider_data;
	int num_lanes;
	int numa_node;
	int target_node;
	unsigned long flags;
	struct device_node *of_node;
	int (*flush)(struct nd_region *nd_region, struct bio *bio);
};
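
/*
 * Illustrative sketch (not part of the API surface): how a bus provider
 * might describe a single-DIMM PMEM region with the structures above.
 * The names example_register_region, example_nvdimm and example_res are
 * hypothetical; real providers (e.g. ACPI NFIT) derive the mapping and
 * system-physical-address resource from platform firmware.
 *
 *	static struct nd_region *example_register_region(struct nvdimm_bus *bus,
 *			struct nvdimm *example_nvdimm, struct resource *example_res)
 *	{
 *		struct nd_mapping_desc mapping = {
 *			.nvdimm = example_nvdimm,
 *			.start = 0,
 *			.size = resource_size(example_res),
 *			.position = 0,
 *		};
 *		struct nd_region_desc ndr_desc = {
 *			.res = example_res,
 *			.mapping = &mapping,
 *			.num_mappings = 1,
 *			.numa_node = NUMA_NO_NODE,
 *			.target_node = NUMA_NO_NODE,
 *		};
 *
 *		set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
 *		return nvdimm_pmem_region_create(bus, &ndr_desc);
 *	}
 */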

struct device;
void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags);
static inline void __iomem *devm_nvdimm_ioremap(struct device *dev,
		resource_size_t offset, size_t size)
{
	return (void __iomem *) devm_nvdimm_memremap(dev, offset, size, 0);
}

struct nvdimm_bus;
struct module;
struct device;
struct nd_blk_region;
struct nd_blk_region_desc {
	int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev);
	int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
			void *iobuf, u64 len, int rw);
	struct nd_region_desc ndr_desc;
};

static inline struct nd_blk_region_desc *to_blk_region_desc(
		struct nd_region_desc *ndr_desc)
{
	return container_of(ndr_desc, struct nd_blk_region_desc, ndr_desc);
}

/*
 * Note that separate bits for locked + unlocked are defined so that
 * 'flags == 0' corresponds to an error / not-supported state.
 */
enum nvdimm_security_bits {
	NVDIMM_SECURITY_DISABLED,
	NVDIMM_SECURITY_UNLOCKED,
	NVDIMM_SECURITY_LOCKED,
	NVDIMM_SECURITY_FROZEN,
	NVDIMM_SECURITY_OVERWRITE,
};

#define NVDIMM_PASSPHRASE_LEN		32
#define NVDIMM_KEY_DESC_LEN		22

struct nvdimm_key_data {
	u8 data[NVDIMM_PASSPHRASE_LEN];
};

enum nvdimm_passphrase_type {
	NVDIMM_USER,
	NVDIMM_MASTER,
};

struct nvdimm_security_ops {
	unsigned long (*get_flags)(struct nvdimm *nvdimm,
			enum nvdimm_passphrase_type pass_type);
	int (*freeze)(struct nvdimm *nvdimm);
	int (*change_key)(struct nvdimm *nvdimm,
			const struct nvdimm_key_data *old_data,
			const struct nvdimm_key_data *new_data,
			enum nvdimm_passphrase_type pass_type);
	int (*unlock)(struct nvdimm *nvdimm,
			const struct nvdimm_key_data *key_data);
	int (*disable)(struct nvdimm *nvdimm,
			const struct nvdimm_key_data *key_data);
	int (*erase)(struct nvdimm *nvdimm,
			const struct nvdimm_key_data *key_data,
			enum nvdimm_passphrase_type pass_type);
	int (*overwrite)(struct nvdimm *nvdimm,
			const struct nvdimm_key_data *key_data);
	int (*query_overwrite)(struct nvdimm *nvdimm);
};

void badrange_init(struct badrange *badrange);
int badrange_add(struct badrange *badrange, u64 addr, u64 length);
void badrange_forget(struct badrange *badrange, phys_addr_t start,
		unsigned int len);
int nvdimm_bus_add_badrange(struct nvdimm_bus *nvdimm_bus, u64 addr,
		u64 length);
struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
		struct nvdimm_bus_descriptor *nfit_desc);
void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus);
struct nvdimm_bus *to_nvdimm_bus(struct device *dev);
struct nvdimm_bus *nvdimm_to_bus(struct nvdimm *nvdimm);
struct nvdimm *to_nvdimm(struct device *dev);
struct nd_region *to_nd_region(struct device *dev);
struct device *nd_region_dev(struct nd_region *nd_region);
struct nd_blk_region *to_nd_blk_region(struct device *dev);
struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus);
struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus);
const char *nvdimm_name(struct nvdimm *nvdimm);
struct kobject *nvdimm_kobj(struct nvdimm *nvdimm);
unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm);
void *nvdimm_provider_data(struct nvdimm *nvdimm);
struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
		void *provider_data, const struct attribute_group **groups,
		unsigned long flags, unsigned long cmd_mask, int num_flush,
		struct resource *flush_wpq, const char *dimm_id,
		const struct nvdimm_security_ops *sec_ops);
static inline struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus,
		void *provider_data, const struct attribute_group **groups,
		unsigned long flags, unsigned long cmd_mask, int num_flush,
		struct resource *flush_wpq)
{
	return __nvdimm_create(nvdimm_bus, provider_data, groups, flags,
			cmd_mask, num_flush, flush_wpq, NULL, NULL);
}
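
/*
 * Illustrative sketch (not part of the API surface): minimal bus and DIMM
 * registration as a platform provider might do it during probe.  The names
 * example_desc, example_ndctl and example_probe are hypothetical; the ndctl
 * callback may be left NULL for providers with no DIMM command support.
 *
 *	static struct nvdimm_bus_descriptor example_desc = {
 *		.provider_name = "example",
 *		.module = THIS_MODULE,
 *		.ndctl = example_ndctl,
 *	};
 *
 *	static int example_probe(struct device *dev)
 *	{
 *		struct nvdimm_bus *bus;
 *		struct nvdimm *nvdimm;
 *
 *		bus = nvdimm_bus_register(dev, &example_desc);
 *		if (!bus)
 *			return -ENODEV;
 *
 *		nvdimm = nvdimm_create(bus, NULL, NULL, 0, 0, 0, NULL);
 *		if (!nvdimm) {
 *			nvdimm_bus_unregister(bus);
 *			return -ENOMEM;
 *		}
 *		return 0;
 *	}
 */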

const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd);
const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd);
u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd,
		const struct nd_cmd_desc *desc, int idx, void *buf);
u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
		const struct nd_cmd_desc *desc, int idx, const u32 *in_field,
		const u32 *out_field, unsigned long remainder);
int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count);
struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc);
struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc);
struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc);
void *nd_region_provider_data(struct nd_region *nd_region);
void *nd_blk_region_provider_data(struct nd_blk_region *ndbr);
void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data);
struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr);
unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr);
unsigned int nd_region_acquire_lane(struct nd_region *nd_region);
void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane);
u64 nd_fletcher64(void *addr, size_t len, bool le);
int nvdimm_flush(struct nd_region *nd_region, struct bio *bio);
int generic_nvdimm_flush(struct nd_region *nd_region);
int nvdimm_has_flush(struct nd_region *nd_region);
int nvdimm_has_cache(struct nd_region *nd_region);
int nvdimm_in_overwrite(struct nvdimm *nvdimm);
bool is_nvdimm_sync(struct nd_region *nd_region);

static inline int nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int buf_len, int *cmd_rc)
{
	struct nvdimm_bus *nvdimm_bus = nvdimm_to_bus(nvdimm);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);

	return nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, cmd_rc);
}

#ifdef CONFIG_ARCH_HAS_PMEM_API
#define ARCH_MEMREMAP_PMEM MEMREMAP_WB
void arch_wb_cache_pmem(void *addr, size_t size);
void arch_invalidate_pmem(void *addr, size_t size);
#else
#define ARCH_MEMREMAP_PMEM MEMREMAP_WT
static inline void arch_wb_cache_pmem(void *addr, size_t size)
{
}
static inline void arch_invalidate_pmem(void *addr, size_t size)
{
}
#endif

#endif /* __LIBNVDIMM_H__ */