/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DAX_H
#define _LINUX_DAX_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>

typedef unsigned long dax_entry_t;

struct dax_device;
struct gendisk;
struct iomap_ops;
struct iomap_iter;
struct iomap;

enum dax_access_mode {
	DAX_ACCESS,
	DAX_RECOVERY_WRITE,
};

struct dax_operations {
	/*
	 * direct_access: translate a device-relative
	 * logical-page-offset into an absolute physical pfn. Return the
	 * number of pages available for DAX at that pfn.
	 */
	long (*direct_access)(struct dax_device *, pgoff_t, long,
			enum dax_access_mode, void **, pfn_t *);
	/*
	 * Validate whether this device is usable as an fsdax backing
	 * device.
	 */
	bool (*dax_supported)(struct dax_device *, struct block_device *, int,
			sector_t, sector_t);
	/* zero_page_range: required operation. Zero a page range */
	int (*zero_page_range)(struct dax_device *, pgoff_t, size_t);
	/*
	 * recovery_write: recover a poisoned range; implemented only by
	 * DAX device drivers capable of clearing poison.
	 */
	size_t (*recovery_write)(struct dax_device *dax_dev, pgoff_t pgoff,
			void *addr, size_t bytes, struct iov_iter *iter);
};
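/*
 * Example (illustrative sketch, not kernel code): a memory-backed driver
 * typically implements ->direct_access() by translating the page offset
 * into its own linear mapping. The example_dev structure and all
 * example_* names below are hypothetical; see a real provider such as
 * drivers/nvdimm/pmem.c for the canonical implementation.
 *
 *	static long example_dax_direct_access(struct dax_device *dax_dev,
 *			pgoff_t pgoff, long nr_pages,
 *			enum dax_access_mode mode, void **kaddr, pfn_t *pfn)
 *	{
 *		struct example_dev *dev = dax_get_private(dax_dev);
 *		resource_size_t offset = PFN_PHYS(pgoff);
 *
 *		if (kaddr)
 *			*kaddr = dev->virt_addr + offset;
 *		if (pfn)
 *			*pfn = phys_to_pfn_t(dev->phys_addr + offset,
 *					dev->pfn_flags);
 *		// report contiguous pages available from this offset
 *		return PHYS_PFN(dev->size - offset);
 *	}
 *
 *	static const struct dax_operations example_dax_ops = {
 *		.direct_access = example_dax_direct_access,
 *		.zero_page_range = example_dax_zero_page_range, // required
 *	};
 */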
#if IS_ENABLED(CONFIG_DAX)
struct dax_device *alloc_dax(void *private, const struct dax_operations *ops);
void put_dax(struct dax_device *dax_dev);
void kill_dax(struct dax_device *dax_dev);
void dax_write_cache(struct dax_device *dax_dev, bool wc);
bool dax_write_cache_enabled(struct dax_device *dax_dev);
bool dax_synchronous(struct dax_device *dax_dev);
void set_dax_synchronous(struct dax_device *dax_dev);
size_t dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i);
/*
 * Check if the given mapping is supported by the file / underlying
 * device.
 */
static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
		struct dax_device *dax_dev)
{
	if (!(vma->vm_flags & VM_SYNC))
		return true;
	if (!IS_DAX(file_inode(vma->vm_file)))
		return false;
	return dax_synchronous(dax_dev);
}
#else
static inline struct dax_device *alloc_dax(void *private,
		const struct dax_operations *ops)
{
	/*
	 * Callers should check IS_ENABLED(CONFIG_DAX) to know if this
	 * NULL is an error or expected.
	 */
	return NULL;
}
static inline void put_dax(struct dax_device *dax_dev)
{
}
static inline void kill_dax(struct dax_device *dax_dev)
{
}
static inline void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
}
static inline bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
	return false;
}
static inline bool dax_synchronous(struct dax_device *dax_dev)
{
	return true;
}
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
}
static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
		struct dax_device *dax_dev)
{
	return !(vma->vm_flags & VM_SYNC);
}
static inline size_t dax_recovery_write(struct dax_device *dax_dev,
		pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
{
	return 0;
}
#endif

void set_dax_nocache(struct dax_device *dax_dev);
void set_dax_nomc(struct dax_device *dax_dev);

struct writeback_control;
#if defined(CONFIG_BLOCK) && defined(CONFIG_FS_DAX)
int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk);
void dax_remove_host(struct gendisk *disk);
struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev,
		u64 *start_off);
static inline void fs_put_dax(struct dax_device *dax_dev)
{
	put_dax(dax_dev);
}
#else
static inline int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk)
{
	return 0;
}
static inline void dax_remove_host(struct gendisk *disk)
{
}
static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev,
		u64 *start_off)
{
	return NULL;
}
static inline void fs_put_dax(struct dax_device *dax_dev)
{
}
#endif /* CONFIG_BLOCK && CONFIG_FS_DAX */

#if IS_ENABLED(CONFIG_FS_DAX)
int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc);

struct page *dax_layout_busy_page(struct address_space *mapping);
struct page *dax_layout_busy_page_range(struct address_space *mapping,
		loff_t start, loff_t end);
dax_entry_t dax_lock_page(struct page *page);
void dax_unlock_page(struct page *page, dax_entry_t cookie);
#else
static inline struct page *dax_layout_busy_page(struct address_space *mapping)
{
	return NULL;
}

static inline struct page *dax_layout_busy_page_range(
		struct address_space *mapping, loff_t start, loff_t end)
{
	return NULL;
}

static inline int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc)
{
	return -EOPNOTSUPP;
}

static inline dax_entry_t dax_lock_page(struct page *page)
{
	if (IS_DAX(page->mapping->host))
		return ~0UL;
	return 0;
}

static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
}
#endif

int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops);
int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops);

#if IS_ENABLED(CONFIG_DAX)
int dax_read_lock(void);
void dax_read_unlock(int id);
#else
static inline int dax_read_lock(void)
{
	return 0;
}

static inline void dax_read_unlock(int id)
{
}
#endif /* CONFIG_DAX */
bool dax_alive(struct dax_device *dax_dev);
void *dax_get_private(struct dax_device *dax_dev);
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		enum dax_access_mode mode, void **kaddr, pfn_t *pfn);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i);
size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i);
int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
		size_t nr_pages);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
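/*
 * Example (illustrative sketch, not kernel code): the canonical pattern
 * for resolving a kernel address with dax_direct_access(). The
 * dax_read_lock() region pins the device's liveness (see dax_alive())
 * for as long as the returned address is in use; example_read() and its
 * parameters are hypothetical, and the sketch assumes the whole range
 * came back contiguously mapped.
 *
 *	static int example_read(struct dax_device *dax_dev, pgoff_t pgoff,
 *			void *dst, size_t len)
 *	{
 *		void *kaddr;
 *		long avail;
 *		int id;
 *
 *		id = dax_read_lock();
 *		avail = dax_direct_access(dax_dev, pgoff,
 *				DIV_ROUND_UP(len, PAGE_SIZE), DAX_ACCESS,
 *				&kaddr, NULL);
 *		if (avail < 0) {
 *			dax_read_unlock(id);
 *			return avail;	// negative errno from the driver
 *		}
 *		memcpy(dst, kaddr, len);
 *		dax_read_unlock(id);
 *		return 0;
 *	}
 */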
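/*
 * Example (illustrative sketch, not kernel code): dax_lock_page()
 * returns an opaque cookie that is zero on failure (e.g. the page's
 * mapping has been truncated away) and must be handed back to
 * dax_unlock_page(). This mirrors how mm/memory-failure.c stabilizes a
 * DAX page's mapping:
 *
 *	dax_entry_t cookie = dax_lock_page(page);
 *
 *	if (!cookie)
 *		return -EBUSY;	// mapping no longer present
 *	// ... safely inspect page->mapping / reverse-map the pfn ...
 *	dax_unlock_page(page, cookie);
 */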
ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops);
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size, pfn_t pfn);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
		pgoff_t index);
static inline bool dax_mapping(struct address_space *mapping)
{
	return mapping->host && IS_DAX(mapping->host);
}

#ifdef CONFIG_DEV_DAX_HMEM_DEVICES
void hmem_register_device(int target_nid, struct resource *r);
#else
static inline void hmem_register_device(int target_nid, struct resource *r)
{
}
#endif

#endif /* _LINUX_DAX_H */