/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DAX_H
#define _LINUX_DAX_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>

/* Flag for synchronous flush */
#define DAXDEV_F_SYNC (1UL << 0)

typedef unsigned long dax_entry_t;

struct iomap_ops;
struct iomap;
struct dax_device;
struct dax_operations {
	/*
	 * direct_access: translate a device-relative
	 * logical-page-offset into an absolute physical pfn. Return the
	 * number of pages available for DAX at that pfn.
	 */
	long (*direct_access)(struct dax_device *, pgoff_t, long,
			void **, pfn_t *);
	/*
	 * Validate whether this device is usable as an fsdax backing
	 * device.
	 */
	bool (*dax_supported)(struct dax_device *, struct block_device *, int,
			sector_t, sector_t);
	/* copy_from_iter: required operation for fs-dax direct-i/o */
	size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
			struct iov_iter *);
	/* copy_to_iter: required operation for fs-dax direct-i/o */
	size_t (*copy_to_iter)(struct dax_device *, pgoff_t, void *, size_t,
			struct iov_iter *);
	/* zero_page_range: required operation. Zero a range of pages */
	int (*zero_page_range)(struct dax_device *, pgoff_t, size_t);
};

#if IS_ENABLED(CONFIG_DAX)
struct dax_device *alloc_dax(void *private, const char *host,
		const struct dax_operations *ops, unsigned long flags);
void put_dax(struct dax_device *dax_dev);
void kill_dax(struct dax_device *dax_dev);
void dax_write_cache(struct dax_device *dax_dev, bool wc);
bool dax_write_cache_enabled(struct dax_device *dax_dev);
bool __dax_synchronous(struct dax_device *dax_dev);
static inline bool dax_synchronous(struct dax_device *dax_dev)
{
	return __dax_synchronous(dax_dev);
}
void __set_dax_synchronous(struct dax_device *dax_dev);
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
	__set_dax_synchronous(dax_dev);
}
/*
 * Check if given mapping is supported by the file / underlying device.
 */
static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
					    struct dax_device *dax_dev)
{
	if (!(vma->vm_flags & VM_SYNC))
		return true;
	if (!IS_DAX(file_inode(vma->vm_file)))
		return false;
	return dax_synchronous(dax_dev);
}
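/*
 * Example: how a provider might register a synchronous dax_device with
 * the operations above.  This is a minimal sketch, not part of this
 * header: "my_dax_ops", "my_pmem" and the my_dax_* callbacks are
 * hypothetical names; a real driver (e.g. drivers/nvdimm/pmem.c)
 * carries more state.  Passing DAXDEV_F_SYNC in the alloc_dax() flags
 * marks the device as supporting synchronous (MAP_SYNC) faults:
 *
 *	static const struct dax_operations my_dax_ops = {
 *		.direct_access = my_dax_direct_access,
 *		.dax_supported = generic_fsdax_supported,
 *		.copy_from_iter = my_dax_copy_from_iter,
 *		.copy_to_iter = my_dax_copy_to_iter,
 *		.zero_page_range = my_dax_zero_page_range,
 *	};
 *
 *	dax_dev = alloc_dax(my_pmem, disk->disk_name, &my_dax_ops,
 *			DAXDEV_F_SYNC);
 *	if (!dax_dev)
 *		return -ENOMEM;	(also NULL when !CONFIG_DAX; see stub below)
 *	...
 *	(teardown)
 *	kill_dax(dax_dev);
 *	put_dax(dax_dev);
 */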
#else
static inline struct dax_device *alloc_dax(void *private, const char *host,
		const struct dax_operations *ops, unsigned long flags)
{
	/*
	 * Callers should check IS_ENABLED(CONFIG_DAX) to know if this
	 * NULL is an error or expected.
	 */
	return NULL;
}
static inline void put_dax(struct dax_device *dax_dev)
{
}
static inline void kill_dax(struct dax_device *dax_dev)
{
}
static inline void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
}
static inline bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
	return false;
}
static inline bool dax_synchronous(struct dax_device *dax_dev)
{
	return true;
}
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
}
static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
		struct dax_device *dax_dev)
{
	return !(vma->vm_flags & VM_SYNC);
}
#endif

struct writeback_control;
int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
#if IS_ENABLED(CONFIG_FS_DAX)
bool generic_fsdax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t sectors);

bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
		int blocksize, sector_t start, sector_t len);

static inline void fs_put_dax(struct dax_device *dax_dev)
{
	put_dax(dax_dev);
}

struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev);
int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc);

struct page *dax_layout_busy_page(struct address_space *mapping);
struct page *dax_layout_busy_page_range(struct address_space *mapping,
		loff_t start, loff_t end);
dax_entry_t dax_lock_page(struct page *page);
void dax_unlock_page(struct page *page, dax_entry_t cookie);
#else
#define generic_fsdax_supported		NULL

static inline bool dax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t len)
{
	return false;
}

static inline void fs_put_dax(struct dax_device *dax_dev)
{
}

static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
	return NULL;
}

static inline struct page *dax_layout_busy_page(struct address_space *mapping)
{
	return NULL;
}

/* Keep the stub's signature in sync with the CONFIG_FS_DAX declaration */
static inline struct page *dax_layout_busy_page_range(
		struct address_space *mapping, loff_t start, loff_t end)
{
	return NULL;
}

static inline int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc)
{
	return -EOPNOTSUPP;
}

static inline dax_entry_t dax_lock_page(struct page *page)
{
	if (IS_DAX(page->mapping->host))
		return ~0UL;
	return 0;
}

static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
}
#endif

#if IS_ENABLED(CONFIG_DAX)
int dax_read_lock(void);
void dax_read_unlock(int id);
#else
static inline int dax_read_lock(void)
{
	return 0;
}

static inline void dax_read_unlock(int id)
{
}
#endif /* CONFIG_DAX */
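/*
 * Example: the canonical access pattern for the helpers declared
 * below.  dax_read_lock() takes an SRCU read lock so the device
 * cannot be torn down (kill_dax()) while its mapping is in use.
 * A minimal sketch, assuming a live dax_dev; "buf", "pgoff" and
 * "nr_pages" are illustrative locals, not part of this API:
 *
 *	void *kaddr;
 *	pfn_t pfn;
 *	long avail;
 *	int id;
 *
 *	id = dax_read_lock();
 *	avail = dax_direct_access(dax_dev, pgoff, nr_pages, &kaddr, &pfn);
 *	if (avail > 0)
 *		memcpy(buf, kaddr, avail * PAGE_SIZE);
 *	dax_read_unlock(id);
 *
 * A negative return from dax_direct_access() is an errno; a positive
 * return is the number of pages mapped at @kaddr/@pfn.
 */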
bool dax_alive(struct dax_device *dax_dev);
void *dax_get_private(struct dax_device *dax_dev);
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		void **kaddr, pfn_t *pfn);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i);
size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i);
int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
		size_t nr_pages);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);

ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops);
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size, pfn_t pfn);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
		pgoff_t index);
s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap);
static inline bool dax_mapping(struct address_space *mapping)
{
	return mapping->host && IS_DAX(mapping->host);
}

#ifdef CONFIG_DEV_DAX_HMEM_DEVICES
void hmem_register_device(int target_nid, struct resource *r);
#else
static inline void hmem_register_device(int target_nid, struct resource *r)
{
}
#endif

#endif /* _LINUX_DAX_H */
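/*
 * Example: how an fsdax filesystem typically consumes dax_iomap_fault()
 * and dax_finish_sync_fault().  "my_iomap_ops" and the handler name are
 * hypothetical; see the ext4 and xfs fault paths for real users.  A
 * minimal sketch:
 *
 *	static vm_fault_t my_dax_huge_fault(struct vm_fault *vmf,
 *			enum page_entry_size pe_size)
 *	{
 *		pfn_t pfn;
 *		vm_fault_t ret;
 *
 *		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL,
 *				&my_iomap_ops);
 *		if (ret & VM_FAULT_NEEDDSYNC)
 *			(after the fs commits metadata for the range)
 *			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
 *		return ret;
 *	}
 *
 * VM_FAULT_NEEDDSYNC is only returned for MAP_SYNC mappings, i.e. when
 * daxdev_mapping_supported() above allowed VM_SYNC on this device.
 */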