/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DAX_H
#define _LINUX_DAX_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>
#include <asm/pgtable.h>

/* Flag for synchronous flush */
#define DAXDEV_F_SYNC (1UL << 0)

typedef unsigned long dax_entry_t;

struct iomap_ops;
struct dax_device;
struct dax_operations {
	/*
	 * direct_access: translate a device-relative
	 * logical-page-offset into an absolute physical pfn. Return the
	 * number of pages available for DAX at that pfn.
	 */
	long (*direct_access)(struct dax_device *, pgoff_t, long,
			void **, pfn_t *);
	/*
	 * Validate whether this device is usable as an fsdax backing
	 * device.
	 */
	bool (*dax_supported)(struct dax_device *, struct block_device *, int,
			sector_t, sector_t);
	/* copy_from_iter: required operation for fs-dax direct-i/o */
	size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
			struct iov_iter *);
	/* copy_to_iter: required operation for fs-dax direct-i/o */
	size_t (*copy_to_iter)(struct dax_device *, pgoff_t, void *, size_t,
			struct iov_iter *);
};

extern struct attribute_group dax_attribute_group;

#if IS_ENABLED(CONFIG_DAX)
struct dax_device *dax_get_by_host(const char *host);
struct dax_device *alloc_dax(void *private, const char *host,
		const struct dax_operations *ops, unsigned long flags);
void put_dax(struct dax_device *dax_dev);
void kill_dax(struct dax_device *dax_dev);
void dax_write_cache(struct dax_device *dax_dev, bool wc);
bool dax_write_cache_enabled(struct dax_device *dax_dev);
bool __dax_synchronous(struct dax_device *dax_dev);
static inline bool dax_synchronous(struct dax_device *dax_dev)
{
	return __dax_synchronous(dax_dev);
}
void __set_dax_synchronous(struct dax_device *dax_dev);
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
	__set_dax_synchronous(dax_dev);
}
/*
 * Check if given mapping is supported by the file / underlying device.
 */
static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
		struct dax_device *dax_dev)
{
	if (!(vma->vm_flags & VM_SYNC))
		return true;
	if (!IS_DAX(file_inode(vma->vm_file)))
		return false;
	return dax_synchronous(dax_dev);
}
#else
static inline struct dax_device *dax_get_by_host(const char *host)
{
	return NULL;
}
static inline struct dax_device *alloc_dax(void *private, const char *host,
		const struct dax_operations *ops, unsigned long flags)
{
	/*
	 * Callers should check IS_ENABLED(CONFIG_DAX) to know if this
	 * NULL is an error or expected.
	 */
	return NULL;
}
static inline void put_dax(struct dax_device *dax_dev)
{
}
static inline void kill_dax(struct dax_device *dax_dev)
{
}
static inline void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
}
static inline bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
	return false;
}
static inline bool dax_synchronous(struct dax_device *dax_dev)
{
	return true;
}
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
}
static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
		struct dax_device *dax_dev)
{
	return !(vma->vm_flags & VM_SYNC);
}
#endif
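
/*
 * Example: typical provider lifecycle (an illustrative sketch only; the
 * my_private and my_dax_ops names, the device_is_sync flag, and the gendisk
 * at hand are hypothetical, not part of this API). Passing DAXDEV_F_SYNC
 * makes dax_synchronous() report true for the device. kill_dax() marks the
 * device dead so dax_alive() starts failing, and put_dax() then drops the
 * allocation reference:
 *
 *	struct dax_device *dax_dev;
 *
 *	dax_dev = alloc_dax(my_private, disk->disk_name, &my_dax_ops,
 *			device_is_sync ? DAXDEV_F_SYNC : 0);
 *	if (!dax_dev)
 *		return -ENOMEM;
 *	...
 *	kill_dax(dax_dev);
 *	put_dax(dax_dev);
 */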

struct writeback_control;
int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
#if IS_ENABLED(CONFIG_FS_DAX)
bool __bdev_dax_supported(struct block_device *bdev, int blocksize);
static inline bool bdev_dax_supported(struct block_device *bdev, int blocksize)
{
	return __bdev_dax_supported(bdev, blocksize);
}

bool __generic_fsdax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t sectors);
static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t sectors)
{
	return __generic_fsdax_supported(dax_dev, bdev, blocksize, start,
			sectors);
}

static inline void fs_put_dax(struct dax_device *dax_dev)
{
	put_dax(dax_dev);
}

struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev);
int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc);

struct page *dax_layout_busy_page(struct address_space *mapping);
dax_entry_t dax_lock_page(struct page *page);
void dax_unlock_page(struct page *page, dax_entry_t cookie);
#else
static inline bool bdev_dax_supported(struct block_device *bdev,
		int blocksize)
{
	return false;
}

static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t sectors)
{
	return false;
}

static inline void fs_put_dax(struct dax_device *dax_dev)
{
}

static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
	return NULL;
}

static inline struct page *dax_layout_busy_page(struct address_space *mapping)
{
	return NULL;
}

static inline int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc)
{
	return -EOPNOTSUPP;
}

static inline dax_entry_t dax_lock_page(struct page *page)
{
	if (IS_DAX(page->mapping->host))
		return ~0UL;
	return 0;
}

static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
}
#endif
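
/*
 * Example: filesystem writeback (a hedged sketch; the my_fs_* names are
 * hypothetical). An fsdax filesystem typically looks up the dax_device once
 * at mount time with fs_dax_get_by_bdev(), drops it with fs_put_dax() at
 * unmount, and flushes dirty mapping entries on fsync/writeback through
 * dax_writeback_mapping_range() instead of the page cache:
 *
 *	static int my_fs_dax_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		struct my_fs_sb_info *sbi = my_fs_sb(mapping->host->i_sb);
 *
 *		return dax_writeback_mapping_range(mapping, sbi->dax_dev, wbc);
 *	}
 */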

int dax_read_lock(void);
void dax_read_unlock(int id);
bool dax_alive(struct dax_device *dax_dev);
void *dax_get_private(struct dax_device *dax_dev);
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		void **kaddr, pfn_t *pfn);
bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
		int blocksize, sector_t start, sector_t len);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i);
size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);

ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops);
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size, pfn_t pfn);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
		pgoff_t index);

#ifdef CONFIG_FS_DAX
int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int length);
#else
static inline int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int length)
{
	return -ENXIO;
}
#endif

static inline bool dax_mapping(struct address_space *mapping)
{
	return mapping->host && IS_DAX(mapping->host);
}

#endif
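
/*
 * Example: safe use of dax_direct_access() (an illustrative sketch; buf and
 * len are hypothetical, and the memcpy_flushcache() payload is just one
 * possible access). Callers hold the dax read lock across the translation
 * and any access through the returned kernel address, so the device cannot
 * be torn down underneath them. A positive return value is the number of
 * pages available at *kaddr; a negative value is an errno:
 *
 *	void *kaddr;
 *	pfn_t pfn;
 *	long avail;
 *	int id;
 *
 *	id = dax_read_lock();
 *	avail = dax_direct_access(dax_dev, pgoff, nr_pages, &kaddr, &pfn);
 *	if (avail > 0)
 *		memcpy_flushcache(kaddr, buf, len);
 *	dax_read_unlock(id);
 */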