/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DAX_H
#define _LINUX_DAX_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>
#include <asm/pgtable.h>

/* Flag for synchronous flush */
#define DAXDEV_F_SYNC (1UL << 0)

typedef unsigned long dax_entry_t;

struct iomap_ops;
struct iomap;
struct dax_device;
struct dax_operations {
	/*
	 * direct_access: translate a device-relative
	 * logical-page-offset into an absolute physical pfn. Return the
	 * number of pages available for DAX at that pfn.
	 */
	long (*direct_access)(struct dax_device *, pgoff_t, long,
			void **, pfn_t *);
	/*
	 * Validate whether this device is usable as an fsdax backing
	 * device.
	 */
	bool (*dax_supported)(struct dax_device *, struct block_device *, int,
			sector_t, sector_t);
	/* copy_from_iter: required operation for fs-dax direct-i/o */
	size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
			struct iov_iter *);
	/* copy_to_iter: required operation for fs-dax direct-i/o */
	size_t (*copy_to_iter)(struct dax_device *, pgoff_t, void *, size_t,
			struct iov_iter *);
	/* zero_page_range: required operation. Zero a page range */
	int (*zero_page_range)(struct dax_device *, pgoff_t, size_t);
};
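/*
 * Illustrative sketch, not part of this header: a driver providing these
 * operations typically fills in a static ops table and hands it to
 * alloc_dax(). The my_* names are hypothetical; the shape follows
 * existing drivers such as pmem:
 *
 *	static const struct dax_operations my_dax_ops = {
 *		.direct_access = my_dax_direct_access,
 *		.dax_supported = generic_fsdax_supported,
 *		.copy_from_iter = my_dax_copy_from_iter,
 *		.copy_to_iter = my_dax_copy_to_iter,
 *		.zero_page_range = my_dax_zero_page_range,
 *	};
 *
 *	dax_dev = alloc_dax(my_priv, disk->disk_name, &my_dax_ops,
 *			DAXDEV_F_SYNC);
 */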
extern struct attribute_group dax_attribute_group;

#if IS_ENABLED(CONFIG_DAX)
struct dax_device *dax_get_by_host(const char *host);
struct dax_device *alloc_dax(void *private, const char *host,
		const struct dax_operations *ops, unsigned long flags);
void put_dax(struct dax_device *dax_dev);
void kill_dax(struct dax_device *dax_dev);
void dax_write_cache(struct dax_device *dax_dev, bool wc);
bool dax_write_cache_enabled(struct dax_device *dax_dev);
bool __dax_synchronous(struct dax_device *dax_dev);
static inline bool dax_synchronous(struct dax_device *dax_dev)
{
	return __dax_synchronous(dax_dev);
}
void __set_dax_synchronous(struct dax_device *dax_dev);
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
	__set_dax_synchronous(dax_dev);
}
/*
 * Check if given mapping is supported by the file / underlying device.
 */
static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
		struct dax_device *dax_dev)
{
	if (!(vma->vm_flags & VM_SYNC))
		return true;
	if (!IS_DAX(file_inode(vma->vm_file)))
		return false;
	return dax_synchronous(dax_dev);
}
#else
static inline struct dax_device *dax_get_by_host(const char *host)
{
	return NULL;
}
static inline struct dax_device *alloc_dax(void *private, const char *host,
		const struct dax_operations *ops, unsigned long flags)
{
	/*
	 * Callers should check IS_ENABLED(CONFIG_DAX) to know if this
	 * NULL is an error or expected.
	 */
	return NULL;
}
static inline void put_dax(struct dax_device *dax_dev)
{
}
static inline void kill_dax(struct dax_device *dax_dev)
{
}
static inline void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
}
static inline bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
	return false;
}
static inline bool dax_synchronous(struct dax_device *dax_dev)
{
	return true;
}
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
}
static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
		struct dax_device *dax_dev)
{
	return !(vma->vm_flags & VM_SYNC);
}
#endif

struct writeback_control;
int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
#if IS_ENABLED(CONFIG_FS_DAX)
bool __bdev_dax_supported(struct block_device *bdev, int blocksize);
static inline bool bdev_dax_supported(struct block_device *bdev, int blocksize)
{
	return __bdev_dax_supported(bdev, blocksize);
}

bool __generic_fsdax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t sectors);
static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t sectors)
{
	return __generic_fsdax_supported(dax_dev, bdev, blocksize, start,
			sectors);
}

static inline void fs_put_dax(struct dax_device *dax_dev)
{
	put_dax(dax_dev);
}

struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev);
int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc);

struct page *dax_layout_busy_page(struct address_space *mapping);
dax_entry_t dax_lock_page(struct page *page);
void dax_unlock_page(struct page *page, dax_entry_t cookie);
#else
static inline bool bdev_dax_supported(struct block_device *bdev,
		int blocksize)
{
	return false;
}

static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t sectors)
{
	return false;
}

static inline void fs_put_dax(struct dax_device *dax_dev)
{
}

static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
	return NULL;
}

static inline struct page *dax_layout_busy_page(struct address_space *mapping)
{
	return NULL;
}

static inline int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc)
{
	return -EOPNOTSUPP;
}

static inline dax_entry_t dax_lock_page(struct page *page)
{
	if (IS_DAX(page->mapping->host))
		return ~0UL;
	return 0;
}

static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
}
#endif
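/*
 * Illustrative sketch, assuming a caller that wants a kernel mapping of
 * device pages: dax_direct_access() is bracketed by dax_read_lock() /
 * dax_read_unlock() so the device cannot be unregistered underneath the
 * caller. dst, pgoff, and nr_pages below are hypothetical:
 *
 *	int id = dax_read_lock();
 *	long avail = dax_direct_access(dax_dev, pgoff, nr_pages,
 *			&kaddr, &pfn);
 *	if (avail > 0)
 *		memcpy(dst, kaddr, avail * PAGE_SIZE);
 *	dax_read_unlock(id);
 *
 * A negative return is an errno; a positive return is the number of
 * pages available for DAX at @pgoff.
 */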
int dax_read_lock(void);
void dax_read_unlock(int id);
bool dax_alive(struct dax_device *dax_dev);
void *dax_get_private(struct dax_device *dax_dev);
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		void **kaddr, pfn_t *pfn);
bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
		int blocksize, sector_t start, sector_t len);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i);
size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i);
int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
		size_t nr_pages);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);

ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops);
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size, pfn_t pfn);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
		pgoff_t index);
int dax_iomap_zero(loff_t pos, unsigned offset, unsigned size,
		struct iomap *iomap);
static inline bool dax_mapping(struct address_space *mapping)
{
	return mapping->host && IS_DAX(mapping->host);
}

#endif
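/*
 * Illustrative sketch with hypothetical names, modeled on existing fsdax
 * filesystems: a fault handler routes through dax_iomap_fault() with the
 * filesystem's own iomap_ops, and for MAP_SYNC mappings completes the
 * fault with dax_finish_sync_fault() once metadata is durable:
 *
 *	static vm_fault_t my_fs_dax_fault(struct vm_fault *vmf,
 *			enum page_entry_size pe_size)
 *	{
 *		pfn_t pfn;
 *		vm_fault_t ret;
 *
 *		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL,
 *				&my_fs_iomap_ops);
 *		if (ret & VM_FAULT_NEEDDSYNC)
 *			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
 *		return ret;
 *	}
 */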