/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DAX_H
#define _LINUX_DAX_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>
#include <asm/pgtable.h>

struct iomap_ops;
struct dax_device;
struct dax_operations {
	/*
	 * direct_access: translate a device-relative
	 * logical-page-offset into an absolute physical pfn. Return the
	 * number of pages available for DAX at that pfn.
	 */
	long (*direct_access)(struct dax_device *, pgoff_t, long,
			void **, pfn_t *);
	/* copy_from_iter: required operation for fs-dax direct-i/o */
	size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
			struct iov_iter *);
	/* copy_to_iter: required operation for fs-dax direct-i/o */
	size_t (*copy_to_iter)(struct dax_device *, pgoff_t, void *, size_t,
			struct iov_iter *);
};

extern struct attribute_group dax_attribute_group;
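
/*
 * Illustrative sketch only, not a guarantee made by this header: a block
 * driver that exports DAX typically fills in a dax_operations instance and
 * registers it with alloc_dax().  Every "my_"-prefixed name below is a
 * hypothetical placeholder, not a real kernel symbol:
 *
 *	static const struct dax_operations my_dax_ops = {
 *		.direct_access	= my_dax_direct_access,
 *		.copy_from_iter	= my_dax_copy_from_iter,
 *		.copy_to_iter	= my_dax_copy_to_iter,
 *	};
 *
 *	dax_dev = alloc_dax(my_private_data, my_disk->disk_name, &my_dax_ops);
 *	...
 *	kill_dax(dax_dev);	-- mark the device dead for new users
 *	put_dax(dax_dev);	-- drop the reference taken by alloc_dax()
 */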

#if IS_ENABLED(CONFIG_DAX)
struct dax_device *dax_get_by_host(const char *host);
struct dax_device *alloc_dax(void *private, const char *host,
		const struct dax_operations *ops);
void put_dax(struct dax_device *dax_dev);
void kill_dax(struct dax_device *dax_dev);
void dax_write_cache(struct dax_device *dax_dev, bool wc);
bool dax_write_cache_enabled(struct dax_device *dax_dev);
#else
static inline struct dax_device *dax_get_by_host(const char *host)
{
	return NULL;
}
static inline struct dax_device *alloc_dax(void *private, const char *host,
		const struct dax_operations *ops)
{
	/*
	 * Callers should check IS_ENABLED(CONFIG_DAX) to know if this
	 * NULL is an error or expected.
	 */
	return NULL;
}
static inline void put_dax(struct dax_device *dax_dev)
{
}
static inline void kill_dax(struct dax_device *dax_dev)
{
}
static inline void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
}
static inline bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
	return false;
}
#endif

struct writeback_control;
int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
#if IS_ENABLED(CONFIG_FS_DAX)
bool __bdev_dax_supported(struct block_device *bdev, int blocksize);
static inline bool bdev_dax_supported(struct block_device *bdev, int blocksize)
{
	return __bdev_dax_supported(bdev, blocksize);
}

static inline struct dax_device *fs_dax_get_by_host(const char *host)
{
	return dax_get_by_host(host);
}

static inline void fs_put_dax(struct dax_device *dax_dev)
{
	put_dax(dax_dev);
}

struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev);
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc);

struct page *dax_layout_busy_page(struct address_space *mapping);
#else
static inline bool bdev_dax_supported(struct block_device *bdev,
		int blocksize)
{
	return false;
}

static inline struct dax_device *fs_dax_get_by_host(const char *host)
{
	return NULL;
}

static inline void fs_put_dax(struct dax_device *dax_dev)
{
}

static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
	return NULL;
}

static inline struct page *dax_layout_busy_page(struct address_space *mapping)
{
	return NULL;
}

static inline int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	return -EOPNOTSUPP;
}
#endif

int dax_read_lock(void);
void dax_read_unlock(int id);
bool dax_alive(struct dax_device *dax_dev);
void *dax_get_private(struct dax_device *dax_dev);
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		void **kaddr, pfn_t *pfn);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i);
size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);

ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops);
int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size, pfn_t pfn);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
		pgoff_t index);

#ifdef CONFIG_FS_DAX
int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int length);
#else
static inline int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int length)
{
	return -ENXIO;
}
#endif

static inline bool dax_mapping(struct address_space *mapping)
{
	return mapping->host && IS_DAX(mapping->host);
}

#endif
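
/*
 * Illustrative sketch only, not a guarantee made by this header: consumers
 * of dax_direct_access() normally bracket the call with dax_read_lock() /
 * dax_read_unlock() so the dax_device cannot be torn down while its pages
 * are in use.  The variable names below are hypothetical:
 *
 *	void *kaddr;
 *	pfn_t pfn;
 *	long nr;
 *	int id;
 *
 *	id = dax_read_lock();
 *	nr = dax_direct_access(dax_dev, pgoff, nr_pages, &kaddr, &pfn);
 *	if (nr > 0) {
 *		... access up to nr pages starting at kaddr / pfn ...
 *	}
 *	dax_read_unlock(id);
 *
 * A negative return value is an errno; the id returned by dax_read_lock()
 * must be passed back to dax_read_unlock().
 */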