/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DAX_H
#define _LINUX_DAX_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>

/* Flag for synchronous flush */
#define DAXDEV_F_SYNC (1UL << 0)

typedef unsigned long dax_entry_t;

struct dax_device;
struct gendisk;
struct iomap_ops;
struct iomap_iter;
struct iomap;

struct dax_operations {
	/*
	 * direct_access: translate a device-relative logical page offset
	 * into an absolute physical pfn. Return the number of pages
	 * available for DAX at that pfn.
	 */
	long (*direct_access)(struct dax_device *, pgoff_t, long,
			void **, pfn_t *);
	/*
	 * Validate whether this device is usable as an fsdax backing
	 * device.
	 */
	bool (*dax_supported)(struct dax_device *, struct block_device *, int,
			sector_t, sector_t);
	/* copy_from_iter: required operation for fs-dax direct-i/o */
	size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
			struct iov_iter *);
	/* copy_to_iter: required operation for fs-dax direct-i/o */
	size_t (*copy_to_iter)(struct dax_device *, pgoff_t, void *, size_t,
			struct iov_iter *);
	/* zero_page_range: required operation. Zero a range of pages. */
	int (*zero_page_range)(struct dax_device *, pgoff_t, size_t);
};
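
/*
 * Example: a minimal sketch of a driver's ->direct_access() implementation,
 * assuming a hypothetical "foo" driver whose memory is physically and
 * virtually contiguous.  Illustrative only, not part of this header:
 *
 *	static long foo_dax_direct_access(struct dax_device *dax_dev,
 *			pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
 *	{
 *		struct foo_device *foo = dax_get_private(dax_dev);
 *		resource_size_t offset = PFN_PHYS(pgoff);
 *
 *		if (kaddr)
 *			*kaddr = foo->virt_addr + offset;
 *		if (pfn)
 *			*pfn = phys_to_pfn_t(foo->phys_addr + offset,
 *					     PFN_DEV | PFN_MAP);
 *		return foo->nr_pages - pgoff;
 *	}
 *
 * The return value is the number of pages still mappable at @pgoff.
 */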

#if IS_ENABLED(CONFIG_DAX)
struct dax_device *alloc_dax(void *private, const struct dax_operations *ops,
		unsigned long flags);
void put_dax(struct dax_device *dax_dev);
void kill_dax(struct dax_device *dax_dev);
void dax_write_cache(struct dax_device *dax_dev, bool wc);
bool dax_write_cache_enabled(struct dax_device *dax_dev);
bool __dax_synchronous(struct dax_device *dax_dev);
static inline bool dax_synchronous(struct dax_device *dax_dev)
{
	return __dax_synchronous(dax_dev);
}
void __set_dax_synchronous(struct dax_device *dax_dev);
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
	__set_dax_synchronous(dax_dev);
}
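
/*
 * Example: a sketch of how a driver might create and later destroy its
 * dax_device, assuming a hypothetical "foo" driver with foo_dax_ops built
 * from the operations above.  Passing DAXDEV_F_SYNC marks the device as
 * supporting synchronous page faults (see dax_synchronous()):
 *
 *	dax_dev = alloc_dax(foo, &foo_dax_ops, DAXDEV_F_SYNC);
 *	if (IS_ERR_OR_NULL(dax_dev))
 *		return dax_dev ? PTR_ERR(dax_dev) : -ENOMEM;
 *	...
 *	kill_dax(dax_dev);
 *	put_dax(dax_dev);
 */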
/*
 * Check if the given mapping is supported by the file / underlying device.
 */
static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
					     struct dax_device *dax_dev)
{
	if (!(vma->vm_flags & VM_SYNC))
		return true;
	if (!IS_DAX(file_inode(vma->vm_file)))
		return false;
	return dax_synchronous(dax_dev);
}
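
/*
 * Example: a filesystem would call daxdev_mapping_supported() from its
 * ->mmap() handler to reject MAP_SYNC mappings that the device cannot
 * honor.  A sketch, with foo_inode_to_dax_dev() standing in for however
 * the filesystem looks up its dax_device:
 *
 *	static int foo_file_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct dax_device *dax_dev = foo_inode_to_dax_dev(file_inode(file));
 *
 *		if (!daxdev_mapping_supported(vma, dax_dev))
 *			return -EOPNOTSUPP;
 *		...
 *	}
 */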
#else
static inline struct dax_device *alloc_dax(void *private,
		const struct dax_operations *ops, unsigned long flags)
{
	/*
	 * Callers should check IS_ENABLED(CONFIG_DAX) to know if this
	 * NULL is an error or expected.
	 */
	return NULL;
}
static inline void put_dax(struct dax_device *dax_dev)
{
}
static inline void kill_dax(struct dax_device *dax_dev)
{
}
static inline void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
}
static inline bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
	return false;
}
static inline bool dax_synchronous(struct dax_device *dax_dev)
{
	return true;
}
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
}
static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
				struct dax_device *dax_dev)
{
	return !(vma->vm_flags & VM_SYNC);
}
#endif

struct writeback_control;
#if IS_ENABLED(CONFIG_FS_DAX)
int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk);
void dax_remove_host(struct gendisk *disk);

static inline void fs_put_dax(struct dax_device *dax_dev)
{
	put_dax(dax_dev);
}

struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev);
int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc);

struct page *dax_layout_busy_page(struct address_space *mapping);
struct page *dax_layout_busy_page_range(struct address_space *mapping,
		loff_t start, loff_t end);
dax_entry_t dax_lock_page(struct page *page);
void dax_unlock_page(struct page *page, dax_entry_t cookie);
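
/*
 * Example: filesystems typically look up the dax_device backing their block
 * device once at mount time and drop the reference at unmount.  A sketch,
 * assuming a hypothetical foo_sb_info with a dax_dev member:
 *
 *	sbi->dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
 *	...
 *	fs_put_dax(sbi->dax_dev);
 */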
#else
static inline int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk)
{
	return 0;
}
static inline void dax_remove_host(struct gendisk *disk)
{
}

static inline void fs_put_dax(struct dax_device *dax_dev)
{
}

static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
	return NULL;
}

static inline struct page *dax_layout_busy_page(struct address_space *mapping)
{
	return NULL;
}

static inline struct page *dax_layout_busy_page_range(
		struct address_space *mapping, loff_t start, loff_t end)
{
	return NULL;
}

static inline int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc)
{
	return -EOPNOTSUPP;
}

static inline dax_entry_t dax_lock_page(struct page *page)
{
	if (IS_DAX(page->mapping->host))
		return ~0UL;
	return 0;
}

static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
}
#endif

int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops);
int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops);
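
/*
 * Example: on a size-reducing truncate, a DAX filesystem zeroes the tail of
 * the new last block so stale data is not exposed through mmap.  A sketch,
 * assuming the filesystem's own (hypothetical) foo_iomap_ops:
 *
 *	error = dax_truncate_page(inode, newsize, &did_zero, &foo_iomap_ops);
 */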

#if IS_ENABLED(CONFIG_DAX)
int dax_read_lock(void);
void dax_read_unlock(int id);
#else
static inline int dax_read_lock(void)
{
	return 0;
}

static inline void dax_read_unlock(int id)
{
}
#endif /* CONFIG_DAX */
bool dax_alive(struct dax_device *dax_dev);
void *dax_get_private(struct dax_device *dax_dev);
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		void **kaddr, pfn_t *pfn);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i);
size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i);
int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
			size_t nr_pages);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
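
/*
 * Example: callers of dax_direct_access() must do so under dax_read_lock()
 * so the device cannot be torn down underneath them.  The usual pattern,
 * sketched; copy_len must not exceed PFN_PHYS(avail):
 *
 *	id = dax_read_lock();
 *	avail = dax_direct_access(dax_dev, pgoff, nr_pages, &kaddr, &pfn);
 *	if (avail > 0)
 *		memcpy(buf, kaddr, copy_len);
 *	dax_read_unlock(id);
 */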

ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops);
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size, pfn_t pfn);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index);
static inline bool dax_mapping(struct address_space *mapping)
{
	return mapping->host && IS_DAX(mapping->host);
}
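
/*
 * Example: filesystems route DAX reads and writes through dax_iomap_rw()
 * with their own iomap_ops.  A sketch of a ->read_iter() handler, assuming
 * a hypothetical foo_iomap_ops:
 *
 *	static ssize_t foo_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = dax_iomap_rw(iocb, to, &foo_iomap_ops);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 */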

#ifdef CONFIG_DEV_DAX_HMEM_DEVICES
void hmem_register_device(int target_nid, struct resource *r);
#else
static inline void hmem_register_device(int target_nid, struct resource *r)
{
}
#endif

#endif