/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DAX_H
#define _LINUX_DAX_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>

typedef unsigned long dax_entry_t;

struct dax_device;
struct gendisk;
struct iomap_ops;
struct iomap_iter;
struct iomap;

enum dax_access_mode {
	DAX_ACCESS,
	DAX_RECOVERY_WRITE,
};

struct dax_operations {
	/*
	 * direct_access: translate a device-relative logical page
	 * offset into an absolute physical pfn. Returns the number
	 * of pages available for DAX at that pfn.
	 */
	long (*direct_access)(struct dax_device *, pgoff_t, long,
			enum dax_access_mode, void **, pfn_t *);
	/*
	 * Validate whether this device is usable as an fsdax backing
	 * device.
	 */
	bool (*dax_supported)(struct dax_device *, struct block_device *, int,
			sector_t, sector_t);
	/* zero_page_range: required operation. Zero a range of pages. */
	int (*zero_page_range)(struct dax_device *, pgoff_t, size_t);
};
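
/*
 * Example: a minimal sketch of how a driver might fill in these
 * operations. The example_* names and struct example_dev are
 * illustrative assumptions, not part of this API; see
 * drivers/nvdimm/pmem.c for a real implementation.
 *
 *	static long example_direct_access(struct dax_device *dax_dev,
 *			pgoff_t pgoff, long nr_pages,
 *			enum dax_access_mode mode, void **kaddr, pfn_t *pfn)
 *	{
 *		struct example_dev *ed = dax_get_private(dax_dev);
 *
 *		if (kaddr)
 *			*kaddr = ed->virt_addr + PFN_PHYS(pgoff);
 *		if (pfn)
 *			*pfn = phys_to_pfn_t(ed->phys_addr + PFN_PHYS(pgoff),
 *					PFN_DEV | PFN_MAP);
 *		return PHYS_PFN(ed->size) - pgoff;
 *	}
 *
 *	static const struct dax_operations example_dax_ops = {
 *		.direct_access = example_direct_access,
 *		.zero_page_range = example_zero_page_range,
 *	};
 */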

#if IS_ENABLED(CONFIG_DAX)
struct dax_device *alloc_dax(void *private, const struct dax_operations *ops);
void put_dax(struct dax_device *dax_dev);
void kill_dax(struct dax_device *dax_dev);
void dax_write_cache(struct dax_device *dax_dev, bool wc);
bool dax_write_cache_enabled(struct dax_device *dax_dev);
bool dax_synchronous(struct dax_device *dax_dev);
void set_dax_synchronous(struct dax_device *dax_dev);
/*
 * Check whether the given mapping is supported by the file / underlying
 * device.
 */
static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
					     struct dax_device *dax_dev)
{
	if (!(vma->vm_flags & VM_SYNC))
		return true;
	if (!IS_DAX(file_inode(vma->vm_file)))
		return false;
	return dax_synchronous(dax_dev);
}
#else
static inline struct dax_device *alloc_dax(void *private,
		const struct dax_operations *ops)
{
	/*
	 * Callers should check IS_ENABLED(CONFIG_DAX) to know if this
	 * NULL is an error or expected.
	 */
	return NULL;
}
static inline void put_dax(struct dax_device *dax_dev)
{
}
static inline void kill_dax(struct dax_device *dax_dev)
{
}
static inline void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
}
static inline bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
	return false;
}
static inline bool dax_synchronous(struct dax_device *dax_dev)
{
	return true;
}
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
}
static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
				struct dax_device *dax_dev)
{
	return !(vma->vm_flags & VM_SYNC);
}
#endif
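
/*
 * Example: a filesystem's ->mmap() handler typically uses
 * daxdev_mapping_supported() to reject MAP_SYNC mappings the device
 * cannot honor. A sketch, where example_file_vm_ops and the
 * sbi->s_daxdev field are assumptions for illustration:
 *
 *	static int example_file_mmap(struct file *file,
 *			struct vm_area_struct *vma)
 *	{
 *		struct example_sb_info *sbi =
 *			file_inode(file)->i_sb->s_fs_info;
 *
 *		if (!daxdev_mapping_supported(vma, sbi->s_daxdev))
 *			return -EOPNOTSUPP;
 *		vma->vm_ops = &example_file_vm_ops;
 *		return 0;
 *	}
 */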

void set_dax_nocache(struct dax_device *dax_dev);
void set_dax_nomc(struct dax_device *dax_dev);

struct writeback_control;
#if defined(CONFIG_BLOCK) && defined(CONFIG_FS_DAX)
int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk);
void dax_remove_host(struct gendisk *disk);
struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev,
		u64 *start_off);
static inline void fs_put_dax(struct dax_device *dax_dev)
{
	put_dax(dax_dev);
}
#else
static inline int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk)
{
	return 0;
}
static inline void dax_remove_host(struct gendisk *disk)
{
}
static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev,
		u64 *start_off)
{
	return NULL;
}
static inline void fs_put_dax(struct dax_device *dax_dev)
{
}
#endif /* CONFIG_BLOCK && CONFIG_FS_DAX */
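
/*
 * Example: a filesystem resolves the dax_device backing its block
 * device once at mount time and releases it at unmount. A sketch;
 * the example_sb_info field names are assumptions:
 *
 *	sbi->s_daxdev = fs_dax_get_by_bdev(sb->s_bdev, &sbi->s_dax_part_off);
 *	if (!sbi->s_daxdev)
 *		(fall back to non-DAX operation)
 *
 * and at unmount:
 *
 *	fs_put_dax(sbi->s_daxdev);
 */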

#if IS_ENABLED(CONFIG_FS_DAX)
int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc);

struct page *dax_layout_busy_page(struct address_space *mapping);
struct page *dax_layout_busy_page_range(struct address_space *mapping,
		loff_t start, loff_t end);
dax_entry_t dax_lock_page(struct page *page);
void dax_unlock_page(struct page *page, dax_entry_t cookie);
#else
static inline struct page *dax_layout_busy_page(struct address_space *mapping)
{
	return NULL;
}

static inline struct page *dax_layout_busy_page_range(struct address_space *mapping,
		loff_t start, loff_t end)
{
	return NULL;
}

static inline int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc)
{
	return -EOPNOTSUPP;
}

static inline dax_entry_t dax_lock_page(struct page *page)
{
	if (IS_DAX(page->mapping->host))
		return ~0UL;
	return 0;
}

static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
}
#endif
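
/*
 * Example: dax_lock_page() pins the page's DAX mapping entry so that
 * page->mapping cannot be torn down (e.g. by truncate) while it is
 * inspected; the memory-failure path relies on this. The pairing
 * looks like:
 *
 *	dax_entry_t cookie = dax_lock_page(page);
 *	if (!cookie)
 *		return -EBUSY;	(mapping already gone)
 *	(... examine page->mapping / page->index safely ...)
 *	dax_unlock_page(page, cookie);
 */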

int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops);
int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops);
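
/*
 * Example: on a size-reducing truncate a filesystem zeroes the tail
 * of the now-last block so stale data is not visible through mmap.
 * A sketch, assuming the filesystem's iomap_ops are named
 * example_iomap_ops:
 *
 *	if (IS_DAX(inode))
 *		error = dax_truncate_page(inode, newsize, &did_zero,
 *				&example_iomap_ops);
 */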

#if IS_ENABLED(CONFIG_DAX)
int dax_read_lock(void);
void dax_read_unlock(int id);
#else
static inline int dax_read_lock(void)
{
	return 0;
}

static inline void dax_read_unlock(int id)
{
}
#endif /* CONFIG_DAX */
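
/*
 * Example: dax_read_lock() guards against device teardown while a
 * mapping is in use; callers of dax_direct_access() below hold it
 * across the access. The usual pattern:
 *
 *	int id = dax_read_lock();
 *	long avail = dax_direct_access(dax_dev, pgoff, nr_pages,
 *			DAX_ACCESS, &kaddr, &pfn);
 *	if (avail > 0)
 *		(... kaddr is valid for up to avail pages ...)
 *	dax_read_unlock(id);
 */
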
bool dax_alive(struct dax_device *dax_dev);
void *dax_get_private(struct dax_device *dax_dev);
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		enum dax_access_mode mode, void **kaddr, pfn_t *pfn);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i);
size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i);
int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
			size_t nr_pages);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);

ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops);
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size, pfn_t pfn);
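
/*
 * Example: DAX-aware filesystems route reads and writes here instead
 * of through the page cache. A sketch modeled on the ext4/xfs
 * pattern; example_iomap_ops is an assumption:
 *
 *	static ssize_t example_dax_read_iter(struct kiocb *iocb,
 *			struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = dax_iomap_rw(iocb, to, &example_iomap_ops);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 */
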
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index);
static inline bool dax_mapping(struct address_space *mapping)
{
	return mapping->host && IS_DAX(mapping->host);
}
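
/*
 * Example: for DAX files, ->writepages() flushes CPU caches for the
 * dirty mapped ranges tracked in the mapping rather than writing
 * pages. A sketch following the ext4-style pattern; sbi->s_daxdev is
 * an assumption:
 *
 *	static int example_dax_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		struct example_sb_info *sbi =
 *			mapping->host->i_sb->s_fs_info;
 *
 *		return dax_writeback_mapping_range(mapping, sbi->s_daxdev,
 *				wbc);
 *	}
 */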

#ifdef CONFIG_DEV_DAX_HMEM_DEVICES
void hmem_register_device(int target_nid, struct resource *r);
#else
static inline void hmem_register_device(int target_nid, struct resource *r)
{
}
#endif

#endif