/*
 * include/linux/backing-dev.h
 *
 * Low-level device information and state which is propagated up
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/percpu_counter.h>
#include <linux/log2.h>
#include <linux/proportions.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <asm/atomic.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in backing_dev_info.state
 */
enum bdi_state {
	BDI_pending,		/* On its way to being activated */
	BDI_wb_alloc,		/* Default embedded wb allocated */
	BDI_async_congested,	/* The async (write) queue is getting full */
	BDI_sync_congested,	/* The sync queue is getting full */
	BDI_registered,		/* bdi_register() was done */
	BDI_unused,		/* Available bits start here */
};

typedef int (congested_fn)(void *, int);

enum bdi_stat_item {
	BDI_RECLAIMABLE,
	BDI_WRITEBACK,
	NR_BDI_STAT_ITEMS
};

#define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))

struct bdi_writeback {
	struct list_head list;			/* hangs off the bdi */

	struct backing_dev_info *bdi;		/* our parent bdi */
	unsigned int nr;

	unsigned long last_old_flush;		/* last old data flush */

	struct task_struct	*task;		/* writeback task */
	struct list_head	b_dirty;	/* dirty inodes */
	struct list_head	b_io;		/* parked for writeback */
	struct list_head	b_more_io;	/* parked for more writeback */
};

struct backing_dev_info {
	struct list_head bdi_list;
	struct rcu_head rcu_head;
	unsigned long ra_pages;	/* max readahead in PAGE_CACHE_SIZE units */
	unsigned long state;	/* Always use atomic bitops on this */
	unsigned int capabilities; /* Device capabilities */
	congested_fn *congested_fn; /* Function pointer if device is md/dm */
	void *congested_data;	/* Pointer to aux data for congested func */
	void (*unplug_io_fn)(struct backing_dev_info *, struct page *);
	void *unplug_io_data;

	char *name;

	struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];

	struct prop_local_percpu completions;
	int dirty_exceeded;

	unsigned int min_ratio;
	unsigned int max_ratio, max_prop_frac;

	struct bdi_writeback wb;  /* default writeback info for this bdi */
	spinlock_t wb_lock;	  /* protects update side of wb_list */
	struct list_head wb_list; /* the flusher threads hanging off this bdi */
	unsigned long wb_mask;	  /* bitmask of registered tasks */
	unsigned int wb_cnt;	  /* number of registered tasks */

	struct list_head work_list;

	struct device *dev;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;
	struct dentry *debug_stats;
#endif
};

int bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);

int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi);
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages);
int bdi_writeback_task(struct bdi_writeback *wb);
int bdi_has_dirty_io(struct backing_dev_info *bdi);
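
/*
 * Example (illustrative sketch, not part of the kernel source): a
 * filesystem that embeds its own backing_dev_info would typically wire it
 * up at mount time with the helpers above.  "my_fs" and the readahead
 * value shown are assumptions for illustration only.
 *
 *	struct my_fs_info {
 *		struct backing_dev_info bdi;
 *	};
 *
 *	static int my_fs_setup_bdi(struct my_fs_info *fs, dev_t dev)
 *	{
 *		int err;
 *
 *		fs->bdi.name = "my_fs";
 *		fs->bdi.ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE;
 *		err = bdi_init(&fs->bdi);
 *		if (err)
 *			return err;
 *		err = bdi_register_dev(&fs->bdi, dev);
 *		if (err)
 *			bdi_destroy(&fs->bdi);
 *		return err;
 *	}
 *
 * The matching teardown on unmount is bdi_destroy(), which also
 * unregisters the bdi if it is still registered.
 */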

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

static inline int wb_has_dirty_io(struct bdi_writeback *wb)
{
	return !list_empty(&wb->b_dirty) ||
	       !list_empty(&wb->b_io) ||
	       !list_empty(&wb->b_more_io);
}

static inline void __add_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item, s64 amount)
{
	__percpu_counter_add(&bdi->bdi_stat[item], amount, BDI_STAT_BATCH);
}

static inline void __inc_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	__add_bdi_stat(bdi, item, 1);
}

static inline void inc_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_bdi_stat(bdi, item);
	local_irq_restore(flags);
}

static inline void __dec_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	__add_bdi_stat(bdi, item, -1);
}

static inline void dec_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_bdi_stat(bdi, item);
	local_irq_restore(flags);
}
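
/*
 * Example (illustrative): writeback accounting in mm/page-writeback.c
 * follows roughly this pattern when a page enters writeback.  The
 * double-underscore variant is used because mapping->tree_lock is
 * already held with interrupts disabled:
 *
 *	spin_lock_irqsave(&mapping->tree_lock, flags);
 *	if (... page was not already under writeback ...)
 *		__inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
 *	spin_unlock_irqrestore(&mapping->tree_lock, flags);
 *
 * Callers that do not already have interrupts disabled use
 * inc_bdi_stat()/dec_bdi_stat() instead.
 */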

static inline s64 bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	return percpu_counter_read_positive(&bdi->bdi_stat[item]);
}

static inline s64 __bdi_stat_sum(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	return percpu_counter_sum_positive(&bdi->bdi_stat[item]);
}

static inline s64 bdi_stat_sum(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	s64 sum;
	unsigned long flags;

	local_irq_save(flags);
	sum = __bdi_stat_sum(bdi, item);
	local_irq_restore(flags);

	return sum;
}

extern void bdi_writeout_inc(struct backing_dev_info *bdi);

/*
 * Maximal error of a stat counter.
 */
static inline unsigned long bdi_stat_error(struct backing_dev_info *bdi)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * BDI_STAT_BATCH;
#else
	return 1;
#endif
}
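
/*
 * Example (illustrative): dirty throttling reads the cheap, approximate
 * per-cpu value first and only pays for the exact (but expensive) sum
 * when the threshold is small enough that the per-cpu error could
 * matter, roughly as mm/page-writeback.c does:
 *
 *	if (bdi_thresh < 2 * bdi_stat_error(bdi)) {
 *		nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
 *		nr_writeback   = bdi_stat_sum(bdi, BDI_WRITEBACK);
 *	} else {
 *		nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
 *		nr_writeback   = bdi_stat(bdi, BDI_WRITEBACK);
 *	}
 */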

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
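
/*
 * Example (illustrative): min_ratio/max_ratio bound a device's share of
 * the global dirty limit and are normally tuned through sysfs
 * (/sys/class/bdi/<dev>/min_ratio and max_ratio), but a driver may also
 * set them directly, e.g.:
 *
 *	err = bdi_set_max_ratio(bdi, 10);	(at most 10% of dirty memory)
 */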

/*
 * Flags in backing_dev_info::capabilities
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs).
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  BDI_CAP_NO_ACCT_AND_WRITEBACK combines all
 * three flags into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:   Don't write pages back
 * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
 *
 * The next flags let !MMU mmap() govern direct device mapping vs immediate
 * copying more easily for MAP_PRIVATE, especially for ROM filesystems.
 *
 * BDI_CAP_MAP_COPY:       Copy can be mapped (MAP_PRIVATE)
 * BDI_CAP_MAP_DIRECT:     Can be mapped directly (MAP_SHARED)
 * BDI_CAP_READ_MAP:       Can be mapped for reading
 * BDI_CAP_WRITE_MAP:      Can be mapped for writing
 * BDI_CAP_EXEC_MAP:       Can be mapped for execution
 *
 * BDI_CAP_SWAP_BACKED:    Count shmem/tmpfs objects as swap-backed.
 */
#define BDI_CAP_NO_ACCT_DIRTY	0x00000001
#define BDI_CAP_NO_WRITEBACK	0x00000002
#define BDI_CAP_MAP_COPY	0x00000004
#define BDI_CAP_MAP_DIRECT	0x00000008
#define BDI_CAP_READ_MAP	0x00000010
#define BDI_CAP_WRITE_MAP	0x00000020
#define BDI_CAP_EXEC_MAP	0x00000040
#define BDI_CAP_NO_ACCT_WB	0x00000080
#define BDI_CAP_SWAP_BACKED	0x00000100

#define BDI_CAP_VMFLAGS \
	(BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
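
/*
 * Example (illustrative): a RAM-backed filesystem neither writes pages
 * back nor wants them accounted as dirty, but can map its pages directly;
 * ramfs sets up its bdi along these lines:
 *
 *	static struct backing_dev_info ramfs_backing_dev_info = {
 *		.name		= "ramfs",
 *		.ra_pages	= 0,	(no readahead)
 *		.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK |
 *				  BDI_CAP_MAP_DIRECT | BDI_CAP_MAP_COPY |
 *				  BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP |
 *				  BDI_CAP_EXEC_MAP,
 *	};
 */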

#if defined(VM_MAYREAD) && \
	(BDI_CAP_READ_MAP != VM_MAYREAD || \
	 BDI_CAP_WRITE_MAP != VM_MAYWRITE || \
	 BDI_CAP_EXEC_MAP != VM_MAYEXEC)
#error please change backing_dev_info::capabilities flags
#endif

extern struct backing_dev_info default_backing_dev_info;
void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page);

int writeback_in_progress(struct backing_dev_info *bdi);

static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
{
	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, bdi_bits);
	return (bdi->state & bdi_bits);
}
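
/*
 * Example (illustrative sketch): a stacking driver (md/dm) has no request
 * queue of its own, so it supplies congested_fn/congested_data and reports
 * congestion if any component device is congested.  "struct my_stack" and
 * its fields are assumptions for illustration:
 *
 *	static int my_stack_congested(void *congested_data, int bdi_bits)
 *	{
 *		struct my_stack *s = congested_data;
 *		int ret = 0;
 *		int i;
 *
 *		for (i = 0; i < s->nr_devs; i++) {
 *			struct request_queue *q = bdev_get_queue(s->bdev[i]);
 *
 *			ret |= bdi_congested(&q->backing_dev_info, bdi_bits);
 *		}
 *		return ret;
 *	}
 */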

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << BDI_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << BDI_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << BDI_sync_congested) |
				  (1 << BDI_async_congested));
}

enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};

void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
void set_bdi_congested(struct backing_dev_info *bdi, int sync);
long congestion_wait(int sync, long timeout);
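
/*
 * Example (illustrative): the block layer marks a queue congested with
 * set_bdi_congested() as it fills and clears it as it drains; writers
 * back off on congestion in roughly this form:
 *
 *	while (bdi_write_congested(bdi))
 *		congestion_wait(BLK_RW_ASYNC, HZ / 50);
 */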

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_SWAP_BACKED;
}

static inline bool bdi_cap_flush_forker(struct backing_dev_info *bdi)
{
	return bdi == &default_backing_dev_info;
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_swap_backed(struct address_space *mapping)
{
	return bdi_cap_swap_backed(mapping->backing_dev_info);
}

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}
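
/*
 * Example (illustrative): bdi_sched_wait() is an action callback for
 * wait_on_bit(); mm/backing-dev.c uses it in roughly this form to sleep
 * until the BDI_pending bit clears:
 *
 *	wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
 *		    TASK_UNINTERRUPTIBLE);
 */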

#endif		/* _LINUX_BACKING_DEV_H */