/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/percpu_counter.h>
#include <linux/log2.h>
#include <linux/proportions.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/writeback.h>
#include <asm/atomic.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in backing_dev_info.state
 */
enum bdi_state {
	BDI_pending,		/* On its way to being activated */
	BDI_wb_alloc,		/* Default embedded wb allocated */
	BDI_async_congested,	/* The async (write) queue is getting full */
	BDI_sync_congested,	/* The sync queue is getting full */
	BDI_registered,		/* bdi_register() was done */
	BDI_writeback_running,	/* Writeback is in progress */
	BDI_unused,		/* Available bits start here */
};

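/*
 * Example (illustrative sketch): the state word is shared between
 * contexts and must only be manipulated with atomic bitops, as the
 * comment on backing_dev_info::state below requires:
 *
 *	set_bit(BDI_writeback_running, &bdi->state);
 *	...
 *	if (test_bit(BDI_registered, &bdi->state))
 *		...
 *	clear_bit(BDI_writeback_running, &bdi->state);
 */
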
typedef int (congested_fn)(void *, int);

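/*
 * Example (hypothetical stacking driver, sketch only): md/dm-style
 * drivers supply a congested_fn that forwards the congestion query to
 * an underlying device; "example_congested", "struct example_dev" and
 * "lower_bdi" are invented names:
 *
 *	static int example_congested(void *data, int bdi_bits)
 *	{
 *		struct example_dev *dev = data;
 *
 *		return bdi_congested(&dev->lower_bdi, bdi_bits);
 *	}
 *
 * installed via:
 *
 *	bdi->congested_fn = example_congested;
 *	bdi->congested_data = dev;
 */
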
enum bdi_stat_item {
	BDI_RECLAIMABLE,
	BDI_WRITEBACK,
	NR_BDI_STAT_ITEMS
};

#define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))

struct bdi_writeback {
	struct backing_dev_info *bdi;	/* our parent bdi */
	unsigned int nr;

	unsigned long last_old_flush;	/* last old data flush */
	unsigned long last_active;	/* last time bdi thread was active */

	struct task_struct *task;	/* writeback thread */
	struct timer_list wakeup_timer; /* used for delayed bdi thread wakeup */
	struct list_head b_dirty;	/* dirty inodes */
	struct list_head b_io;		/* parked for writeback */
	struct list_head b_more_io;	/* parked for more writeback */
};

struct backing_dev_info {
	struct list_head bdi_list;
	unsigned long ra_pages;	/* max readahead in PAGE_CACHE_SIZE units */
	unsigned long state;	/* Always use atomic bitops on this */
	unsigned int capabilities; /* Device capabilities */
	congested_fn *congested_fn; /* Function pointer if device is md/dm */
	void *congested_data;	/* Pointer to aux data for congested func */
	void (*unplug_io_fn)(struct backing_dev_info *, struct page *);
	void *unplug_io_data;

	char *name;

	struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];

	struct prop_local_percpu completions;
	int dirty_exceeded;

	unsigned int min_ratio;
	unsigned int max_ratio, max_prop_frac;

	struct bdi_writeback wb;  /* default writeback info for this bdi */
	spinlock_t wb_lock;	  /* protects work_list */

	struct list_head work_list;

	struct device *dev;

	struct timer_list laptop_mode_wb_timer;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;
	struct dentry *debug_stats;
#endif
};

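/*
 * Example (hypothetical filesystem, sketch only): a bdi is typically
 * embedded in a per-device or per-filesystem object and may be set up
 * statically, in the style of noop_backing_dev_info; the "example"
 * names are invented:
 *
 *	static struct backing_dev_info example_bdi = {
 *		.name		= "example",
 *		.ra_pages	= 0,
 *		.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
 *	};
 *
 * ra_pages = 0 disables readahead; the capability mask (defined below)
 * keeps such a bdi out of dirty accounting and writeback, as ramfs-like
 * filesystems want.  Dynamic setup goes through bdi_init() and
 * bdi_register(), declared below.
 */
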
int bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);

int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi);
int bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int);
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages);
void bdi_start_background_writeback(struct backing_dev_info *bdi);
int bdi_writeback_thread(void *data);
int bdi_has_dirty_io(struct backing_dev_info *bdi);
void bdi_arm_supers_timer(void);
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);

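/*
 * Typical lifecycle (sketch; "my_bdi" and the "example" name are
 * placeholders): simple users combine init and registration, then tear
 * down on release:
 *
 *	int err = bdi_setup_and_register(&my_bdi, "example", BDI_CAP_MAP_COPY);
 *	if (err)
 *		return err;
 *	...
 *	bdi_destroy(&my_bdi);
 *
 * bdi_destroy() undoes bdi_init() and also unregisters the bdi if it
 * was registered.
 */
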
extern spinlock_t bdi_lock;
extern struct list_head bdi_list;
extern struct list_head bdi_pending_list;

static inline int wb_has_dirty_io(struct bdi_writeback *wb)
{
	return !list_empty(&wb->b_dirty) ||
	       !list_empty(&wb->b_io) ||
	       !list_empty(&wb->b_more_io);
}

static inline void __add_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item, s64 amount)
{
	__percpu_counter_add(&bdi->bdi_stat[item], amount, BDI_STAT_BATCH);
}

static inline void __inc_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	__add_bdi_stat(bdi, item, 1);
}

static inline void inc_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_bdi_stat(bdi, item);
	local_irq_restore(flags);
}

static inline void __dec_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	__add_bdi_stat(bdi, item, -1);
}

static inline void dec_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_bdi_stat(bdi, item);
	local_irq_restore(flags);
}

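/*
 * Example (sketch of the pattern used by the page-writeback code): a
 * page entering writeback bumps the per-bdi counter, and completion
 * drops it:
 *
 *	inc_bdi_stat(bdi, BDI_WRITEBACK);
 *	...
 *	dec_bdi_stat(bdi, BDI_WRITEBACK);
 *
 * The __-prefixed variants are for callers that already run with
 * interrupts disabled, e.g. under a lock taken with spin_lock_irqsave().
 */
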
static inline s64 bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	return percpu_counter_read_positive(&bdi->bdi_stat[item]);
}

static inline s64 __bdi_stat_sum(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	return percpu_counter_sum_positive(&bdi->bdi_stat[item]);
}

static inline s64 bdi_stat_sum(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	s64 sum;
	unsigned long flags;

	local_irq_save(flags);
	sum = __bdi_stat_sum(bdi, item);
	local_irq_restore(flags);

	return sum;
}

extern void bdi_writeout_inc(struct backing_dev_info *bdi);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long bdi_stat_error(struct backing_dev_info *bdi)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * BDI_STAT_BATCH;
#else
	return 1;
#endif
}

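/*
 * Example (sketch of the pattern used by dirty throttling): prefer the
 * cheap, approximate bdi_stat() and fall back to the exact but
 * expensive bdi_stat_sum() only when the result lands within the error
 * margin of a threshold; "limit" is a placeholder:
 *
 *	if (bdi_stat(bdi, BDI_RECLAIMABLE) > limit - bdi_stat_error(bdi))
 *		nr = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
 *	else
 *		nr = bdi_stat(bdi, BDI_RECLAIMABLE);
 */
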
int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capabilities
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs)
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  The BDI_CAP_NO_ACCT_AND_WRITEBACK macro combines
 * all three for convenience.
 *
 * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:   Don't write pages back
 * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
 *
 * These flags let !MMU mmap() govern direct device mapping vs immediate
 * copying more easily for MAP_PRIVATE, especially for ROM filesystems.
 *
 * BDI_CAP_MAP_COPY:       Copy can be mapped (MAP_PRIVATE)
 * BDI_CAP_MAP_DIRECT:     Can be mapped directly (MAP_SHARED)
 * BDI_CAP_READ_MAP:       Can be mapped for reading
 * BDI_CAP_WRITE_MAP:      Can be mapped for writing
 * BDI_CAP_EXEC_MAP:       Can be mapped for execution
 *
 * BDI_CAP_SWAP_BACKED:    Count shmem/tmpfs objects as swap-backed.
 */
#define BDI_CAP_NO_ACCT_DIRTY	0x00000001
#define BDI_CAP_NO_WRITEBACK	0x00000002
#define BDI_CAP_MAP_COPY	0x00000004
#define BDI_CAP_MAP_DIRECT	0x00000008
#define BDI_CAP_READ_MAP	0x00000010
#define BDI_CAP_WRITE_MAP	0x00000020
#define BDI_CAP_EXEC_MAP	0x00000040
#define BDI_CAP_NO_ACCT_WB	0x00000080
#define BDI_CAP_SWAP_BACKED	0x00000100

#define BDI_CAP_VMFLAGS \
	(BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)

#if defined(VM_MAYREAD) && \
	(BDI_CAP_READ_MAP != VM_MAYREAD || \
	 BDI_CAP_WRITE_MAP != VM_MAYWRITE || \
	 BDI_CAP_EXEC_MAP != VM_MAYEXEC)
#error please change backing_dev_info::capabilities flags
#endif

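/*
 * Example (hypothetical !MMU character device, sketch only): advertise
 * that device memory can be mapped directly for MAP_SHARED, copied for
 * MAP_PRIVATE, and used for read/write/exec mappings:
 *
 *	bdi->capabilities = BDI_CAP_MAP_DIRECT | BDI_CAP_MAP_COPY |
 *			    BDI_CAP_VMFLAGS;
 *
 * BDI_CAP_READ_MAP/WRITE_MAP/EXEC_MAP deliberately mirror the
 * VM_MAYREAD/VM_MAYWRITE/VM_MAYEXEC bits, which the #error check above
 * enforces.
 */
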
extern struct backing_dev_info default_backing_dev_info;
extern struct backing_dev_info noop_backing_dev_info;
void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page);

int writeback_in_progress(struct backing_dev_info *bdi);

static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
{
	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, bdi_bits);
	return (bdi->state & bdi_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << BDI_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << BDI_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << BDI_sync_congested) |
				  (1 << BDI_async_congested));
}

enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};

void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
void set_bdi_congested(struct backing_dev_info *bdi, int sync);
long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct zone *zone, int sync, long timeout);

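/*
 * Example (sketch of the back-off pattern used by writers and memory
 * reclaim): when the device's async queue is congested, sleep briefly
 * instead of piling up more I/O:
 *
 *	if (bdi_write_congested(bdi))
 *		congestion_wait(BLK_RW_ASYNC, HZ/50);
 */
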
static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_SWAP_BACKED;
}

static inline bool bdi_cap_flush_forker(struct backing_dev_info *bdi)
{
	return bdi == &default_backing_dev_info;
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_swap_backed(struct address_space *mapping)
{
	return bdi_cap_swap_backed(mapping->backing_dev_info);
}

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
				       struct page *page)
{
	if (bdi && bdi->unplug_io_fn)
		bdi->unplug_io_fn(bdi, page);
}

static inline void blk_run_address_space(struct address_space *mapping)
{
	if (mapping)
		blk_run_backing_dev(mapping->backing_dev_info, NULL);
}

#endif		/* _LINUX_BACKING_DEV_H */