/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/percpu_counter.h>
#include <linux/log2.h>
#include <linux/proportions.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/writeback.h>
#include <asm/atomic.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in backing_dev_info.state
 */
enum bdi_state {
	BDI_pending,		/* On its way to being activated */
	BDI_wb_alloc,		/* Default embedded wb allocated */
	BDI_async_congested,	/* The async (write) queue is getting full */
	BDI_sync_congested,	/* The sync queue is getting full */
	BDI_registered,		/* bdi_register() was done */
	BDI_unused,		/* Available bits start here */
};
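/*
 * These enum values are bit numbers, not masks: backing_dev_info.state is
 * only manipulated with atomic bitops.  An illustrative sketch of the usual
 * pattern (not lifted from any particular caller):
 *
 *	set_bit(BDI_pending, &bdi->state);
 *	...
 *	if (test_bit(BDI_registered, &bdi->state))
 *		...;
 *	clear_bit(BDI_pending, &bdi->state);
 */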

typedef int (congested_fn)(void *, int);

enum bdi_stat_item {
	BDI_RECLAIMABLE,
	BDI_WRITEBACK,
	NR_BDI_STAT_ITEMS
};

#define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
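/*
 * Worked example (assuming nr_cpu_ids == 16): ilog2(16) == 4, so
 * BDI_STAT_BATCH == 8 * (1 + 4) == 40.  Each CPU may accumulate up to the
 * batch size locally before folding into the global percpu_counter, which
 * is what bounds the error seen by readers of the per-bdi stats.
 */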

struct bdi_writeback {
	struct list_head list;			/* hangs off the bdi */

	struct backing_dev_info *bdi;		/* our parent bdi */
	unsigned int nr;

	unsigned long last_old_flush;		/* last old data flush */

	struct task_struct	*task;		/* writeback task */
	struct list_head	b_dirty;	/* dirty inodes */
	struct list_head	b_io;		/* parked for writeback */
	struct list_head	b_more_io;	/* parked for more writeback */
};

struct backing_dev_info {
	struct list_head bdi_list;
	struct rcu_head rcu_head;
	unsigned long ra_pages;	/* max readahead in PAGE_CACHE_SIZE units */
	unsigned long state;	/* Always use atomic bitops on this */
	unsigned int capabilities; /* Device capabilities */
	congested_fn *congested_fn; /* Function pointer if device is md/dm */
	void *congested_data;	/* Pointer to aux data for congested func */
	void (*unplug_io_fn)(struct backing_dev_info *, struct page *);
	void *unplug_io_data;

	char *name;

	struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];

	struct prop_local_percpu completions;
	int dirty_exceeded;

	unsigned int min_ratio;
	unsigned int max_ratio, max_prop_frac;

	struct bdi_writeback wb;  /* default writeback info for this bdi */
	spinlock_t wb_lock;	  /* protects update side of wb_list */
	struct list_head wb_list; /* the flusher threads hanging off this bdi */

	struct list_head work_list;

	struct device *dev;

	struct timer_list laptop_mode_wb_timer;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;
	struct dentry *debug_stats;
#endif
};

int bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);

int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi);
int bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int);
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages);
void bdi_start_background_writeback(struct backing_dev_info *bdi);
int bdi_writeback_task(struct bdi_writeback *wb);
int bdi_has_dirty_io(struct backing_dev_info *bdi);
void bdi_arm_supers_timer(void);
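/*
 * Typical lifecycle, sketched for illustration only; the "mydrv" name, the
 * instance number and the error handling below are assumptions, not taken
 * from this header:
 *
 *	static struct backing_dev_info mydrv_bdi = {
 *		.name		= "mydrv",
 *		.ra_pages	= 0,
 *	};
 *
 *	err = bdi_init(&mydrv_bdi);
 *	if (err)
 *		goto fail;
 *	err = bdi_register(&mydrv_bdi, NULL, "mydrv-%d", instance);
 *	if (err) {
 *		bdi_destroy(&mydrv_bdi);
 *		goto fail;
 *	}
 *	...
 *	bdi_unregister(&mydrv_bdi);
 *	bdi_destroy(&mydrv_bdi);
 *
 * bdi_setup_and_register() is a convenience wrapper that performs the init
 * and register steps in one call.
 */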

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

static inline int wb_has_dirty_io(struct bdi_writeback *wb)
{
	return !list_empty(&wb->b_dirty) ||
	       !list_empty(&wb->b_io) ||
	       !list_empty(&wb->b_more_io);
}

/*
 * The double-underscore variants assume the caller has already disabled
 * interrupts (or is otherwise serialised on this CPU); the plain inc/dec
 * wrappers below take care of that with local_irq_save().
 */
static inline void __add_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item, s64 amount)
{
	__percpu_counter_add(&bdi->bdi_stat[item], amount, BDI_STAT_BATCH);
}

static inline void __inc_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	__add_bdi_stat(bdi, item, 1);
}

static inline void inc_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_bdi_stat(bdi, item);
	local_irq_restore(flags);
}

static inline void __dec_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	__add_bdi_stat(bdi, item, -1);
}

static inline void dec_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_bdi_stat(bdi, item);
	local_irq_restore(flags);
}
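/*
 * Illustrative sketch, loosely modelled on the dirty-page accounting paths
 * rather than a verbatim copy of them: a page that becomes dirty bumps
 * BDI_RECLAIMABLE for its backing device; when writeback starts, the
 * accounting moves over to BDI_WRITEBACK.  The double-underscore forms are
 * used where interrupts are already disabled:
 *
 *	struct backing_dev_info *bdi = mapping->backing_dev_info;
 *
 *	__inc_bdi_stat(bdi, BDI_RECLAIMABLE);
 *	...
 *	dec_bdi_stat(bdi, BDI_RECLAIMABLE);
 *	inc_bdi_stat(bdi, BDI_WRITEBACK);
 *	...
 *	dec_bdi_stat(bdi, BDI_WRITEBACK);
 */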

/*
 * bdi_stat() is a fast, approximate read of a per-bdi counter; it may be
 * off by up to bdi_stat_error().  bdi_stat_sum() folds in every CPU's
 * local delta and is therefore exact, but considerably more expensive.
 */
static inline s64 bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	return percpu_counter_read_positive(&bdi->bdi_stat[item]);
}

static inline s64 __bdi_stat_sum(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	return percpu_counter_sum_positive(&bdi->bdi_stat[item]);
}

static inline s64 bdi_stat_sum(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	s64 sum;
	unsigned long flags;

	local_irq_save(flags);
	sum = __bdi_stat_sum(bdi, item);
	local_irq_restore(flags);

	return sum;
}

extern void bdi_writeout_inc(struct backing_dev_info *bdi);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long bdi_stat_error(struct backing_dev_info *bdi)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * BDI_STAT_BATCH;
#else
	return 1;
#endif
}
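/*
 * Illustrative pattern, similar in spirit to what the dirty-throttling code
 * does rather than a quote of it: when a computed per-bdi threshold is
 * within the stat error, fall back to the exact but slower sum:
 *
 *	if (bdi_thresh < 2 * bdi_stat_error(bdi))
 *		nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
 *	else
 *		nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
 */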

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capabilities
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs).
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
 * three flags into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:   Don't write pages back
 * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
 *
 * These flags let !MMU mmap() govern direct device mapping vs immediate
 * copying more easily for MAP_PRIVATE, especially for ROM filesystems.
 *
 * BDI_CAP_MAP_COPY:       Copy can be mapped (MAP_PRIVATE)
 * BDI_CAP_MAP_DIRECT:     Can be mapped directly (MAP_SHARED)
 * BDI_CAP_READ_MAP:       Can be mapped for reading
 * BDI_CAP_WRITE_MAP:      Can be mapped for writing
 * BDI_CAP_EXEC_MAP:       Can be mapped for execution
 *
 * BDI_CAP_SWAP_BACKED:    Count shmem/tmpfs objects as swap-backed.
 */
#define BDI_CAP_NO_ACCT_DIRTY	0x00000001
#define BDI_CAP_NO_WRITEBACK	0x00000002
#define BDI_CAP_MAP_COPY	0x00000004
#define BDI_CAP_MAP_DIRECT	0x00000008
#define BDI_CAP_READ_MAP	0x00000010
#define BDI_CAP_WRITE_MAP	0x00000020
#define BDI_CAP_EXEC_MAP	0x00000040
#define BDI_CAP_NO_ACCT_WB	0x00000080
#define BDI_CAP_SWAP_BACKED	0x00000100

#define BDI_CAP_VMFLAGS \
	(BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)

#if defined(VM_MAYREAD) && \
	(BDI_CAP_READ_MAP != VM_MAYREAD || \
	 BDI_CAP_WRITE_MAP != VM_MAYWRITE || \
	 BDI_CAP_EXEC_MAP != VM_MAYEXEC)
#error please change backing_dev_info::capabilities flags
#endif
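/*
 * Illustrative capabilities for an in-memory filesystem that wants neither
 * dirty accounting nor writeback (roughly the shape of what ramfs sets up;
 * the exact flags any given filesystem uses are not dictated by this
 * header):
 *
 *	static struct backing_dev_info example_bdi = {
 *		.name		= "example",
 *		.ra_pages	= 0,
 *		.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK |
 *				  BDI_CAP_MAP_DIRECT | BDI_CAP_MAP_COPY |
 *				  BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP |
 *				  BDI_CAP_EXEC_MAP,
 *	};
 */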

extern struct backing_dev_info default_backing_dev_info;
extern struct backing_dev_info noop_backing_dev_info;
void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page);

int writeback_in_progress(struct backing_dev_info *bdi);

static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
{
	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, bdi_bits);
	return (bdi->state & bdi_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << BDI_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << BDI_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << BDI_sync_congested) |
				  (1 << BDI_async_congested));
}

enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};

void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
void set_bdi_congested(struct backing_dev_info *bdi, int sync);
long congestion_wait(int sync, long timeout);
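/*
 * Illustrative back-off loop (a sketch of the common pattern, not lifted
 * from any particular caller): writers that find the async queue congested
 * typically wait a short while before retrying:
 *
 *	while (bdi_write_congested(bdi))
 *		congestion_wait(BLK_RW_ASYNC, HZ / 50);
 */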


static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_SWAP_BACKED;
}

static inline bool bdi_cap_flush_forker(struct backing_dev_info *bdi)
{
	return bdi == &default_backing_dev_info;
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_swap_backed(struct address_space *mapping)
{
	return bdi_cap_swap_backed(mapping->backing_dev_info);
}

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}
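/*
 * bdi_sched_wait() is intended as an action callback for wait_on_bit(): it
 * simply schedules until the watched bit clears.  Illustrative use (a
 * sketch of the pattern, assuming a state bit such as BDI_pending):
 *
 *	wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
 *		    TASK_UNINTERRUPTIBLE);
 */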

static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
				       struct page *page)
{
	if (bdi && bdi->unplug_io_fn)
		bdi->unplug_io_fn(bdi, page);
}

static inline void blk_run_address_space(struct address_space *mapping)
{
	if (mapping)
		blk_run_backing_dev(mapping->backing_dev_info, NULL);
}
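/*
 * Both helpers kick the backing device's unplug function so that queued
 * requests get dispatched.  Illustrative "submit, unplug, then wait" sketch
 * (bio and done are assumed to be set up by the caller; this is not a copy
 * of any one in-tree user):
 *
 *	submit_bio(WRITE, bio);
 *	blk_run_address_space(mapping);
 *	wait_for_completion(&done);
 */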

#endif		/* _LINUX_BACKING_DEV_H */