/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/writeback.h>
#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>

static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
{
	kref_get(&bdi->refcnt);
	return bdi;
}

struct backing_dev_info *bdi_get_by_id(u64 id);
void bdi_put(struct backing_dev_info *bdi);
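
/*
 * Illustrative sketch (not from this header): bdi_get_by_id() hands back
 * a reference that the caller owns and must drop with bdi_put(); extra
 * references for longer-lived pointers come from bdi_get().
 *
 *	struct backing_dev_info *bdi = bdi_get_by_id(id);
 *
 *	if (bdi) {
 *		... use bdi, bdi_get() it if the pointer must outlive
 *		    this scope ...
 *		bdi_put(bdi);
 *	}
 */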

__printf(2, 3)
int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...);
__printf(2, 0)
int bdi_register_va(struct backing_dev_info *bdi, const char *fmt,
		    va_list args);
void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner);
void bdi_unregister(struct backing_dev_info *bdi);

struct backing_dev_info *bdi_alloc(int node_id);
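
/*
 * Typical driver-side lifecycle (illustrative sketch; error handling
 * trimmed, "instance" is a hypothetical name):
 *
 *	struct backing_dev_info *bdi = bdi_alloc(NUMA_NO_NODE);
 *
 *	if (!bdi)
 *		return -ENOMEM;
 *	if (bdi_register(bdi, "mydev%d", instance))
 *		goto err_put;
 *	...
 *	bdi_unregister(bdi);
 *	bdi_put(bdi);		drops the reference from bdi_alloc()
 */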

void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);

void wb_wait_for_completion(struct wb_completion *done);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;
extern struct workqueue_struct *bdi_async_bio_wq;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
	return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	/*
	 * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
	 * any dirty wbs.  See wb_update_write_bandwidth().
	 */
	return atomic_long_read(&bdi->tot_write_bandwidth);
}

static inline void __add_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item, s64 amount)
{
	percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	__add_wb_stat(wb, item, 1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	__add_wb_stat(wb, item, -1);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_sum_positive(&wb->stat[item]);
}

extern void wb_writeout_inc(struct bdi_writeback *wb);
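
/*
 * Illustrative sketch of how the writeback code drives these counters
 * (not a verbatim excerpt): a page entering writeback bumps
 * WB_WRITEBACK, and completion drops it again and records the writeout.
 *
 *	inc_wb_stat(wb, WB_WRITEBACK);	page handed to the device
 *	...
 *	dec_wb_stat(wb, WB_WRITEBACK);	IO completed
 *	wb_writeout_inc(wb);		counts the page as written out
 */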

/*
 * Maximal error of a stat counter.  Each per-CPU counter may batch up
 * to WB_STAT_BATCH events locally before folding them into the global
 * count, so a read can be off by at most nr_cpu_ids * WB_STAT_BATCH.
 */
static inline unsigned long wb_stat_error(void)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * WB_STAT_BATCH;
#else
	return 1;
#endif
}
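
/*
 * For example (hypothetical numbers): with nr_cpu_ids == 8 and
 * WB_STAT_BATCH == 32, wb_stat() may deviate from the true value by up
 * to 256; use wb_stat_sum() where an exact count is needed.
 */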

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capability
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs)
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
 * three flags into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:   Don't write pages back
 * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
 * BDI_CAP_STABLE_WRITES:  Pages must not be modified while under writeback
 * BDI_CAP_STRICTLIMIT:    Keep number of dirty pages below bdi threshold
 *
 * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback.
 * BDI_CAP_SYNCHRONOUS_IO: Device is so fast that asynchronous IO would be
 *			   inefficient.
 */
#define BDI_CAP_NO_ACCT_DIRTY	0x00000001
#define BDI_CAP_NO_WRITEBACK	0x00000002
#define BDI_CAP_NO_ACCT_WB	0x00000004
#define BDI_CAP_STABLE_WRITES	0x00000008
#define BDI_CAP_STRICTLIMIT	0x00000010
#define BDI_CAP_CGROUP_WRITEBACK 0x00000020
#define BDI_CAP_SYNCHRONOUS_IO	0x00000040

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)

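/*
 * Illustrative sketch (hypothetical in-memory filesystem): a bdi whose
 * pages can never be written back opts out of dirty accounting and
 * writeback entirely, the way ramfs-like filesystems do:
 *
 *	bdi->capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK;
 *
 * whereas a device that wants its dirty pages throttled strictly to
 * its own threshold might set:
 *
 *	bdi->capabilities |= BDI_CAP_STRICTLIMIT;
 */
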
extern struct backing_dev_info noop_backing_dev_info;

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @wb: bdi_writeback of interest
 *
 * Determine whether there is writeback waiting to be handled against a
 * bdi_writeback.
 */
static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
	return test_bit(WB_writeback_running, &wb->state);
}

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb;

	if (!inode)
		return &noop_backing_dev_info;

	sb = inode->i_sb;
#ifdef CONFIG_BLOCK
	if (sb_is_blkdev_sb(sb))
		return I_BDEV(inode)->bd_bdi;
#endif
	return sb->s_bdi;
}

static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
{
	struct backing_dev_info *bdi = wb->bdi;

	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, cong_bits);
	return wb->congested->state & cong_bits;
}

long congestion_wait(int sync, long timeout);
long wait_iff_congested(int sync, long timeout);

static inline bool bdi_cap_synchronous_io(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_SYNCHRONOUS_IO;
}

static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_STABLE_WRITES;
}

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
}

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp);
void wb_congested_put(struct bdi_writeback_congested *congested);
struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct blkcg *blkcg);
int inode_congested(struct inode *inode, int cong_bits);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * cgroup writeback requires support from both the bdi and filesystem.
 * Also, both memcg and iocg have to be on the default hierarchy.  Test
 * whether all conditions are met.
 *
 * Note that the test result may change dynamically on the same inode
 * depending on how memcg and iocg are configured.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
		cgroup_subsys_on_dfl(io_cgrp_subsys) &&
		bdi_cap_account_dirty(bdi) &&
		(bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
		(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}

/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock(), which protects the returned wb.
 * Returns NULL if not found.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	struct cgroup_subsys_state *memcg_css;
	struct bdi_writeback *wb;

	memcg_css = task_css(current, memory_cgrp_id);
	if (!memcg_css->parent)
		return &bdi->wb;

	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

	/*
	 * %current's blkcg equals the effective blkcg of its memcg.  No
	 * need to use the relatively expensive cgroup_get_e_css().
	 */
	if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
		return wb;
	return NULL;
}
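
/*
 * Illustrative sketch of the locking contract (not from this header):
 * the returned wb is only guaranteed to stay alive inside the RCU
 * section unless the caller takes its own reference, the same pattern
 * wb_get_create_current() below uses internally.
 *
 *	struct bdi_writeback *wb;
 *
 *	rcu_read_lock();
 *	wb = wb_find_current(bdi);
 *	if (wb && wb_tryget(wb)) {
 *		... wb remains usable after rcu_read_unlock() ...
 *		wb_put(wb);
 *	}
 *	rcu_read_unlock();
 */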

/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg.  This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	struct bdi_writeback *wb;

	rcu_read_lock();
	wb = wb_find_current(bdi);
	if (wb && unlikely(!wb_tryget(wb)))
		wb = NULL;
	rcu_read_unlock();

	if (unlikely(!wb)) {
		struct cgroup_subsys_state *memcg_css;

		memcg_css = task_get_css(current, memory_cgrp_id);
		wb = wb_get_create(bdi, memcg_css, gfp);
		css_put(memcg_css);
	}
	return wb;
}

/**
 * inode_to_wb_is_valid - test whether an inode has a wb associated
 * @inode: inode of interest
 *
 * Returns %true if @inode has a wb associated.  May be called without any
 * locking.
 */
static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return inode->i_wb;
}

/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with.  The caller must be
 * holding either @inode->i_lock, the i_pages lock, or the
 * associated wb's list_lock.
 */
static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(debug_locks &&
		     (!lockdep_is_held(&inode->i_lock) &&
		      !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) &&
		      !lockdep_is_held(&inode->i_wb->list_lock)));
#endif
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
 * @inode: target inode
 * @cookie: output param, to be passed to the end function
 *
 * The caller wants to access the wb associated with @inode but isn't
 * holding inode->i_lock, the i_pages lock or wb->list_lock.  This
 * function determines the wb associated with @inode and ensures that the
 * association doesn't change until the transaction is finished with
 * unlocked_inode_to_wb_end().
 *
 * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and
 * can't sleep during the transaction.  IRQs may or may not be disabled on
 * return.
 */
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	rcu_read_lock();

	/*
	 * Paired with store_release in inode_switch_wbs_work_fn() and
	 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
	 */
	cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;

	if (unlikely(cookie->locked))
		xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags);

	/*
	 * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages
	 * lock.  inode_to_wb() will bark.  Deref directly.
	 */
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_end - end inode wb access transaction
 * @inode: target inode
 * @cookie: @cookie from unlocked_inode_to_wb_begin()
 */
static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
	if (unlikely(cookie->locked))
		xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags);

	rcu_read_unlock();
}
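
/*
 * Illustrative sketch of a begin/end transaction (not from this
 * header); the dirty accounting paths in mm/page-writeback.c use this
 * pattern to reach the wb without holding any inode locks:
 *
 *	struct wb_lock_cookie cookie = {};
 *	struct bdi_writeback *wb;
 *
 *	wb = unlocked_inode_to_wb_begin(inode, &cookie);
 *	... nonsleeping accesses to wb ...
 *	unlocked_inode_to_wb_end(inode, &cookie);
 */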

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
	return false;
}

static inline struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
	refcount_inc(&bdi->wb_congested->refcnt);
	return bdi->wb_congested;
}

static inline void wb_congested_put(struct bdi_writeback_congested *congested)
{
	if (refcount_dec_and_test(&congested->refcnt))
		kfree(congested);
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	return &bdi->wb;
}

static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return true;
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return &inode_to_bdi(inode)->wb;
}

static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	return inode_to_wb(inode);
}

static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct blkcg *blkcg)
{
}

static inline int inode_congested(struct inode *inode, int cong_bits)
{
	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

static inline int inode_read_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_sync_congested);
}

static inline int inode_write_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_async_congested);
}

static inline int inode_rw_congested(struct inode *inode)
{
	return inode_congested(inode, (1 << WB_sync_congested) |
				      (1 << WB_async_congested));
}

static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
{
	return wb_congested(&bdi->wb, cong_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << WB_sync_congested) |
				  (1 << WB_async_congested));
}
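
/*
 * Illustrative sketch (not from this header): readahead and writeback
 * paths typically poll these helpers and back off rather than queue
 * more IO against a congested device:
 *
 *	if (bdi_write_congested(bdi)) {
 *		congestion_wait(BLK_RW_ASYNC, HZ / 10);
 *		... retry or skip this round of writeout ...
 *	}
 */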

const char *bdi_dev_name(struct backing_dev_info *bdi);

#endif	/* _LINUX_BACKING_DEV_H */