/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up to
 * high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>

static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
{
	kref_get(&bdi->refcnt);
	return bdi;
}

void bdi_put(struct backing_dev_info *bdi);

__printf(2, 3)
int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...);
__printf(2, 0)
int bdi_register_va(struct backing_dev_info *bdi, const char *fmt,
		    va_list args);
int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner);
void bdi_unregister(struct backing_dev_info *bdi);

struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id);
static inline struct backing_dev_info *bdi_alloc(gfp_t gfp_mask)
{
	return bdi_alloc_node(gfp_mask, NUMA_NO_NODE);
}
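
/*
 * Example (illustrative sketch, not taken from in-tree code): a filesystem
 * or driver that owns its backing_dev_info typically allocates it, registers
 * it under a name and drops its reference on teardown.  The function and the
 * "exfs" name below are hypothetical.
 *
 *	static int exfs_setup_bdi(struct super_block *sb, int id)
 *	{
 *		struct backing_dev_info *bdi;
 *		int err;
 *
 *		bdi = bdi_alloc(GFP_KERNEL);
 *		if (!bdi)
 *			return -ENOMEM;
 *
 *		err = bdi_register(bdi, "exfs-%d", id);
 *		if (err) {
 *			bdi_put(bdi);
 *			return err;
 *		}
 *		sb->s_bdi = bdi;
 *		return 0;
 *	}
 *
 * On teardown the owner calls bdi_unregister() and then bdi_put() to drop
 * the reference taken by bdi_alloc(); bdi_get() pins the bdi for any
 * additional long-lived users.
 */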

void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;
extern struct workqueue_struct *bdi_async_bio_wq;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
	return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	/*
	 * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
	 * any dirty wbs.  See wb_update_write_bandwidth().
	 */
	return atomic_long_read(&bdi->tot_write_bandwidth);
}
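
/*
 * Example (illustrative sketch): a caller that wants to kick background
 * writeback only when there is actually dirty data could combine the
 * helpers above with wb_start_background_writeback().  The function name
 * is hypothetical.
 *
 *	static void example_kick_flusher(struct backing_dev_info *bdi)
 *	{
 *		if (bdi_has_dirty_io(bdi) && wb_has_dirty_io(&bdi->wb))
 *			wb_start_background_writeback(&bdi->wb);
 *	}
 */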

static inline void __add_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item, s64 amount)
{
	percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	__add_wb_stat(wb, item, 1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	__add_wb_stat(wb, item, -1);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_sum_positive(&wb->stat[item]);
}

extern void wb_writeout_inc(struct bdi_writeback *wb);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(void)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * WB_STAT_BATCH;
#else
	return 1;
#endif
}
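
/*
 * Example (illustrative sketch): the wb counters are percpu, so wb_stat()
 * is a cheap, approximate read while wb_stat_sum() folds all CPUs for an
 * exact (but more expensive) value.  wb_stat_error() bounds how far the
 * approximate read can be off.  The function name is hypothetical.
 *
 *	static void example_report_wb(struct bdi_writeback *wb)
 *	{
 *		s64 reclaimable = wb_stat(wb, WB_RECLAIMABLE);
 *		s64 writeback = wb_stat_sum(wb, WB_WRITEBACK);
 *
 *		pr_debug("reclaimable ~%lld (+/- %lu), writeback %lld\n",
 *			 reclaimable, wb_stat_error(), writeback);
 *	}
 */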

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capability
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs).
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
 * three flags into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:   Don't write pages back
 * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
 * BDI_CAP_STABLE_WRITES:  Pages must not be modified while under writeback
 * BDI_CAP_STRICTLIMIT:    Keep number of dirty pages below bdi threshold.
 *
 * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback.
 * BDI_CAP_SYNCHRONOUS_IO: Device is so fast that asynchronous IO would be
 *			   inefficient.
 */
#define BDI_CAP_NO_ACCT_DIRTY	0x00000001
#define BDI_CAP_NO_WRITEBACK	0x00000002
#define BDI_CAP_NO_ACCT_WB	0x00000004
#define BDI_CAP_STABLE_WRITES	0x00000008
#define BDI_CAP_STRICTLIMIT	0x00000010
#define BDI_CAP_CGROUP_WRITEBACK 0x00000020
#define BDI_CAP_SYNCHRONOUS_IO	0x00000040

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
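
/*
 * Example (illustrative sketch): a RAM-backed filesystem that neither
 * accounts nor writes back dirty pages would set the combined capability
 * on its bdi; the bdi_cap_*() helpers further below are the accessors the
 * rest of the kernel uses to query individual bits.  The function name is
 * hypothetical.
 *
 *	static void example_setup_ramfs_like_bdi(struct backing_dev_info *bdi)
 *	{
 *		bdi->capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK;
 *	}
 *
 * With that setting, bdi_cap_account_dirty(), bdi_cap_writeback_dirty()
 * and bdi_cap_account_writeback() all return false for @bdi.
 */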

extern struct backing_dev_info noop_backing_dev_info;

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @wb: bdi_writeback of interest
 *
 * Determine whether there is writeback waiting to be handled against a
 * bdi_writeback.
 */
static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
	return test_bit(WB_writeback_running, &wb->state);
}

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb;

	if (!inode)
		return &noop_backing_dev_info;

	sb = inode->i_sb;
#ifdef CONFIG_BLOCK
	if (sb_is_blkdev_sb(sb))
		return I_BDEV(inode)->bd_bdi;
#endif
	return sb->s_bdi;
}

static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
{
	struct backing_dev_info *bdi = wb->bdi;

	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, cong_bits);
	return wb->congested->state & cong_bits;
}

long congestion_wait(int sync, long timeout);
long wait_iff_congested(int sync, long timeout);

static inline bool bdi_cap_synchronous_io(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_SYNCHRONOUS_IO;
}

static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_STABLE_WRITES;
}

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
}

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp);
void wb_congested_put(struct bdi_writeback_congested *congested);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct blkcg *blkcg);
int inode_congested(struct inode *inode, int cong_bits);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * cgroup writeback requires support from both the bdi and filesystem.
 * Also, both memcg and iocg have to be on the default hierarchy.  Test
 * whether all conditions are met.
 *
 * Note that the test result may change dynamically on the same inode
 * depending on how memcg and iocg are configured.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
		cgroup_subsys_on_dfl(io_cgrp_subsys) &&
		bdi_cap_account_dirty(bdi) &&
		(bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
		(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}

/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock(), which protects the returned wb.
 * Returns NULL if not found.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	struct cgroup_subsys_state *memcg_css;
	struct bdi_writeback *wb;

	memcg_css = task_css(current, memory_cgrp_id);
	if (!memcg_css->parent)
		return &bdi->wb;

	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

	/*
	 * %current's blkcg equals the effective blkcg of its memcg.  No
	 * need to use the relatively expensive cgroup_get_e_css().
	 */
	if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
		return wb;
	return NULL;
}

/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg.  This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	struct bdi_writeback *wb;

	rcu_read_lock();
	wb = wb_find_current(bdi);
	if (wb && unlikely(!wb_tryget(wb)))
		wb = NULL;
	rcu_read_unlock();

	if (unlikely(!wb)) {
		struct cgroup_subsys_state *memcg_css;

		memcg_css = task_get_css(current, memory_cgrp_id);
		wb = wb_get_create(bdi, memcg_css, gfp);
		css_put(memcg_css);
	}
	return wb;
}
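
/*
 * Example (illustrative sketch): writeback paths that need the wb matching
 * %current's cgroups try the cheap RCU lookup first and fall back to
 * creation, which is what wb_get_create_current() wraps.  The returned wb
 * carries a reference that must be dropped with wb_put().  The function
 * name is hypothetical.
 *
 *	static void example_use_current_wb(struct backing_dev_info *bdi)
 *	{
 *		struct bdi_writeback *wb;
 *
 *		wb = wb_get_create_current(bdi, GFP_ATOMIC);
 *		if (wb) {
 *			// ... account or queue writeback against wb ...
 *			wb_put(wb);
 *		}
 *	}
 */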

/**
 * inode_to_wb_is_valid - test whether an inode has a wb associated
 * @inode: inode of interest
 *
 * Returns %true if @inode has a wb associated.  May be called without any
 * locking.
 */
static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return inode->i_wb;
}

/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with.  The caller must be
 * holding either @inode->i_lock, the i_pages lock, or the
 * associated wb's list_lock.
 */
static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(debug_locks &&
		     (!lockdep_is_held(&inode->i_lock) &&
		      !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) &&
		      !lockdep_is_held(&inode->i_wb->list_lock)));
#endif
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
 * @inode: target inode
 * @cookie: output param, to be passed to the end function
 *
 * The caller wants to access the wb associated with @inode but isn't
 * holding inode->i_lock, the i_pages lock or wb->list_lock.  This
 * function determines the wb associated with @inode and ensures that the
 * association doesn't change until the transaction is finished with
 * unlocked_inode_to_wb_end().
 *
 * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and
 * can't sleep during the transaction.  IRQs may or may not be disabled on
 * return.
 */
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	rcu_read_lock();

	/*
	 * Paired with store_release in inode_switch_wbs_work_fn() and
	 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
	 */
	cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;

	if (unlikely(cookie->locked))
		xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags);

	/*
	 * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages
	 * lock.  inode_to_wb() will bark.  Deref directly.
	 */
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_end - end inode wb access transaction
 * @inode: target inode
 * @cookie: @cookie from unlocked_inode_to_wb_begin()
 */
static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
	if (unlikely(cookie->locked))
		xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags);

	rcu_read_unlock();
}
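
/*
 * Example (illustrative sketch): lockless stat updates follow the
 * begin/end transaction above so that @inode cannot switch wbs underneath
 * the caller; no sleeping is allowed in between.  The function name is
 * hypothetical.
 *
 *	static void example_account_dirtied(struct inode *inode)
 *	{
 *		struct wb_lock_cookie cookie = {};
 *		struct bdi_writeback *wb;
 *
 *		wb = unlocked_inode_to_wb_begin(inode, &cookie);
 *		inc_wb_stat(wb, WB_DIRTIED);
 *		unlocked_inode_to_wb_end(inode, &cookie);
 *	}
 */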

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
	return false;
}

static inline struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
	refcount_inc(&bdi->wb_congested->refcnt);
	return bdi->wb_congested;
}

static inline void wb_congested_put(struct bdi_writeback_congested *congested)
{
	if (refcount_dec_and_test(&congested->refcnt))
		kfree(congested);
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	return &bdi->wb;
}

static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return true;
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return &inode_to_bdi(inode)->wb;
}

static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	return inode_to_wb(inode);
}

static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct blkcg *blkcg)
{
}

static inline int inode_congested(struct inode *inode, int cong_bits)
{
	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

static inline int inode_read_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_sync_congested);
}

static inline int inode_write_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_async_congested);
}

static inline int inode_rw_congested(struct inode *inode)
{
	return inode_congested(inode, (1 << WB_sync_congested) |
				      (1 << WB_async_congested));
}

static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
{
	return wb_congested(&bdi->wb, cong_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << WB_sync_congested) |
				  (1 << WB_async_congested));
}
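
/*
 * Example (illustrative sketch): producers of writeback, such as reclaim
 * or flusher-like paths, commonly back off while the device is congested
 * by polling the helpers above and sleeping in congestion_wait().  The
 * function name is hypothetical.
 *
 *	static void example_throttle_on_congestion(struct backing_dev_info *bdi)
 *	{
 *		while (bdi_write_congested(bdi))
 *			congestion_wait(BLK_RW_ASYNC, HZ / 10);
 *	}
 */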

#endif	/* _LINUX_BACKING_DEV_H */