xref: /linux-6.15/include/linux/backing-dev.h (revision e58dd0de)
1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  * include/linux/backing-dev.h
41da177e4SLinus Torvalds  *
51da177e4SLinus Torvalds  * low-level device information and state which is propagated up through
61da177e4SLinus Torvalds  * to high-level code.
71da177e4SLinus Torvalds  */
81da177e4SLinus Torvalds 
91da177e4SLinus Torvalds #ifndef _LINUX_BACKING_DEV_H
101da177e4SLinus Torvalds #define _LINUX_BACKING_DEV_H
111da177e4SLinus Torvalds 
12cf0ca9feSPeter Zijlstra #include <linux/kernel.h>
13e4ad08feSMiklos Szeredi #include <linux/fs.h>
1403ba3782SJens Axboe #include <linux/sched.h>
15a212b105STejun Heo #include <linux/blkdev.h>
1603ba3782SJens Axboe #include <linux/writeback.h>
1752ebea74STejun Heo #include <linux/blk-cgroup.h>
1866114cadSTejun Heo #include <linux/backing-dev-defs.h>
19a13f35e8STejun Heo #include <linux/slab.h>
20de1414a6SChristoph Hellwig 
/**
 * bdi_get - take an extra reference on a bdi
 * @bdi: backing device of interest
 *
 * Bumps @bdi->refcnt and returns @bdi so the call can be chained.
 * The reference is dropped with bdi_put().
 */
static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
{
	kref_get(&bdi->refcnt);
	return bdi;
}
26d03f6cdcSJan Kara 
27d03f6cdcSJan Kara void bdi_put(struct backing_dev_info *bdi);
28b2e8fb6eSPeter Zijlstra 
297c4cc300SJan Kara __printf(2, 3)
307c4cc300SJan Kara int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...);
31a93f00b3SMathieu Malaterre __printf(2, 0)
327c4cc300SJan Kara int bdi_register_va(struct backing_dev_info *bdi, const char *fmt,
337c4cc300SJan Kara 		    va_list args);
34df08c32cSDan Williams int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner);
35b02176f3STejun Heo void bdi_unregister(struct backing_dev_info *bdi);
36b02176f3STejun Heo 
37d03f6cdcSJan Kara struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id);
/* Allocate a bdi without NUMA placement preference; see bdi_alloc_node(). */
static inline struct backing_dev_info *bdi_alloc(gfp_t gfp_mask)
{
	return bdi_alloc_node(gfp_mask, NUMA_NO_NODE);
}
42b02176f3STejun Heo 
439ecf4866STejun Heo void wb_start_background_writeback(struct bdi_writeback *wb);
44f0054bb1STejun Heo void wb_workfn(struct work_struct *work);
45f0054bb1STejun Heo void wb_wakeup_delayed(struct bdi_writeback *wb);
46cf0ca9feSPeter Zijlstra 
4703ba3782SJens Axboe extern spinlock_t bdi_lock;
4866f3b8e2SJens Axboe extern struct list_head bdi_list;
4966f3b8e2SJens Axboe 
50839a8e86STejun Heo extern struct workqueue_struct *bdi_wq;
51839a8e86STejun Heo 
/* Does this wb currently have dirty IO? (lockless test of WB_has_dirty_io) */
static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
	return test_bit(WB_has_dirty_io, &wb->state);
}
5603ba3782SJens Axboe 
5795a46c65STejun Heo static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
58e0bf68ddSPeter Zijlstra {
5995a46c65STejun Heo 	/*
6095a46c65STejun Heo 	 * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
6195a46c65STejun Heo 	 * any dirty wbs.  See wb_update_write_bandwidth().
6295a46c65STejun Heo 	 */
6395a46c65STejun Heo 	return atomic_long_read(&bdi->tot_write_bandwidth);
64e0bf68ddSPeter Zijlstra }
65e0bf68ddSPeter Zijlstra 
/* Add @amount to the per-cpu writeback statistic @item of @wb. */
static inline void __add_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item, s64 amount)
{
	/* batched: per-cpu deltas up to WB_STAT_BATCH before folding in */
	percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}
71b2e8fb6eSPeter Zijlstra 
/* Increment the per-cpu writeback statistic @item of @wb by one. */
static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	__add_wb_stat(wb, item, 1);
}

/* Decrement the per-cpu writeback statistic @item of @wb by one. */
static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	__add_wb_stat(wb, item, -1);
}

/*
 * Fast, approximate read of statistic @item.  May be off by up to
 * wb_stat_error() due to unflushed per-cpu deltas; never negative.
 */
static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}

/*
 * Slow, accurate read of statistic @item: sums all per-cpu deltas.
 * Never reports a negative value.
 */
static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_sum_positive(&wb->stat[item]);
}
91b2e8fb6eSPeter Zijlstra 
9293f78d88STejun Heo extern void wb_writeout_inc(struct bdi_writeback *wb);
93dd5656e5SMiklos Szeredi 
/*
 * Maximal error of a stat counter: each possible CPU may hold up to
 * WB_STAT_BATCH uncommitted counts, so a wb_stat() fast read can be
 * off by at most one batch per CPU.
 */
static inline unsigned long wb_stat_error(void)
{
#ifdef CONFIG_SMP
	return WB_STAT_BATCH * nr_cpu_ids;
#else
	return 1;
#endif
}
1051da177e4SLinus Torvalds 
106189d3c4aSPeter Zijlstra int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
107a42dde04SPeter Zijlstra int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
108189d3c4aSPeter Zijlstra 
1091da177e4SLinus Torvalds /*
1101da177e4SLinus Torvalds  * Flags in backing_dev_info::capability
111e4ad08feSMiklos Szeredi  *
112e4ad08feSMiklos Szeredi  * The first three flags control whether dirty pages will contribute to the
1131da177e4SLinus Torvalds  * VM's accounting and whether writepages() should be called for dirty pages
1141da177e4SLinus Torvalds  * (something that would not, for example, be appropriate for ramfs)
115e4ad08feSMiklos Szeredi  *
116e4ad08feSMiklos Szeredi  * WARNING: these flags are closely related and should not normally be
117e4ad08feSMiklos Szeredi  * used separately.  The BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
118e4ad08feSMiklos Szeredi  * three flags into a single convenience macro.
119e4ad08feSMiklos Szeredi  *
120e4ad08feSMiklos Szeredi  * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
121e4ad08feSMiklos Szeredi  * BDI_CAP_NO_WRITEBACK:   Don't write pages back
122e4ad08feSMiklos Szeredi  * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
 * BDI_CAP_STABLE_WRITES: Device requires stable page writes; see
 *			  bdi_cap_stable_pages_required().
 * BDI_CAP_STRICTLIMIT:    Keep number of dirty pages below bdi threshold.
12489e9b9e0STejun Heo  *
12589e9b9e0STejun Heo  * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback.
12623c47d2aSMinchan Kim  * BDI_CAP_SYNCHRONOUS_IO: Device is so fast that asynchronous IO would be
12723c47d2aSMinchan Kim  *			   inefficient.
1281da177e4SLinus Torvalds  */
129e4ad08feSMiklos Szeredi #define BDI_CAP_NO_ACCT_DIRTY	0x00000001
130e4ad08feSMiklos Szeredi #define BDI_CAP_NO_WRITEBACK	0x00000002
131b4caecd4SChristoph Hellwig #define BDI_CAP_NO_ACCT_WB	0x00000004
132b4caecd4SChristoph Hellwig #define BDI_CAP_STABLE_WRITES	0x00000008
133b4caecd4SChristoph Hellwig #define BDI_CAP_STRICTLIMIT	0x00000010
13489e9b9e0STejun Heo #define BDI_CAP_CGROUP_WRITEBACK 0x00000020
13523c47d2aSMinchan Kim #define BDI_CAP_SYNCHRONOUS_IO	0x00000040
1361da177e4SLinus Torvalds 
137e4ad08feSMiklos Szeredi #define BDI_CAP_NO_ACCT_AND_WRITEBACK \
138e4ad08feSMiklos Szeredi 	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
139e4ad08feSMiklos Szeredi 
1405129a469SJörn Engel extern struct backing_dev_info noop_backing_dev_info;
1411da177e4SLinus Torvalds 
/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @wb: bdi_writeback of interest
 *
 * Determine whether there is writeback waiting to be handled against a
 * bdi_writeback.  This is a lockless test of WB_writeback_running in
 * @wb->state.
 */
static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
	return test_bit(WB_writeback_running, &wb->state);
}
1531da177e4SLinus Torvalds 
154a212b105STejun Heo static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
155a212b105STejun Heo {
156a212b105STejun Heo 	struct super_block *sb;
157a212b105STejun Heo 
158a212b105STejun Heo 	if (!inode)
159a212b105STejun Heo 		return &noop_backing_dev_info;
160a212b105STejun Heo 
161a212b105STejun Heo 	sb = inode->i_sb;
162a212b105STejun Heo #ifdef CONFIG_BLOCK
163a212b105STejun Heo 	if (sb_is_blkdev_sb(sb))
164efa7c9f9SJan Kara 		return I_BDEV(inode)->bd_bdi;
165a212b105STejun Heo #endif
166a212b105STejun Heo 	return sb->s_bdi;
167a212b105STejun Heo }
168a212b105STejun Heo 
169ec8a6f26STejun Heo static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
1701da177e4SLinus Torvalds {
171ec8a6f26STejun Heo 	struct backing_dev_info *bdi = wb->bdi;
172ec8a6f26STejun Heo 
1731da177e4SLinus Torvalds 	if (bdi->congested_fn)
174ec8a6f26STejun Heo 		return bdi->congested_fn(bdi->congested_data, cong_bits);
175ec8a6f26STejun Heo 	return wb->congested->state & cong_bits;
1761da177e4SLinus Torvalds }
1771da177e4SLinus Torvalds 
1788aa7e847SJens Axboe long congestion_wait(int sync, long timeout);
179e3c1ac58SAndrey Ryabinin long wait_iff_congested(int sync, long timeout);
1801da177e4SLinus Torvalds 
/* Is the device so fast that asynchronous IO would be inefficient? */
static inline bool bdi_cap_synchronous_io(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_SYNCHRONOUS_IO;
}

/* Does the device require stable page writes (BDI_CAP_STABLE_WRITES)? */
static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_STABLE_WRITES;
}

/* Should dirty pages of this bdi ever be written back? */
static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

/* Do dirty pages of this bdi contribute to dirty accounting? */
static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

/* Are writeback pages of this bdi accounted automatically? */
static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

/* bdi_cap_writeback_dirty() for the bdi backing an address_space. */
static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
}

/* bdi_cap_account_dirty() for the bdi backing an address_space. */
static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
}
2171da177e4SLinus Torvalds 
/*
 * Give up the CPU once and report 0 (continue waiting).
 * NOTE(review): presumably used as a wait_on_bit()-style action
 * callback — confirm against the call sites.
 */
static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}
22303ba3782SJens Axboe 
22489e9b9e0STejun Heo #ifdef CONFIG_CGROUP_WRITEBACK
22589e9b9e0STejun Heo 
22652ebea74STejun Heo struct bdi_writeback_congested *
22752ebea74STejun Heo wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp);
22852ebea74STejun Heo void wb_congested_put(struct bdi_writeback_congested *congested);
22952ebea74STejun Heo struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
23052ebea74STejun Heo 				    struct cgroup_subsys_state *memcg_css,
23152ebea74STejun Heo 				    gfp_t gfp);
23252ebea74STejun Heo void wb_memcg_offline(struct mem_cgroup *memcg);
23352ebea74STejun Heo void wb_blkcg_offline(struct blkcg *blkcg);
234703c2708STejun Heo int inode_congested(struct inode *inode, int cong_bits);
23552ebea74STejun Heo 
/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * cgroup writeback requires support from both the bdi and filesystem.
 * Also, both memcg and iocg have to be on the default hierarchy.  Test
 * whether all conditions are met: memcg and io controllers on the
 * default hierarchy, dirty accounting enabled on the bdi, the bdi
 * advertising BDI_CAP_CGROUP_WRITEBACK, and the sb opting in via
 * SB_I_CGROUPWB.
 *
 * Note that the test result may change dynamically on the same inode
 * depending on how memcg and iocg are configured.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
		cgroup_subsys_on_dfl(io_cgrp_subsys) &&
		bdi_cap_account_dirty(bdi) &&
		(bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
		(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}
25789e9b9e0STejun Heo 
/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock() which protects the returned wb.
 * NULL if not found.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	struct cgroup_subsys_state *memcg_css;
	struct bdi_writeback *wb;

	memcg_css = task_css(current, memory_cgrp_id);

	/* tasks in the root memcg always use the bdi's embedded root wb */
	if (!memcg_css->parent)
		return &bdi->wb;

	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

	/*
	 * %current's blkcg equals the effective blkcg of its memcg.  No
	 * need to use the relatively expensive cgroup_get_e_css().
	 */
	if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
		return wb;
	return NULL;
}
28552ebea74STejun Heo 
/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg.  This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	struct bdi_writeback *wb;

	/* fast path: lockless lookup and tryget under RCU */
	rcu_read_lock();
	wb = wb_find_current(bdi);
	if (wb && unlikely(!wb_tryget(wb)))
		wb = NULL;
	rcu_read_unlock();

	/* slow path: pin %current's memcg css and create the wb */
	if (unlikely(!wb)) {
		struct cgroup_subsys_state *memcg_css;

		memcg_css = task_get_css(current, memory_cgrp_id);
		wb = wb_get_create(bdi, memcg_css, gfp);
		css_put(memcg_css);
	}
	return wb;
}
31552ebea74STejun Heo 
/**
 * inode_to_wb_is_valid - test whether an inode has a wb associated
 * @inode: inode of interest
 *
 * Returns %true if @inode has a wb associated (i.e. @inode->i_wb is
 * non-NULL).  May be called without any locking.
 */
static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return inode->i_wb;
}
327aaa2cacfSTejun Heo 
/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with.  The caller must be
 * holding either @inode->i_lock, the i_pages lock, or the
 * associated wb's list_lock.
 */
static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
	/* catch callers that hold none of the locks pinning i_wb */
	WARN_ON_ONCE(debug_locks &&
		     (!lockdep_is_held(&inode->i_lock) &&
		      !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) &&
		      !lockdep_is_held(&inode->i_wb->list_lock)));
#endif
	return inode->i_wb;
}
34652ebea74STejun Heo 
/**
 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
 * @inode: target inode
 * @cookie: output param, to be passed to the end function
 *
 * The caller wants to access the wb associated with @inode but isn't
 * holding inode->i_lock, the i_pages lock or wb->list_lock.  This
 * function determines the wb associated with @inode and ensures that the
 * association doesn't change until the transaction is finished with
 * unlocked_inode_to_wb_end().
 *
 * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and
 * can't sleep during the transaction.  IRQs may or may not be disabled on
 * return.
 */
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	rcu_read_lock();

	/*
	 * Paired with store_release in inode_switch_wb_work_fn() and
	 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
	 */
	cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;

	/* a switch is in flight: pin the association with the i_pages lock */
	if (unlikely(cookie->locked))
		xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags);

	/*
	 * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages
	 * lock.  inode_to_wb() will bark.  Deref directly.
	 */
	return inode->i_wb;
}
382682aa8e1STejun Heo 
/**
 * unlocked_inode_to_wb_end - end inode wb access transaction
 * @inode: target inode
 * @cookie: @cookie from unlocked_inode_to_wb_begin()
 */
static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
	/* drop the i_pages lock only if the begin side took it */
	if (unlikely(cookie->locked))
		xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags);

	rcu_read_unlock();
}
396682aa8e1STejun Heo 
39789e9b9e0STejun Heo #else	/* CONFIG_CGROUP_WRITEBACK */
39889e9b9e0STejun Heo 
/* Without cgroup writeback there is nothing to enable per inode. */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
	return false;
}

/* Single wb_congested per bdi: hand out another reference to it. */
static inline struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
	refcount_inc(&bdi->wb_congested->refcnt);
	return bdi->wb_congested;
}

/* Drop a wb_congested_get_create() reference; free on the last put. */
static inline void wb_congested_put(struct bdi_writeback_congested *congested)
{
	if (refcount_dec_and_test(&congested->refcnt))
		kfree(congested);
}
41652ebea74STejun Heo 
/* Without cgroup writeback every task maps to the bdi's root wb. */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	return &bdi->wb;
}

/* The root wb always exists, so "get or create" is just a lookup. */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	return &bdi->wb;
}

/* The inode's wb is the bdi's embedded root wb and thus always valid. */
static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return true;
}

/* Map an inode straight to its bdi's embedded root wb. */
static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return &inode_to_bdi(inode)->wb;
}

/* No wb switching can happen, so no pinning transaction is needed. */
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	return inode_to_wb(inode);
}

/* Counterpart of unlocked_inode_to_wb_begin(); nothing to undo. */
static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
}
448682aa8e1STejun Heo 
/* cgroup writeback disabled: no per-memcg wbs to shut down. */
static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

/* cgroup writeback disabled: no per-blkcg wbs to shut down. */
static inline void wb_blkcg_offline(struct blkcg *blkcg)
{
}

/* Only the root wb exists, so inode congestion equals bdi congestion. */
static inline int inode_congested(struct inode *inode, int cong_bits)
{
	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
}
461703c2708STejun Heo 
46289e9b9e0STejun Heo #endif	/* CONFIG_CGROUP_WRITEBACK */
46389e9b9e0STejun Heo 
/* Is the inode's wb congested for sync (read) requests? */
static inline int inode_read_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_sync_congested);
}

/* Is the inode's wb congested for async (write) requests? */
static inline int inode_write_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_async_congested);
}

/* Is the inode's wb congested in either direction? */
static inline int inode_rw_congested(struct inode *inode)
{
	return inode_congested(inode, (1 << WB_sync_congested) |
				      (1 << WB_async_congested));
}
479703c2708STejun Heo 
/* Congestion state of the bdi's root wb against @cong_bits. */
static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
{
	return wb_congested(&bdi->wb, cong_bits);
}

/* Is the bdi congested for sync (read) requests? */
static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_sync_congested);
}

/* Is the bdi congested for async (write) requests? */
static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_async_congested);
}

/* Is the bdi congested in either direction? */
static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << WB_sync_congested) |
				  (1 << WB_async_congested));
}
500ec8a6f26STejun Heo 
5011da177e4SLinus Torvalds #endif	/* _LINUX_BACKING_DEV_H */
502