xref: /linux-6.15/include/linux/backing-dev.h (revision 00df7d51)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up to
 * high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/writeback.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>

static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
{
	kref_get(&bdi->refcnt);
	return bdi;
}

struct backing_dev_info *bdi_get_by_id(u64 id);
void bdi_put(struct backing_dev_info *bdi);

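/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * bdi_get_by_id() returns a referenced bdi or NULL, and the reference
 * must be dropped with bdi_put() once the caller is done with it.
 */
static inline bool example_bdi_exists(u64 id)
{
	struct backing_dev_info *bdi = bdi_get_by_id(id);

	if (!bdi)
		return false;
	/* ... inspect @bdi while holding the reference ... */
	bdi_put(bdi);
	return true;
}
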
__printf(2, 3)
int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...);
__printf(2, 0)
int bdi_register_va(struct backing_dev_info *bdi, const char *fmt,
		    va_list args);
void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner);
void bdi_unregister(struct backing_dev_info *bdi);

struct backing_dev_info *bdi_alloc(int node_id);

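/*
 * Illustrative lifecycle sketch (hypothetical driver code, not part of
 * this header): allocate a bdi, register it under a device name, and
 * later tear it down with bdi_unregister() followed by the final
 * bdi_put().  NUMA_NO_NODE is assumed when there is no preferred node.
 */
static inline struct backing_dev_info *example_bdi_setup(int minor)
{
	struct backing_dev_info *bdi;

	bdi = bdi_alloc(NUMA_NO_NODE);
	if (!bdi)
		return NULL;
	if (bdi_register(bdi, "example%d", minor)) {
		bdi_put(bdi);	/* drops the reference from bdi_alloc() */
		return NULL;
	}
	return bdi;	/* teardown: bdi_unregister(bdi); bdi_put(bdi); */
}
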
void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);

void wb_wait_for_completion(struct wb_completion *done);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;
extern struct workqueue_struct *bdi_async_bio_wq;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
	return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	/*
	 * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
	 * any dirty wbs.  See wb_update_write_bandwidth().
	 */
	return atomic_long_read(&bdi->tot_write_bandwidth);
}

static inline void wb_stat_mod(struct bdi_writeback *wb,
				 enum wb_stat_item item, s64 amount)
{
	percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	wb_stat_mod(wb, item, 1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	wb_stat_mod(wb, item, -1);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_sum_positive(&wb->stat[item]);
}

extern void wb_writeout_inc(struct bdi_writeback *wb);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(void)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * WB_STAT_BATCH;
#else
	return 1;
#endif
}

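/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * wb_stat() is a cheap per-cpu read that can be off by up to
 * wb_stat_error(); when the threshold being compared against is of the
 * same order, fall back to the exact but more expensive wb_stat_sum().
 * This mirrors the pattern used by the dirty throttling code.
 */
static inline s64 example_wb_dirty_pages(struct bdi_writeback *wb,
					 unsigned long thresh)
{
	if (thresh < 2 * wb_stat_error())
		return wb_stat_sum(wb, WB_RECLAIMABLE) +
			wb_stat_sum(wb, WB_WRITEBACK);
	return wb_stat(wb, WB_RECLAIMABLE) + wb_stat(wb, WB_WRITEBACK);
}
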
/* BDI ratio is expressed as parts per 1000000 for finer granularity. */
#define BDI_RATIO_SCALE 10000

u64 bdi_get_max_bytes(struct backing_dev_info *bdi);
int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
int bdi_set_strict_limit(struct backing_dev_info *bdi, unsigned int strict_limit);

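/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * the ratios passed to bdi_set_min_ratio()/bdi_set_max_ratio() are
 * assumed to be whole percentages that get stored internally scaled by
 * BDI_RATIO_SCALE, i.e. 1% == 10000 parts per 1000000.
 */
static inline int example_cap_bdi_at_five_percent(struct backing_dev_info *bdi)
{
	/* 5% would be stored as 5 * BDI_RATIO_SCALE == 50000 ppm */
	return bdi_set_max_ratio(bdi, 5);
}
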
/*
 * Flags in backing_dev_info::capabilities
 *
 * BDI_CAP_WRITEBACK:		Supports dirty page writeback, and dirty pages
 *				should contribute to accounting
 * BDI_CAP_WRITEBACK_ACCT:	Automatically account writeback pages
 * BDI_CAP_STRICTLIMIT:		Keep number of dirty pages below bdi threshold
 */
#define BDI_CAP_WRITEBACK		(1 << 0)
#define BDI_CAP_WRITEBACK_ACCT		(1 << 1)
#define BDI_CAP_STRICTLIMIT		(1 << 2)

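/*
 * Illustrative sketch (hypothetical, not part of this header): bdi_alloc()
 * is assumed to set BDI_CAP_WRITEBACK | BDI_CAP_WRITEBACK_ACCT by default;
 * a driver that accounts writeback pages itself would clear the ACCT bit.
 */
static inline void example_disable_wb_acct(struct backing_dev_info *bdi)
{
	bdi->capabilities &= ~BDI_CAP_WRITEBACK_ACCT;
}
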
extern struct backing_dev_info noop_backing_dev_info;

int bdi_init(struct backing_dev_info *bdi);

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @wb: bdi_writeback of interest
 *
 * Determine whether there is writeback waiting to be handled against a
 * bdi_writeback.
 */
static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
	return test_bit(WB_writeback_running, &wb->state);
}

struct backing_dev_info *inode_to_bdi(struct inode *inode);

static inline bool mapping_can_writeback(struct address_space *mapping)
{
	return inode_to_bdi(mapping->host)->capabilities & BDI_CAP_WRITEBACK;
}

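/*
 * Illustrative sketch (hypothetical, not part of this header): paths that
 * dirty pages check mapping_can_writeback() first, since mappings backed
 * by a bdi without BDI_CAP_WRITEBACK (e.g. noop_backing_dev_info) need no
 * writeback accounting.
 */
static inline void example_account_dirtied(struct address_space *mapping)
{
	if (!mapping_can_writeback(mapping))
		return;
	/* ... charge WB_RECLAIMABLE, kick the flusher, etc. ... */
}
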
#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct cgroup_subsys_state *css);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * Cgroup writeback requires support from the filesystem.  Also, both memcg and
 * iocg have to be on the default hierarchy.  Test whether all conditions are
 * met.
 *
 * Note that the test result may change dynamically on the same inode
 * depending on how memcg and iocg are configured.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
		cgroup_subsys_on_dfl(io_cgrp_subsys) &&
		(bdi->capabilities & BDI_CAP_WRITEBACK) &&
		(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}

/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock(), which protects the returned wb.
 * Returns NULL if not found.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	struct cgroup_subsys_state *memcg_css;
	struct bdi_writeback *wb;

	memcg_css = task_css(current, memory_cgrp_id);
	if (!memcg_css->parent)
		return &bdi->wb;

	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

	/*
	 * %current's blkcg equals the effective blkcg of its memcg.  No
	 * need to use the relatively expensive cgroup_get_e_css().
	 */
	if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
		return wb;
	return NULL;
}

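/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * wb_find_current() must run under rcu_read_lock(), and its result is
 * only stable inside the read-side critical section unless a reference
 * is taken.
 */
static inline bool example_current_wb_has_dirty_io(struct backing_dev_info *bdi)
{
	struct bdi_writeback *wb;
	bool ret = false;

	rcu_read_lock();
	wb = wb_find_current(bdi);
	if (wb)
		ret = wb_has_dirty_io(wb);
	rcu_read_unlock();
	return ret;
}
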
/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg.  This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	struct bdi_writeback *wb;

	rcu_read_lock();
	wb = wb_find_current(bdi);
	if (wb && unlikely(!wb_tryget(wb)))
		wb = NULL;
	rcu_read_unlock();

	if (unlikely(!wb)) {
		struct cgroup_subsys_state *memcg_css;

		memcg_css = task_get_css(current, memory_cgrp_id);
		wb = wb_get_create(bdi, memcg_css, gfp);
		css_put(memcg_css);
	}
	return wb;
}

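/*
 * Illustrative sketch (hypothetical, not part of this header): the wb
 * returned by wb_get_create_current() carries a reference that must be
 * dropped with wb_put(); NULL indicates an allocation failure.
 */
static inline void example_use_current_wb(struct backing_dev_info *bdi)
{
	struct bdi_writeback *wb;

	wb = wb_get_create_current(bdi, GFP_KERNEL);
	if (!wb)
		return;
	/* ... queue writeback work against @wb ... */
	wb_put(wb);
}
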
/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with.  The caller must be
 * holding either @inode->i_lock, the i_pages lock, or the
 * associated wb's list_lock.
 */
static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(debug_locks &&
		     (!lockdep_is_held(&inode->i_lock) &&
		      !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) &&
		      !lockdep_is_held(&inode->i_wb->list_lock)));
#endif
	return inode->i_wb;
}

static inline struct bdi_writeback *inode_to_wb_wbc(
				struct inode *inode,
				struct writeback_control *wbc)
{
	/*
	 * If wbc does not have inode attached, it means cgroup writeback was
	 * disabled when wbc started. Just use the default wb in that case.
	 */
	return wbc->wb ? wbc->wb : &inode_to_bdi(inode)->wb;
}

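/*
 * Illustrative sketch (hypothetical, not part of this header): writeback
 * completion paths resolve the wb through the wbc so that pages written
 * on behalf of a cgroup are charged to that cgroup's wb rather than to
 * the bdi's root wb.
 */
static inline void example_account_written(struct inode *inode,
					   struct writeback_control *wbc)
{
	inc_wb_stat(inode_to_wb_wbc(inode, wbc), WB_WRITTEN);
}
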
/**
 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
 * @inode: target inode
 * @cookie: output param, to be passed to the end function
 *
 * The caller wants to access the wb associated with @inode but isn't
 * holding inode->i_lock, the i_pages lock or wb->list_lock.  This
 * function determines the wb associated with @inode and ensures that the
 * association doesn't change until the transaction is finished with
 * unlocked_inode_to_wb_end().
 *
 * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and
 * can't sleep during the transaction.  IRQs may or may not be disabled on
 * return.
 */
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	rcu_read_lock();

	/*
	 * Paired with store_release in inode_switch_wbs_work_fn() and
	 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
	 */
	cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;

	if (unlikely(cookie->locked))
		xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags);

	/*
	 * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages
	 * lock.  inode_to_wb() will bark.  Deref directly.
	 */
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_end - end inode wb access transaction
 * @inode: target inode
 * @cookie: @cookie from unlocked_inode_to_wb_begin()
 */
static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
	if (unlikely(cookie->locked))
		xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags);

	rcu_read_unlock();
}

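/*
 * Illustrative sketch (hypothetical helper, not part of this header): the
 * begin/end pair pins the inode<->wb association for a short lockless
 * read, e.g. sampling a wb stat, without taking i_lock or the i_pages
 * lock up front.  No sleeping is allowed inside the transaction.
 */
static inline s64 example_sample_wb_writeback(struct inode *inode)
{
	struct wb_lock_cookie cookie = {};
	struct bdi_writeback *wb;
	s64 val;

	wb = unlocked_inode_to_wb_begin(inode, &cookie);
	val = wb_stat(wb, WB_WRITEBACK);
	unlocked_inode_to_wb_end(inode, &cookie);
	return val;
}
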
#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
	return false;
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return &inode_to_bdi(inode)->wb;
}

static inline struct bdi_writeback *inode_to_wb_wbc(
				struct inode *inode,
				struct writeback_control *wbc)
{
	return inode_to_wb(inode);
}

static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	return inode_to_wb(inode);
}

static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct cgroup_subsys_state *css)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

const char *bdi_dev_name(struct backing_dev_info *bdi);

#endif	/* _LINUX_BACKING_DEV_H */