/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up to
 * high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>

int __must_check bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);

__printf(3, 4)
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
			bool range_cyclic, enum wb_reason reason);
void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
	return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	/*
	 * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
	 * any dirty wbs.  See wb_update_write_bandwidth().
	 */
	return atomic_long_read(&bdi->tot_write_bandwidth);
}

static inline void __add_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item, s64 amount)
{
	__percpu_counter_add(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void __inc_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item)
{
	__add_wb_stat(wb, item, 1);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_wb_stat(wb, item);
	local_irq_restore(flags);
}

static inline void __dec_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item)
{
	__add_wb_stat(wb, item, -1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_wb_stat(wb, item);
	local_irq_restore(flags);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 __wb_stat_sum(struct bdi_writeback *wb,
				enum wb_stat_item item)
{
	return percpu_counter_sum_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
	s64 sum;
	unsigned long flags;

	local_irq_save(flags);
	sum = __wb_stat_sum(wb, item);
	local_irq_restore(flags);

	return sum;
}

extern void wb_writeout_inc(struct bdi_writeback *wb);

/*
 * Maximal error of a stat counter.  Each CPU may batch up to WB_STAT_BATCH
 * events in its local counter before folding them into the global count,
 * so a wb_stat() read may deviate from the true value by at most
 * nr_cpu_ids * WB_STAT_BATCH.
 */
static inline unsigned long wb_stat_error(struct bdi_writeback *wb)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * WB_STAT_BATCH;
#else
	return 1;
#endif
}
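
/*
 * Example (a sketch, not part of the original header; "limit" is a
 * hypothetical threshold): because of the batching error above, a
 * threshold check can use the cheap wb_stat() first and fall back to
 * the exact but expensive wb_stat_sum() only when the approximate read
 * lands inside the error margin:
 *
 *	if (wb_stat(wb, WB_WRITEBACK) + wb_stat_error(wb) < limit)
 *		return false;		definitely below the limit
 *	return wb_stat_sum(wb, WB_WRITEBACK) >= limit;
 *
 * mm/page-writeback.c applies a similar trick around its dirty
 * thresholds.
 */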

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capability
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs).
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  BDI_CAP_NO_ACCT_AND_WRITEBACK combines the three
 * flags into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:   Don't write pages back
 * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
 * BDI_CAP_STABLE_WRITES:  Pages must not be modified while under writeback
 * BDI_CAP_STRICTLIMIT:    Keep number of dirty pages below bdi threshold.
 *
 * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback.
 */
#define BDI_CAP_NO_ACCT_DIRTY	0x00000001
#define BDI_CAP_NO_WRITEBACK	0x00000002
#define BDI_CAP_NO_ACCT_WB	0x00000004
#define BDI_CAP_STABLE_WRITES	0x00000008
#define BDI_CAP_STRICTLIMIT	0x00000010
#define BDI_CAP_CGROUP_WRITEBACK 0x00000020

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
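
/*
 * Example (a sketch, not from this header): a RAM-backed filesystem
 * whose dirty pages should be neither written back nor accounted would
 * set up its bdi along these lines:
 *
 *	bdi->capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK;
 *
 * noop_backing_dev_info below is initialized this way in
 * mm/backing-dev.c, which is what makes it a safe fallback bdi for
 * inodes that have no real backing device.
 */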

extern struct backing_dev_info noop_backing_dev_info;

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @wb: bdi_writeback of interest
 *
 * Determine whether there is writeback waiting to be handled against a
 * bdi_writeback.
 */
static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
	return test_bit(WB_writeback_running, &wb->state);
}

/* Return the bdi an inode writes back through, or the noop bdi if none. */
static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb;

	if (!inode)
		return &noop_backing_dev_info;

	sb = inode->i_sb;
#ifdef CONFIG_BLOCK
	if (sb_is_blkdev_sb(sb))
		return blk_get_backing_dev_info(I_BDEV(inode));
#endif
	return sb->s_bdi;
}

/* Test @wb's congestion state against @cong_bits, e.g. 1 << WB_sync_congested. */
static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
{
	struct backing_dev_info *bdi = wb->bdi;

	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, cong_bits);
	return wb->congested->state & cong_bits;
}

long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct zone *zone, int sync, long timeout);
int pdflush_proc_obsolete(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp, loff_t *ppos);

static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_STABLE_WRITES;
}

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
}

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp);
void wb_congested_put(struct bdi_writeback_congested *congested);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct blkcg *blkcg);
int inode_congested(struct inode *inode, int cong_bits);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * cgroup writeback requires support from both the bdi and filesystem.
 * Also, both memcg and blkcg have to be on the default hierarchy.  Test
 * whether all conditions are met.
 *
 * Note that the test result may change dynamically on the same inode
 * depending on how memcg and blkcg are configured.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	return cgroup_on_dfl(mem_cgroup_root_css->cgroup) &&
		cgroup_on_dfl(blkcg_root_css->cgroup) &&
		bdi_cap_account_dirty(bdi) &&
		(bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
		(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}

/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock() which protects the returned wb.
 * Returns NULL if not found.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	struct cgroup_subsys_state *memcg_css;
	struct bdi_writeback *wb;

	memcg_css = task_css(current, memory_cgrp_id);
	if (!memcg_css->parent)
		return &bdi->wb;

	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

	/*
	 * %current's blkcg equals the effective blkcg of its memcg.  No
	 * need to use the relatively expensive cgroup_get_e_css().
	 */
	if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
		return wb;
	return NULL;
}
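
/*
 * Example (a sketch, not from this header): a caller that only needs to
 * peek at %current's wb can call wb_find_current() inside an RCU read
 * section:
 *
 *	rcu_read_lock();
 *	wb = wb_find_current(bdi);
 *	if (wb)
 *		... use wb; it is only guaranteed to stay alive here ...
 *	rcu_read_unlock();
 *
 * To hold on to the wb past the RCU section it must be pinned with
 * wb_tryget(), which is exactly what wb_get_create_current() below does.
 */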

/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg.  This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	struct bdi_writeback *wb;

	rcu_read_lock();
	wb = wb_find_current(bdi);
	if (wb && unlikely(!wb_tryget(wb)))
		wb = NULL;
	rcu_read_unlock();

	if (unlikely(!wb)) {
		struct cgroup_subsys_state *memcg_css;

		memcg_css = task_get_css(current, memory_cgrp_id);
		wb = wb_get_create(bdi, memcg_css, gfp);
		css_put(memcg_css);
	}
	return wb;
}

/**
 * inode_to_wb_is_valid - test whether an inode has a wb associated
 * @inode: inode of interest
 *
 * Returns %true if @inode has a wb associated.  May be called without any
 * locking.
 */
static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return inode->i_wb;
}

/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with.  The caller must be
 * holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the
 * associated wb's list_lock.
 */
static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(debug_locks &&
		     (!lockdep_is_held(&inode->i_lock) &&
		      !lockdep_is_held(&inode->i_mapping->tree_lock) &&
		      !lockdep_is_held(&inode->i_wb->list_lock)));
#endif
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
 * @inode: target inode
 * @lockedp: temp bool output param, to be passed to the end function
 *
 * The caller wants to access the wb associated with @inode but isn't
 * holding inode->i_lock, mapping->tree_lock or wb->list_lock.  This
 * function determines the wb associated with @inode and ensures that the
 * association doesn't change until the transaction is finished with
 * unlocked_inode_to_wb_end().
 *
 * The caller must call unlocked_inode_to_wb_end() with *@lockedp
 * afterwards and can't sleep during the transaction.  IRQs may or may
 * not be disabled on return.
 */
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
{
	rcu_read_lock();

	/*
	 * Paired with store_release in inode_switch_wb_work_fn() and
	 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
	 */
	*lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;

	if (unlikely(*lockedp))
		spin_lock_irq(&inode->i_mapping->tree_lock);

	/*
	 * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock.
	 * inode_to_wb()'s lockdep assertion would trip here, so deref
	 * i_wb directly.
	 */
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_end - end inode wb access transaction
 * @inode: target inode
 * @locked: *@lockedp from unlocked_inode_to_wb_begin()
 */
static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
{
	if (unlikely(locked))
		spin_unlock_irq(&inode->i_mapping->tree_lock);

	rcu_read_unlock();
}
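
/*
 * Example (a sketch, not from this header): updating a wb stat from a
 * path that holds none of the locks inode_to_wb() accepts.  The begin /
 * end pair pins the inode->wb association for the duration:
 *
 *	struct bdi_writeback *wb;
 *	bool locked;
 *
 *	wb = unlocked_inode_to_wb_begin(inode, &locked);
 *	dec_wb_stat(wb, WB_DIRTIED);
 *	unlocked_inode_to_wb_end(inode, locked);
 *
 * mm/page-writeback.c uses this pattern when re-dirtying pages.
 */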

/* Iteration state for bdi_for_each_wb(). */
struct wb_iter {
	int			start_memcg_id;
	struct radix_tree_iter	tree_iter;
	void			**slot;
};

static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter,
						   struct backing_dev_info *bdi)
{
	struct radix_tree_iter *titer = &iter->tree_iter;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (iter->start_memcg_id >= 0) {
		iter->slot = radix_tree_iter_init(titer, iter->start_memcg_id);
		iter->start_memcg_id = -1;
	} else {
		iter->slot = radix_tree_next_slot(iter->slot, titer, 0);
	}

	if (!iter->slot)
		iter->slot = radix_tree_next_chunk(&bdi->cgwb_tree, titer, 0);
	if (iter->slot)
		return *iter->slot;
	return NULL;
}

static inline struct bdi_writeback *__wb_iter_init(struct wb_iter *iter,
						   struct backing_dev_info *bdi,
						   int start_memcg_id)
{
	iter->start_memcg_id = start_memcg_id;

	if (start_memcg_id)
		return __wb_iter_next(iter, bdi);
	else
		return &bdi->wb;
}

/**
 * bdi_for_each_wb - walk all wb's of a bdi in ascending memcg ID order
 * @wb_cur: cursor struct bdi_writeback pointer
 * @bdi: bdi to walk wb's of
 * @iter: pointer to struct wb_iter to be used as iteration buffer
 * @start_memcg_id: memcg ID to start iteration from
 *
 * Iterate @wb_cur through the wb's (bdi_writeback's) of @bdi in ascending
 * memcg ID order starting from @start_memcg_id.  @iter is struct wb_iter
 * to be used as temp storage during iteration.  rcu_read_lock() must be
 * held throughout iteration.
 */
#define bdi_for_each_wb(wb_cur, bdi, iter, start_memcg_id)		\
	for ((wb_cur) = __wb_iter_init(iter, bdi, start_memcg_id);	\
	     (wb_cur); (wb_cur) = __wb_iter_next(iter, bdi))
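
/*
 * Example (a sketch, not from this header): kicking writeback on every
 * wb of a bdi that has dirty data, starting from memcg ID 0:
 *
 *	struct bdi_writeback *wb;
 *	struct wb_iter iter;
 *
 *	rcu_read_lock();
 *	bdi_for_each_wb(wb, bdi, &iter, 0)
 *		if (wb_has_dirty_io(wb))
 *			wb_start_writeback(wb, nr_pages, true,
 *					   WB_REASON_LAPTOP_TIMER);
 *	rcu_read_unlock();
 *
 * mm/page-writeback.c walks a bdi's wb's this way from the laptop-mode
 * timer.
 */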

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
	return false;
}

static inline struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
	atomic_inc(&bdi->wb_congested->refcnt);
	return bdi->wb_congested;
}

static inline void wb_congested_put(struct bdi_writeback_congested *congested)
{
	if (atomic_dec_and_test(&congested->refcnt))
		kfree(congested);
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	return &bdi->wb;
}

static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return true;
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return &inode_to_bdi(inode)->wb;
}

static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
{
	return inode_to_wb(inode);
}

static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
{
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct blkcg *blkcg)
{
}

struct wb_iter {
	int		next_id;
};

#define bdi_for_each_wb(wb_cur, bdi, iter, start_memcg_id)		\
	for ((iter)->next_id = (start_memcg_id);			\
	     ({	(wb_cur) = !(iter)->next_id++ ? &(bdi)->wb : NULL; }); )

static inline int inode_congested(struct inode *inode, int cong_bits)
{
	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

static inline int inode_read_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_sync_congested);
}

static inline int inode_write_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_async_congested);
}

static inline int inode_rw_congested(struct inode *inode)
{
	return inode_congested(inode, (1 << WB_sync_congested) |
				      (1 << WB_async_congested));
}

static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
{
	return wb_congested(&bdi->wb, cong_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << WB_sync_congested) |
				  (1 << WB_async_congested));
}
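
/*
 * Example (a sketch, not from this header): best-effort paths such as
 * asynchronous readahead typically back off instead of queuing more IO
 * when the backing device is already congested:
 *
 *	if (inode_read_congested(inode))
 *		return;		skip the optional readahead
 *
 * Synchronous paths instead throttle with congestion_wait() and retry.
 */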

#endif	/* _LINUX_BACKING_DEV_H */