/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/writeback.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>

struct blkcg;

static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
{
	kref_get(&bdi->refcnt);
	return bdi;
}

struct backing_dev_info *bdi_get_by_id(u64 id);
void bdi_put(struct backing_dev_info *bdi);

__printf(2, 3)
int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...);
__printf(2, 0)
int bdi_register_va(struct backing_dev_info *bdi, const char *fmt,
		    va_list args);
void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner);
void bdi_unregister(struct backing_dev_info *bdi);

struct backing_dev_info *bdi_alloc(int node_id);

void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);

void wb_wait_for_completion(struct wb_completion *done);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;
extern struct workqueue_struct *bdi_async_bio_wq;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
	return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	/*
	 * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
	 * any dirty wbs.  See wb_update_write_bandwidth().
	 */
	return atomic_long_read(&bdi->tot_write_bandwidth);
}

static inline void __add_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item, s64 amount)
{
	percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	__add_wb_stat(wb, item, 1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	__add_wb_stat(wb, item, -1);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_sum_positive(&wb->stat[item]);
}

extern void wb_writeout_inc(struct bdi_writeback *wb);
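/*
 * Example: per-cpu writeback stat accounting.  Illustrative sketch only,
 * not part of the API above; "wb" stands for whichever bdi_writeback the
 * caller is already operating on.
 *
 *	s64 nr;
 *
 *	inc_wb_stat(wb, WB_WRITEBACK);		page entered writeback
 *	...
 *	dec_wb_stat(wb, WB_WRITEBACK);		writeback completed
 *
 *	Cheap approximate read, may deviate by up to wb_stat_error():
 *
 *	nr = wb_stat(wb, WB_WRITEBACK);
 *
 *	Exact but expensive read which sums all per-cpu counters:
 *
 *	nr = wb_stat_sum(wb, WB_WRITEBACK);
 */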
/*
 * maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(void)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * WB_STAT_BATCH;
#else
	return 1;
#endif
}

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capabilities
 *
 * BDI_CAP_WRITEBACK:		Supports dirty page writeback, and dirty pages
 *				should contribute to accounting
 * BDI_CAP_WRITEBACK_ACCT:	Automatically account writeback pages
 * BDI_CAP_STRICTLIMIT:		Keep number of dirty pages below bdi threshold
 */
#define BDI_CAP_WRITEBACK		(1 << 0)
#define BDI_CAP_WRITEBACK_ACCT		(1 << 1)
#define BDI_CAP_STRICTLIMIT		(1 << 2)
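/*
 * Example: typical bdi setup for a driver that wants dirty page writeback
 * with automatic accounting.  Illustrative sketch only; the "myfs" name
 * and instance_id are hypothetical.
 *
 *	struct backing_dev_info *bdi;
 *	int err;
 *
 *	bdi = bdi_alloc(NUMA_NO_NODE);
 *	if (!bdi)
 *		return -ENOMEM;
 *	bdi->capabilities = BDI_CAP_WRITEBACK | BDI_CAP_WRITEBACK_ACCT;
 *	err = bdi_register(bdi, "myfs-%d", instance_id);
 *	if (err) {
 *		bdi_put(bdi);
 *		return err;
 *	}
 *
 *	Teardown is the reverse: bdi_unregister(bdi) followed by
 *	bdi_put(bdi) to drop the allocation reference.
 */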
extern struct backing_dev_info noop_backing_dev_info;

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @wb: bdi_writeback of interest
 *
 * Determine whether there is writeback waiting to be handled against a
 * bdi_writeback.
 */
static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
	return test_bit(WB_writeback_running, &wb->state);
}

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb;

	if (!inode)
		return &noop_backing_dev_info;

	sb = inode->i_sb;
#ifdef CONFIG_BLOCK
	if (sb_is_blkdev_sb(sb))
		return I_BDEV(inode)->bd_disk->bdi;
#endif
	return sb->s_bdi;
}

static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
{
	return wb->congested & cong_bits;
}

long congestion_wait(int sync, long timeout);
long wait_iff_congested(int sync, long timeout);

static inline bool mapping_can_writeback(struct address_space *mapping)
{
	return inode_to_bdi(mapping->host)->capabilities & BDI_CAP_WRITEBACK;
}

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct blkcg *blkcg);
int inode_congested(struct inode *inode, int cong_bits);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * Cgroup writeback requires support from the filesystem.  Also, both memcg
 * and iocg have to be on the default hierarchy.  Test whether all conditions
 * are met.
 *
 * Note that the test result may change dynamically on the same inode
 * depending on how memcg and iocg are configured.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
		cgroup_subsys_on_dfl(io_cgrp_subsys) &&
		(bdi->capabilities & BDI_CAP_WRITEBACK) &&
		(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}

/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock() which protects the returned wb.
 * Returns NULL if not found.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	struct cgroup_subsys_state *memcg_css;
	struct bdi_writeback *wb;

	memcg_css = task_css(current, memory_cgrp_id);
	if (!memcg_css->parent)
		return &bdi->wb;

	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

	/*
	 * %current's blkcg equals the effective blkcg of its memcg.  No
	 * need to use the relatively expensive cgroup_get_e_css().
	 */
	if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
		return wb;
	return NULL;
}

/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg.  This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	struct bdi_writeback *wb;

	rcu_read_lock();
	wb = wb_find_current(bdi);
	if (wb && unlikely(!wb_tryget(wb)))
		wb = NULL;
	rcu_read_unlock();

	if (unlikely(!wb)) {
		struct cgroup_subsys_state *memcg_css;

		memcg_css = task_get_css(current, memory_cgrp_id);
		wb = wb_get_create(bdi, memcg_css, gfp);
		css_put(memcg_css);
	}
	return wb;
}

/**
 * inode_to_wb_is_valid - test whether an inode has a wb associated
 * @inode: inode of interest
 *
 * Returns %true if @inode has a wb associated.  May be called without any
 * locking.
 */
static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return inode->i_wb;
}

/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with.  The caller must be
 * holding either @inode->i_lock, the i_pages lock, or the
 * associated wb's list_lock.
 */
static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(debug_locks &&
		     (!lockdep_is_held(&inode->i_lock) &&
		      !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) &&
		      !lockdep_is_held(&inode->i_wb->list_lock)));
#endif
	return inode->i_wb;
}

static inline struct bdi_writeback *inode_to_wb_wbc(struct inode *inode,
						    struct writeback_control *wbc)
{
	/*
	 * If wbc does not have an inode attached, it means cgroup writeback
	 * was disabled when wbc started.  Just use the default wb in that
	 * case.
	 */
	return wbc->wb ? wbc->wb : &inode_to_bdi(inode)->wb;
}
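/*
 * Example: pinning the wb for %current before using it.  Illustrative
 * sketch only; the charging step stands in for whatever the caller does
 * with the wb.
 *
 *	struct bdi_writeback *wb;
 *
 *	wb = wb_get_create_current(bdi, GFP_ATOMIC);
 *	if (wb) {
 *		... charge the dirtied page against wb ...
 *		wb_put(wb);	drop the ref from wb_get_create_current()
 *	}
 */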
/**
 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
 * @inode: target inode
 * @cookie: output param, to be passed to the end function
 *
 * The caller wants to access the wb associated with @inode but isn't
 * holding inode->i_lock, the i_pages lock or wb->list_lock.  This
 * function determines the wb associated with @inode and ensures that the
 * association doesn't change until the transaction is finished with
 * unlocked_inode_to_wb_end().
 *
 * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards
 * and can't sleep during the transaction.  IRQs may or may not be disabled
 * on return.
 */
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	rcu_read_lock();

	/*
	 * Paired with the store_release in inode_switch_wbs_work_fn(); this
	 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
	 */
	cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;

	if (unlikely(cookie->locked))
		xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags);

	/*
	 * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages
	 * lock.  inode_to_wb() will bark.  Deref directly.
	 */
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_end - end inode wb access transaction
 * @inode: target inode
 * @cookie: @cookie from unlocked_inode_to_wb_begin()
 */
static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
	if (unlikely(cookie->locked))
		xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags);

	rcu_read_unlock();
}
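/*
 * Example: a complete unlocked wb access transaction.  Illustrative
 * sketch only; the stat update stands in for any non-sleeping access to
 * the wb.
 *
 *	struct wb_lock_cookie cookie = {};
 *	struct bdi_writeback *wb;
 *
 *	wb = unlocked_inode_to_wb_begin(inode, &cookie);
 *	inc_wb_stat(wb, WB_DIRTIED);	must not sleep in here
 *	unlocked_inode_to_wb_end(inode, &cookie);
 */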
#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
	return false;
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	return &bdi->wb;
}

static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return true;
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return &inode_to_bdi(inode)->wb;
}

static inline struct bdi_writeback *inode_to_wb_wbc(struct inode *inode,
						    struct writeback_control *wbc)
{
	return inode_to_wb(inode);
}

static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	return inode_to_wb(inode);
}

static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct blkcg *blkcg)
{
}

static inline int inode_congested(struct inode *inode, int cong_bits)
{
	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

static inline int inode_read_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_sync_congested);
}

static inline int inode_write_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_async_congested);
}

static inline int inode_rw_congested(struct inode *inode)
{
	return inode_congested(inode, (1 << WB_sync_congested) |
				      (1 << WB_async_congested));
}

static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
{
	return wb_congested(&bdi->wb, cong_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << WB_sync_congested) |
				  (1 << WB_async_congested));
}

const char *bdi_dev_name(struct backing_dev_info *bdi);

#endif	/* _LINUX_BACKING_DEV_H */