/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>

static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
{
	kref_get(&bdi->refcnt);
	return bdi;
}

void bdi_put(struct backing_dev_info *bdi);

__printf(2, 3)
int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...);
int bdi_register_va(struct backing_dev_info *bdi, const char *fmt,
		    va_list args);
int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner);
void bdi_unregister(struct backing_dev_info *bdi);

struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id);
static inline struct backing_dev_info *bdi_alloc(gfp_t gfp_mask)
{
	return bdi_alloc_node(gfp_mask, NUMA_NO_NODE);
}

void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
	return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	/*
	 * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
	 * any dirty wbs.  See wb_update_write_bandwidth().
	 */
	return atomic_long_read(&bdi->tot_write_bandwidth);
}

static inline void __add_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item, s64 amount)
{
	percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	__add_wb_stat(wb, item, 1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	__add_wb_stat(wb, item, -1);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_sum_positive(&wb->stat[item]);
}

extern void wb_writeout_inc(struct bdi_writeback *wb);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(struct bdi_writeback *wb)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * WB_STAT_BATCH;
#else
	return 1;
#endif
}

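/*
 * Illustrative sketch, not part of this header's API: wb_stat() reads are
 * cheap but may be off by up to wb_stat_error(), while wb_stat_sum() is
 * exact but expensive.  A hypothetical caller comparing a counter against
 * a limit could therefore filter with the error margin first, e.g.:
 */
static inline bool wb_stat_within_limit_example(struct bdi_writeback *wb,
						enum wb_stat_item item,
						s64 limit)
{
	/* approximate read first; may be off by up to wb_stat_error() */
	if (wb_stat(wb, item) + (s64)wb_stat_error(wb) <= limit)
		return true;
	/* near the limit: fall back to the exact but expensive sum */
	return wb_stat_sum(wb, item) <= limit;
}
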
int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capability
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs)
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
 * three flags into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY:	Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:	Don't write pages back
 * BDI_CAP_NO_ACCT_WB:		Don't automatically account writeback pages
 * BDI_CAP_STRICTLIMIT:		Keep number of dirty pages below bdi threshold.
 *
 * BDI_CAP_CGROUP_WRITEBACK:	Supports cgroup-aware writeback.
 */
#define BDI_CAP_NO_ACCT_DIRTY		0x00000001
#define BDI_CAP_NO_WRITEBACK		0x00000002
#define BDI_CAP_NO_ACCT_WB		0x00000004
#define BDI_CAP_STABLE_WRITES		0x00000008
#define BDI_CAP_STRICTLIMIT		0x00000010
#define BDI_CAP_CGROUP_WRITEBACK	0x00000020

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)

extern struct backing_dev_info noop_backing_dev_info;

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @wb: bdi_writeback of interest
 *
 * Determine whether there is writeback waiting to be handled against a
 * bdi_writeback.
 */
static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
	return test_bit(WB_writeback_running, &wb->state);
}

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb;

	if (!inode)
		return &noop_backing_dev_info;

	sb = inode->i_sb;
#ifdef CONFIG_BLOCK
	if (sb_is_blkdev_sb(sb))
		return I_BDEV(inode)->bd_bdi;
#endif
	return sb->s_bdi;
}

static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
{
	struct backing_dev_info *bdi = wb->bdi;

	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, cong_bits);
	return wb->congested->state & cong_bits;
}

long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct pglist_data *pgdat, int sync, long timeout);

static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_STABLE_WRITES;
}

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
}

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

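/*
 * Illustrative sketch, not part of this header's API: a hypothetical
 * writer may back off briefly while the device's async (write) side is
 * congested.  Real callers typically open-code this around their
 * submission loops using congestion_wait() as declared above.
 */
static inline void bdi_write_backoff_example(struct backing_dev_info *bdi)
{
	/* sleep ~20ms (HZ/50) if the async queue reports congestion */
	if (wb_congested(&bdi->wb, 1 << WB_async_congested))
		congestion_wait(BLK_RW_ASYNC, HZ / 50);
}
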
#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp);
void wb_congested_put(struct bdi_writeback_congested *congested);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct blkcg *blkcg);
int inode_congested(struct inode *inode, int cong_bits);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * cgroup writeback requires support from both the bdi and filesystem.
 * Also, both memcg and blkcg have to be on the default hierarchy.  Test
 * whether all conditions are met.
 *
 * Note that the test result may change dynamically on the same inode
 * depending on how memcg and blkcg are configured.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
		cgroup_subsys_on_dfl(io_cgrp_subsys) &&
		bdi_cap_account_dirty(bdi) &&
		(bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
		(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}

/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock() which protects the returned wb.
 * Returns NULL if not found.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	struct cgroup_subsys_state *memcg_css;
	struct bdi_writeback *wb;

	memcg_css = task_css(current, memory_cgrp_id);
	if (!memcg_css->parent)
		return &bdi->wb;

	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

	/*
	 * %current's blkcg equals the effective blkcg of its memcg.  No
	 * need to use the relatively expensive cgroup_get_e_css().
	 */
	if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
		return wb;
	return NULL;
}

/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg.  This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	struct bdi_writeback *wb;

	rcu_read_lock();
	wb = wb_find_current(bdi);
	if (wb && unlikely(!wb_tryget(wb)))
		wb = NULL;
	rcu_read_unlock();

	if (unlikely(!wb)) {
		struct cgroup_subsys_state *memcg_css;

		memcg_css = task_get_css(current, memory_cgrp_id);
		wb = wb_get_create(bdi, memcg_css, gfp);
		css_put(memcg_css);
	}
	return wb;
}

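/*
 * Illustrative sketch, not part of this header's API: a hypothetical
 * caller charging work to %current's wb pairs wb_get_create_current()
 * with wb_put() (from backing-dev-defs.h) once it is done with the wb.
 */
static inline void wb_charge_current_example(struct backing_dev_info *bdi)
{
	struct bdi_writeback *wb;

	wb = wb_get_create_current(bdi, GFP_ATOMIC);
	if (!wb)
		return;	/* allocation failed, a real caller would fall back */

	/* ... account or queue writeback work against @wb here ... */

	wb_put(wb);	/* drop the ref taken by wb_get_create_current() */
}
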
/**
 * inode_to_wb_is_valid - test whether an inode has a wb associated
 * @inode: inode of interest
 *
 * Returns %true if @inode has a wb associated.  May be called without any
 * locking.
 */
static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return inode->i_wb;
}

/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with.  The caller must be
 * holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the
 * associated wb's list_lock.
 */
static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(debug_locks &&
		     (!lockdep_is_held(&inode->i_lock) &&
		      !lockdep_is_held(&inode->i_mapping->tree_lock) &&
		      !lockdep_is_held(&inode->i_wb->list_lock)));
#endif
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
 * @inode: target inode
 * @lockedp: temp bool output param, to be passed to the end function
 *
 * The caller wants to access the wb associated with @inode but isn't
 * holding inode->i_lock, mapping->tree_lock or wb->list_lock.  This
 * function determines the wb associated with @inode and ensures that the
 * association doesn't change until the transaction is finished with
 * unlocked_inode_to_wb_end().
 *
 * The caller must call unlocked_inode_to_wb_end() with *@lockedp
 * afterwards and can't sleep during the transaction.  IRQs may or may not
 * be disabled on return.
 */
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
{
	rcu_read_lock();

	/*
	 * Paired with store_release in inode_switch_wb_work_fn() and
	 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
	 */
	*lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;

	if (unlikely(*lockedp))
		spin_lock_irq(&inode->i_mapping->tree_lock);

	/*
	 * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock.
	 * inode_to_wb() will bark.  Deref directly.
	 */
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_end - end inode wb access transaction
 * @inode: target inode
 * @locked: *@lockedp from unlocked_inode_to_wb_begin()
 */
static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
{
	if (unlikely(locked))
		spin_unlock_irq(&inode->i_mapping->tree_lock);

	rcu_read_unlock();
}

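/*
 * Illustrative sketch, not part of this header's API: the begin/end pair
 * above forms a non-sleeping transaction.  A hypothetical reader of wb
 * state would use it roughly as follows.
 */
static inline bool inode_wb_has_dirty_io_example(struct inode *inode)
{
	struct bdi_writeback *wb;
	bool locked, ret;

	wb = unlocked_inode_to_wb_begin(inode, &locked);
	/* any non-sleeping access to @wb is safe inside the transaction */
	ret = wb_has_dirty_io(wb);
	unlocked_inode_to_wb_end(inode, locked);

	return ret;
}
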
#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
	return false;
}

static inline struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
	atomic_inc(&bdi->wb_congested->refcnt);
	return bdi->wb_congested;
}

static inline void wb_congested_put(struct bdi_writeback_congested *congested)
{
	if (atomic_dec_and_test(&congested->refcnt))
		kfree(congested);
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	return &bdi->wb;
}

static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return true;
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return &inode_to_bdi(inode)->wb;
}

static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
{
	return inode_to_wb(inode);
}

static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
{
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct blkcg *blkcg)
{
}

static inline int inode_congested(struct inode *inode, int cong_bits)
{
	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

static inline int inode_read_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_sync_congested);
}

static inline int inode_write_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_async_congested);
}

static inline int inode_rw_congested(struct inode *inode)
{
	return inode_congested(inode, (1 << WB_sync_congested) |
				      (1 << WB_async_congested));
}

static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
{
	return wb_congested(&bdi->wb, cong_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << WB_sync_congested) |
				  (1 << WB_async_congested));
}

#endif	/* _LINUX_BACKING_DEV_H */