/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/writeback.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>

static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
{
        kref_get(&bdi->refcnt);
        return bdi;
}

struct backing_dev_info *bdi_get_by_id(u64 id);
void bdi_put(struct backing_dev_info *bdi);

__printf(2, 3)
int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...);
__printf(2, 0)
int bdi_register_va(struct backing_dev_info *bdi, const char *fmt,
                    va_list args);
void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner);
void bdi_unregister(struct backing_dev_info *bdi);

struct backing_dev_info *bdi_alloc(int node_id);

void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);

void wb_wait_for_completion(struct wb_completion *done);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;
extern struct workqueue_struct *bdi_async_bio_wq;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
        return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
        /*
         * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
         * any dirty wbs.  See wb_update_write_bandwidth().
         */
        return atomic_long_read(&bdi->tot_write_bandwidth);
}

static inline void wb_stat_mod(struct bdi_writeback *wb,
                               enum wb_stat_item item, s64 amount)
{
        percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
        wb_stat_mod(wb, item, 1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
        wb_stat_mod(wb, item, -1);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
        return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
        return percpu_counter_sum_positive(&wb->stat[item]);
}

extern void wb_writeout_inc(struct bdi_writeback *wb);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(void)
{
#ifdef CONFIG_SMP
        return nr_cpu_ids * WB_STAT_BATCH;
#else
        return 1;
#endif
}
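
/*
 * Illustrative sketch (not part of this header): writeback paths typically
 * bracket IO with the per-wb counters above, and readers allow for the
 * per-CPU batching slack bounded by wb_stat_error().  Roughly:
 *
 *      inc_wb_stat(wb, WB_WRITEBACK);  // page handed to the device
 *      // ... IO completes ...
 *      dec_wb_stat(wb, WB_WRITEBACK);
 *      wb_writeout_inc(wb);
 *
 *      // wb_stat() may be off by up to wb_stat_error(), so only act
 *      // once the counter exceeds the limit beyond that slack:
 *      if (wb_stat(wb, WB_WRITEBACK) > limit + wb_stat_error())
 *              throttle();             // limit and throttle() are made up
 */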

/* BDI ratio is expressed as parts per 1000000 for finer granularity. */
#define BDI_RATIO_SCALE 10000

u64 bdi_get_max_bytes(struct backing_dev_info *bdi);
int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
int bdi_set_max_bytes(struct backing_dev_info *bdi, u64 max_bytes);
int bdi_set_strict_limit(struct backing_dev_info *bdi, unsigned int strict_limit);

/*
 * Flags in backing_dev_info::capability
 *
 * BDI_CAP_WRITEBACK:           Supports dirty page writeback, and dirty pages
 *                              should contribute to accounting
 * BDI_CAP_WRITEBACK_ACCT:      Automatically account writeback pages
 * BDI_CAP_STRICTLIMIT:         Keep number of dirty pages below bdi threshold
 */
#define BDI_CAP_WRITEBACK               (1 << 0)
#define BDI_CAP_WRITEBACK_ACCT          (1 << 1)
#define BDI_CAP_STRICTLIMIT             (1 << 2)
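
/*
 * Illustrative sketch of how a driver might tie the interfaces above
 * together (hypothetical code; "example-%d", instance_id and the error
 * handling are made up for illustration):
 *
 *      struct backing_dev_info *bdi;
 *      int err;
 *
 *      bdi = bdi_alloc(NUMA_NO_NODE);
 *      if (!bdi)
 *              return -ENOMEM;
 *      bdi->capabilities = BDI_CAP_WRITEBACK | BDI_CAP_WRITEBACK_ACCT;
 *      err = bdi_register(bdi, "example-%d", instance_id);
 *      if (err) {
 *              bdi_put(bdi);
 *              return err;
 *      }
 *
 * Note the arithmetic implied by BDI_RATIO_SCALE: a min_ratio of 1 (one
 * percent) corresponds to 1 * BDI_RATIO_SCALE = 10000 parts per 1000000.
 */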

extern struct backing_dev_info noop_backing_dev_info;

int bdi_init(struct backing_dev_info *bdi);

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @wb: bdi_writeback of interest
 *
 * Determine whether there is writeback waiting to be handled against a
 * bdi_writeback.
 */
static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
        return test_bit(WB_writeback_running, &wb->state);
}

struct backing_dev_info *inode_to_bdi(struct inode *inode);

static inline bool mapping_can_writeback(struct address_space *mapping)
{
        return inode_to_bdi(mapping->host)->capabilities & BDI_CAP_WRITEBACK;
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
                                    struct cgroup_subsys_state *memcg_css);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
                                    struct cgroup_subsys_state *memcg_css,
                                    gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct cgroup_subsys_state *css);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * Cgroup writeback requires support from the filesystem.  Also, both memcg
 * and iocg have to be on the default hierarchy.  Test whether all conditions
 * are met.
 *
 * Note that the test result may change dynamically on the same inode
 * depending on how memcg and iocg are configured.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
        struct backing_dev_info *bdi = inode_to_bdi(inode);

        return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
                cgroup_subsys_on_dfl(io_cgrp_subsys) &&
                (bdi->capabilities & BDI_CAP_WRITEBACK) &&
                (inode->i_sb->s_iflags & SB_I_CGROUPWB);
}

/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock() which protects the returned wb.
 * NULL if not found.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
        struct cgroup_subsys_state *memcg_css;
        struct bdi_writeback *wb;

        memcg_css = task_css(current, memory_cgrp_id);
        if (!memcg_css->parent)
                return &bdi->wb;

        wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

        /*
         * %current's blkcg equals the effective blkcg of its memcg.  No
         * need to use the relatively expensive cgroup_get_e_css().
         */
        if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
                return wb;
        return NULL;
}

/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg.  This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
        struct bdi_writeback *wb;

        rcu_read_lock();
        wb = wb_find_current(bdi);
        if (wb && unlikely(!wb_tryget(wb)))
                wb = NULL;
        rcu_read_unlock();

        if (unlikely(!wb)) {
                struct cgroup_subsys_state *memcg_css;

                memcg_css = task_get_css(current, memory_cgrp_id);
                wb = wb_get_create(bdi, memcg_css, gfp);
                css_put(memcg_css);
        }
        return wb;
}

/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with.  The caller must be
 * holding either @inode->i_lock, the i_pages lock, or the
 * associated wb's list_lock.
 */
static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
        WARN_ON_ONCE(debug_locks &&
                     (!lockdep_is_held(&inode->i_lock) &&
                      !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) &&
                      !lockdep_is_held(&inode->i_wb->list_lock)));
#endif
        return inode->i_wb;
}

static inline struct bdi_writeback *inode_to_wb_wbc(
                                struct inode *inode,
                                struct writeback_control *wbc)
{
        /*
         * If wbc does not have inode attached, it means cgroup writeback was
         * disabled when wbc started.  Just use the default wb in that case.
         */
        return wbc->wb ? wbc->wb : &inode_to_bdi(inode)->wb;
}

/**
 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
 * @inode: target inode
 * @cookie: output param, to be passed to the end function
 *
 * The caller wants to access the wb associated with @inode but isn't
 * holding inode->i_lock, the i_pages lock or wb->list_lock.  This
 * function determines the wb associated with @inode and ensures that the
 * association doesn't change until the transaction is finished with
 * unlocked_inode_to_wb_end().
 *
 * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards
 * and can't sleep during the transaction.  IRQs may or may not be disabled
 * on return.
 */
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
        rcu_read_lock();

        /*
         * Paired with store_release in inode_switch_wbs_work_fn() and
         * ensures that we see the new wb if we see cleared I_WB_SWITCH.
         */
        cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;

        if (unlikely(cookie->locked))
                xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags);

        /*
         * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages
         * lock.  inode_to_wb() will bark.  Deref directly.
         */
        return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_end - end inode wb access transaction
 * @inode: target inode
 * @cookie: @cookie from unlocked_inode_to_wb_begin()
 */
static inline void unlocked_inode_to_wb_end(struct inode *inode,
                                            struct wb_lock_cookie *cookie)
{
        if (unlikely(cookie->locked))
                xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags);

        rcu_read_unlock();
}
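
/*
 * Illustrative calling pattern for the transaction helpers above (a
 * sketch only; dirty throttling in mm/page-writeback.c is a real user):
 *
 *      struct wb_lock_cookie cookie = {};
 *      struct bdi_writeback *wb;
 *
 *      wb = unlocked_inode_to_wb_begin(inode, &cookie);
 *      // ... access wb; no sleeping, the inode->wb association is stable ...
 *      unlocked_inode_to_wb_end(inode, &cookie);
 */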

#else   /* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
        return false;
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
        return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
        return &bdi->wb;
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
        return &inode_to_bdi(inode)->wb;
}

static inline struct bdi_writeback *inode_to_wb_wbc(
                                struct inode *inode,
                                struct writeback_control *wbc)
{
        return inode_to_wb(inode);
}

static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
        return inode_to_wb(inode);
}

static inline void unlocked_inode_to_wb_end(struct inode *inode,
                                            struct wb_lock_cookie *cookie)
{
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct cgroup_subsys_state *css)
{
}

#endif  /* CONFIG_CGROUP_WRITEBACK */

const char *bdi_dev_name(struct backing_dev_info *bdi);

#endif  /* _LINUX_BACKING_DEV_H */