/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/writeback.h
 */
#ifndef WRITEBACK_H
#define WRITEBACK_H

#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/flex_proportions.h>
#include <linux/backing-dev-defs.h>
#include <linux/blk_types.h>

struct bio;

DECLARE_PER_CPU(int, dirty_throttle_leaks);

/*
 * The 1/4 region under the global dirty thresh is for smooth dirty throttling:
 *
 *	(thresh - thresh/DIRTY_FULL_SCOPE, thresh)
 *
 * Further beyond, all dirtying tasks will enter a loop, waiting (possibly for
 * a long time) for the dirty pages to drop, unless they have written enough
 * pages.
 *
 * The global dirty threshold is normally equal to the global dirty limit,
 * except when the system suddenly allocates a lot of anonymous memory and
 * knocks down the global dirty threshold quickly, in which case the global
 * dirty limit will follow down slowly to prevent livelocking all dirtier tasks.
 */
#define DIRTY_SCOPE		8
#define DIRTY_FULL_SCOPE	(DIRTY_SCOPE / 2)

struct backing_dev_info;

/*
 * fs/fs-writeback.c
 */
enum writeback_sync_modes {
	WB_SYNC_NONE,	/* Don't wait on anything */
	WB_SYNC_ALL,	/* Wait on every mapping */
};

/*
 * A control structure which tells the writeback code what to do.  These are
 * always on the stack, and hence need no locking.  They are always initialised
 * in a manner such that unspecified fields are set to zero.
 */
struct writeback_control {
	long nr_to_write;		/* Write this many pages, and decrement
					   this for each page written */
	long pages_skipped;		/* Pages which were not written */

	/*
	 * For a_ops->writepages(): if start or end are non-zero then this is
	 * a hint that the filesystem need only write out the pages inside that
	 * byterange.  The byte at `end' is included in the writeout request.
	 */
	loff_t range_start;
	loff_t range_end;

	enum writeback_sync_modes sync_mode;

	unsigned for_kupdate:1;		/* A kupdate writeback */
	unsigned for_background:1;	/* A background writeback */
	unsigned tagged_writepages:1;	/* tag-and-write to avoid livelock */
	unsigned for_reclaim:1;		/* Invoked from the page allocator */
	unsigned range_cyclic:1;	/* range_start is cyclic */
	unsigned for_sync:1;		/* sync(2) WB_SYNC_ALL writeback */
	unsigned unpinned_fscache_wb:1;	/* Cleared I_PINNING_FSCACHE_WB */

	/*
	 * When writeback IOs are bounced through async layers, only the
	 * initial synchronous phase should be accounted towards inode
	 * cgroup ownership arbitration to avoid confusion.  Later stages
	 * can set the following flag to disable the accounting.
	 */
	unsigned no_cgroup_owner:1;

	unsigned punt_to_cgroup:1;	/* cgrp punting, see __REQ_CGROUP_PUNT */

#ifdef CONFIG_CGROUP_WRITEBACK
	struct bdi_writeback *wb;	/* wb this writeback is issued under */
	struct inode *inode;		/* inode being written out */

	/* foreign inode detection, see wbc_detach_inode() */
	int wb_id;			/* current wb id */
	int wb_lcand_id;		/* last foreign candidate wb id */
	int wb_tcand_id;		/* this foreign candidate wb id */
	size_t wb_bytes;		/* bytes written by current wb */
	size_t wb_lcand_bytes;		/* bytes written by last candidate */
	size_t wb_tcand_bytes;		/* bytes written by this candidate */
#endif
};
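/*
 * Illustrative sketch (not part of this header): callers such as
 * __filemap_fdatawrite_range() build a writeback_control on the stack and
 * hand it to do_writepages().  The field names below are real; the
 * surrounding function is a hypothetical example.
 *
 *	static int example_write_range(struct address_space *mapping,
 *				       loff_t start, loff_t end, int sync_mode)
 *	{
 *		struct writeback_control wbc = {
 *			.sync_mode	= sync_mode,
 *			.nr_to_write	= LONG_MAX,
 *			.range_start	= start,
 *			.range_end	= end,
 *		};
 *
 *		return do_writepages(mapping, &wbc);
 *	}
 */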
static inline int wbc_to_write_flags(struct writeback_control *wbc)
{
	int flags = 0;

	if (wbc->punt_to_cgroup)
		flags = REQ_CGROUP_PUNT;

	if (wbc->sync_mode == WB_SYNC_ALL)
		flags |= REQ_SYNC;
	else if (wbc->for_kupdate || wbc->for_background)
		flags |= REQ_BACKGROUND;

	return flags;
}

#ifdef CONFIG_CGROUP_WRITEBACK
#define wbc_blkcg_css(wbc) \
	((wbc)->wb ? (wbc)->wb->blkcg_css : blkcg_root_css)
#else
#define wbc_blkcg_css(wbc)	(blkcg_root_css)
#endif /* CONFIG_CGROUP_WRITEBACK */

/*
 * A wb_domain represents a domain that wb's (bdi_writeback's) belong to
 * and are measured against each other in.  There is always one global
 * domain, global_wb_domain, that every wb in the system is a member of.
 * This allows measuring the relative bandwidth of each wb to distribute
 * dirtyable memory accordingly.
 */
struct wb_domain {
	spinlock_t lock;

	/*
	 * Scale the writeback cache size proportional to the relative
	 * writeout speed.
	 *
	 * We do this by keeping a floating proportion between BDIs, based
	 * on page writeback completions [end_page_writeback()].  Those
	 * devices that write out pages fastest will get the larger share,
	 * while the slower will get a smaller share.
	 *
	 * We use page writeout completions because we are interested in
	 * getting rid of dirty pages.  Having them written out is the
	 * primary goal.
	 *
	 * We introduce a concept of time, a period over which we measure
	 * these events, because demand can/will vary over time.  The length
	 * of this period itself is measured in page writeback completions.
	 */
	struct fprop_global completions;
	struct timer_list period_timer;	/* timer for aging of completions */
	unsigned long period_time;

	/*
	 * The dirtyable memory and dirty threshold could be suddenly
	 * knocked down by a large amount (e.g. on the startup of KVM in a
	 * swapless system).  This may throw the system into a deep dirty
	 * exceeded state and throttle heavy/light dirtiers alike.  To
	 * retain good responsiveness, maintain global_dirty_limit, which
	 * tracks slowly down towards the knocked-down dirty threshold.
	 *
	 * Both fields are protected by ->lock.
	 */
	unsigned long dirty_limit_tstamp;
	unsigned long dirty_limit;
};

/**
 * wb_domain_size_changed - memory available to a wb_domain has changed
 * @dom: wb_domain of interest
 *
 * This function should be called when the amount of memory available to
 * @dom has changed.  It resets @dom's dirty limit parameters to prevent
 * past values which don't match the current configuration from skewing
 * dirty throttling.  Without this, when the memory size of a wb_domain is
 * greatly reduced, the dirty throttling logic may allow too many pages to
 * be dirtied, leading to consecutive unnecessary OOMs, and may get stuck
 * in that situation.
 */
static inline void wb_domain_size_changed(struct wb_domain *dom)
{
	spin_lock(&dom->lock);
	dom->dirty_limit_tstamp = jiffies;
	dom->dirty_limit = 0;
	spin_unlock(&dom->lock);
}
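/*
 * Illustrative sketch (not part of this header): a user of a private
 * wb_domain (e.g. the memory controller keeps one per cgroup) initialises
 * it once with wb_domain_init() and calls wb_domain_size_changed() whenever
 * the amount of dirtyable memory it governs is reconfigured.  Both helper
 * names below are hypothetical.
 *
 *	static int example_domain_setup(struct wb_domain *dom)
 *	{
 *		return wb_domain_init(dom, GFP_KERNEL);
 *	}
 *
 *	static void example_domain_limit_changed(struct wb_domain *dom)
 *	{
 *		wb_domain_size_changed(dom);
 *	}
 */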
/*
 * fs/fs-writeback.c
 */
struct bdi_writeback;
void writeback_inodes_sb(struct super_block *, enum wb_reason reason);
void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
			    enum wb_reason reason);
void try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason);
void sync_inodes_sb(struct super_block *);
void wakeup_flusher_threads(enum wb_reason reason);
void wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
				enum wb_reason reason);
void inode_wait_for_writeback(struct inode *inode);
void inode_io_list_del(struct inode *inode);

/* writeback.h requires fs.h; it, too, is not included from here. */
static inline void wait_on_inode(struct inode *inode)
{
	might_sleep();
	wait_on_bit(&inode->i_state, __I_NEW, TASK_UNINTERRUPTIBLE);
}

#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/cgroup.h>
#include <linux/bio.h>

void __inode_attach_wb(struct inode *inode, struct page *page);
void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
				 struct inode *inode)
	__releases(&inode->i_lock);
void wbc_detach_inode(struct writeback_control *wbc);
void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page,
			      size_t bytes);
int cgroup_writeback_by_id(u64 bdi_id, int memcg_id,
			   enum wb_reason reason, struct wb_completion *done);
void cgroup_writeback_umount(void);
bool cleanup_offline_cgwb(struct bdi_writeback *wb);

/**
 * inode_attach_wb - associate an inode with its wb
 * @inode: inode of interest
 * @page: page being dirtied (may be NULL)
 *
 * If @inode doesn't have its wb, associate it with the wb matching the
 * memcg of @page or, if @page is NULL, %current.  May be called with or
 * without @inode->i_lock held.
 */
static inline void inode_attach_wb(struct inode *inode, struct page *page)
{
	if (!inode->i_wb)
		__inode_attach_wb(inode, page);
}

/**
 * inode_detach_wb - disassociate an inode from its wb
 * @inode: inode of interest
 *
 * @inode is being freed.  Detach from its wb.
 */
static inline void inode_detach_wb(struct inode *inode)
{
	if (inode->i_wb) {
		WARN_ON_ONCE(!(inode->i_state & I_CLEAR));
		wb_put(inode->i_wb);
		inode->i_wb = NULL;
	}
}

/**
 * wbc_attach_fdatawrite_inode - associate wbc and inode for fdatawrite
 * @wbc: writeback_control of interest
 * @inode: target inode
 *
 * This function is to be used by __filemap_fdatawrite_range(), which is an
 * alternative entry point into the writeback code.  It first ensures that
 * @inode is associated with a bdi_writeback and then attaches @inode to @wbc.
 */
static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
					       struct inode *inode)
{
	spin_lock(&inode->i_lock);
	inode_attach_wb(inode, NULL);
	wbc_attach_and_unlock_inode(wbc, inode);
}
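/*
 * Illustrative sketch (not part of this header): the attach/detach pair
 * brackets a range write the way __filemap_fdatawrite_range() does, so that
 * the pages written in between are accounted towards the inode's cgroup
 * ownership arbitration.  The surrounding function name is hypothetical.
 *
 *	static int example_fdatawrite(struct address_space *mapping,
 *				      struct writeback_control *wbc)
 *	{
 *		int ret;
 *
 *		wbc_attach_fdatawrite_inode(wbc, mapping->host);
 *		ret = do_writepages(mapping, wbc);
 *		wbc_detach_inode(wbc);
 *		return ret;
 *	}
 */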
/**
 * wbc_init_bio - writeback-specific initialization of bio
 * @wbc: writeback_control for the writeback in progress
 * @bio: bio to be initialized
 *
 * @bio is a part of the writeback in progress controlled by @wbc.  Perform
 * writeback-specific initialization.  This is used to apply the cgroup
 * writeback context.  Must be called after the bio has been associated with
 * a device.
 */
static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
{
	/*
	 * pageout() path doesn't attach @wbc to the inode being written
	 * out.  This is intentional as we don't want the function to block
	 * behind a slow cgroup.  Ultimately, we want pageout() to kick off
	 * regular writeback instead of writing things out itself.
	 */
	if (wbc->wb)
		bio_associate_blkg_from_css(bio, wbc->wb->blkcg_css);
}

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline void inode_attach_wb(struct inode *inode, struct page *page)
{
}

static inline void inode_detach_wb(struct inode *inode)
{
}

static inline void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
					       struct inode *inode)
	__releases(&inode->i_lock)
{
	spin_unlock(&inode->i_lock);
}

static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
					       struct inode *inode)
{
}

static inline void wbc_detach_inode(struct writeback_control *wbc)
{
}

static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
{
}

static inline void wbc_account_cgroup_owner(struct writeback_control *wbc,
					    struct page *page, size_t bytes)
{
}

static inline void cgroup_writeback_umount(void)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

/*
 * mm/page-writeback.c
 */
void laptop_io_completion(struct backing_dev_info *info);
void laptop_sync_completion(void);
void laptop_mode_timer_fn(struct timer_list *t);
bool node_dirty_ok(struct pglist_data *pgdat);
int wb_domain_init(struct wb_domain *dom, gfp_t gfp);
#ifdef CONFIG_CGROUP_WRITEBACK
void wb_domain_exit(struct wb_domain *dom);
#endif

extern struct wb_domain global_wb_domain;
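/*
 * Illustrative sketch (not part of this header): a filesystem submission
 * helper would typically translate the wbc into request flags, apply the
 * cgroup writeback context to an already device-associated bio, and account
 * the written bytes to the inode's cgroup owner.  The function below is
 * hypothetical; the calls are the real helpers declared here and in bio.h.
 *
 *	static void example_submit_page(struct writeback_control *wbc,
 *					struct bio *bio, struct page *page)
 *	{
 *		bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
 *		wbc_init_bio(wbc, bio);
 *		bio_add_page(bio, page, PAGE_SIZE, 0);
 *		wbc_account_cgroup_owner(wbc, page, PAGE_SIZE);
 *		submit_bio(bio);
 *	}
 */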
/* These are exported to sysctl. */
extern int dirty_background_ratio;
extern unsigned long dirty_background_bytes;
extern int vm_dirty_ratio;
extern unsigned long vm_dirty_bytes;
extern unsigned int dirty_writeback_interval;
extern unsigned int dirty_expire_interval;
extern unsigned int dirtytime_expire_interval;
extern int vm_highmem_is_dirtyable;
extern int laptop_mode;

int dirty_background_ratio_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos);
int dirty_background_bytes_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos);
int dirty_ratio_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos);
int dirty_bytes_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos);
int dirtytime_interval_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos);
int dirty_writeback_centisecs_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos);

void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh);

void wb_update_bandwidth(struct bdi_writeback *wb);
void balance_dirty_pages_ratelimited(struct address_space *mapping);
bool wb_over_bg_thresh(struct bdi_writeback *wb);

typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
				void *data);

int generic_writepages(struct address_space *mapping,
		       struct writeback_control *wbc);
void tag_pages_for_writeback(struct address_space *mapping,
			     pgoff_t start, pgoff_t end);
int write_cache_pages(struct address_space *mapping,
		      struct writeback_control *wbc, writepage_t writepage,
		      void *data);
int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
void writeback_set_ratelimit(void);

bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio);
void folio_account_redirty(struct folio *folio);
static inline void account_page_redirty(struct page *page)
{
	folio_account_redirty(page_folio(page));
}
bool folio_redirty_for_writepage(struct writeback_control *, struct folio *);
bool redirty_page_for_writepage(struct writeback_control *, struct page *);

void sb_mark_inode_writeback(struct inode *inode);
void sb_clear_inode_writeback(struct inode *inode);

#endif	/* WRITEBACK_H */
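/*
 * Illustrative sketch (not part of this header): write_cache_pages() drives a
 * filesystem-supplied writepage_t callback over the dirty pages selected by
 * the wbc, which is how many ->writepages() implementations are structured.
 * The example_* names and example_ctx type below are hypothetical
 * placeholders, not real kernel symbols.
 *
 *	static int example_writepage(struct page *page,
 *				     struct writeback_control *wbc, void *data)
 *	{
 *		struct example_ctx *ctx = data;
 *
 *		return example_write_one_page(ctx, page, wbc);
 *	}
 *
 *	static int example_writepages(struct address_space *mapping,
 *				      struct writeback_control *wbc)
 *	{
 *		struct example_ctx ctx = { .mapping = mapping };
 *
 *		return write_cache_pages(mapping, wbc, example_writepage, &ctx);
 *	}
 */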