/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up to
 * high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/percpu_counter.h>
#include <linux/log2.h>
#include <linux/flex_proportions.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/writeback.h>
#include <linux/atomic.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in backing_dev_info.state
 */
enum bdi_state {
	BDI_async_congested,	/* The async (write) queue is getting full */
	BDI_sync_congested,	/* The sync queue is getting full */
	BDI_registered,		/* bdi_register() was done */
	BDI_writeback_running,	/* Writeback is in progress */
};

/*
 * Congestion callback for stacked (md/dm) devices: called with
 * backing_dev_info.congested_data and a mask of 1 << BDI_*_congested
 * bits, returns the subset of those bits that are currently congested.
 */
typedef int (congested_fn)(void *, int);

enum bdi_stat_item {
	BDI_RECLAIMABLE,
	BDI_WRITEBACK,
	BDI_DIRTIED,
	BDI_WRITTEN,
	NR_BDI_STAT_ITEMS
};

#define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
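/*
 * Worked example of the batch size above (illustrative): on a system with
 * nr_cpu_ids == 16, ilog2(16) == 4, so BDI_STAT_BATCH == 8 * (1 + 4) == 40.
 * Each CPU may then drift by up to the batch from the global count, which
 * is where the nr_cpu_ids * BDI_STAT_BATCH == 640 worst-case error reported
 * by bdi_stat_error() below comes from.
 */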
struct bdi_writeback {
	struct backing_dev_info *bdi;	/* our parent bdi */

	unsigned long last_old_flush;	/* last old data flush */

	struct delayed_work dwork;	/* work item used for writeback */
	struct list_head b_dirty;	/* dirty inodes */
	struct list_head b_io;		/* parked for writeback */
	struct list_head b_more_io;	/* parked for more writeback */
	struct list_head b_dirty_time;	/* time stamps are dirty */
	spinlock_t list_lock;		/* protects the b_* lists */
};

struct backing_dev_info {
	struct list_head bdi_list;
	unsigned long ra_pages;	/* max readahead in PAGE_CACHE_SIZE units */
	unsigned long state;	/* Always use atomic bitops on this */
	unsigned int capabilities; /* Device capabilities */
	congested_fn *congested_fn; /* Function pointer if device is md/dm */
	void *congested_data;	/* Pointer to aux data for congested func */

	char *name;

	struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];

	unsigned long bw_time_stamp;	/* last time write bw was updated */
	unsigned long dirtied_stamp;	/* pages dirtied at bw_time_stamp */
	unsigned long written_stamp;	/* pages written at bw_time_stamp */
	unsigned long write_bandwidth;	/* the estimated write bandwidth */
	unsigned long avg_write_bandwidth; /* further smoothed write bw */

	/*
	 * The base dirty throttle rate, re-calculated every 200ms.
	 * All the bdi tasks' dirty rates are curbed under it.
	 * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
	 * in small steps and is much smoother and more stable than the
	 * latter.
	 */
	unsigned long dirty_ratelimit;
	unsigned long balanced_dirty_ratelimit;

	struct fprop_local_percpu completions;
	int dirty_exceeded;

	unsigned int min_ratio;
	unsigned int max_ratio, max_prop_frac;

	struct bdi_writeback wb;  /* default writeback info for this bdi */
	spinlock_t wb_lock;	  /* protects work_list & wb.dwork scheduling */

	struct list_head work_list;

	struct device *dev;

	struct timer_list laptop_mode_wb_timer;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;
	struct dentry *debug_stats;
#endif
};

struct backing_dev_info *inode_to_bdi(struct inode *inode);

int __must_check bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);

__printf(3, 4)
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi);
int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
			enum wb_reason reason);
void bdi_start_background_writeback(struct backing_dev_info *bdi);
void bdi_writeback_workfn(struct work_struct *work);
int bdi_has_dirty_io(struct backing_dev_info *bdi);
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;
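/*
 * Illustrative usage sketch, not part of this header: a simple driver or
 * filesystem would typically bring its bdi up and down as below.
 * "struct example_dev" and the "example" name are hypothetical.
 */
#if 0	/* example only, not compiled */
static int example_bdi_setup(struct example_dev *dev)
{
	int err;

	/* Initializes dev->bdi and registers it under a generated name. */
	err = bdi_setup_and_register(&dev->bdi, "example");
	if (err)
		return err;

	/* From here on, pages dirtied against this bdi are accounted,
	 * throttled and written back through its bdi_writeback machinery. */
	return 0;
}

static void example_bdi_teardown(struct example_dev *dev)
{
	bdi_destroy(&dev->bdi);	/* undoes bdi_setup_and_register() */
}
#endif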
static inline int wb_has_dirty_io(struct bdi_writeback *wb)
{
	return !list_empty(&wb->b_dirty) ||
	       !list_empty(&wb->b_io) ||
	       !list_empty(&wb->b_more_io);
}

static inline void __add_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item, s64 amount)
{
	__percpu_counter_add(&bdi->bdi_stat[item], amount, BDI_STAT_BATCH);
}

static inline void __inc_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	__add_bdi_stat(bdi, item, 1);
}

static inline void inc_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_bdi_stat(bdi, item);
	local_irq_restore(flags);
}

static inline void __dec_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	__add_bdi_stat(bdi, item, -1);
}

static inline void dec_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_bdi_stat(bdi, item);
	local_irq_restore(flags);
}

static inline s64 bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	return percpu_counter_read_positive(&bdi->bdi_stat[item]);
}

static inline s64 __bdi_stat_sum(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	return percpu_counter_sum_positive(&bdi->bdi_stat[item]);
}

static inline s64 bdi_stat_sum(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	s64 sum;
	unsigned long flags;

	local_irq_save(flags);
	sum = __bdi_stat_sum(bdi, item);
	local_irq_restore(flags);

	return sum;
}

extern void bdi_writeout_inc(struct backing_dev_info *bdi);
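/*
 * Illustrative usage sketch, not part of this header: accounting a page
 * that enters and then completes writeback.  The __-prefixed variants may
 * be used instead when the caller already runs with interrupts disabled.
 */
#if 0	/* example only, not compiled */
static void example_page_writeback_started(struct backing_dev_info *bdi)
{
	inc_bdi_stat(bdi, BDI_WRITEBACK);	/* irq-safe increment */
}

static void example_page_writeback_done(struct backing_dev_info *bdi)
{
	dec_bdi_stat(bdi, BDI_WRITEBACK);
	inc_bdi_stat(bdi, BDI_WRITTEN);
	bdi_writeout_inc(bdi);	/* feed the per-bdi completion proportions */
}
#endif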
/*
 * Maximal error of a stat counter.
 */
static inline unsigned long bdi_stat_error(struct backing_dev_info *bdi)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * BDI_STAT_BATCH;
#else
	return 1;
#endif
}

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capabilities
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs).
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
 * three flags into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY: Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:  Don't write pages back
 * BDI_CAP_NO_ACCT_WB:    Don't automatically account writeback pages
 * BDI_CAP_STABLE_WRITES: Pages under writeback must not be modified until
 *			  writeback completes (stable pages are required)
 * BDI_CAP_STRICTLIMIT:   Keep number of dirty pages below bdi threshold.
 */
#define BDI_CAP_NO_ACCT_DIRTY	0x00000001
#define BDI_CAP_NO_WRITEBACK	0x00000002
#define BDI_CAP_NO_ACCT_WB	0x00000004
#define BDI_CAP_STABLE_WRITES	0x00000008
#define BDI_CAP_STRICTLIMIT	0x00000010

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)

extern struct backing_dev_info noop_backing_dev_info;

int writeback_in_progress(struct backing_dev_info *bdi);

static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
{
	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, bdi_bits);
	return (bdi->state & bdi_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << BDI_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << BDI_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << BDI_sync_congested) |
				  (1 << BDI_async_congested));
}

enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};

void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
void set_bdi_congested(struct backing_dev_info *bdi, int sync);
long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct zone *zone, int sync, long timeout);
int pdflush_proc_obsolete(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos);

static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_STABLE_WRITES;
}

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
}

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

#endif	/* _LINUX_BACKING_DEV_H */
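/*
 * Illustrative usage sketch, not part of this header: best-effort writers
 * commonly combine the congestion predicates above with congestion_wait()
 * to back off from a busy device, e.g.:
 *
 *	while (bdi_write_congested(inode_to_bdi(inode)))
 *		congestion_wait(BLK_RW_ASYNC, HZ / 10);
 */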