#ifndef __LINUX_BACKING_DEV_DEFS_H
#define __LINUX_BACKING_DEV_DEFS_H

#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu-refcount.h>
#include <linux/flex_proportions.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in bdi_writeback.state
 */
enum wb_state {
	WB_registered,		/* bdi_register() was done */
	WB_writeback_running,	/* Writeback is in progress */
	WB_has_dirty_io,	/* Dirty inodes on ->b_{dirty|io|more_io} */
};

enum wb_congested_state {
	WB_async_congested,	/* The async (write) queue is getting full */
	WB_sync_congested,	/* The sync queue is getting full */
};

typedef int (congested_fn)(void *, int);

enum wb_stat_item {
	WB_RECLAIMABLE,
	WB_WRITEBACK,
	WB_DIRTIED,
	WB_WRITTEN,
	NR_WB_STAT_ITEMS
};

#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))

/*
 * For cgroup writeback, multiple wb's may map to the same blkcg.  Those
 * wb's can operate mostly independently but should share the congested
 * state.  To facilitate such sharing, the congested state is tracked
 * using the following struct, which is created on demand, indexed by
 * blkcg ID on its bdi, and refcounted.
 */
struct bdi_writeback_congested {
	unsigned long state;		/* WB_[a]sync_congested flags */
	atomic_t refcnt;		/* nr of attached wb's and blkg */

#ifdef CONFIG_CGROUP_WRITEBACK
	struct backing_dev_info *bdi;	/* the associated bdi */
	int blkcg_id;			/* ID of the associated blkcg */
	struct rb_node rb_node;		/* on bdi->cgwb_congested_tree */
#endif
};

/*
 * Each wb (bdi_writeback) can perform writeback operations, is measured
 * and throttled, independently.  Without cgroup writeback, each bdi
 * (backing_dev_info) is served by its embedded bdi->wb.
 *
 * On the default hierarchy, blkcg implicitly enables memcg.  This allows
 * using memcg's page ownership for attributing writeback IOs, and every
 * memcg - blkcg combination can be served by its own wb by assigning a
 * dedicated wb to each memcg.  This enables isolation across different
 * cgroups and propagation of IO back pressure down from the IO layer up
 * to the tasks which are generating the dirty pages to be written back.
 *
 * A cgroup wb is indexed on its bdi by the ID of the associated memcg,
 * refcounted with the number of inodes attached to it, and pins the memcg
 * and the corresponding blkcg.  As the corresponding blkcg for a memcg
 * may change as blkcg is disabled and enabled higher up in the hierarchy,
 * a wb is tested for blkcg after lookup and removed from the index on
 * mismatch so that a new wb for the combination can be created.
 */
struct bdi_writeback {
	struct backing_dev_info *bdi;	/* our parent bdi */

	unsigned long state;		/* Always use atomic bitops on this */
	unsigned long last_old_flush;	/* last old data flush */

	struct list_head b_dirty;	/* dirty inodes */
	struct list_head b_io;		/* parked for writeback */
	struct list_head b_more_io;	/* parked for more writeback */
	struct list_head b_dirty_time;	/* time stamps are dirty */
	spinlock_t list_lock;		/* protects the b_* lists */

	struct percpu_counter stat[NR_WB_STAT_ITEMS];

	struct bdi_writeback_congested *congested;

	unsigned long bw_time_stamp;	/* last time write bw was updated */
	unsigned long dirtied_stamp;
	unsigned long written_stamp;	/* pages written at bw_time_stamp */
	unsigned long write_bandwidth;	/* the estimated write bandwidth */
	unsigned long avg_write_bandwidth; /* further smoothed write bw, > 0 */

	/*
	 * The base dirty throttle rate, recalculated every 200ms.
	 * All the bdi tasks' dirty rate will be curbed under it.
	 * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
	 * in small steps and is much smoother and more stable than the
	 * latter.
	 */
	unsigned long dirty_ratelimit;
	unsigned long balanced_dirty_ratelimit;

	struct fprop_local_percpu completions;
	int dirty_exceeded;

	spinlock_t work_lock;		/* protects work_list & dwork scheduling */
	struct list_head work_list;
	struct delayed_work dwork;	/* work item used for writeback */

	unsigned long dirty_sleep;	/* last wait */

	struct list_head bdi_node;	/* anchored at bdi->wb_list */

#ifdef CONFIG_CGROUP_WRITEBACK
	struct percpu_ref refcnt;	/* used only for !root wb's */
	struct fprop_local_percpu memcg_completions;
	struct cgroup_subsys_state *memcg_css;	/* the associated memcg */
	struct cgroup_subsys_state *blkcg_css;	/* and blkcg */
	struct list_head memcg_node;	/* anchored at memcg->cgwb_list */
	struct list_head blkcg_node;	/* anchored at blkcg->cgwb_list */

	union {
		struct work_struct release_work;
		struct rcu_head rcu;
	};
#endif
};
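/*
 * A minimal usage sketch (illustrative only, not an API defined by this
 * header): a cgroup wb may be dying at any point, so a caller holding an
 * unpinned pointer would typically pin it with wb_tryget() (defined
 * below) before operating on it:
 *
 *	if (wb_tryget(wb)) {
 *		... issue writeback against wb ...
 *		wb_put(wb);
 *	}
 *
 * For the root wb these calls are no-ops, as its lifetime is tied to
 * that of its bdi.
 */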
struct backing_dev_info {
	struct list_head bdi_list;
	unsigned long ra_pages;		/* max readahead in PAGE_SIZE units */
	unsigned long io_pages;		/* max allowed IO size */
	congested_fn *congested_fn;	/* Function pointer if device is md/dm */
	void *congested_data;		/* Pointer to aux data for congested func */

	char *name;

	unsigned int capabilities;	/* Device capabilities */
	unsigned int min_ratio;
	unsigned int max_ratio, max_prop_frac;

	/*
	 * Sum of avg_write_bandwidth of wbs with dirty inodes.  > 0 if
	 * there are any dirty wbs, which bdi_has_dirty() depends upon.
	 */
	atomic_long_t tot_write_bandwidth;

	struct bdi_writeback wb;	/* the root writeback info for this bdi */
	struct list_head wb_list;	/* list of all wbs */
#ifdef CONFIG_CGROUP_WRITEBACK
	struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
	struct rb_root cgwb_congested_tree; /* their congested states */
	atomic_t usage_cnt;		/* counts both cgwbs and cgwb_congested's */
#else
	struct bdi_writeback_congested *wb_congested;
#endif
	wait_queue_head_t wb_waitq;

	struct device *dev;
	struct device *owner;

	struct timer_list laptop_mode_wb_timer;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;
	struct dentry *debug_stats;
#endif
};

enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};

void clear_wb_congested(struct bdi_writeback_congested *congested, int sync);
void set_wb_congested(struct bdi_writeback_congested *congested, int sync);

static inline void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	clear_wb_congested(bdi->wb.congested, sync);
}

static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	set_wb_congested(bdi->wb.congested, sync);
}
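/*
 * An illustrative sketch of a ->congested_fn implementation (the driver
 * name and conf fields are hypothetical; only the typedef above is real):
 * a stacking driver such as md/dm can report congestion by combining the
 * states of its member devices, with @bits being a mask built from
 * 1 << WB_async_congested and/or 1 << WB_sync_congested:
 *
 *	static int example_congested(void *data, int bits)
 *	{
 *		struct example_conf *conf = data;	(driver-private)
 *		int i, ret = 0;
 *
 *		for (i = 0; i < conf->nr_members; i++)
 *			ret |= bdi_congested(conf->member_bdi[i], bits);
 *		return ret;
 *	}
 *
 * The function pointer and its @congested_data argument are stored in
 * the congested_fn and congested_data members above.
 */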
#ifdef CONFIG_CGROUP_WRITEBACK

/**
 * wb_tryget - try to increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline bool wb_tryget(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		return percpu_ref_tryget(&wb->refcnt);
	return true;
}

/**
 * wb_get - increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline void wb_get(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		percpu_ref_get(&wb->refcnt);
}

/**
 * wb_put - decrement a wb's refcount
 * @wb: bdi_writeback to put
 */
static inline void wb_put(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		percpu_ref_put(&wb->refcnt);
}

/**
 * wb_dying - is a wb dying?
 * @wb: bdi_writeback of interest
 *
 * Returns whether @wb is unlinked and being drained.
 */
static inline bool wb_dying(struct bdi_writeback *wb)
{
	return percpu_ref_is_dying(&wb->refcnt);
}

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool wb_tryget(struct bdi_writeback *wb)
{
	return true;
}

static inline void wb_get(struct bdi_writeback *wb)
{
}

static inline void wb_put(struct bdi_writeback *wb)
{
}

static inline bool wb_dying(struct bdi_writeback *wb)
{
	return false;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

#endif	/* __LINUX_BACKING_DEV_DEFS_H */