#ifndef __LINUX_BACKING_DEV_DEFS_H
#define __LINUX_BACKING_DEV_DEFS_H

#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu-refcount.h>
#include <linux/flex_proportions.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kref.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in bdi_writeback.state
 */
enum wb_state {
	WB_registered,		/* bdi_register() was done */
	WB_writeback_running,	/* Writeback is in progress */
	WB_has_dirty_io,	/* Dirty inodes on ->b_{dirty|io|more_io} */
};
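
/*
 * Illustrative sketch (not part of this header): wb_state values are bit
 * numbers, so they are meant for the atomic bitops, never open-coded
 * masks.  Assuming <linux/bitops.h> is available at the point of use, a
 * read looks like:
 */
static inline bool example_wb_registered(const unsigned long *state)
{
	/* the bit number, not a mask, is the API */
	return test_bit(WB_registered, state);
}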

enum wb_congested_state {
	WB_async_congested,	/* The async (write) queue is getting full */
	WB_sync_congested,	/* The sync queue is getting full */
};

typedef int (congested_fn)(void *, int);
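
/*
 * Illustrative sketch: a congested_fn receives the bdi's congested_data
 * and a mask of (1 << WB_*_congested) bits, and returns the subset that
 * is currently congested.  The device type and its queue-depth fields
 * below are hypothetical.
 */
static inline int example_congested_fn(void *data, int cong_bits)
{
	struct { int queued, limit; } *dev = data;	/* assumed driver state */
	int ret = 0;

	if (dev->queued >= dev->limit)
		ret |= cong_bits & ((1 << WB_async_congested) |
				    (1 << WB_sync_congested));
	return ret;
}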

enum wb_stat_item {
	WB_RECLAIMABLE,
	WB_WRITEBACK,
	WB_DIRTIED,
	WB_WRITTEN,
	NR_WB_STAT_ITEMS
};

#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
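
/*
 * Illustrative sketch: the per-wb stats are percpu counters, and
 * WB_STAT_BATCH bounds how far each CPU may drift before folding into
 * the shared count; scaling by ilog2(nr_cpu_ids) keeps the total error
 * proportional to machine size.  This mirrors the pattern used by
 * __add_wb_stat() in <linux/backing-dev.h> (__percpu_counter_add() was
 * renamed percpu_counter_add_batch() in later kernels).
 */
static inline void example_stat_add(struct percpu_counter *fbc, s64 amount)
{
	__percpu_counter_add(fbc, amount, WB_STAT_BATCH);
}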

/*
 * For cgroup writeback, multiple wb's may map to the same blkcg.  Those
 * wb's can operate mostly independently but should share the congested
 * state.  To facilitate such sharing, the congested state is tracked
 * using the following struct, which is created on demand, indexed by
 * blkcg ID on its bdi, and refcounted.
 */
struct bdi_writeback_congested {
	unsigned long state;		/* WB_[a]sync_congested flags */
	atomic_t refcnt;		/* nr of attached wb's and blkg */

#ifdef CONFIG_CGROUP_WRITEBACK
	struct backing_dev_info *bdi;	/* the associated bdi */
	int blkcg_id;			/* ID of the associated blkcg */
	struct rb_node rb_node;		/* on bdi->cgwb_congested_tree */
#endif
};
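
/*
 * Illustrative sketch (assumes the caller holds the lock protecting
 * bdi->cgwb_congested_tree): lookup by blkcg ID, modeled loosely on
 * wb_congested_get_create() in mm/backing-dev.c.
 */
#ifdef CONFIG_CGROUP_WRITEBACK
static inline struct bdi_writeback_congested *
example_congested_lookup(struct rb_root *root, int blkcg_id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct bdi_writeback_congested *congested =
			rb_entry(node, struct bdi_writeback_congested, rb_node);

		if (blkcg_id < congested->blkcg_id)
			node = node->rb_left;
		else if (blkcg_id > congested->blkcg_id)
			node = node->rb_right;
		else
			return congested;	/* found: caller takes a ref */
	}
	return NULL;			/* absent: caller allocates one */
}
#endif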

/*
 * Each wb (bdi_writeback) can perform writeback operations, is measured
 * and throttled, independently.  Without cgroup writeback, each bdi
 * (backing_dev_info) is served by its embedded bdi->wb.
 *
 * On the default hierarchy, blkcg implicitly enables memcg.  This allows
 * using memcg's page ownership for attributing writeback IOs, and every
 * memcg - blkcg combination can be served by its own wb by assigning a
 * dedicated wb to each memcg, which enables isolation across different
 * cgroups and propagation of IO back pressure down from the IO layer up
 * to the tasks which are generating the dirty pages to be written back.
 *
 * A cgroup wb is indexed on its bdi by the ID of the associated memcg,
 * refcounted with the number of inodes attached to it, and pins the memcg
 * and the corresponding blkcg.  As the corresponding blkcg for a memcg may
 * change as blkcg is disabled and enabled higher up in the hierarchy, a wb
 * is tested for blkcg after lookup and removed from the index on mismatch
 * so that a new wb for the combination can be created (see the lookup
 * sketch after struct backing_dev_info below).
 */
struct bdi_writeback {
	struct backing_dev_info *bdi;	/* our parent bdi */

	unsigned long state;		/* Always use atomic bitops on this */
	unsigned long last_old_flush;	/* last old data flush */

	struct list_head b_dirty;	/* dirty inodes */
	struct list_head b_io;		/* parked for writeback */
	struct list_head b_more_io;	/* parked for more writeback */
	struct list_head b_dirty_time;	/* time stamps are dirty */
	spinlock_t list_lock;		/* protects the b_* lists */

	struct percpu_counter stat[NR_WB_STAT_ITEMS];

	struct bdi_writeback_congested *congested;

	unsigned long bw_time_stamp;	/* last time write bw was updated */
	unsigned long dirtied_stamp;	/* pages dirtied at bw_time_stamp */
	unsigned long written_stamp;	/* pages written at bw_time_stamp */
	unsigned long write_bandwidth;	/* the estimated write bandwidth */
	unsigned long avg_write_bandwidth; /* further smoothed write bw, > 0 */

	/*
	 * The base dirty throttle rate, recalculated every 200ms.  All the
	 * bdi tasks' dirty rates will be curbed under it.
	 * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
	 * in small steps and is much smoother and more stable than the
	 * latter (see the illustrative helper after this struct).
	 */
	unsigned long dirty_ratelimit;
	unsigned long balanced_dirty_ratelimit;

	struct fprop_local_percpu completions;
	int dirty_exceeded;

	spinlock_t work_lock;		/* protects work_list & dwork scheduling */
	struct list_head work_list;
	struct delayed_work dwork;	/* work item used for writeback */

	unsigned long dirty_sleep;	/* last wait */

	struct list_head bdi_node;	/* anchored at bdi->wb_list */

#ifdef CONFIG_CGROUP_WRITEBACK
	struct percpu_ref refcnt;	/* used only for !root wb's */
	struct fprop_local_percpu memcg_completions;
	struct cgroup_subsys_state *memcg_css; /* the associated memcg */
	struct cgroup_subsys_state *blkcg_css; /* and blkcg */
	struct list_head memcg_node;	/* anchored at memcg->cgwb_list */
	struct list_head blkcg_node;	/* anchored at blkcg->cgwb_list */

	union {
		struct work_struct release_work;
		struct rcu_head rcu;
	};
#endif
};
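
/*
 * Illustrative sketch of the "small steps" tracking mentioned above:
 * move the current estimate toward the target by at most one step per
 * update.  The real logic lives in wb_update_dirty_ratelimit() in
 * mm/page-writeback.c and is considerably more involved.
 */
static inline unsigned long example_step_toward(unsigned long cur,
						unsigned long target,
						unsigned long step)
{
	if (cur < target)
		return (target - cur > step) ? cur + step : target;
	if (cur > target)
		return (cur - target > step) ? cur - step : target;
	return cur;
}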

struct backing_dev_info {
	struct list_head bdi_list;
	unsigned long ra_pages;	/* max readahead in PAGE_SIZE units */
	unsigned long io_pages;	/* max allowed IO size */
	congested_fn *congested_fn; /* Function pointer if device is md/dm */
	void *congested_data;	/* Pointer to aux data for congested func */

	char *name;

	struct kref refcnt;	/* Reference counter for the structure */
	unsigned int capabilities; /* Device capabilities */
	unsigned int min_ratio;
	unsigned int max_ratio, max_prop_frac;

	/*
	 * Sum of avg_write_bw of wbs with dirty inodes.  > 0 if there are
	 * any dirty wbs, which is depended upon by bdi_has_dirty().
	 */
	atomic_long_t tot_write_bandwidth;

	struct bdi_writeback wb;  /* the root writeback info for this bdi */
	struct list_head wb_list; /* list of all wbs */
#ifdef CONFIG_CGROUP_WRITEBACK
	struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
	struct rb_root cgwb_congested_tree; /* their congested states */
	atomic_t usage_cnt; /* counts both cgwbs and cgwb_congested's */
#else
	struct bdi_writeback_congested *wb_congested;
#endif
	wait_queue_head_t wb_waitq;

	struct device *dev;
	struct device *owner;

	struct timer_list laptop_mode_wb_timer;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;
	struct dentry *debug_stats;
#endif
};
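
/*
 * Illustrative sketch of the lookup-and-revalidate rule described above
 * struct bdi_writeback: find a wb by memcg ID, then check that its blkcg
 * pairing is still current.  Modeled loosely on wb_get_create() in
 * mm/backing-dev.c; taking the memcg ID as a plain int is a
 * simplification, and the caller is assumed to hold rcu_read_lock().
 */
#ifdef CONFIG_CGROUP_WRITEBACK
static inline struct bdi_writeback *
example_wb_lookup(struct backing_dev_info *bdi, int memcg_id,
		  struct cgroup_subsys_state *blkcg_css)
{
	struct bdi_writeback *wb;

	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_id);
	if (wb && wb->blkcg_css != blkcg_css)
		wb = NULL;	/* stale pairing: caller creates a new wb */
	return wb;
}
#endif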

enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};

void clear_wb_congested(struct bdi_writeback_congested *congested, int sync);
void set_wb_congested(struct bdi_writeback_congested *congested, int sync);

static inline void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	clear_wb_congested(bdi->wb.congested, sync);
}

static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	set_wb_congested(bdi->wb.congested, sync);
}
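
/*
 * Illustrative sketch: a driver might flip congestion from its request
 * path like this, using BLK_RW_ASYNC/BLK_RW_SYNC as the @sync argument.
 * The queue_full predicate is hypothetical.
 */
static inline void example_update_congestion(struct backing_dev_info *bdi,
					     bool queue_full)
{
	if (queue_full)
		set_bdi_congested(bdi, BLK_RW_ASYNC);
	else
		clear_bdi_congested(bdi, BLK_RW_ASYNC);
}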

#ifdef CONFIG_CGROUP_WRITEBACK

/**
 * wb_tryget - try to increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline bool wb_tryget(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		return percpu_ref_tryget(&wb->refcnt);
	return true;
}

/**
 * wb_get - increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline void wb_get(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		percpu_ref_get(&wb->refcnt);
}

/**
 * wb_put - decrement a wb's refcount
 * @wb: bdi_writeback to put
 */
static inline void wb_put(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		percpu_ref_put(&wb->refcnt);
}

/**
 * wb_dying - is a wb dying?
 * @wb: bdi_writeback of interest
 *
 * Returns whether @wb is unlinked and being drained.
 */
static inline bool wb_dying(struct bdi_writeback *wb)
{
	return percpu_ref_is_dying(&wb->refcnt);
}

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool wb_tryget(struct bdi_writeback *wb)
{
	return true;
}

static inline void wb_get(struct bdi_writeback *wb)
{
}

static inline void wb_put(struct bdi_writeback *wb)
{
}

static inline bool wb_dying(struct bdi_writeback *wb)
{
	return false;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */
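
/*
 * Illustrative usage of the refcount helpers above: pin a wb across a
 * region that may sleep.  The root wb is embedded in its bdi and never
 * goes away, which is why the helpers special-case it.
 */
static inline bool example_pin_and_use_wb(struct bdi_writeback *wb)
{
	if (!wb_tryget(wb))
		return false;	/* already draining: look up a fresh wb */
	/* ... operate on @wb, possibly sleeping ... */
	wb_put(wb);
	return true;
}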

#endif	/* __LINUX_BACKING_DEV_DEFS_H */
266