/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/percpu_counter.h>
#include <linux/log2.h>
#include <linux/proportions.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <asm/atomic.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in backing_dev_info.state
 */
enum bdi_state {
	BDI_pending,		/* On its way to being activated */
	BDI_wb_alloc,		/* Default embedded wb allocated */
	BDI_async_congested,	/* The async (write) queue is getting full */
	BDI_sync_congested,	/* The sync queue is getting full */
	BDI_registered,		/* bdi_register() was done */
	BDI_unused,		/* Available bits start here */
};

typedef int (congested_fn)(void *, int);

enum bdi_stat_item {
	BDI_RECLAIMABLE,
	BDI_WRITEBACK,
	NR_BDI_STAT_ITEMS
};

#define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))

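/*
 * Rough numbers for the batch size above (assuming the formula as given):
 * with one possible CPU, ilog2(1) == 0 and a per-cpu delta is folded into
 * the global counter every 8 counts; with 16 possible CPUs, ilog2(16) == 4
 * and folding happens only every 40 counts, trading accuracy of the cheap
 * reads for less cross-CPU traffic.
 */
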
struct bdi_writeback {
	struct list_head list;			/* hangs off the bdi */

	struct backing_dev_info *bdi;		/* our parent bdi */

	unsigned long last_old_flush;		/* last old data flush */

	struct task_struct *task;		/* writeback task */

	struct rb_root flush_tree;
};

struct backing_dev_info {
	struct list_head bdi_list;
	struct rcu_head rcu_head;
	unsigned long ra_pages;		/* max readahead in PAGE_CACHE_SIZE units */
	unsigned long state;		/* Always use atomic bitops on this */
	unsigned int capabilities;	/* Device capabilities */
	congested_fn *congested_fn;	/* Function pointer if device is md/dm */
	void *congested_data;		/* Pointer to aux data for congested func */
	void (*unplug_io_fn)(struct backing_dev_info *, struct page *);

	struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];

	struct prop_local_percpu completions;

	unsigned int min_ratio;
	unsigned int max_ratio, max_prop_frac;

	struct bdi_writeback wb;	/* default writeback info for this bdi */
	spinlock_t wb_lock;		/* protects update side of wb_list */
	struct list_head wb_list;	/* the flusher threads hanging off this bdi */
	unsigned long wb_mask;		/* bitmask of registered tasks */
	unsigned int wb_cnt;		/* number of registered tasks */

	struct list_head work_list;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;
	struct dentry *debug_stats;
#endif
};

int bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);

int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi);
void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb,
				long nr_pages);
int bdi_writeback_task(struct bdi_writeback *wb);
bool bdi_has_dirty_io(struct backing_dev_info *bdi);

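/*
 * Lifecycle sketch (illustrative; "example_bdi", "example-%d" and "id" are
 * made up): a driver embedding a backing_dev_info initialises it, registers
 * it once the device exists, and tears it down in reverse order:
 *
 *	err = bdi_init(&example_bdi);
 *	if (!err)
 *		err = bdi_register(&example_bdi, NULL, "example-%d", id);
 *	...
 *	bdi_unregister(&example_bdi);
 *	bdi_destroy(&example_bdi);
 */
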
extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
	return !RB_EMPTY_ROOT(&wb->flush_tree);
}

static inline void __add_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item, s64 amount)
{
	__percpu_counter_add(&bdi->bdi_stat[item], amount, BDI_STAT_BATCH);
}

static inline void __inc_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	__add_bdi_stat(bdi, item, 1);
}

static inline void inc_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_bdi_stat(bdi, item);
	local_irq_restore(flags);
}

static inline void __dec_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	__add_bdi_stat(bdi, item, -1);
}

static inline void dec_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_bdi_stat(bdi, item);
	local_irq_restore(flags);
}

static inline s64 bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	return percpu_counter_read_positive(&bdi->bdi_stat[item]);
}

static inline s64 __bdi_stat_sum(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	return percpu_counter_sum_positive(&bdi->bdi_stat[item]);
}

static inline s64 bdi_stat_sum(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	s64 sum;
	unsigned long flags;

	local_irq_save(flags);
	sum = __bdi_stat_sum(bdi, item);
	local_irq_restore(flags);

	return sum;
}

extern void bdi_writeout_inc(struct backing_dev_info *bdi);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long bdi_stat_error(struct backing_dev_info *bdi)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * BDI_STAT_BATCH;
#else
	return 1;
#endif
}

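/*
 * Usage sketch (illustrative; BDI_WRITEBACK is one of the bdi_stat_item
 * counters above): accounting code uses the __-prefixed helpers when
 * interrupts are already disabled and the plain wrappers otherwise:
 *
 *	__inc_bdi_stat(bdi, BDI_WRITEBACK);	(caller already has irqs off)
 *	dec_bdi_stat(bdi, BDI_WRITEBACK);	(wrapper disables irqs itself)
 *
 * Readers use bdi_stat() for a cheap, possibly stale value and
 * bdi_stat_sum() for the exact total; bdi_stat_error() bounds how far the
 * cheap read can be off.
 */
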
int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capability
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs)
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  The BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
 * three flags into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:   Don't write pages back
 * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
 *
 * These flags let !MMU mmap() govern direct device mapping vs immediate
 * copying more easily for MAP_PRIVATE, especially for ROM filesystems.
 *
 * BDI_CAP_MAP_COPY:       Copy can be mapped (MAP_PRIVATE)
 * BDI_CAP_MAP_DIRECT:     Can be mapped directly (MAP_SHARED)
 * BDI_CAP_READ_MAP:       Can be mapped for reading
 * BDI_CAP_WRITE_MAP:      Can be mapped for writing
 * BDI_CAP_EXEC_MAP:       Can be mapped for execution
 *
 * BDI_CAP_SWAP_BACKED:    Count shmem/tmpfs objects as swap-backed.
 */
#define BDI_CAP_NO_ACCT_DIRTY	0x00000001
#define BDI_CAP_NO_WRITEBACK	0x00000002
#define BDI_CAP_MAP_COPY	0x00000004
#define BDI_CAP_MAP_DIRECT	0x00000008
#define BDI_CAP_READ_MAP	0x00000010
#define BDI_CAP_WRITE_MAP	0x00000020
#define BDI_CAP_EXEC_MAP	0x00000040
#define BDI_CAP_NO_ACCT_WB	0x00000080
#define BDI_CAP_SWAP_BACKED	0x00000100

#define BDI_CAP_VMFLAGS \
	(BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)

#if defined(VM_MAYREAD) && \
	(BDI_CAP_READ_MAP != VM_MAYREAD || \
	 BDI_CAP_WRITE_MAP != VM_MAYWRITE || \
	 BDI_CAP_EXEC_MAP != VM_MAYEXEC)
#error please change backing_dev_info::capabilities flags
#endif

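/*
 * Capability sketch (illustrative; the variable name is made up): a
 * RAM-backed filesystem whose pages are never written back, never counted
 * against the dirty limits, and need no readahead would set up its bdi
 * roughly like this:
 *
 *	static struct backing_dev_info example_ramfs_bdi = {
 *		.ra_pages	= 0,
 *		.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK |
 *				  BDI_CAP_MAP_DIRECT | BDI_CAP_MAP_COPY |
 *				  BDI_CAP_VMFLAGS,
 *	};
 */
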
extern struct backing_dev_info default_backing_dev_info;
void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page);

int writeback_in_progress(struct backing_dev_info *bdi);

static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
{
	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, bdi_bits);

	return (bdi->state & bdi_bits);
}

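/*
 * Stacking sketch (illustrative; "example_dev" and "lower_bdi" are made up):
 * md/dm style drivers have no single queue whose state reflects congestion,
 * so they supply a callback that inspects their component devices, and
 * bdi_congested() defers to it:
 *
 *	static int example_congested(void *data, int bdi_bits)
 *	{
 *		struct example_dev *ed = data;
 *
 *		return bdi_congested(ed->lower_bdi, bdi_bits);
 *	}
 *
 *	ed->bdi.congested_fn   = example_congested;
 *	ed->bdi.congested_data = ed;
 */
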
static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << BDI_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << BDI_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << BDI_sync_congested) |
				  (1 << BDI_async_congested));
}

void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
void set_bdi_congested(struct backing_dev_info *bdi, int sync);
long congestion_wait(int sync, long timeout);

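/*
 * Throttling sketch (illustrative): code generating async writeback backs
 * off while the device is congested instead of piling on more I/O.  The
 * "sync" argument picks which queue to wait on (BLK_RW_SYNC/BLK_RW_ASYNC
 * in kernels of this vintage):
 *
 *	while (bdi_write_congested(bdi))
 *		congestion_wait(BLK_RW_ASYNC, HZ / 20);
 */
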
static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_SWAP_BACKED;
}

static inline bool bdi_cap_flush_forker(struct backing_dev_info *bdi)
{
	return bdi == &default_backing_dev_info;
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_swap_backed(struct address_space *mapping)
{
	return bdi_cap_swap_backed(mapping->backing_dev_info);
}

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
				       struct page *page)
{
	if (bdi && bdi->unplug_io_fn)
		bdi->unplug_io_fn(bdi, page);
}

static inline void blk_run_address_space(struct address_space *mapping)
{
	if (mapping)
		blk_run_backing_dev(mapping->backing_dev_info, NULL);
}

#endif		/* _LINUX_BACKING_DEV_H */