/*
 * include/linux/writeback.h
 */
#ifndef WRITEBACK_H
#define WRITEBACK_H
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/flex_proportions.h>

DECLARE_PER_CPU(int, dirty_throttle_leaks);
/*
 * The 1/4 region under the global dirty thresh is for smooth dirty throttling:
 *
 *	(thresh - thresh/DIRTY_FULL_SCOPE, thresh)
 *
 * Further beyond, all dirtier tasks will enter a loop waiting (possibly for a
 * long time) for the dirty pages to drop below it, unless they have already
 * written enough pages.
 *
 * The global dirty threshold is normally equal to the global dirty limit,
 * except when the system suddenly allocates a lot of anonymous memory and
 * knocks down the global dirty threshold quickly, in which case the global
 * dirty limit will follow down slowly to prevent livelocking all dirtier
 * tasks.
 */
#define DIRTY_SCOPE		8
#define DIRTY_FULL_SCOPE	(DIRTY_SCOPE / 2)
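
/*
 * Worked example (illustrative numbers, not taken from the source): with
 * DIRTY_SCOPE = 8, DIRTY_FULL_SCOPE evaluates to 4, so for a global dirty
 * thresh of 800 pages the smooth throttling region is
 * (800 - 800/4, 800) = (600, 800) pages.  Dirtiers are rate-limited inside
 * that region and only fall into the hard wait loop beyond the thresh.
 */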
struct backing_dev_info;

/*
 * fs/fs-writeback.c
 */
enum writeback_sync_modes {
	WB_SYNC_NONE,	/* Don't wait on anything */
	WB_SYNC_ALL,	/* Wait on every mapping */
};

/*
 * why some writeback work was initiated
 */
enum wb_reason {
	WB_REASON_BACKGROUND,
	WB_REASON_TRY_TO_FREE_PAGES,
	WB_REASON_SYNC,
	WB_REASON_PERIODIC,
	WB_REASON_LAPTOP_TIMER,
	WB_REASON_FREE_MORE_MEM,
	WB_REASON_FS_FREE_SPACE,
	/*
	 * There is no bdi forker thread any more and the work is done by
	 * an emergency worker; however, this tracepoint is visible to
	 * userland and we keep exposing exactly the same information,
	 * hence the mismatched name.
	 */
	WB_REASON_FORKER_THREAD,

	WB_REASON_MAX,
};

/*
 * A control structure which tells the writeback code what to do. These are
 * always on the stack, and hence need no locking. They are always initialised
 * in a manner such that unspecified fields are set to zero.
 */
struct writeback_control {
	long nr_to_write;		/* Write this many pages, and decrement
					   this for each page written */
	long pages_skipped;		/* Pages which were not written */

	/*
	 * For a_ops->writepages(): if start or end are non-zero then this is
	 * a hint that the filesystem need only write out the pages inside that
	 * byterange. The byte at `end' is included in the writeout request.
	 */
	loff_t range_start;
	loff_t range_end;

	enum writeback_sync_modes sync_mode;

	unsigned for_kupdate:1;		/* A kupdate writeback */
	unsigned for_background:1;	/* A background writeback */
	unsigned tagged_writepages:1;	/* tag-and-write to avoid livelock */
	unsigned for_reclaim:1;		/* Invoked from the page allocator */
	unsigned range_cyclic:1;	/* range_start is cyclic */
	unsigned for_sync:1;		/* sync(2) WB_SYNC_ALL writeback */
};
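
/*
 * A minimal sketch, not part of this header: since a writeback_control
 * always lives on the caller's stack, designated initializers leave every
 * unspecified field zeroed.  "example_init_range_wbc" is a hypothetical
 * helper shown only to illustrate the convention.
 */
static inline void example_init_range_wbc(struct writeback_control *wbc,
					  loff_t start, loff_t end)
{
	*wbc = (struct writeback_control) {
		.sync_mode	= WB_SYNC_ALL,	/* wait on every page */
		.nr_to_write	= LONG_MAX,	/* no page budget */
		.range_start	= start,
		.range_end	= end,		/* byte at 'end' is included */
	};
	/* all flag bits (for_kupdate, range_cyclic, ...) remain zero */
}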

/*
 * A wb_domain represents a domain that wb's (bdi_writeback's) belong to
 * and are measured against each other in. There always is one global
 * domain, global_wb_domain, that every wb in the system is a member of.
 * This allows measuring the relative bandwidth of each wb to distribute
 * dirtyable memory accordingly.
 */
struct wb_domain {
	spinlock_t lock;

	/*
	 * Scale the writeback cache size proportional to the relative
	 * writeout speed.
	 *
	 * We do this by keeping a floating proportion between BDIs, based
	 * on page writeback completions [end_page_writeback()]. Those
	 * devices that write out pages fastest will get the larger share,
	 * while the slower will get a smaller share.
	 *
	 * We use page writeout completions because we are interested in
	 * getting rid of dirty pages. Having them written out is the
	 * primary goal.
	 *
	 * We introduce a concept of time, a period over which we measure
	 * these events, because demand can/will vary over time. The length
	 * of this period itself is measured in page writeback completions.
	 */
	struct fprop_global completions;
	struct timer_list period_timer;	/* timer for aging of completions */
	unsigned long period_time;

	/*
	 * The dirtyable memory and dirty threshold could be suddenly
	 * knocked down by a large amount (eg. on the startup of KVM in a
	 * swapless system). This may throw the system into deep dirty
	 * exceeded state and throttle heavy/light dirtiers alike. To
	 * retain good responsiveness, maintain global_dirty_limit for
	 * tracking slowly down to the knocked down dirty threshold.
	 *
	 * Both fields are protected by ->lock.
	 */
	unsigned long dirty_limit_tstamp;
	unsigned long dirty_limit;
};
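
/*
 * A simplified sketch, not the kernel's implementation: how a domain's
 * floating proportions translate into one wb's share of a threshold.
 * The real logic lives in wb_calc_thresh() in mm/page-writeback.c and
 * applies min/max ratios on top; "example_wb_share" is hypothetical.
 */
static inline unsigned long example_wb_share(struct wb_domain *dom,
					     struct fprop_local_percpu *wb_completions,
					     unsigned long thresh)
{
	unsigned long numerator, denominator;
	u64 share;

	/* fraction of recent writeout completions credited to this wb */
	fprop_fraction_percpu(&dom->completions, wb_completions,
			      &numerator, &denominator);

	/* scale the domain-wide threshold by that fraction */
	share = (u64)thresh * numerator;
	do_div(share, denominator);
	return share;
}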

/*
 * fs/fs-writeback.c
 */
struct bdi_writeback;
void writeback_inodes_sb(struct super_block *, enum wb_reason reason);
void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
			    enum wb_reason reason);
bool try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason);
bool try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
				   enum wb_reason reason);
void sync_inodes_sb(struct super_block *);
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason);
void inode_wait_for_writeback(struct inode *inode);

/* writeback.h requires fs.h; it, too, is not included from here. */
static inline void wait_on_inode(struct inode *inode)
{
	might_sleep();
	wait_on_bit(&inode->i_state, __I_NEW, TASK_UNINTERRUPTIBLE);
}

/*
 * mm/page-writeback.c
 */
#ifdef CONFIG_BLOCK
void laptop_io_completion(struct backing_dev_info *info);
void laptop_sync_completion(void);
void laptop_mode_sync(struct work_struct *work);
void laptop_mode_timer_fn(unsigned long data);
#else
static inline void laptop_sync_completion(void) { }
#endif
void throttle_vm_writeout(gfp_t gfp_mask);
bool zone_dirty_ok(struct zone *zone);
int wb_domain_init(struct wb_domain *dom, gfp_t gfp);
#ifdef CONFIG_CGROUP_WRITEBACK
void wb_domain_exit(struct wb_domain *dom);
#endif

extern struct wb_domain global_wb_domain;

/* These are exported to sysctl. */
extern int dirty_background_ratio;
extern unsigned long dirty_background_bytes;
extern int vm_dirty_ratio;
extern unsigned long vm_dirty_bytes;
extern unsigned int dirty_writeback_interval;
extern unsigned int dirty_expire_interval;
extern unsigned int dirtytime_expire_interval;
extern int vm_highmem_is_dirtyable;
extern int block_dump;
extern int laptop_mode;

extern int dirty_background_ratio_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);
extern int dirty_background_bytes_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);
extern int dirty_ratio_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);
extern int dirty_bytes_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);
int dirtytime_interval_handler(struct ctl_table *table, int write,
			       void __user *buffer, size_t *lenp, loff_t *ppos);

struct ctl_table;
int dirty_writeback_centisecs_handler(struct ctl_table *, int,
				      void __user *, size_t *, loff_t *);

void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh);
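
/*
 * A minimal usage sketch (hypothetical helper): the two calls above are
 * typically paired, first fetching the global background/hard limits and
 * then deriving this wb's slice of the hard limit.
 */
static inline unsigned long example_wb_thresh(struct bdi_writeback *wb)
{
	unsigned long background_thresh, dirty_thresh;

	global_dirty_limits(&background_thresh, &dirty_thresh);
	return wb_calc_thresh(wb, dirty_thresh);
}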

void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time);
void page_writeback_init(void);
void balance_dirty_pages_ratelimited(struct address_space *mapping);
bool wb_over_bg_thresh(struct bdi_writeback *wb);
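
/*
 * A minimal usage sketch (hypothetical helper): a buffered-write path is
 * expected to call balance_dirty_pages_ratelimited() once per dirtied
 * page, so the dirtying task is throttled before dirty memory overruns
 * the limits above.
 */
static inline void example_account_dirtied_page(struct address_space *mapping)
{
	/* ...caller has just dirtied one page of @mapping... */
	balance_dirty_pages_ratelimited(mapping);
}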

typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
				void *data);

int generic_writepages(struct address_space *mapping,
		       struct writeback_control *wbc);
void tag_pages_for_writeback(struct address_space *mapping,
			     pgoff_t start, pgoff_t end);
int write_cache_pages(struct address_space *mapping,
		      struct writeback_control *wbc, writepage_t writepage,
		      void *data);
int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
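
/*
 * A minimal sketch, not a real filesystem: the usual shape of an
 * a_ops->writepages() built on write_cache_pages().  "example_writepage"
 * and "example_writepages" are hypothetical; the @data cookie is passed
 * through to every callback invocation.
 */
static inline int example_writepage(struct page *page,
				    struct writeback_control *wbc, void *data)
{
	/*
	 * @page arrives locked; a real callback starts writeback on it
	 * and unlocks it.  write_cache_pages() itself decrements
	 * wbc->nr_to_write after each call.  This stub does nothing.
	 */
	return 0;
}

static inline int example_writepages(struct address_space *mapping,
				     struct writeback_control *wbc)
{
	/*
	 * Iterates dirty pages in wbc->range_start..range_end; with
	 * wbc->tagged_writepages set it first calls
	 * tag_pages_for_writeback() to avoid livelocking against
	 * concurrent dirtiers.
	 */
	return write_cache_pages(mapping, wbc, example_writepage, NULL);
}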
void writeback_set_ratelimit(void);
void account_page_redirty(struct page *page);

#endif	/* WRITEBACK_H */