#include <linux/tracepoint.h>
#include "internal.h"
-/*
- * The maximum number of pages to writeout in a single bdi flush/kupdate
- * operation. We do this so we don't hold I_SYNC against an inode for
- * enormous amounts of time, which would block a userspace task which has
- * been forced to throttle against that inode. Also, the code reevaluates
- * the dirty each time it has written this many pages.
- */
-#define MAX_WRITEBACK_PAGES 1024L
-
/*
* Passed into wb_writeback(), essentially a subset of writeback_control
*/
return false;
}
-static long writeback_chunk_size(struct wb_writeback_work *work)
+static long writeback_chunk_size(struct backing_dev_info *bdi,
+ struct wb_writeback_work *work)
{
long pages;
	/*
	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
	 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
	 * here avoids calling into writeback_inodes_wb() more than once.
	 *
	 * The intended call sequence for WB_SYNC_ALL writeback is:
	 *
	 *      wb_writeback()
	 *          writeback_sb_inodes()       <== called only once
	 *              write_cache_pages()     <== called once for each inode
	 *                   (quickly) tag currently dirty pages
	 *                   (maybe slowly) sync all tagged pages
	 */
if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
pages = LONG_MAX;
- else
- pages = min(MAX_WRITEBACK_PAGES, work->nr_pages);
+ else {
+ pages = min(bdi->avg_write_bandwidth / 2,
+ global_dirty_limit / DIRTY_SCOPE);
+ pages = min(pages, work->nr_pages);
+ pages = round_down(pages + MIN_WRITEBACK_PAGES,
+ MIN_WRITEBACK_PAGES);
+ }
return pages;
}
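
For scale: avg_write_bandwidth is tracked in pages per second, so the non-sync chunk comes out to roughly half a second of device throughput, capped at 1/8 of the global dirty limit and rounded to 4MB granularity. Below is a minimal userspace sketch of the same arithmetic; the sample bandwidth and limit are invented for illustration, and round_down() is open-coded (valid here since 1024 is a power of two).

#include <stdio.h>

#define DIRTY_SCOPE		8
#define MIN_WRITEBACK_PAGES	1024L	/* 4MB in 4K pages */

/* userspace rehash of the non-WB_SYNC_ALL branch above */
static long chunk_size(long avg_write_bandwidth,	/* pages/second */
		       long global_dirty_limit,		/* pages */
		       long nr_pages)			/* work->nr_pages */
{
	long pages = avg_write_bandwidth / 2;

	if (pages > global_dirty_limit / DIRTY_SCOPE)
		pages = global_dirty_limit / DIRTY_SCOPE;
	if (pages > nr_pages)
		pages = nr_pages;
	/* round_down(pages + MIN_WRITEBACK_PAGES, MIN_WRITEBACK_PAGES) */
	return (pages + MIN_WRITEBACK_PAGES) / MIN_WRITEBACK_PAGES
		* MIN_WRITEBACK_PAGES;
}

int main(void)
{
	/* ~100MB/s device (25600 pages/s), 512MB dirty limit (131072 pages) */
	printf("chunk = %ld pages\n", chunk_size(25600, 131072, 1L << 20));
	return 0;
}

With those numbers the chunk is 13312 pages (52MB) rather than the old fixed 1024, and a slow device is still guaranteed at least MIN_WRITEBACK_PAGES per iteration.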
continue;
}
__iget(inode);
- write_chunk = writeback_chunk_size(work);
+ write_chunk = writeback_chunk_size(wb->bdi, work);
wbc.nr_to_write = write_chunk;
wbc.pages_skipped = 0;
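
The caller sizes wbc.nr_to_write to the chunk and, after the filesystem has done its writeout, the loop can read progress back as write_chunk - wbc.nr_to_write. A toy model of that accounting, assuming nothing about the real struct writeback_control beyond the two fields used here:

#include <stdio.h>

/* stand-in for the two writeback_control fields exercised above */
struct writeback_control {
	long nr_to_write;	/* budget; decremented as pages go out */
	long pages_skipped;	/* pages the fs declined to write */
};

/* pretend filesystem writeout: sends up to nr_to_write pages */
static void fake_writepages(struct writeback_control *wbc, long dirty)
{
	long n = dirty < wbc->nr_to_write ? dirty : wbc->nr_to_write;
	wbc->nr_to_write -= n;
}

int main(void)
{
	struct writeback_control wbc = {
		.nr_to_write = 13312,	/* one bandwidth-sized chunk */
		.pages_skipped = 0,
	};
	long write_chunk = wbc.nr_to_write;

	fake_writepages(&wbc, 9000);	/* inode had 9000 dirty pages */
	printf("wrote %ld of %ld\n", write_chunk - wbc.nr_to_write,
	       write_chunk);
	return 0;
}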
#include <linux/fs.h>
/*
+ * The 1/4 region under the global dirty thresh is for smooth dirty throttling:
+ *
+ * (thresh - thresh/DIRTY_FULL_SCOPE, thresh)
+ *
* The 1/16 region above the global dirty limit will be put to maximum pauses:
*
* (limit, limit + limit/DIRTY_MAXPAUSE_AREA)
*
* The 1/16 region above the max-pause region, dirty exceeded bdi's will be put
* to loops:
*
*	(limit + limit/DIRTY_MAXPAUSE_AREA, limit + limit/DIRTY_PASSGOOD_AREA)
*
* Further beyond, all dirtier tasks will enter a loop waiting (possibly long
* time) for the dirty pages to drop, unless written enough pages.
*
* The global dirty threshold is normally equal to the global dirty limit,
* except when the system suddenly allocates a lot of anonymous memory and
* knocks down the global dirty threshold quickly, in which case the global
* dirty limit will follow down slowly to prevent livelocking all dirtier tasks.
*/
+#define DIRTY_SCOPE 8
+#define DIRTY_FULL_SCOPE (DIRTY_SCOPE / 2)
#define DIRTY_MAXPAUSE_AREA 16
#define DIRTY_PASSGOOD_AREA 8
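
To see where these knobs put the throttle bands, a quick check with a hypothetical thresh = limit = 160000 pages (~625MB with 4K pages; the figure is invented):

#include <stdio.h>

#define DIRTY_SCOPE		8
#define DIRTY_FULL_SCOPE	(DIRTY_SCOPE / 2)
#define DIRTY_MAXPAUSE_AREA	16
#define DIRTY_PASSGOOD_AREA	8

int main(void)
{
	unsigned long thresh = 160000, limit = 160000;	/* pages, invented */

	printf("smooth throttling: (%lu, %lu)\n",
	       thresh - thresh / DIRTY_FULL_SCOPE, thresh);
	printf("max pause:         (%lu, %lu)\n",
	       limit, limit + limit / DIRTY_MAXPAUSE_AREA);
	printf("pass-good up to:    %lu\n",
	       limit + limit / DIRTY_PASSGOOD_AREA);
	return 0;
}

That prints (120000, 160000), (160000, 170000) and 180000: the last quarter below the threshold is the smooth-throttling scope, with escalating 1/16 and 1/8 bands above the limit.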
+/*
+ * 4MB minimal write chunk size
+ */
+#define MIN_WRITEBACK_PAGES (4096UL >> (PAGE_CACHE_SHIFT - 10))
+
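
The shift is just "4096KB divided by the page size in KB": 1 << (PAGE_CACHE_SHIFT - 10) is the page size in kilobytes. A sanity check, assuming 4K pages (PAGE_CACHE_SHIFT == 12):

#include <stdio.h>

#define PAGE_CACHE_SHIFT	12	/* assume 4K pages */
#define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_CACHE_SHIFT - 10))

int main(void)
{
	/* 4096KB / 4KB per page = 1024 pages = 4MB */
	printf("%lu pages (%lu MB)\n", MIN_WRITEBACK_PAGES,
	       MIN_WRITEBACK_PAGES << PAGE_CACHE_SHIFT >> 20);
	return 0;
}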
struct backing_dev_info;
/*