/*
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes, i.e. data writeback. Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include "internal.h"

#define inode_to_bdi(inode)	((inode)->i_mapping->backing_dev_info)

/*
 * We don't actually have pdflush, but this one is exported through /proc...
 */
int nr_pdflush_threads;

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_args {
	long nr_pages;
	struct super_block *sb;
	enum writeback_sync_modes sync_mode;
	int for_kupdate:1;
	int range_cyclic:1;
};

/*
 * Work items for the bdi_writeback threads
 */
struct bdi_work {
	struct list_head list;		/* pending work list */
	struct rcu_head rcu_head;	/* for RCU free/clear of work */

	unsigned long seen;		/* threads that have seen this work */
	atomic_t pending;		/* number of threads still to do work */

	struct wb_writeback_args args;	/* writeback arguments */

	unsigned long state;		/* flag bits, see WS_* */
};

enum {
	WS_USED_B = 0,
	WS_ONSTACK_B,
};

#define WS_USED (1 << WS_USED_B)
#define WS_ONSTACK (1 << WS_ONSTACK_B)

static inline bool bdi_work_on_stack(struct bdi_work *work)
{
	return test_bit(WS_ONSTACK_B, &work->state);
}

static inline void bdi_work_init(struct bdi_work *work,
				 struct wb_writeback_args *args)
{
	INIT_RCU_HEAD(&work->rcu_head);
	work->args = *args;
	work->state = WS_USED;
}

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return !list_empty(&bdi->work_list);
}

static void bdi_work_clear(struct bdi_work *work)
{
	clear_bit(WS_USED_B, &work->state);
	smp_mb__after_clear_bit();
	wake_up_bit(&work->state, WS_USED_B);
}

static void bdi_work_free(struct rcu_head *head)
{
	struct bdi_work *work = container_of(head, struct bdi_work, rcu_head);

	if (!bdi_work_on_stack(work))
		kfree(work);
	else
		bdi_work_clear(work);
}

static void wb_work_complete(struct bdi_work *work)
{
	const enum writeback_sync_modes sync_mode = work->args.sync_mode;
	int onstack = bdi_work_on_stack(work);

	/*
	 * For allocated work, we can clear the done/seen bit right here.
	 * For on-stack work, we need to postpone both the clear and free
	 * to after the RCU grace period, since the stack could be invalidated
	 * as soon as bdi_work_clear() has done the wakeup.
	 */
	if (!onstack)
		bdi_work_clear(work);
	if (sync_mode == WB_SYNC_NONE || onstack)
		call_rcu(&work->rcu_head, bdi_work_free);
}

static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
{
	/*
	 * The caller has retrieved the work arguments from this work, so
	 * drop our reference. If this is the last ref, delete and free it.
	 */
	if (atomic_dec_and_test(&work->pending)) {
		struct backing_dev_info *bdi = wb->bdi;

		spin_lock(&bdi->wb_lock);
		list_del_rcu(&work->list);
		spin_unlock(&bdi->wb_lock);

		wb_work_complete(work);
	}
}

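/*
 * Editor's sketch (not part of the original file): the generic shape of
 * the refcount-plus-RCU retirement pattern used by wb_clear_pending()
 * and wb_work_complete() above, written with hypothetical names. The
 * last thread to drop its reference unlinks the item under the list
 * lock; readers still walking the list under rcu_read_lock() remain
 * safe until the grace period ends, at which point the RCU callback
 * may free the item.
 *
 *	if (atomic_dec_and_test(&obj->pending)) {
 *		spin_lock(&obj_list_lock);
 *		list_del_rcu(&obj->node);
 *		spin_unlock(&obj_list_lock);
 *		call_rcu(&obj->rcu, obj_free_rcu);
 *	}
 */
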
static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
{
	work->seen = bdi->wb_mask;
	BUG_ON(!work->seen);
	atomic_set(&work->pending, bdi->wb_cnt);
	BUG_ON(!bdi->wb_cnt);

	/*
	 * list_add_tail_rcu() contains the necessary barriers to
	 * make sure the above stores are seen before the item is
	 * noticed on the list.
	 */
	spin_lock(&bdi->wb_lock);
	list_add_tail_rcu(&work->list, &bdi->work_list);
	spin_unlock(&bdi->wb_lock);

	/*
	 * If the default thread isn't there, make sure we add it. When
	 * it gets created and wakes up, we'll run this work.
	 */
	if (unlikely(list_empty_careful(&bdi->wb_list)))
		wake_up_process(default_backing_dev_info.wb.task);
	else {
		struct bdi_writeback *wb = &bdi->wb;

		/*
		 * End work now if this wb has no dirty IO pending. Otherwise
		 * wake up the handling thread.
		 */
		if (!wb_has_dirty_io(wb))
			wb_clear_pending(wb, work);
		else if (wb->task)
			wake_up_process(wb->task);
	}
}

/*
 * Used for on-stack allocated work items. The caller needs to wait until
 * the wb threads have acked the work before it's safe to continue.
 */
static void bdi_wait_on_work_clear(struct bdi_work *work)
{
	wait_on_bit(&work->state, WS_USED_B, bdi_sched_wait,
		    TASK_UNINTERRUPTIBLE);
}

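/*
 * Editor's sketch (not from the original file): the two halves of the
 * bit-wait handshake used by bdi_work_clear() and
 * bdi_wait_on_work_clear() above, reduced to the generic pattern with a
 * hypothetical bit and word:
 *
 *	waker:
 *		clear_bit(MY_BIT, &word);
 *		smp_mb__after_clear_bit();
 *		wake_up_bit(&word, MY_BIT);
 *
 *	sleeper:
 *		wait_on_bit(&word, MY_BIT, my_wait_fn, TASK_UNINTERRUPTIBLE);
 *
 * The barrier orders the bit clear before the wakeup; without it, a
 * sleeper could sample the bit as still set, commit to sleeping, and
 * miss the wakeup entirely.
 */
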
static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
				 struct wb_writeback_args *args)
{
	struct bdi_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wake up the thread for old dirty data writeback.
	 */
	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		bdi_work_init(work, args);
		bdi_queue_work(bdi, work);
	} else {
		struct bdi_writeback *wb = &bdi->wb;

		if (wb->task)
			wake_up_process(wb->task);
	}
}

/**
 * bdi_sync_writeback - start and wait for writeback
 * @bdi: the backing device to write from
 * @sb: write inodes from this super_block
 *
 * This does WB_SYNC_ALL data integrity writeback and waits for the
 * IO to complete. Callers must hold the sb s_umount semaphore for
 * reading, to avoid having the super disappear before we are done.
 */
static void bdi_sync_writeback(struct backing_dev_info *bdi,
			       struct super_block *sb)
{
	struct wb_writeback_args args = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_ALL,
		.nr_pages	= LONG_MAX,
		.range_cyclic	= 0,
	};
	struct bdi_work work;

	bdi_work_init(&work, &args);
	work.state |= WS_ONSTACK;

	bdi_queue_work(bdi, &work);
	bdi_wait_on_work_clear(&work);
}

/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 *
 * This does WB_SYNC_NONE opportunistic writeback. The IO is only
 * started when this function returns; we make no guarantees on
 * completion. The caller need not hold the sb s_umount semaphore.
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
{
	struct wb_writeback_args args = {
		.sync_mode	= WB_SYNC_NONE,
		.nr_pages	= nr_pages,
		.range_cyclic	= 1,
	};

	bdi_alloc_queue_work(bdi, &args);
}

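/*
 * Editor's sketch (hypothetical caller, not from this file): how a
 * throttled dirtier might kick opportunistic background writeback for
 * a device. No completion guarantee is implied; the work item is
 * queued and the flusher thread is woken.
 */
static inline void example_kick_bdi(struct backing_dev_info *bdi)
{
	bdi_start_writeback(bdi, 1024);		/* write up to 1024 pages */
}
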
/*
 * Redirty an inode: set its when-it-was-dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list. If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = list_entry(wb->b_dirty.next, struct inode, i_list);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_list, &wb->b_dirty);
}

/*
 * Requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	list_move(&inode->i_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	/*
	 * Prevent speculative execution through spin_unlock(&inode_lock);
	 */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in the distant
	 * past. This test is necessary to prevent such wrapped-around
	 * relative times from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}

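/*
 * Editor's worked example (standalone userspace sketch, not part of
 * this file): the wraparound that inode_dirtied_after() guards against,
 * using 32-bit arithmetic as on a 32-bit kernel. A stamp more than
 * LONG_MAX ticks in the past "appears" to be in the future to
 * time_after(); the extra time_before_eq(dirtied_when, jiffies) check
 * rejects it.
 */
#if 0	/* illustrative only; compiles as a standalone program */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t dirtied_when = 1000;		/* stamped long ago */
	uint32_t now = 0x900003e8u;		/* > 2^31 ticks later */

	/* time_after(dirtied_when, now): signed view of the difference */
	int appears_future = (int32_t)(now - dirtied_when) < 0;

	printf("appears in the future: %d\n", appears_future);	/* 1 */
	return 0;
}
#endif
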
/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static void move_expired_inodes(struct list_head *delaying_queue,
				struct list_head *dispatch_queue,
				unsigned long *older_than_this)
{
	while (!list_empty(delaying_queue)) {
		struct inode *inode = list_entry(delaying_queue->prev,
						 struct inode, i_list);
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		list_move(&inode->i_list, dispatch_queue);
	}
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 */
static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
{
	list_splice_init(&wb->b_more_io, wb->b_io.prev);
	move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
}

static int write_inode(struct inode *inode, int sync)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, sync);
	return 0;
}

/*
 * Wait for writeback on an inode to complete.
 */
static void inode_wait_for_writeback(struct inode *inode)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	do {
		spin_unlock(&inode_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&inode_lock);
	} while (inode->i_state & I_SYNC);
}

/*
 * Write out an inode's dirty pages. Called under inode_lock. Either the
 * caller has a ref on the inode (either via __iget or via syscall against an
 * fd) or the inode has I_WILL_FREE set (via generic_forget_inode).
 *
 * If `wait' is set, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile. We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 */
static int
writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	int wait = wbc->sync_mode == WB_SYNC_ALL;
	unsigned dirty;
	int ret;

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		/*
		 * If this inode is locked for writeback and we are not doing
		 * writeback-for-data-integrity, move it to b_more_io so that
		 * writeback can proceed with the other inodes on s_io.
		 *
		 * We'll have another go at writing back this inode when we
		 * completed a full scan of b_io.
		 */
		if (!wait) {
			requeue_io(inode);
			return 0;
		}

		/*
		 * It's a data-integrity sync. We must wait.
		 */
		inode_wait_for_writeback(inode);
	}

	BUG_ON(inode->i_state & I_SYNC);

	/* Set I_SYNC, reset I_DIRTY */
	dirty = inode->i_state & I_DIRTY;
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY;

	spin_unlock(&inode_lock);

	ret = do_writepages(mapping, wbc);

	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wait);
		if (ret == 0)
			ret = err;
	}

	if (wait) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&inode_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
		if (!(inode->i_state & I_DIRTY) &&
		    mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages. nfs_writepages()
			 * sometimes bails out without doing anything. Redirty
			 * the inode; move it from b_io onto b_more_io/b_dirty.
			 */
			/*
			 * akpm: if the caller was the kupdate function we put
			 * this inode at the head of b_dirty so it gets first
			 * consideration. Otherwise, move it to the tail, for
			 * the reasons described there. I'm not really sure
			 * how much sense this makes. Presumably I had good
			 * reasons for doing it this way, and I'd rather not
			 * muck with it at present.
			 */
			if (wbc->for_kupdate) {
				/*
				 * For the kupdate function we move the inode
				 * to b_more_io so it will get more writeout as
				 * soon as the queue becomes uncongested.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				if (wbc->nr_to_write <= 0) {
					/*
					 * slice used up: queue for next turn
					 */
					requeue_io(inode);
				} else {
					/*
					 * somehow blocked: retry later
					 */
					redirty_tail(inode);
				}
			} else {
				/*
				 * Otherwise fully redirty the inode so that
				 * other inodes on this superblock will get some
				 * writeout. Otherwise heavy writing to one
				 * file would indefinitely suspend writeout of
				 * all the other files.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				redirty_tail(inode);
			}
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * Someone redirtied the inode while we were writing
			 * back the pages.
			 */
			redirty_tail(inode);
		} else if (atomic_read(&inode->i_count)) {
			/*
			 * The inode is clean, in use
			 */
			list_move(&inode->i_list, &inode_in_use);
		} else {
			/*
			 * The inode is clean, unused
			 */
			list_move(&inode->i_list, &inode_unused);
		}
	}
	inode_sync_complete(inode);
	return ret;
}

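/*
 * Editor's note (flag relationship assumed above, as defined in
 * <linux/fs.h>): I_DIRTY is the union of the three dirty bits,
 *
 *	#define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES)
 *
 * so "dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)" above asks whether the
 * inode itself (not just its pages) was dirty when we cleared I_DIRTY.
 */
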
/*
 * For WB_SYNC_NONE writeback, the caller does not have the sb pinned
 * before calling writeback. So make sure that we do pin it, so it doesn't
 * go away while we are writing inodes from it.
 *
 * Returns 0 if the super was successfully pinned (or pinning wasn't needed),
 * 1 if we failed.
 */
static int pin_sb_for_writeback(struct writeback_control *wbc,
				struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * Caller must already hold the ref for this
	 */
	if (wbc->sync_mode == WB_SYNC_ALL) {
		WARN_ON(!rwsem_is_locked(&sb->s_umount));
		return 0;
	}

	spin_lock(&sb_lock);
	sb->s_count++;
	if (down_read_trylock(&sb->s_umount)) {
		if (sb->s_root) {
			spin_unlock(&sb_lock);
			return 0;
		}
		/*
		 * umounted, drop rwsem again and fall through to failure
		 */
		up_read(&sb->s_umount);
	}
	sb->s_count--;
	spin_unlock(&sb_lock);
	return 1;
}

static void unpin_sb_for_writeback(struct writeback_control *wbc,
				   struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (wbc->sync_mode == WB_SYNC_ALL)
		return;

	up_read(&sb->s_umount);
	put_super(sb);
}

static void writeback_inodes_wb(struct bdi_writeback *wb,
				struct writeback_control *wbc)
{
	struct super_block *sb = wbc->sb;
	const int is_blkdev_sb = sb_is_blkdev_sb(sb);
	const unsigned long start = jiffies;	/* livelock avoidance */

	spin_lock(&inode_lock);

	if (!wbc->for_kupdate || list_empty(&wb->b_io))
		queue_io(wb, wbc->older_than_this);

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = list_entry(wb->b_io.prev,
						 struct inode, i_list);
		long pages_skipped;

		/*
		 * super block given and doesn't match, skip this inode
		 */
		if (sb && sb != inode->i_sb) {
			redirty_tail(inode);
			continue;
		}

		if (!bdi_cap_writeback_dirty(wb->bdi)) {
			redirty_tail(inode);
			if (is_blkdev_sb) {
				/*
				 * Dirty memory-backed blockdev: the ramdisk
				 * driver does this. Skip just this inode.
				 */
				continue;
			}
			/*
			 * Dirty memory-backed inode against a filesystem other
			 * than the kernel-internal bdev filesystem. Skip the
			 * entire superblock.
			 */
			break;
		}

		if (inode->i_state & (I_NEW | I_WILL_FREE)) {
			requeue_io(inode);
			continue;
		}

		if (wbc->nonblocking && bdi_write_congested(wb->bdi)) {
			wbc->encountered_congestion = 1;
			if (!is_blkdev_sb)
				break;		/* Skip a congested fs */
			requeue_io(inode);
			continue;		/* Skip a congested blockdev */
		}

		/*
		 * Was this inode dirtied after sync_sb_inodes was called?
		 * This keeps sync from extra jobs and livelock.
		 */
		if (inode_dirtied_after(inode, start))
			break;

		if (pin_sb_for_writeback(wbc, inode)) {
			requeue_io(inode);
			continue;
		}

		BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
		__iget(inode);
		pages_skipped = wbc->pages_skipped;
		writeback_single_inode(inode, wbc);
		unpin_sb_for_writeback(wbc, inode);
		if (wbc->pages_skipped != pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers. Skip this inode for now.
			 */
			redirty_tail(inode);
		}
		spin_unlock(&inode_lock);
		iput(inode);
		cond_resched();
		spin_lock(&inode_lock);
		if (wbc->nr_to_write <= 0) {
			wbc->more_io = 1;
			break;
		}
		if (!list_empty(&wb->b_more_io))
			wbc->more_io = 1;
	}

	spin_unlock(&inode_lock);
	/* Leave any unwritten inodes on b_io */
}

void writeback_inodes_wbc(struct writeback_control *wbc)
{
	struct backing_dev_info *bdi = wbc->bdi;

	writeback_inodes_wb(&bdi->wb, wbc);
}

/*
 * The maximum number of pages to writeout in a single bdi flush/kupdate
 * operation. We do this so we don't hold I_SYNC against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode. Also, the code reevaluates
 * the dirty limits each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES	1024

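/*
 * Editor's worked example (standalone userspace sketch, not part of
 * this file): the chunked accounting wb_writeback() performs below,
 * with made-up numbers. Each pass gets a budget of MAX_WRITEBACK_PAGES;
 * the shortfall left in nr_to_write tells us how much was written.
 */
#if 0	/* illustrative only; compiles as a standalone program */
#include <stdio.h>

int main(void)
{
	long nr_pages = 3000;	/* total requested, as in args->nr_pages */
	long wrote = 0;
	int passes = 0;

	while (nr_pages > 0) {
		/* pretend each pass writes as much of its budget as it can */
		long done = nr_pages < 1024 ? nr_pages : 1024;

		wrote += done;
		nr_pages -= done;
		passes++;
	}
	printf("%ld pages in %d passes\n", wrote, passes);	/* 3000 in 3 */
	return 0;
}
#endif
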
static inline bool over_bground_thresh(void)
{
	unsigned long background_thresh, dirty_thresh;

	get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);

	return (global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
}

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space. So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval. But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write. So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_args *args)
{
	struct writeback_control wbc = {
		.bdi			= wb->bdi,
		.sb			= args->sb,
		.sync_mode		= args->sync_mode,
		.older_than_this	= NULL,
		.for_kupdate		= args->for_kupdate,
		.range_cyclic		= args->range_cyclic,
	};
	unsigned long oldest_jif;
	long wrote = 0;

	if (wbc.for_kupdate) {
		wbc.older_than_this = &oldest_jif;
		oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
	}
	if (!wbc.range_cyclic) {
		wbc.range_start = 0;
		wbc.range_end = LLONG_MAX;
	}

	for (;;) {
		/*
		 * Don't flush anything for non-integrity writeback where
		 * no nr_pages was given.
		 */
		if (!args->for_kupdate && args->nr_pages <= 0 &&
		    args->sync_mode == WB_SYNC_NONE)
			break;

		/*
		 * If no specific pages were given and this is just a
		 * periodic background writeout and we are below the
		 * background dirty threshold, don't do anything.
		 */
		if (args->for_kupdate && args->nr_pages <= 0 &&
		    !over_bground_thresh())
			break;

		wbc.more_io = 0;
		wbc.encountered_congestion = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		wbc.pages_skipped = 0;
		writeback_inodes_wb(wb, &wbc);
		args->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
		wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;

		/*
		 * If we ran out of stuff to write, bail unless more_io got set.
		 */
		if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
			if (wbc.more_io && !wbc.for_kupdate)
				continue;
			break;
		}
	}

	return wrote;
}

/*
 * Return the next bdi_work struct that hasn't been processed by this
 * wb thread yet. ->seen is initially set for each thread that exists
 * for this device; when a thread first notices a piece of work it
 * clears its bit. Depending on writeback type, the thread will notify
 * completion either on receiving the work (WB_SYNC_NONE) or after
 * it is done (WB_SYNC_ALL).
 */
static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi,
					   struct bdi_writeback *wb)
{
	struct bdi_work *work, *ret = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(work, &bdi->work_list, list) {
		if (!test_bit(wb->nr, &work->seen))
			continue;
		clear_bit(wb->nr, &work->seen);

		ret = work;
		break;
	}

	rcu_read_unlock();
	return ret;
}

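/*
 * Editor's sketch (not from this file): how ->seen hands one work item
 * to each thread exactly once. With two threads, wb_mask is 0x3; each
 * thread owns one bit and clears it the first time it sees the work:
 *
 *	work->seen = bdi->wb_mask;		// e.g. 0x3 at queue time
 *	...
 *	if (!test_bit(wb->nr, &work->seen))	// we already took it
 *		continue;
 *	clear_bit(wb->nr, &work->seen);		// claim our share
 *
 * Separately, ->pending counts the threads that still have to finish
 * the work; each drops it via wb_clear_pending() when it notifies
 * completion, and the last one retires the item.
 */
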
static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = global_page_state(NR_FILE_DIRTY) +
			global_page_state(NR_UNSTABLE_NFS) +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);

	if (nr_pages) {
		struct wb_writeback_args args = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
		};

		return wb_writeback(wb, &args);
	}

	return 0;
}

/*
 * Retrieve work items and do the writeback they describe
 */
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct bdi_work *work;
	long wrote = 0;

	while ((work = get_next_work_item(bdi, wb)) != NULL) {
		struct wb_writeback_args args = work->args;

		/*
		 * Override sync mode, in case we must wait for completion.
		 */
		if (force_wait)
			work->args.sync_mode = args.sync_mode = WB_SYNC_ALL;

		/*
		 * If this isn't a data integrity operation, just notify
		 * that we have seen this work and we are now starting it.
		 */
		if (args.sync_mode == WB_SYNC_NONE)
			wb_clear_pending(wb, work);

		wrote += wb_writeback(wb, &args);

		/*
		 * This is a data integrity writeback, so only do the
		 * notification when we have completed the work.
		 */
		if (args.sync_mode == WB_SYNC_ALL)
			wb_clear_pending(wb, work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);

	return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * wakes up periodically and does kupdated style flushing.
 */
int bdi_writeback_task(struct bdi_writeback *wb)
{
	unsigned long last_active = jiffies;
	unsigned long wait_jiffies = -1UL;
	long pages_written;

	while (!kthread_should_stop()) {
		pages_written = wb_do_writeback(wb, 0);

		if (pages_written)
			last_active = jiffies;
		else if (wait_jiffies != -1UL) {
			unsigned long max_idle;

			/*
			 * Longest period of inactivity that we tolerate. If we
			 * see dirty data again later, the task will get
			 * recreated automatically.
			 */
			max_idle = max(5UL * 60 * HZ, wait_jiffies);
			if (time_after(jiffies, max_idle + last_active))
				break;
		}

		wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
		schedule_timeout_interruptible(wait_jiffies);
		try_to_freeze();
	}

	return 0;
}

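/*
 * Editor's worked example (assumed defaults, not from this file): with
 * dirty_writeback_interval at its usual 500 centisecs, wait_jiffies is
 * msecs_to_jiffies(5000), i.e. five seconds' worth of ticks, so
 *
 *	max_idle = max(5UL * 60 * HZ, wait_jiffies) = 5 minutes
 *
 * and a flusher thread that writes nothing for five minutes exits; it
 * is recreated on demand when dirty data shows up again.
 */
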
/*
 * Schedule writeback for all backing devices. This does WB_SYNC_NONE
 * writeback; for integrity writeback see bdi_sync_writeback().
 */
static void bdi_writeback_all(struct super_block *sb, long nr_pages)
{
	struct wb_writeback_args args = {
		.sb		= sb,
		.nr_pages	= nr_pages,
		.sync_mode	= WB_SYNC_NONE,
	};
	struct backing_dev_info *bdi;

	rcu_read_lock();

	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;

		bdi_alloc_queue_work(bdi, &args);
	}

	rcu_read_unlock();
}

/*
 * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages)
{
	if (nr_pages == 0)
		nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	bdi_writeback_all(NULL, nr_pages);
}

static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}

/**
 * __mark_inode_dirty - internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *
 * Mark an inode as dirty. Callers should use mark_inode_dirty or
 * mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * This function *must* be atomic for the I_DIRTY_PAGES case -
 * set_page_dirty() is called under spinlock in several places.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself. And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages. This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list. Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (hlist_unhashed(&inode->i_hash))
				goto out;
		}
		if (inode->i_state & (I_FREEING|I_CLEAR))
			goto out;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
			struct backing_dev_info *bdi = wb->bdi;

			if (bdi_cap_writeback_dirty(bdi) &&
			    !test_bit(BDI_registered, &bdi->state)) {
				WARN_ON(1);
				printk(KERN_ERR "bdi-%s not registered\n",
								bdi->name);
			}

			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &wb->b_dirty);
		}
	}
out:
	spin_unlock(&inode_lock);
}
EXPORT_SYMBOL(__mark_inode_dirty);

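/*
 * Editor's note: the mark_inode_dirty()/mark_inode_dirty_sync() callers
 * referred to above are thin wrappers; a sketch matching their
 * definitions in <linux/fs.h>:
 *
 *	static inline void mark_inode_dirty(struct inode *inode)
 *	{
 *		__mark_inode_dirty(inode, I_DIRTY);
 *	}
 *
 *	static inline void mark_inode_dirty_sync(struct inode *inode)
 *	{
 *		__mark_inode_dirty(inode, I_DIRTY_SYNC);
 *	}
 */
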
/*
 * Write out a superblock's list of dirty inodes. A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If we're a pdflush thread, then implement pdflush collision avoidance
 * against the entire list.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched. For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * The inodes to be written are parked on bdi->b_io. They are moved back onto
 * bdi->b_dirty as they are selected for writing. This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
 */
static void wait_sb_inodes(struct super_block *sb)
{
	struct inode *inode, *old_inode = NULL;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_lock);

	/*
	 * Data integrity sync. Must wait for all pages under writeback,
	 * because there may have been pages dirtied before our sync
	 * call, but which had writeout started before we write it out.
	 * In which case, the inode may not be on the dirty list, but
	 * we still have to wait for that writeout.
	 */
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct address_space *mapping;

		if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
			continue;
		mapping = inode->i_mapping;
		if (mapping->nrpages == 0)
			continue;
		__iget(inode);
		spin_unlock(&inode_lock);
		/*
		 * We hold a reference to 'inode' so it couldn't have
		 * been removed from s_inodes list while we dropped the
		 * inode_lock. We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it
		 * under inode_lock. So we keep the reference and iput
		 * it later.
		 */
		iput(old_inode);
		old_inode = inode;

		filemap_fdatawait(mapping);

		cond_resched();

		spin_lock(&inode_lock);
	}
	spin_unlock(&inode_lock);
	iput(old_inode);
}

/**
 * writeback_inodes_sb - writeback dirty inodes from given super_block
 * @sb: the superblock
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb)
{
	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
	long nr_to_write;

	nr_to_write = nr_dirty + nr_unstable +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);

	bdi_writeback_all(sb, nr_to_write);
}
EXPORT_SYMBOL(writeback_inodes_sb);

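/*
 * Editor's sketch (hypothetical caller, not from this file): a
 * filesystem running low on space might opportunistically flush its
 * dirty inodes, first checking that writeback isn't already pending:
 */
static inline void example_try_flush_sb(struct super_block *sb)
{
	if (!writeback_in_progress(sb->s_bdi))
		writeback_inodes_sb(sb);	/* fire and forget */
}
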
/**
 * sync_inodes_sb - sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */
void sync_inodes_sb(struct super_block *sb)
{
	bdi_sync_writeback(sb->s_bdi, sb);
	wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);

/**
 * write_inode_now - write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	int ret;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, &wbc);
	spin_unlock(&inode_lock);
	if (sync)
		inode_sync_wait(inode);
	return ret;
}
EXPORT_SYMBOL(write_inode_now);

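/*
 * Editor's sketch (hypothetical caller, not from this file): flushing a
 * single inode synchronously, in the style of the knfsd use mentioned
 * above:
 */
static inline int example_flush_inode(struct inode *inode)
{
	return write_inode_now(inode, 1);	/* 1 => wait on the writeout */
}
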
/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk. It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, wbc);
	spin_unlock(&inode_lock);
	return ret;
}
EXPORT_SYMBOL(sync_inode);