/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include <linux/aio.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
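/*
 * Walk the buffers on a page and report whether any of them are in the
 * delalloc or unwritten state.  Used by xfs_vm_releasepage() below to
 * decide whether the page can safely be released.
 */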
void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}
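/*
 * Return the block device backing this inode's data: the realtime device
 * for realtime inodes, otherwise the main data device.
 */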
STATIC struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}
/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t		*ioend)
{
	struct buffer_head	*bh, *next;

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, !ioend->io_error);
	}

	mempool_free(ioend, xfs_ioend_pool);
}
/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}
STATIC int
xfs_setfilesize_trans_alloc(
	struct xfs_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);

	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	ioend->io_append_trans = tp;

	/*
	 * We may pass freeze protection with a transaction.  So tell lockdep
	 * we released it.
	 */
	rwsem_release(&ioend->io_inode->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
		      1, _THIS_IP_);
	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
	return 0;
}
/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
xfs_setfilesize(
	struct xfs_ioend	*ioend)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_append_trans;
	xfs_fsize_t		isize;

	/*
	 * The transaction may have been allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction manually.
	 * Similarly for freeze protection.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
	rwsem_acquire_read(&VFS_I(ip)->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
			   0, 1, _THIS_IP_);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, ioend->io_offset + ioend->io_size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp, 0);
		return 0;
	}

	trace_xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp, 0);
}
/*
 * Schedule IO completion handling on the final put of an ioend.
 *
 * If there is no work to do we might as well call it a day and free the
 * ioend right now.
 */
STATIC void
xfs_finish_ioend(
	struct xfs_ioend	*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;

		if (ioend->io_type == XFS_IO_UNWRITTEN)
			queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
		else if (ioend->io_append_trans ||
			 (ioend->io_isdirect && xfs_ioend_is_append(ioend)))
			queue_work(mp->m_data_workqueue, &ioend->io_work);
		else
			xfs_destroy_ioend(ioend);
	}
}
/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	xfs_ioend_t		*ioend = container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	int			error = 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		ioend->io_error = -EIO;
		goto done;
	}
	if (ioend->io_error)
		goto done;

	/*
	 * For unwritten extents we need to issue transactions to convert a
	 * range to normal written extents after the data I/O has finished.
	 */
	if (ioend->io_type == XFS_IO_UNWRITTEN) {
		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
						  ioend->io_size);
	} else if (ioend->io_isdirect && xfs_ioend_is_append(ioend)) {
		/*
		 * For direct I/O we do not know if we need to allocate blocks
		 * or not so we can't preallocate an append transaction as that
		 * results in nested reservations and log space deadlocks. Hence
		 * allocate the transaction here. While this is sub-optimal and
		 * can block IO completion for some time, we're stuck with doing
		 * it this way until we can pass the ioend to the direct IO
		 * allocation callbacks and avoid nesting that way.
		 */
		error = xfs_setfilesize_trans_alloc(ioend);
		if (error)
			goto done;
		error = xfs_setfilesize(ioend);
	} else if (ioend->io_append_trans) {
		error = xfs_setfilesize(ioend);
	} else {
		ASSERT(!xfs_ioend_is_append(ioend));
	}

done:
	if (error)
		ioend->io_error = error;
	xfs_destroy_ioend(ioend);
}
/*
 * Call IO completion handling in caller context on the final put of an ioend.
 */
STATIC void
xfs_finish_ioend_sync(
	struct xfs_ioend	*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining))
		xfs_end_io(&ioend->io_work);
}
/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type)
{
	xfs_ioend_t		*ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially, which will prevent an I/O completion
	 * callback from running before all the I/O has been started, i.e.
	 * from calling the completion routine too early.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_isdirect = 0;
	ioend->io_error = 0;
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	ioend->io_offset = 0;
	ioend->io_size = 0;
	ioend->io_append_trans = NULL;

	INIT_WORK(&ioend->io_work, xfs_end_io);
	return ioend;
}
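/*
 * Look up the extent map covering the block at @offset for the given
 * writeback @type.  For delalloc regions this also performs the actual
 * block allocation via xfs_iomap_write_allocate().
 */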
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	struct xfs_bmbt_irec	*imap,
	int			type,
	int			nonblocking)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = 1 << inode->i_blkbits;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;
	int			nimaps = 1;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (type == XFS_IO_UNWRITTEN)
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
		if (nonblocking)
			return -EAGAIN;
		xfs_ilock(ip, XFS_ILOCK_SHARED);
	}

	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_super->s_maxbytes);

	if (offset + count > mp->m_super->s_maxbytes)
		count = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				imap, &nimaps, bmapi_flags);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (error)
		return error;

	if (type == XFS_IO_DELALLOC &&
	    (!nimaps || isnullstartblock(imap->br_startblock))) {
		error = xfs_iomap_write_allocate(ip, offset, imap);
		if (!error)
			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
		return error;
	}

#ifdef DEBUG
	if (type == XFS_IO_UNWRITTEN) {
		ASSERT(nimaps);
		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
	}
#endif
	if (nimaps)
		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
	return 0;
}
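/*
 * Return whether the cached extent map @imap still covers the file
 * offset @offset.
 */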
STATIC int
xfs_imap_valid(
	struct inode		*inode,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	offset >>= inode->i_blkbits;

	return offset >= imap->br_startoff &&
		offset < imap->br_startoff + imap->br_blockcount;
}
/*
 * BIO completion handler for buffered IO.
 */
STATIC void
xfs_end_bio(
	struct bio		*bio,
	int			error)
{
	xfs_ioend_t		*ioend = bio->bi_private;

	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

	/* Toss bio and pass work off to an xfsdatad thread */
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);

	xfs_finish_ioend(ioend);
}
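/*
 * Take an extra ioend reference for the bio in flight and submit it,
 * using WRITE_SYNC for data integrity writeback.
 */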
STATIC void
xfs_submit_ioend_bio(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend,
	struct bio		*bio)
{
	atomic_inc(&ioend->io_remaining);
	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;
	submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
}
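/*
 * Allocate a bio sized to the number of vectors the underlying device
 * supports, and point it at the buffer's disk address.
 */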
STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head	*bh)
{
	int			nvecs = bio_get_nr_vecs(bh->b_bdev);
	struct bio		*bio = bio_alloc(GFP_NOIO, nvecs);

	ASSERT(bio->bi_private == NULL);
	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	return bio;
}
STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}
STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty,
	int			buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));

	/*
	 * If the page was not fully cleaned, we need to ensure that the higher
	 * layers come back to it correctly.  That means we need to keep the page
	 * dirty, and for WB_SYNC_ALL writeback we need to ensure the
	 * PAGECACHE_TAG_TOWRITE index mark is not removed so another attempt to
	 * write this page in this writeback sweep will be made.
	 */
	if (clear_dirty) {
		clear_page_dirty_for_io(page);
		set_page_writeback(page);
	} else
		set_page_writeback_keepwrite(page);

	unlock_page(page);

	/* If no buffers on the page are to be written, finish it here */
	if (!buffers)
		end_page_writeback(page);
}
static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}
/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O.  If we mark the
 * buffers as we go, we can end up with a page that only has some buffers
 * marked async write, and I/O completion can occur before we mark the
 * remaining buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 *
 * If @fail is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we have marked pages for writeback
 * and unlocked them.  In this situation, we need to fail the ioend chain rather
 * than submit it to IO.  This typically only happens on a filesystem shutdown.
 */
STATIC void
xfs_submit_ioend(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend,
	int			fail)
{
	xfs_ioend_t		*head = ioend;
	xfs_ioend_t		*next;
	struct buffer_head	*bh;
	struct bio		*bio;
	sector_t		lastblock = 0;

	/* Pass 1 - start writeback */
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private)
			xfs_start_buffer_writeback(bh);
	} while ((ioend = next) != NULL);

	/* Pass 2 - submit I/O */
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

		/*
		 * If we are failing the IO now, just mark the ioend with an
		 * error and finish it.  This will run IO completion immediately
		 * as there is only one reference to the ioend at this point in
		 * time.
		 */
		if (fail) {
			ioend->io_error = fail;
			xfs_finish_ioend(ioend);
			continue;
		}

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			if (xfs_bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(wbc, ioend, bio);
		xfs_finish_ioend(ioend);
	} while ((ioend = next) != NULL);
}
/*
 * Cancel submission of all buffer_heads so far in this endio.
 * Toss the endio too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			/*
			 * The unwritten flag is cleared when added to the
			 * ioend.  We're not submitting for I/O so mark the
			 * buffer unwritten again for next time around.
			 */
			if (ioend->io_type == XFS_IO_UNWRITTEN)
				set_buffer_unwritten(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}
/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * Return true if we've finished the given ioend.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	unsigned int		type,
	xfs_ioend_t		**result,
	int			need_ioend)
{
	xfs_ioend_t		*ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t	*previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}
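/*
 * Translate the file offset of a buffer into an on-disk block number
 * using the extent map, and point the buffer_head at it.
 */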
STATIC void
xfs_map_buffer(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	sector_t		bn;
	struct xfs_mount	*m = XFS_I(inode)->i_mount;
	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
	xfs_daddr_t		iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
	      ((offset - iomap_offset) >> inode->i_blkbits);

	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}
STATIC void
xfs_map_at_offset(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	xfs_map_buffer(inode, bh, imap, offset);
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}
/*
 * Test if a given page contains at least one buffer of a given @type.
 * If @check_all_buffers is true, then we walk all the buffers in the page to
 * try to find one of the type passed in.  If it is not set, then the caller only
 * needs to check the first buffer on the page for a match.
 */
STATIC bool
xfs_check_page_type(
	struct page		*page,
	unsigned int		type,
	bool			check_all_buffers)
{
	struct buffer_head	*bh;
	struct buffer_head	*head;

	if (PageWriteback(page))
		return false;
	if (!page->mapping)
		return false;
	if (!page_has_buffers(page))
		return false;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh)) {
			if (type == XFS_IO_UNWRITTEN)
				return true;
		} else if (buffer_delay(bh)) {
			if (type == XFS_IO_DELALLOC)
				return true;
		} else if (buffer_dirty(bh) && buffer_mapped(bh)) {
			if (type == XFS_IO_OVERWRITE)
				return true;
		}

		/* If we are only checking the first buffer, we are done now. */
		if (!check_all_buffers)
			break;
	} while ((bh = bh->b_this_page) != head);

	return false;
}
/*
 * Allocate & map buffers for page given the extent map.  Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	pgoff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc)
{
	struct buffer_head	*bh, *head;
	xfs_off_t		end_offset;
	unsigned long		p_offset;
	unsigned int		type;
	int			len, page_dirty;
	int			count = 0, done = 0, uptodate = 1;
	xfs_off_t		offset = page_offset(page);

	if (page->index != tindex)
		goto fail;
	if (!trylock_page(page))
		goto fail;
	if (PageWriteback(page))
		goto fail_unlock_page;
	if (page->mapping != inode->i_mapping)
		goto fail_unlock_page;
	if (!xfs_check_page_type(page, (*ioendp)->io_type, false))
		goto fail_unlock_page;

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count.  On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			i_size_read(inode));

	/*
	 * If the current map does not span the entire page we are about to try
	 * to write, then give up.  The only way we can write a page that spans
	 * multiple mappings in a single writeback iteration is via the
	 * xfs_vm_writepage() function.  Data integrity writeback requires the
	 * entire page to be written in a single attempt, otherwise the part of
	 * the page we don't write here doesn't get written as part of the data
	 * integrity sync.
	 *
	 * For normal writeback, we also don't attempt to write partial pages
	 * here as it simply means that write_cache_pages() will see it under
	 * writeback and ignore the page until some point in the future, at
	 * which time this will be the only page in the file that needs
	 * writeback.  Hence for more optimal IO patterns, we should always
	 * avoid partial page writeback due to multiple mappings on a page here.
	 */
	if (!xfs_imap_valid(inode, imap, end_offset))
		goto fail_unlock_page;

	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	/*
	 * The moment we find a buffer that doesn't match our current type
	 * specification or can't be written, abort the loop and start
	 * writeback.  As per the above xfs_imap_valid() check, only
	 * xfs_vm_writepage() can handle partial page writeback fully - we are
	 * limited here to the buffers that are contiguous with the current
	 * ioend, and hence a buffer we can't write breaks that contiguity and
	 * we have to defer the rest of the IO to xfs_vm_writepage().
	 */
	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			break;
		}

		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    buffer_mapped(bh)) {
			if (buffer_unwritten(bh))
				type = XFS_IO_UNWRITTEN;
			else if (buffer_delay(bh))
				type = XFS_IO_DELALLOC;
			else
				type = XFS_IO_OVERWRITE;

			/*
			 * imap should always be valid because of the above
			 * partial page end_offset check on the imap.
			 */
			ASSERT(xfs_imap_valid(inode, imap, offset));

			lock_buffer(bh);
			if (type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type,
					 ioendp, done);

			page_dirty--;
			count++;
		} else {
			done = 1;
			break;
		}
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (count) {
		if (--wbc->nr_to_write <= 0 &&
		    wbc->sync_mode == WB_SYNC_NONE)
			done = 1;
	}
	xfs_start_page_writeback(page, !page_dirty, count);

	return done;
 fail_unlock_page:
	unlock_page(page);
 fail:
	return 1;
}
/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by imap and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	pgoff_t			tlast)
{
	struct pagevec		pvec;
	int			done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
					imap, ioendp, wbc);
			if (done)
				break;
		}

		pagevec_release(&pvec);
		cond_resched();
	}
}
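/*
 * Invalidate any buffer state on the page for the given byte range.
 */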
STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned int		offset,
	unsigned int		length)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset,
				 length);
	block_invalidatepage(page, offset, length);
}
/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page.  If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it.  Because they are delalloc, we can do this without needing a
 * transaction.  Indeed - if we get ENOSPC errors, we have to be able to do this
 * truncation without a transaction as there is no space left for block
 * reservation (typically why we see an ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct buffer_head	*bh, *head;
	loff_t			offset = page_offset(page);

	if (!xfs_check_page_type(page, XFS_IO_DELALLOC, true))
		goto out_invalidate;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_invalidate;

	xfs_alert(ip->i_mount,
		"page discard on page %p, inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
	do {
		int		error;
		xfs_fileoff_t	start_fsb;

		if (!buffer_delay(bh))
			goto next_buffer;

		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"page discard unable to remove delalloc mapping.");
			}
			break;
		}
next_buffer:
		offset += 1 << inode->i_blkbits;

	} while ((bh = bh->b_this_page) != head);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
	xfs_vm_invalidatepage(page, 0, PAGE_CACHE_SIZE);
	return;
}
/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
 */
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct inode		*inode = page->mapping->host;
	struct buffer_head	*bh, *head;
	struct xfs_bmbt_irec	imap;
	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
	loff_t			offset;
	unsigned int		type;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index;
	ssize_t			len;
	int			err, imap_valid = 0, uptodate = 1;
	int			count = 0;
	int			nonblocking = 0;

	trace_xfs_writepage(inode, page, 0, 0);

	ASSERT(page_has_buffers(page));

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON_ONCE(current->flags & PF_FSTRANS))
		goto redirty;

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;

	/*
	 * The page index is less than the end_index, adjust the end_offset
	 * to the highest offset that this page should represent.
	 * -----------------------------------------------------
	 * |			file mapping	       | <EOF> |
	 * -----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
	 * ^--------------------------------^----------|--------
	 * |     desired writeback range    |      see else    |
	 * ---------------------------------^------------------|
	 */
	if (page->index < end_index)
		end_offset = (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT;
	else {
		/*
		 * Check whether the page to write out is beyond or straddles
		 * i_size or not.
		 * -------------------------------------------------------
		 * |		file mapping		        | <EOF>  |
		 * -------------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
		 * ^--------------------------------^-----------|---------
		 * |				    |      Straddles     |
		 * ---------------------------------^-----------|--------|
		 */
		unsigned offset_into_page = offset & (PAGE_CACHE_SIZE - 1);

		/*
		 * Skip the page if it is fully outside i_size, e.g. due to a
		 * truncate operation that is in progress.  We must redirty the
		 * page so that reclaim stops reclaiming it.  Otherwise
		 * xfs_vm_releasepage() is called on it and gets confused.
		 *
		 * Note that end_index is unsigned long: it would overflow
		 * if the given offset is greater than 16TB on a 32-bit system
		 * and if we do check whether the page is fully outside i_size
		 * via "if (page->index >= end_index + 1)", as "end_index + 1"
		 * would be evaluated to 0.  Hence this page would be redirtied
		 * and written out repeatedly, which would result in an
		 * infinite loop; the user program performing this operation
		 * would hang.  Instead, we can verify this situation by checking
		 * if the page to write is totally beyond the i_size or if its
		 * offset is just equal to the EOF.
		 */
		if (page->index > end_index ||
		    (page->index == end_index && offset_into_page == 0))
			goto redirty;

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		zero_user_segment(page, offset_into_page, PAGE_CACHE_SIZE);

		/* Adjust the end_offset to the end of file */
		end_offset = offset;
	}

	len = 1 << inode->i_blkbits;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	type = XFS_IO_OVERWRITE;

	if (wbc->sync_mode == WB_SYNC_NONE)
		nonblocking = 1;

	do {
		int new_ioend = 0;

		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;

		/*
		 * set_page_dirty dirties all buffers in a page, independent
		 * of their state.  The dirty state however is entirely
		 * meaningless for holes (!mapped && uptodate), so skip
		 * buffers covering holes here.
		 */
		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
			imap_valid = 0;
			continue;
		}

		if (buffer_unwritten(bh)) {
			if (type != XFS_IO_UNWRITTEN) {
				type = XFS_IO_UNWRITTEN;
				imap_valid = 0;
			}
		} else if (buffer_delay(bh)) {
			if (type != XFS_IO_DELALLOC) {
				type = XFS_IO_DELALLOC;
				imap_valid = 0;
			}
		} else if (buffer_uptodate(bh)) {
			if (type != XFS_IO_OVERWRITE) {
				type = XFS_IO_OVERWRITE;
				imap_valid = 0;
			}
		} else {
			if (PageUptodate(page))
				ASSERT(buffer_mapped(bh));
			/*
			 * This buffer is not uptodate and will not be
			 * written to disk.  Ensure that we will put any
			 * subsequent writeable buffers into a new
			 * ioend.
			 */
			imap_valid = 0;
			continue;
		}

		if (imap_valid)
			imap_valid = xfs_imap_valid(inode, &imap, offset);
		if (!imap_valid) {
			/*
			 * If we didn't have a valid mapping then we need to
			 * put the new mapping into a separate ioend structure.
			 * This ensures non-contiguous extents always have
			 * separate ioends, which is particularly important
			 * for unwritten extent conversion at I/O completion
			 * time.
			 */
			new_ioend = 1;
			err = xfs_map_blocks(inode, offset, &imap, type,
					     nonblocking);
			if (err)
				goto error;
			imap_valid = xfs_imap_valid(inode, &imap, offset);
		}
		if (imap_valid) {
			lock_buffer(bh);
			if (type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, &imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type, &ioend,
					 new_ioend);
			count++;
		}

		if (!iohead)
			iohead = ioend;

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	xfs_start_page_writeback(page, 1, count);

	/* if there is no IO to be submitted for this page, we are done */
	if (!ioend)
		return 0;

	ASSERT(iohead);

	/*
	 * Any errors from this point onwards need to be reported through the IO
	 * completion path as we have marked the initial page as under writeback
	 * and unlocked it.
	 */
	if (imap_valid) {
		xfs_off_t		end_index;

		end_index = imap.br_startoff + imap.br_blockcount;

		/* to bytes */
		end_index <<= inode->i_blkbits;

		/* to pages */
		end_index = (end_index - 1) >> PAGE_CACHE_SHIFT;

		/* check against file size */
		if (end_index > last_index)
			end_index = last_index;

		xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
				  wbc, end_index);
	}

	/*
	 * Reserve log space if we might write beyond the on-disk inode size.
	 */
	err = 0;
	if (ioend->io_type != XFS_IO_UNWRITTEN && xfs_ioend_is_append(ioend))
		err = xfs_setfilesize_trans_alloc(ioend);

	xfs_submit_ioend(wbc, iohead, err);

	return 0;

error:
	if (iohead)
		xfs_cancel_ioend(iohead);

	if (err == -EAGAIN)
		goto redirty;

	xfs_aops_discard_page(page);
	ClearPageUptodate(page);
	unlock_page(page);
	return err;

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}
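/*
 * The .writepages address space operation: clear the XFS_ITRUNCATED flag
 * and defer to generic_writepages(), which calls xfs_vm_writepage() for
 * each dirty page.
 */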
STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return generic_writepages(mapping, wbc);
}
/*
 * Called to move a page into cleanable state - and from there
 * to be released.  The page should already be clean.  We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	int			delalloc, unwritten;

	trace_xfs_releasepage(page->mapping->host, page, 0, 0);

	xfs_count_page_state(page, &delalloc, &unwritten);

	if (WARN_ON_ONCE(delalloc))
		return 0;
	if (WARN_ON_ONCE(unwritten))
		return 0;

	return try_to_free_buffers(page);
}
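/*
 * Common get_blocks implementation for buffered and direct I/O.  Map the
 * requested range, allocating blocks for a write if necessary, and fill
 * in the buffer_head state accordingly.  @direct selects the locking and
 * allocation behaviour appropriate for direct I/O.
 */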
STATIC int
__xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create,
	int			direct)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			lockmode = 0;
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;
	xfs_off_t		offset;
	ssize_t			size;
	int			new = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
	size = bh_result->b_size;

	if (!create && direct && offset >= i_size_read(inode))
		return 0;

	/*
	 * Direct I/O is usually done on preallocated files, so try getting
	 * a block mapping without an exclusive lock first.  For buffered
	 * writes we already have the exclusive iolock anyway, so avoiding
	 * a lock roundtrip here by taking the ilock exclusive from the
	 * beginning is a useful micro optimization.
	 */
	if (create && !direct) {
		lockmode = XFS_ILOCK_EXCL;
		xfs_ilock(ip, lockmode);
	} else {
		lockmode = xfs_ilock_data_map_shared(ip);
	}

	ASSERT(offset <= mp->m_super->s_maxbytes);
	if (offset + size > mp->m_super->s_maxbytes)
		size = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				&imap, &nimaps, XFS_BMAPI_ENTIRE);
	if (error)
		goto out_unlock;

	if (create &&
	    (!nimaps ||
	     (imap.br_startblock == HOLESTARTBLOCK ||
	      imap.br_startblock == DELAYSTARTBLOCK))) {
		if (direct || xfs_get_extsz_hint(ip)) {
			/*
			 * Drop the ilock in preparation for starting the block
			 * allocation transaction.  It will be retaken
			 * exclusively inside xfs_iomap_write_direct for the
			 * actual allocation.
			 */
			xfs_iunlock(ip, lockmode);
			error = xfs_iomap_write_direct(ip, offset, size,
						       &imap, nimaps);
			if (error)
				return error;
			new = 1;
		} else {
			/*
			 * Delalloc reservations do not require a transaction,
			 * we can go on without dropping the lock here.  If we
			 * are allocating a new delalloc block, make sure that
			 * we set the new flag so that we mark the buffer new so
			 * that we know that it is newly allocated if the write
			 * fails.
			 */
			if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
				new = 1;
			error = xfs_iomap_write_delay(ip, offset, size, &imap);
			if (error)
				goto out_unlock;

			xfs_iunlock(ip, lockmode);
		}
		trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap);
	} else if (nimaps) {
		trace_xfs_get_blocks_found(ip, offset, size, 0, &imap);
		xfs_iunlock(ip, lockmode);
	} else {
		trace_xfs_get_blocks_notfound(ip, offset, size);
		goto out_unlock;
	}

	if (imap.br_startblock != HOLESTARTBLOCK &&
	    imap.br_startblock != DELAYSTARTBLOCK) {
		/*
		 * For unwritten extents do not report a disk address on
		 * the read case (treat as if we're reading into a hole).
		 */
		if (create || !ISUNWRITTEN(&imap))
			xfs_map_buffer(inode, bh_result, &imap, offset);
		if (create && ISUNWRITTEN(&imap)) {
			if (direct) {
				bh_result->b_private = inode;
				set_buffer_defer_completion(bh_result);
			}
			set_buffer_unwritten(bh_result);
		}
	}

	/*
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to from the buffer_head b_bdev currently.
	 */
	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);

	/*
	 * If we previously allocated a block out beyond eof and we are now
	 * coming back to use it then we will need to flag it as new even if it
	 * has a disk address.
	 *
	 * With sub-block writes into unwritten extents we also need to mark
	 * the buffer as new so that the unwritten parts of the buffer get
	 * correctly zeroed.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) ||
	     (new || ISUNWRITTEN(&imap))))
		set_buffer_new(bh_result);

	if (imap.br_startblock == DELAYSTARTBLOCK) {
		BUG_ON(direct);
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	/*
	 * If this is O_DIRECT or the mpage code calling, tell them how large
	 * the mapping is, so that we can avoid repeated get_blocks calls.
	 *
	 * If the mapping spans EOF, then we have to break the mapping up as the
	 * mapping for blocks beyond EOF must be marked new so that sub block
	 * regions can be correctly zeroed.  We can't do this for mappings within
	 * EOF unless the mapping was just allocated or is unwritten, otherwise
	 * the callers would overwrite existing data with zeros.  Hence we have
	 * to split the mapping into a range up to and including EOF, and a
	 * second mapping for beyond EOF.
	 */
	if (direct || size > (1 << inode->i_blkbits)) {
		xfs_off_t	mapping_size;

		mapping_size = imap.br_startoff + imap.br_blockcount - iblock;
		mapping_size <<= inode->i_blkbits;

		ASSERT(mapping_size > 0);
		if (mapping_size > size)
			mapping_size = size;
		if (offset < i_size_read(inode) &&
		    offset + mapping_size >= i_size_read(inode)) {
			/* limit mapping to block that spans EOF */
			mapping_size = roundup_64(i_size_read(inode) - offset,
						  1 << inode->i_blkbits);
		}
		if (mapping_size > LONG_MAX)
			mapping_size = LONG_MAX;
		bh_result->b_size = mapping_size;
	}

	return 0;

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}
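/*
 * get_blocks wrappers for buffered and direct I/O respectively.
 */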
int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, 0);
}
STATIC int
xfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, 1);
}
/*
 * Complete a direct I/O write request.
 *
 * If the private argument is non-NULL __xfs_get_blocks signals us that we
 * need to issue a transaction to convert the range from unwritten to written
 * extents.  In case this is regular synchronous I/O we just call xfs_end_io
 * to do this and we are done.  But in case this was a successful AIO
 * request this handler is called from interrupt context, from which we
 * can't start transactions.  In that case offload the I/O completion to
 * the workqueues we also use for buffered I/O completion.
 */
STATIC void
xfs_end_io_direct_write(
	struct kiocb		*iocb,
	loff_t			offset,
	ssize_t			size,
	void			*private)
{
	struct xfs_ioend	*ioend = iocb->private;

	/*
	 * While the generic direct I/O code updates the inode size, it does
	 * so only after the end_io handler is called, which means our
	 * end_io handler thinks the on-disk size is outside the in-core
	 * size.  To prevent this just update it a little bit earlier here.
	 */
	if (offset + size > i_size_read(ioend->io_inode))
		i_size_write(ioend->io_inode, offset + size);

	/*
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called.  Thus we need to protect
	 * against double-freeing.
	 */
	iocb->private = NULL;

	ioend->io_offset = offset;
	ioend->io_size = size;
	if (private && size > 0)
		ioend->io_type = XFS_IO_UNWRITTEN;

	xfs_finish_ioend_sync(ioend);
}
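/*
 * The direct_IO address space operation.  For writes, set up an ioend so
 * that completion can update the on-disk inode size and convert unwritten
 * extents; reads need no completion handling.
 */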
STATIC ssize_t
xfs_vm_direct_IO(
	int			rw,
	struct kiocb		*iocb,
	struct iov_iter		*iter,
	loff_t			offset)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct block_device	*bdev = xfs_find_bdev_for_inode(inode);
	struct xfs_ioend	*ioend = NULL;
	ssize_t			ret;

	if (rw & WRITE) {
		size_t size = iov_iter_count(iter);

		/*
		 * We cannot preallocate a size update transaction here as we
		 * don't know whether allocation is necessary or not.  Hence we
		 * can only tell IO completion that one is necessary if we are
		 * not doing unwritten extent conversion.
		 */
		iocb->private = ioend = xfs_alloc_ioend(inode, XFS_IO_DIRECT);
		if (offset + size > XFS_I(inode)->i_d.di_size)
			ioend->io_isdirect = 1;

		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iter,
					   offset, xfs_get_blocks_direct,
					   xfs_end_io_direct_write, NULL,
					   DIO_ASYNC_EXTEND);
		if (ret != -EIOCBQUEUED && iocb->private)
			goto out_destroy_ioend;
	} else {
		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iter,
					   offset, xfs_get_blocks_direct,
					   NULL, NULL, 0);
	}

	return ret;

out_destroy_ioend:
	xfs_destroy_ioend(ioend);
	return ret;
}
/*
 * Punch out the delalloc blocks we have already allocated.
 *
 * Don't bother with xfs_setattr given that nothing can have made it to disk yet
 * as the page is still locked at this point.
 */
STATIC void
xfs_vm_kill_delalloc_range(
	struct inode		*inode,
	loff_t			start,
	loff_t			end)
{
	struct xfs_inode	*ip = XFS_I(inode);
	xfs_fileoff_t		start_fsb;
	xfs_fileoff_t		end_fsb;
	int			error;

	start_fsb = XFS_B_TO_FSB(ip->i_mount, start);
	end_fsb = XFS_B_TO_FSB(ip->i_mount, end);
	if (end_fsb <= start_fsb)
		return;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
						end_fsb - start_fsb);
	if (error) {
		/* something screwed, just bail */
		if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
			xfs_alert(ip->i_mount,
		"xfs_vm_write_failed: unable to clean up ino %lld",
					ip->i_ino);
		}
	}
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
}
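/*
 * A buffered write to this page failed.  Walk the buffers over the written
 * range and punch out any delalloc blocks backing them, clearing the
 * buffer state so stale data cannot leak out.
 */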
STATIC void
xfs_vm_write_failed(
	struct inode		*inode,
	struct page		*page,
	loff_t			pos,
	unsigned		len)
{
	loff_t			block_offset;
	loff_t			block_start;
	loff_t			block_end;
	loff_t			from = pos & (PAGE_CACHE_SIZE - 1);
	loff_t			to = from + len;
	struct buffer_head	*bh, *head;

	/*
	 * The request pos offset might be 32 or 64 bit, this is all fine
	 * on 64-bit platforms.  However, for a 64-bit pos request on a 32-bit
	 * platform, the high 32 bits will be masked off if we evaluate the
	 * block_offset via (pos & PAGE_MASK) because PAGE_MASK is
	 * 0xfffff000 as an unsigned long, hence the result is incorrect,
	 * which could cause the following ASSERT to fail in most cases.
	 * In order to avoid this, we can evaluate the block_offset of the
	 * start of the page by using shifts rather than masks, avoiding
	 * the mismatch problem.
	 */
	block_offset = (pos >> PAGE_CACHE_SHIFT) << PAGE_CACHE_SHIFT;

	ASSERT(block_offset + from == pos);

	head = page_buffers(page);
	block_start = 0;
	for (bh = head; bh != head || !block_start;
	     bh = bh->b_this_page, block_start = block_end,
				   block_offset += bh->b_size) {
		block_end = block_start + bh->b_size;

		/* skip buffers before the write */
		if (block_end <= from)
			continue;

		/* if the buffer is after the write, we're done */
		if (block_start >= to)
			break;

		if (!buffer_delay(bh))
			continue;

		if (!buffer_new(bh) && block_offset < i_size_read(inode))
			continue;

		xfs_vm_kill_delalloc_range(inode, block_offset,
					   block_offset + bh->b_size);

		/*
		 * This buffer does not contain data anymore.  Make sure anyone
		 * who finds it knows that for certain.
		 */
		clear_buffer_delay(bh);
		clear_buffer_uptodate(bh);
		clear_buffer_mapped(bh);
		clear_buffer_new(bh);
		clear_buffer_dirty(bh);
	}
}
/*
 * This used to call block_write_begin(), but it unlocks and releases the page
 * on error, and we need that page to be able to punch stale delalloc blocks out
 * on failure.  Hence we copy-n-waste it here and call xfs_vm_write_failed() at
 * the appropriate point.
 */
STATIC int
xfs_vm_write_begin(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		flags,
	struct page		**pagep,
	void			**fsdata)
{
	pgoff_t			index = pos >> PAGE_CACHE_SHIFT;
	struct page		*page;
	int			status;

	ASSERT(len <= PAGE_CACHE_SIZE);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	status = __block_write_begin(page, pos, len, xfs_get_blocks);
	if (unlikely(status)) {
		struct inode	*inode = mapping->host;
		size_t		isize = i_size_read(inode);

		xfs_vm_write_failed(inode, page, pos, len);
		unlock_page(page);

		/*
		 * If the write is beyond EOF, we only want to kill blocks
		 * allocated in this write, not blocks that were previously
		 * written successfully.
		 */
		if (pos + len > isize) {
			ssize_t start = max_t(ssize_t, pos, isize);

			truncate_pagecache_range(inode, start, pos + len);
		}

		page_cache_release(page);
		page = NULL;
	}

	*pagep = page;
	return status;
}
/*
 * On failure, we only need to kill delalloc blocks beyond EOF in the range of
 * this specific write because they will never be written.  Previous writes
 * beyond EOF where block allocation succeeded do not need to be trashed, so
 * only new blocks from this write should be trashed.  For blocks within
 * EOF, generic_write_end() zeros them so they are safe to leave alone and be
 * written with all the other valid data.
 */
STATIC int
xfs_vm_write_end(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		copied,
	struct page		*page,
	void			*fsdata)
{
	int			ret;

	ASSERT(len <= PAGE_CACHE_SIZE);

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (unlikely(ret < len)) {
		struct inode	*inode = mapping->host;
		size_t		isize = i_size_read(inode);
		loff_t		to = pos + len;

		if (to > isize) {
			/* only kill blocks in this write beyond EOF */
			if (pos > isize)
				isize = pos;
			xfs_vm_kill_delalloc_range(inode, isize, to);
			truncate_pagecache_range(inode, isize, to);
		}
	}
	return ret;
}
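/*
 * The bmap address space operation.  Flush any dirty data first so that
 * the block mapping returned to userspace is stable.
 */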
STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_vm_bmap(XFS_I(inode));
	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	filemap_write_and_wait(mapping);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}
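/*
 * Read one page, or a list of pages, using the generic mpage code with
 * xfs_get_blocks to do the mapping.
 */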
STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	return mpage_readpage(page, xfs_get_blocks);
}
STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}
/*
 * This is basically a copy of __set_page_dirty_buffers() with one
 * small tweak: buffers beyond EOF do not get marked dirty.  If we mark them
 * dirty, we'll never be able to clean them because we don't write buffers
 * beyond EOF, and that means we can't invalidate pages that span EOF
 * that have been marked dirty.  Further, the dirty state can leak into
 * the file interior if the file is extended, resulting in all sorts of
 * bad things happening as the state does not match the underlying data.
 *
 * XXX: this really indicates that bufferheads in XFS need to die.  Warts like
 * this only exist because of bufferheads and how the generic code manages them.
 */
STATIC int
xfs_vm_set_page_dirty(
	struct page		*page)
{
	struct address_space	*mapping = page->mapping;
	struct inode		*inode = mapping->host;
	loff_t			end_offset;
	loff_t			offset;
	int			newly_dirty;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	end_offset = i_size_read(inode);
	offset = page_offset(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			if (offset < end_offset)
				set_buffer_dirty(bh);
			bh = bh->b_this_page;
			offset += 1 << inode->i_blkbits;
		} while (bh != head);
	}
	newly_dirty = !TestSetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	if (newly_dirty) {
		/* sigh - __set_page_dirty() is static, so copy it here, too */
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		if (page->mapping) {	/* Race with truncate? */
			WARN_ON_ONCE(!PageUptodate(page));
			account_page_dirtied(page, mapping);
			radix_tree_tag_set(&mapping->page_tree,
					page_index(page), PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	}
	return newly_dirty;
}
const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.set_page_dirty		= xfs_vm_set_page_dirty,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.write_begin		= xfs_vm_write_begin,
	.write_end		= xfs_vm_write_end,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};