2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
25 #include "xfs_trans.h"
26 #include "xfs_dmapi.h"
27 #include "xfs_mount.h"
28 #include "xfs_bmap_btree.h"
29 #include "xfs_alloc_btree.h"
30 #include "xfs_ialloc_btree.h"
31 #include "xfs_dir2_sf.h"
32 #include "xfs_attr_sf.h"
33 #include "xfs_dinode.h"
34 #include "xfs_inode.h"
35 #include "xfs_alloc.h"
36 #include "xfs_btree.h"
37 #include "xfs_error.h"
39 #include "xfs_iomap.h"
40 #include <linux/mpage.h>
41 #include <linux/pagevec.h>
42 #include <linux/writeback.h>
51 struct buffer_head *bh, *head;
53 *delalloc = *unmapped = *unwritten = 0;
55 bh = head = page_buffers(page);
57 if (buffer_uptodate(bh) && !buffer_mapped(bh))
59 else if (buffer_unwritten(bh))
61 else if (buffer_delay(bh))
63 } while ((bh = bh->b_this_page) != head);
66 #if defined(XFS_RW_TRACE)
75 bhv_vnode_t *vp = vn_from_inode(inode);
76 loff_t isize = i_size_read(inode);
77 loff_t offset = page_offset(page);
78 int delalloc = -1, unmapped = -1, unwritten = -1;
80 if (page_has_buffers(page))
81 xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
87 ktrace_enter(ip->i_rwtrace,
88 (void *)((unsigned long)tag),
93 (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
94 (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
95 (void *)((unsigned long)((isize >> 32) & 0xffffffff)),
96 (void *)((unsigned long)(isize & 0xffffffff)),
97 (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
98 (void *)((unsigned long)(offset & 0xffffffff)),
99 (void *)((unsigned long)delalloc),
100 (void *)((unsigned long)unmapped),
101 (void *)((unsigned long)unwritten),
102 (void *)((unsigned long)current_pid()),
106 #define xfs_page_trace(tag, inode, page, pgoff)
 * Schedule IO completion handling on an xfsdatad if this was
 * the final hold on this ioend. If we are asked to wait,
 * flush the workqueue.
119 if (atomic_dec_and_test(&ioend->io_remaining)) {
120 queue_work(xfsdatad_workqueue, &ioend->io_work);
122 flush_workqueue(xfsdatad_workqueue);
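
/*
 * Illustrative sketch (not part of the original flow): io_remaining,
 * initialised to 1 in xfs_alloc_ioend(), acts as a submission-side hold.
 * Each bio takes an extra hold before it is issued and drops it from its
 * completion handler, and the submitter drops the initial hold once all
 * bios have been sent, which is what xfs_submit_ioend_bio() and
 * xfs_submit_ioend() do further down.  A hypothetical caller, roughly:
 */
#if 0	/* example only */
static void
xfs_example_submit(
	xfs_ioend_t		*ioend,
	struct bio		*bio)
{
	atomic_inc(&ioend->io_remaining);	/* hold for this bio */
	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;		/* drops the hold on completion */
	submit_bio(WRITE, bio);

	xfs_finish_ioend(ioend, 0);		/* drop the initial hold, don't wait */
}
#endif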
127 * We're now finished for good with this ioend structure.
128 * Update the page state via the associated buffer_heads,
129 * release holds on the inode and bio, and finally free
130 * up memory. Do not use the ioend after this.
136 struct buffer_head *bh, *next;
138 for (bh = ioend->io_buffer_head; bh; bh = next) {
139 next = bh->b_private;
140 bh->b_end_io(bh, !ioend->io_error);
142 if (unlikely(ioend->io_error))
143 vn_ioerror(ioend->io_vnode, ioend->io_error, __FILE__,__LINE__);
144 vn_iowake(ioend->io_vnode);
145 mempool_free(ioend, xfs_ioend_pool);
 * Update on-disk file size now that data has been written to disk.
 * The current in-memory file size is i_size. If a write is beyond
 * eof, io_new_size will be the intended file size until i_size is
 * updated. If this write does not extend all the way to the valid
 * file size, then restrict this update to the end of the write.
163 ip = xfs_vtoi(ioend->io_vnode);
167 ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
168 ASSERT(ioend->io_type != IOMAP_READ);
170 if (unlikely(ioend->io_error))
173 bsize = ioend->io_offset + ioend->io_size;
175 xfs_ilock(ip, XFS_ILOCK_EXCL);
177 isize = MAX(ip->i_size, ip->i_iocore.io_new_size);
178 isize = MIN(isize, bsize);
180 if (ip->i_d.di_size < isize) {
181 ip->i_d.di_size = isize;
182 ip->i_update_core = 1;
183 ip->i_update_size = 1;
184 mark_inode_dirty_sync(vn_to_inode(ioend->io_vnode));
187 xfs_iunlock(ip, XFS_ILOCK_EXCL);
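
/*
 * Worked example for the size clamping above (illustrative numbers): with
 * an in-core i_size of 100k, a write that will eventually extend the file
 * to io_new_size = 1M, and this ioend covering offset 96k with size 32k,
 * isize = MIN(MAX(100k, 1M), 96k + 32k) = 128k.  The on-disk size thus
 * only advances as far as data has actually reached the disk.
 */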
191 * Buffered IO write completion for delayed allocate extents.
194 xfs_end_bio_delalloc(
195 struct work_struct *work)
198 container_of(work, xfs_ioend_t, io_work);
200 xfs_setfilesize(ioend);
201 xfs_destroy_ioend(ioend);
205 * Buffered IO write completion for regular, written extents.
209 struct work_struct *work)
212 container_of(work, xfs_ioend_t, io_work);
214 xfs_setfilesize(ioend);
215 xfs_destroy_ioend(ioend);
219 * IO write completion for unwritten extents.
221 * Issue transactions to convert a buffer range from unwritten
222 * to written extents.
225 xfs_end_bio_unwritten(
226 struct work_struct *work)
229 container_of(work, xfs_ioend_t, io_work);
230 bhv_vnode_t *vp = ioend->io_vnode;
231 xfs_off_t offset = ioend->io_offset;
232 size_t size = ioend->io_size;
234 if (likely(!ioend->io_error)) {
235 bhv_vop_bmap(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL);
236 xfs_setfilesize(ioend);
238 xfs_destroy_ioend(ioend);
242 * IO read completion for regular, written extents.
246 struct work_struct *work)
249 container_of(work, xfs_ioend_t, io_work);
251 xfs_destroy_ioend(ioend);
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the on-disk inode size later.
267 ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);
	 * Set the count to 1 initially, which will prevent an I/O
	 * completion callback that happens before we have started
	 * all the I/O from calling the completion routine too early.
274 atomic_set(&ioend->io_remaining, 1);
276 ioend->io_list = NULL;
277 ioend->io_type = type;
278 ioend->io_vnode = vn_from_inode(inode);
279 ioend->io_buffer_head = NULL;
280 ioend->io_buffer_tail = NULL;
281 atomic_inc(&ioend->io_vnode->v_iocount);
282 ioend->io_offset = 0;
285 if (type == IOMAP_UNWRITTEN)
286 INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
287 else if (type == IOMAP_DELAY)
288 INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
289 else if (type == IOMAP_READ)
290 INIT_WORK(&ioend->io_work, xfs_end_bio_read);
292 INIT_WORK(&ioend->io_work, xfs_end_bio_written);
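
/*
 * Illustrative usage (mirrors xfs_vm_direct_IO() below): a direct I/O
 * submitter allocates the ioend up front and stashes it for the
 * completion handler, e.g.
 *
 *	iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
 */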
305 bhv_vnode_t *vp = vn_from_inode(inode);
306 int error, nmaps = 1;
308 error = bhv_vop_bmap(vp, offset, count, flags, mapp, &nmaps);
309 if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
319 return offset >= iomapp->iomap_offset &&
320 offset < iomapp->iomap_offset + iomapp->iomap_bsize;
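
/*
 * Example (illustrative numbers): for a mapping returned with
 * iomap_offset = 1M and iomap_bsize = 4M, offsets in the half-open range
 * [1M, 5M) satisfy this check and can reuse the cached mapping.
 */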
324 * BIO completion handler for buffered IO.
329 unsigned int bytes_done,
332 xfs_ioend_t *ioend = bio->bi_private;
337 ASSERT(atomic_read(&bio->bi_cnt) >= 1);
338 ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;
340 /* Toss bio and pass work off to an xfsdatad thread */
341 bio->bi_private = NULL;
342 bio->bi_end_io = NULL;
345 xfs_finish_ioend(ioend, 0);
350 xfs_submit_ioend_bio(
354 atomic_inc(&ioend->io_remaining);
356 bio->bi_private = ioend;
357 bio->bi_end_io = xfs_end_bio;
359 submit_bio(WRITE, bio);
360 ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
366 struct buffer_head *bh)
369 int nvecs = bio_get_nr_vecs(bh->b_bdev);
372 bio = bio_alloc(GFP_NOIO, nvecs);
376 ASSERT(bio->bi_private == NULL);
377 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
378 bio->bi_bdev = bh->b_bdev;
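
/*
 * Worked example for the bi_sector calculation above (illustrative
 * numbers): with 4k buffers, b_size >> 9 is 8 sectors per block, so a
 * buffer at b_blocknr 100 starts at 512-byte sector 800 on the device.
 */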
384 xfs_start_buffer_writeback(
385 struct buffer_head *bh)
387 ASSERT(buffer_mapped(bh));
388 ASSERT(buffer_locked(bh));
389 ASSERT(!buffer_delay(bh));
390 ASSERT(!buffer_unwritten(bh));
392 mark_buffer_async_write(bh);
393 set_buffer_uptodate(bh);
394 clear_buffer_dirty(bh);
398 xfs_start_page_writeback(
400 struct writeback_control *wbc,
404 ASSERT(PageLocked(page));
405 ASSERT(!PageWriteback(page));
407 clear_page_dirty_for_io(page);
408 set_page_writeback(page);
411 end_page_writeback(page);
412 wbc->pages_skipped++; /* We didn't write this page */
416 static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
418 return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
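
/*
 * Illustrative note: bio_add_page() returns the number of bytes actually
 * added, so callers compare the result with bh->b_size (as
 * xfs_submit_ioend() does below) and open a new bio once the current one
 * is full.
 */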
422 * Submit all of the bios for all of the ioends we have saved up, covering the
423 * initial writepage page and also any probed pages.
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O. If we mark the
 * buffers as we go, we can end up with a page that has only some buffers
 * marked async write, and I/O completion on those can occur before we mark
 * the remaining buffers async write.
431 * The end result of this is that we trip a bug in end_page_writeback() because
432 * we call it twice for the one page as the code in end_buffer_async_write()
433 * assumes that all buffers on the page are started at the same time.
435 * The fix is two passes across the ioend list - one to start writeback on the
436 * buffer_heads, and then submit them for I/O on the second pass.
442 xfs_ioend_t *head = ioend;
444 struct buffer_head *bh;
446 sector_t lastblock = 0;
448 /* Pass 1 - start writeback */
450 next = ioend->io_list;
451 for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
452 xfs_start_buffer_writeback(bh);
454 } while ((ioend = next) != NULL);
456 /* Pass 2 - submit I/O */
459 next = ioend->io_list;
462 for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
466 bio = xfs_alloc_ioend_bio(bh);
467 } else if (bh->b_blocknr != lastblock + 1) {
468 xfs_submit_ioend_bio(ioend, bio);
472 if (bio_add_buffer(bio, bh) != bh->b_size) {
473 xfs_submit_ioend_bio(ioend, bio);
477 lastblock = bh->b_blocknr;
480 xfs_submit_ioend_bio(ioend, bio);
481 xfs_finish_ioend(ioend, 0);
482 } while ((ioend = next) != NULL);
 * Cancel submission of all buffer_heads so far in this ioend.
 * Toss the ioend too. Only ever called for the initial page
 * in a writepage request, so only ever one page.
495 struct buffer_head *bh, *next_bh;
498 next = ioend->io_list;
499 bh = ioend->io_buffer_head;
501 next_bh = bh->b_private;
502 clear_buffer_async_write(bh);
504 } while ((bh = next_bh) != NULL);
506 vn_iowake(ioend->io_vnode);
507 mempool_free(ioend, xfs_ioend_pool);
508 } while ((ioend = next) != NULL);
512 * Test to see if we've been building up a completion structure for
513 * earlier buffers -- if so, we try to append to this ioend if we
514 * can, otherwise we finish off any current ioend and start another.
515 * Return true if we've finished the given ioend.
520 struct buffer_head *bh,
523 xfs_ioend_t **result,
526 xfs_ioend_t *ioend = *result;
528 if (!ioend || need_ioend || type != ioend->io_type) {
529 xfs_ioend_t *previous = *result;
531 ioend = xfs_alloc_ioend(inode, type);
532 ioend->io_offset = offset;
533 ioend->io_buffer_head = bh;
534 ioend->io_buffer_tail = bh;
536 previous->io_list = ioend;
539 ioend->io_buffer_tail->b_private = bh;
540 ioend->io_buffer_tail = bh;
543 bh->b_private = NULL;
544 ioend->io_size += bh->b_size;
549 struct buffer_head *bh,
556 ASSERT(mp->iomap_bn != IOMAP_DADDR_NULL);
558 bn = (mp->iomap_bn >> (block_bits - BBSHIFT)) +
559 ((offset - mp->iomap_offset) >> block_bits);
561 ASSERT(bn || (mp->iomap_flags & IOMAP_REALTIME));
564 set_buffer_mapped(bh);
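
/*
 * Worked example for the block number calculation above (illustrative
 * numbers, 4k blocks, so block_bits = 12 and BBSHIFT = 9): iomap_bn is in
 * 512-byte basic blocks, so iomap_bn = 8000 converts to filesystem block
 * 8000 >> 3 = 1000; an offset 16k past iomap_offset adds 16k >> 12 = 4,
 * giving block 1004 for this buffer.
 */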
569 struct buffer_head *bh,
574 ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
575 ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));
578 xfs_map_buffer(bh, iomapp, offset, block_bits);
579 bh->b_bdev = iomapp->iomap_target->bt_bdev;
580 set_buffer_mapped(bh);
581 clear_buffer_delay(bh);
582 clear_buffer_unwritten(bh);
586 * Look for a page at index that is suitable for clustering.
591 unsigned int pg_offset,
596 if (PageWriteback(page))
599 if (page->mapping && PageDirty(page)) {
600 if (page_has_buffers(page)) {
601 struct buffer_head *bh, *head;
603 bh = head = page_buffers(page);
605 if (!buffer_uptodate(bh))
607 if (mapped != buffer_mapped(bh))
610 if (ret >= pg_offset)
612 } while ((bh = bh->b_this_page) != head);
614 ret = mapped ? 0 : PAGE_CACHE_SIZE;
623 struct page *startpage,
624 struct buffer_head *bh,
625 struct buffer_head *head,
629 pgoff_t tindex, tlast, tloff;
633 /* First sum forwards in this page */
635 if (!buffer_uptodate(bh) || (mapped != buffer_mapped(bh)))
638 } while ((bh = bh->b_this_page) != head);
640 /* if we reached the end of the page, sum forwards in following pages */
641 tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
642 tindex = startpage->index + 1;
644 /* Prune this back to avoid pathological behavior */
645 tloff = min(tlast, startpage->index + 64);
647 pagevec_init(&pvec, 0);
648 while (!done && tindex <= tloff) {
649 unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
651 if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
654 for (i = 0; i < pagevec_count(&pvec); i++) {
655 struct page *page = pvec.pages[i];
656 size_t pg_offset, pg_len = 0;
658 if (tindex == tlast) {
660 i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
666 pg_offset = PAGE_CACHE_SIZE;
668 if (page->index == tindex && !TestSetPageLocked(page)) {
669 pg_len = xfs_probe_page(page, pg_offset, mapped);
682 pagevec_release(&pvec);
690 * Test if a given page is suitable for writing as part of an unwritten
691 * or delayed allocate extent.
698 if (PageWriteback(page))
701 if (page->mapping && page_has_buffers(page)) {
702 struct buffer_head *bh, *head;
705 bh = head = page_buffers(page);
707 if (buffer_unwritten(bh))
708 acceptable = (type == IOMAP_UNWRITTEN);
709 else if (buffer_delay(bh))
710 acceptable = (type == IOMAP_DELAY);
711 else if (buffer_dirty(bh) && buffer_mapped(bh))
712 acceptable = (type == IOMAP_NEW);
715 } while ((bh = bh->b_this_page) != head);
 * Allocate & map buffers for page given the extent map. Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
736 xfs_ioend_t **ioendp,
737 struct writeback_control *wbc,
741 struct buffer_head *bh, *head;
742 xfs_off_t end_offset;
743 unsigned long p_offset;
745 int bbits = inode->i_blkbits;
747 int count = 0, done = 0, uptodate = 1;
748 xfs_off_t offset = page_offset(page);
750 if (page->index != tindex)
752 if (TestSetPageLocked(page))
754 if (PageWriteback(page))
755 goto fail_unlock_page;
756 if (page->mapping != inode->i_mapping)
757 goto fail_unlock_page;
758 if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
759 goto fail_unlock_page;
762 * page_dirty is initially a count of buffers on the page before
763 * EOF and is decremented as we move each into a cleanable state.
767 * End offset is the highest offset that this page should represent.
768 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
769 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
770 * hence give us the correct page_dirty count. On any other page,
771 * it will be zero and in that case we need page_dirty to be the
772 * count of buffers on the page.
774 end_offset = min_t(unsigned long long,
775 (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
778 len = 1 << inode->i_blkbits;
779 p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
781 p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
782 page_dirty = p_offset / len;
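
	/*
	 * Worked example for the page_dirty calculation above (illustrative
	 * numbers): with 4k pages, 1k blocks and an end_offset of 10000, the
	 * last page gives p_offset = 10000 & 4095 = 1808, rounded up to 2048,
	 * so page_dirty = 2 buffers lie before EOF.  On any earlier page
	 * p_offset becomes PAGE_CACHE_SIZE and page_dirty = 4.
	 */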
784 bh = head = page_buffers(page);
786 if (offset >= end_offset)
788 if (!buffer_uptodate(bh))
790 if (!(PageUptodate(page) || buffer_uptodate(bh))) {
795 if (buffer_unwritten(bh) || buffer_delay(bh)) {
796 if (buffer_unwritten(bh))
797 type = IOMAP_UNWRITTEN;
801 if (!xfs_iomap_valid(mp, offset)) {
806 ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
807 ASSERT(!(mp->iomap_flags & IOMAP_DELAY));
809 xfs_map_at_offset(bh, offset, bbits, mp);
811 xfs_add_to_ioend(inode, bh, offset,
814 set_buffer_dirty(bh);
816 mark_buffer_dirty(bh);
822 if (buffer_mapped(bh) && all_bh && startio) {
824 xfs_add_to_ioend(inode, bh, offset,
832 } while (offset += len, (bh = bh->b_this_page) != head);
834 if (uptodate && bh == head)
835 SetPageUptodate(page);
839 struct backing_dev_info *bdi;
841 bdi = inode->i_mapping->backing_dev_info;
843 if (bdi_write_congested(bdi)) {
844 wbc->encountered_congestion = 1;
846 } else if (wbc->nr_to_write <= 0) {
850 xfs_start_page_writeback(page, wbc, !page_dirty, count);
861 * Convert & write out a cluster of pages in the same extent as defined
862 * by mp and following the start page.
869 xfs_ioend_t **ioendp,
870 struct writeback_control *wbc,
878 pagevec_init(&pvec, 0);
879 while (!done && tindex <= tlast) {
880 unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
882 if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
885 for (i = 0; i < pagevec_count(&pvec); i++) {
886 done = xfs_convert_page(inode, pvec.pages[i], tindex++,
887 iomapp, ioendp, wbc, startio, all_bh);
892 pagevec_release(&pvec);
 * Calling this without startio set means we are being asked to make a dirty
 * page ready for freeing its buffers.  When called with startio set then
 * we are coming from writepage.
 *
 * When called with startio set it is important that we write the WHOLE
 * page if possible.
 * The bh->b_state's cannot know if any of the blocks or which block for
 * that matter are dirty due to mmap writes, and therefore bh uptodate is
 * only valid if the page itself isn't completely uptodate.  Some layers
 * may clear the page dirty flag prior to calling writepage, under the
 * assumption the entire page will be written out; by not writing out the
 * whole page the page can be reused before all valid dirty data is
 * written out.  Note: in the case of a page that has been dirtied by
 * an mmap write but only partially set up by block_prepare_write, the
 * bh->b_state flags will not agree and only the buffers set up by
 * block_prepare_write/block_commit_write will have valid state; thus the
 * whole page must be written out.
917 xfs_page_state_convert(
920 struct writeback_control *wbc,
922 int unmapped) /* also implies page uptodate */
924 struct buffer_head *bh, *head;
926 xfs_ioend_t *ioend = NULL, *iohead = NULL;
928 unsigned long p_offset = 0;
930 __uint64_t end_offset;
931 pgoff_t end_index, last_index, tlast;
933 int flags, err, iomap_valid = 0, uptodate = 1;
934 int page_dirty, count = 0;
936 int all_bh = unmapped;
939 if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
940 trylock |= BMAPI_TRYLOCK;
943 /* Is this page beyond the end of the file? */
944 offset = i_size_read(inode);
945 end_index = offset >> PAGE_CACHE_SHIFT;
946 last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
947 if (page->index >= end_index) {
948 if ((page->index >= end_index + 1) ||
949 !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
957 * page_dirty is initially a count of buffers on the page before
958 * EOF and is decremented as we move each into a cleanable state.
962 * End offset is the highest offset that this page should represent.
963 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
964 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
965 * hence give us the correct page_dirty count. On any other page,
966 * it will be zero and in that case we need page_dirty to be the
967 * count of buffers on the page.
969 end_offset = min_t(unsigned long long,
970 (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
971 len = 1 << inode->i_blkbits;
972 p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
974 p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
975 page_dirty = p_offset / len;
977 bh = head = page_buffers(page);
978 offset = page_offset(page);
982 /* TODO: cleanup count and page_dirty */
985 if (offset >= end_offset)
987 if (!buffer_uptodate(bh))
989 if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
			 * The iomap is actually still valid, but the ioend
			 * isn't; that shouldn't happen too often.
999 iomap_valid = xfs_iomap_valid(&iomap, offset);
1002 * First case, map an unwritten extent and prepare for
1003 * extent state conversion transaction on completion.
1005 * Second case, allocate space for a delalloc buffer.
1006 * We can return EAGAIN here in the release page case.
1008 * Third case, an unmapped buffer was found, and we are
1009 * in a path where we need to write the whole page out.
1011 if (buffer_unwritten(bh) || buffer_delay(bh) ||
1012 ((buffer_uptodate(bh) || PageUptodate(page)) &&
1013 !buffer_mapped(bh) && (unmapped || startio))) {
1017 * Make sure we don't use a read-only iomap
1019 if (flags == BMAPI_READ)
1022 if (buffer_unwritten(bh)) {
1023 type = IOMAP_UNWRITTEN;
1024 flags = BMAPI_WRITE | BMAPI_IGNSTATE;
1025 } else if (buffer_delay(bh)) {
1027 flags = BMAPI_ALLOCATE | trylock;
1030 flags = BMAPI_WRITE | BMAPI_MMAP;
1035 * if we didn't have a valid mapping then we
1036 * need to ensure that we put the new mapping
1037 * in a new ioend structure. This needs to be
1038 * done to ensure that the ioends correctly
1039 * reflect the block mappings at io completion
1040 * for unwritten extent conversion.
1043 if (type == IOMAP_NEW) {
1044 size = xfs_probe_cluster(inode,
1050 err = xfs_map_blocks(inode, offset, size,
1054 iomap_valid = xfs_iomap_valid(&iomap, offset);
1057 xfs_map_at_offset(bh, offset,
1058 inode->i_blkbits, &iomap);
1060 xfs_add_to_ioend(inode, bh, offset,
1064 set_buffer_dirty(bh);
1066 mark_buffer_dirty(bh);
1071 } else if (buffer_uptodate(bh) && startio) {
1073 * we got here because the buffer is already mapped.
1074 * That means it must already have extents allocated
1075 * underneath it. Map the extent by reading it.
1077 if (!iomap_valid || flags != BMAPI_READ) {
1079 size = xfs_probe_cluster(inode, page, bh,
1081 err = xfs_map_blocks(inode, offset, size,
1085 iomap_valid = xfs_iomap_valid(&iomap, offset);
1089 * We set the type to IOMAP_NEW in case we are doing a
1090 * small write at EOF that is extending the file but
1091 * without needing an allocation. We need to update the
1092 * file size on I/O completion in this case so it is
1093 * the same case as having just allocated a new extent
1094 * that we are writing into for the first time.
1097 if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
1098 ASSERT(buffer_mapped(bh));
1101 xfs_add_to_ioend(inode, bh, offset, type,
1102 &ioend, !iomap_valid);
1108 } else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
1109 (unmapped || startio)) {
1116 } while (offset += len, ((bh = bh->b_this_page) != head));
1118 if (uptodate && bh == head)
1119 SetPageUptodate(page);
1122 xfs_start_page_writeback(page, wbc, 1, count);
1124 if (ioend && iomap_valid) {
1125 offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
1127 tlast = min_t(pgoff_t, offset, last_index);
1128 xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
1129 wbc, startio, all_bh, tlast);
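
		/*
		 * Worked example for the clustering range above (illustrative
		 * numbers): an iomap starting at offset 0 with
		 * iomap_bsize = 1M ends on page (1M - 1) >> PAGE_CACHE_SHIFT
		 * = 255 (4k pages), so the cluster write covers pages
		 * page->index + 1 .. min(255, last_index).
		 */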
1133 xfs_submit_ioend(iohead);
1139 xfs_cancel_ioend(iohead);
	 * If it's delalloc and we have nowhere to put it,
	 * throw it away, unless the lower layers told
	 * us to try again.
1146 if (err != -EAGAIN) {
1148 block_invalidatepage(page, 0);
1149 ClearPageUptodate(page);
1155 * writepage: Called from one of two places:
1157 * 1. we are flushing a delalloc buffer head.
 * 2. we are writing out a dirty page. Typically the page dirty
 *    state is cleared before we get here. In this case it is
 *    conceivable we have no buffer heads.
1163 * For delalloc space on the page we need to allocate space and
1164 * flush it. For unmapped buffer heads on the page we should
1165 * allocate space if the page is uptodate. For any other dirty
1166 * buffer heads on the page we should flush them.
 * If we detect that a transaction would be required to flush
 * the page, we have to check the process flags first: if we
 * are already in a transaction or disk I/O during allocations
 * is off, we need to fail the writepage and redirty the page.
1177 struct writeback_control *wbc)
1181 int delalloc, unmapped, unwritten;
1182 struct inode *inode = page->mapping->host;
1184 xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);
1187 * We need a transaction if:
1188 * 1. There are delalloc buffers on the page
1189 * 2. The page is uptodate and we have unmapped buffers
1190 * 3. The page is uptodate and we have no buffers
1191 * 4. There are unwritten buffers on the page
1194 if (!page_has_buffers(page)) {
1198 xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
1199 if (!PageUptodate(page))
1201 need_trans = delalloc + unmapped + unwritten;
	 * If we need a transaction and the process flags say
	 * we are already in a transaction, or no IO is allowed,
	 * then mark the page dirty again and leave the page alone.
1210 if (current_test_flags(PF_FSTRANS) && need_trans)
1214 * Delay hooking up buffer heads until we have
1215 * made our go/no-go decision.
1217 if (!page_has_buffers(page))
1218 create_empty_buffers(page, 1 << inode->i_blkbits, 0);
1221 * Convert delayed allocate, unwritten or unmapped space
1222 * to real space and flush out to disk.
1224 error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
1225 if (error == -EAGAIN)
1227 if (unlikely(error < 0))
1233 redirty_page_for_writepage(wbc, page);
1243 struct address_space *mapping,
1244 struct writeback_control *wbc)
1246 struct bhv_vnode *vp = vn_from_inode(mapping->host);
1250 return generic_writepages(mapping, wbc);
1254 * Called to move a page into cleanable state - and from there
1255 * to be released. Possibly the page is already clean. We always
1256 * have buffer heads in this call.
1258 * Returns 0 if the page is ok to release, 1 otherwise.
1260 * Possible scenarios are:
1262 * 1. We are being called to release a page which has been written
1263 * to via regular I/O. buffer heads will be dirty and possibly
1264 * delalloc. If no delalloc buffer heads in this case then we
1265 * can just return zero.
1267 * 2. We are called to release a page which has been written via
1268 * mmap, all we need to do is ensure there is no delalloc
1269 * state in the buffer heads, if not we can let the caller
1270 * free them and we should come back later via writepage.
1277 struct inode *inode = page->mapping->host;
1278 int dirty, delalloc, unmapped, unwritten;
1279 struct writeback_control wbc = {
1280 .sync_mode = WB_SYNC_ALL,
1284 xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, 0);
1286 if (!page_has_buffers(page))
1289 xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
1290 if (!delalloc && !unwritten)
1293 if (!(gfp_mask & __GFP_FS))
1296 /* If we are already inside a transaction or the thread cannot
1297 * do I/O, we cannot release this page.
1299 if (current_test_flags(PF_FSTRANS))
1303 * Convert delalloc space to real space, do not flush the
1304 * data out to disk, that will be done by the caller.
1305 * Never need to allocate space here - we will always
1306 * come back to writepage in that case.
1308 dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
1309 if (dirty == 0 && !unwritten)
1314 return try_to_free_buffers(page);
1319 struct inode *inode,
1321 struct buffer_head *bh_result,
1324 bmapi_flags_t flags)
1326 bhv_vnode_t *vp = vn_from_inode(inode);
1333 offset = (xfs_off_t)iblock << inode->i_blkbits;
1334 ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
1335 size = bh_result->b_size;
1336 error = bhv_vop_bmap(vp, offset, size,
1337 create ? flags : BMAPI_READ, &iomap, &niomap);
1343 if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
1345 * For unwritten extents do not report a disk address on
1346 * the read case (treat as if we're reading into a hole).
1348 if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
1349 xfs_map_buffer(bh_result, &iomap, offset,
1352 if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
1354 bh_result->b_private = inode;
1355 set_buffer_unwritten(bh_result);
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to from the buffer_head b_bdev currently.
1363 bh_result->b_bdev = iomap.iomap_target->bt_bdev;
1366 * If we previously allocated a block out beyond eof and we are now
1367 * coming back to use it then we will need to flag it as new even if it
1368 * has a disk address.
	 * With sub-block writes into unwritten extents we also need to mark
	 * the buffer as new so that the unwritten parts of the buffer get
	 * correctly zeroed.
1375 ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
1376 (offset >= i_size_read(inode)) ||
1377 (iomap.iomap_flags & (IOMAP_NEW|IOMAP_UNWRITTEN))))
1378 set_buffer_new(bh_result);
1380 if (iomap.iomap_flags & IOMAP_DELAY) {
1383 set_buffer_uptodate(bh_result);
1384 set_buffer_mapped(bh_result);
1385 set_buffer_delay(bh_result);
1389 if (direct || size > (1 << inode->i_blkbits)) {
1390 ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
1391 offset = min_t(xfs_off_t,
1392 iomap.iomap_bsize - iomap.iomap_delta, size);
1393 bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset);
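
	/*
	 * Worked example for the mapping-size clamp above (illustrative
	 * numbers): for a 1M mapping with iomap_delta = 16k (the request
	 * starts 16k into the mapping) and a 256k request, b_size stays
	 * 256k; a larger request would be clamped to the remaining
	 * 1M - 16k so the caller never builds an I/O that runs off the end
	 * of the mapping.
	 */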
1401 struct inode *inode,
1403 struct buffer_head *bh_result,
1406 return __xfs_get_blocks(inode, iblock,
1407 bh_result, create, 0, BMAPI_WRITE);
1411 xfs_get_blocks_direct(
1412 struct inode *inode,
1414 struct buffer_head *bh_result,
1417 return __xfs_get_blocks(inode, iblock,
1418 bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
1428 xfs_ioend_t *ioend = iocb->private;
1431 * Non-NULL private data means we need to issue a transaction to
1432 * convert a range from unwritten to written extents. This needs
1433 * to happen from process context but aio+dio I/O completion
1434 * happens from irq context so we need to defer it to a workqueue.
1435 * This is not necessary for synchronous direct I/O, but we do
1436 * it anyway to keep the code uniform and simpler.
1438 * Well, if only it were that simple. Because synchronous direct I/O
1439 * requires extent conversion to occur *before* we return to userspace,
1440 * we have to wait for extent conversion to complete. Look at the
1441 * iocb that has been passed to us to determine if this is AIO or
1442 * not. If it is synchronous, tell xfs_finish_ioend() to kick the
1443 * workqueue and wait for it to complete.
	 * The core direct I/O code might be changed to always call the
	 * completion handler in the future, in which case all this can
	 * go away.
1449 ioend->io_offset = offset;
1450 ioend->io_size = size;
1451 if (ioend->io_type == IOMAP_READ) {
1452 xfs_finish_ioend(ioend, 0);
1453 } else if (private && size > 0) {
1454 xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
		 * A direct I/O write ioend starts its life in unwritten
		 * state in case it maps an unwritten extent. This write
		 * didn't map an unwritten extent so switch its completion
		 * handler.
1462 INIT_WORK(&ioend->io_work, xfs_end_bio_written);
1463 xfs_finish_ioend(ioend, 0);
1467 * blockdev_direct_IO can return an error even after the I/O
1468 * completion handler was called. Thus we need to protect
1469 * against double-freeing.
1471 iocb->private = NULL;
1478 const struct iovec *iov,
1480 unsigned long nr_segs)
1482 struct file *file = iocb->ki_filp;
1483 struct inode *inode = file->f_mapping->host;
1484 bhv_vnode_t *vp = vn_from_inode(inode);
1490 error = bhv_vop_bmap(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps);
1495 iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
1496 ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
1497 iomap.iomap_target->bt_bdev,
1498 iov, offset, nr_segs,
1499 xfs_get_blocks_direct,
1502 iocb->private = xfs_alloc_ioend(inode, IOMAP_READ);
1503 ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
1504 iomap.iomap_target->bt_bdev,
1505 iov, offset, nr_segs,
1506 xfs_get_blocks_direct,
1510 if (unlikely(ret != -EIOCBQUEUED && iocb->private))
1511 xfs_destroy_ioend(iocb->private);
1516 xfs_vm_prepare_write(
1522 return block_prepare_write(page, from, to, xfs_get_blocks);
1527 struct address_space *mapping,
1530 struct inode *inode = (struct inode *)mapping->host;
1531 bhv_vnode_t *vp = vn_from_inode(inode);
1533 vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
1534 bhv_vop_rwlock(vp, VRWLOCK_READ);
1535 bhv_vop_flush_pages(vp, (xfs_off_t)0, -1, 0, FI_REMAPF);
1536 bhv_vop_rwunlock(vp, VRWLOCK_READ);
1537 return generic_block_bmap(mapping, block, xfs_get_blocks);
1542 struct file *unused,
1545 return mpage_readpage(page, xfs_get_blocks);
1550 struct file *unused,
1551 struct address_space *mapping,
1552 struct list_head *pages,
1555 return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
1559 xfs_vm_invalidatepage(
1561 unsigned long offset)
1563 xfs_page_trace(XFS_INVALIDPAGE_ENTER,
1564 page->mapping->host, page, offset);
1565 block_invalidatepage(page, offset);
1568 const struct address_space_operations xfs_address_space_operations = {
1569 .readpage = xfs_vm_readpage,
1570 .readpages = xfs_vm_readpages,
1571 .writepage = xfs_vm_writepage,
1572 .writepages = xfs_vm_writepages,
1573 .sync_page = block_sync_page,
1574 .releasepage = xfs_vm_releasepage,
1575 .invalidatepage = xfs_vm_invalidatepage,
1576 .prepare_write = xfs_vm_prepare_write,
1577 .commit_write = generic_commit_write,
1578 .bmap = xfs_vm_bmap,
1579 .direct_IO = xfs_vm_direct_IO,
1580 .migratepage = buffer_migrate_page,