if (found_start != start) {
printk("warning: eb start incorrect %Lu buffer %Lu len %lu\n",
start, found_start, len);
+ WARN_ON(1);
+ goto err;
+ }
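+ /* we should always be handed the buffer's first page */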
+ if (eb->first_page != page) {
+ printk("bad first page %lu %lu\n", eb->first_page->index,
+ page->index);
+ WARN_ON(1);
+ goto err;
+ }
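+ /* don't csum a page whose contents are not fully uptodate */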
+ if (!PageUptodate(page)) {
+ printk("csum not up to date page %lu\n", page->index);
+ WARN_ON(1);
+ goto err;
}
found_level = btrfs_header_level(eb);
csum_tree_block(root, eb, 0);
+err:
free_extent_buffer(eb);
out:
return 0;
struct extent_buffer *buf)
{
struct inode *btree_inode = root->fs_info->btree_inode;
- clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->extent_tree, buf);
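+ /* only clear dirty bits on blocks dirtied by the running transaction */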
+ if (btrfs_header_generation(buf) ==
+ root->fs_info->running_transaction->transid)
+ clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->extent_tree,
+ buf);
return 0;
}
void btrfs_throttle(struct btrfs_root *root)
{
- if (root->fs_info->throttles)
- congestion_wait(WRITE, HZ/10);
+ struct backing_dev_info *bdi;
+
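+ /* only wait if the backing device is actually congested */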
+ bdi = root->fs_info->sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
+ if (root->fs_info->throttles && bdi_write_congested(bdi))
+ congestion_wait(WRITE, HZ/20);
}
void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
u64 header_transid =
btrfs_header_generation(buf);
if (header_transid == transid) {
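+ /* the block was allocated in this transaction, clear its dirty bits before freeing */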
+ clean_tree_block(NULL, root, buf);
free_extent_buffer(buf);
return 1;
}
key.objectid = bytenr;
btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
key.offset = num_bytes;
-
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
search_start, search_end, hint_byte, ins,
trans->alloc_exclude_start,
trans->alloc_exclude_nr, data);
-if (ret)
-printk("find free extent returns %d\n", ret);
BUG_ON(ret);
if (ret)
return ret;
0, 0, 0);
return ERR_PTR(-ENOMEM);
}
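+ /* stamp the block with this transaction and clear out any old dirty or writeback state */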
+ btrfs_set_header_generation(buf, trans->transid);
+ clean_tree_block(trans, root, buf);
+ wait_on_tree_block_writeback(root, buf);
btrfs_set_buffer_uptodate(buf);
+
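+ /* a freshly allocated block should never still be dirty */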
+ if (PageDirty(buf->first_page)) {
+ printk("page %lu dirty\n", buf->first_page->index);
+ WARN_ON(1);
+ }
+
set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
buf->start + buf->len - 1, GFP_NOFS);
set_extent_bits(&BTRFS_I(root->fs_info->btree_inode)->extent_tree,
}
}
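+/*
+ * record that a page is the first page of an extent_buffer, keeping
+ * the buffer length in the high bits of page->private
+ */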
+void set_page_extent_head(struct page *page, unsigned long len)
+{
+ WARN_ON(page->private && page->private == EXTENT_PAGE_PRIVATE &&
+ PageDirty(page));
+ set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
+}
+
/*
* basic readpage implementation. Locked extent state structs are inserted
* into the tree that are removed when the IO is done (by the end_io
mark_page_accessed(page0);
set_page_extent_mapped(page0);
WARN_ON(!PageUptodate(page0));
- set_page_private(page0, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
- len << 2);
+ set_page_extent_head(page0, len);
} else {
i = 0;
}
mark_page_accessed(p);
if (i == 0) {
eb->first_page = p;
- set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
- len << 2);
+ set_page_extent_head(p, len);
} else {
set_page_private(p, EXTENT_PAGE_PRIVATE);
}
if (i == 0) {
eb->first_page = p;
- set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
- len << 2);
+ set_page_extent_head(p, len);
} else {
set_page_private(p, EXTENT_PAGE_PRIVATE);
}
for (i = 0; i < num_pages; i++) {
page = extent_buffer_page(eb, i);
lock_page(page);
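+ /* make sure page->private reflects this buffer before the dirty bits change */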
+ if (i == 0)
+ set_page_extent_head(page, eb->len);
+ else
+ set_page_private(page, EXTENT_PAGE_PRIVATE);
+
/*
* if we're on the last page or the first page and the
* block isn't aligned on a page boundary, do extra checks
*/
if (i == 0) {
lock_page(page);
- set_page_private(page,
- EXTENT_PAGE_PRIVATE_FIRST_PAGE |
- eb->len << 2);
+ set_page_extent_head(page, eb->len);
+ } else if (PagePrivate(page) &&
+ page->private != EXTENT_PAGE_PRIVATE) {
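+ /* give the page our private state before it is dirtied */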
+ lock_page(page);
+ set_page_extent_mapped(page);
+ unlock_page(page);
}
__set_page_dirty_nobuffers(extent_buffer_page(eb, i));
if (i == 0)