#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
+#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
void fastcall unlock_buffer(struct buffer_head *bh)
{
+ smp_mb__before_clear_bit();
clear_buffer_locked(bh);
smp_mb__after_clear_bit();
wake_up_bit(&bh->b_state, BH_Lock);
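
The added smp_mb__before_clear_bit() pairs with the waiter side of the lock bit. A sketch, mirroring what __wait_on_buffer() does in this tree (sync_buffer is the in-tree wakeup action, shown only for context):

/*
 * Waiter side of the pairing (sketch): without the barrier before
 * clear_buffer_locked(), a waiter woken on another CPU could observe
 * BH_Lock clear while stores made inside the critical section are
 * still not visible to it.
 */
void __wait_on_buffer(struct buffer_head *bh)
{
	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
}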
* freeze_bdev -- lock a filesystem and force it into a consistent state
* @bdev: blockdevice to lock
*
- * This takes the block device bd_mount_mutex to make sure no new mounts
+ * This takes the block device bd_mount_sem to make sure no new mounts
* happen on bdev until thaw_bdev() is called.
* If a superblock is found on this device, we take the s_umount semaphore
* on it to make sure nobody unmounts until the snapshot creation is done.
{
struct super_block *sb;
- mutex_lock(&bdev->bd_mount_mutex);
+ down(&bdev->bd_mount_sem);
sb = get_super(bdev);
if (sb && !(sb->s_flags & MS_RDONLY)) {
sb->s_frozen = SB_FREEZE_WRITE;
drop_super(sb);
}
- mutex_unlock(&bdev->bd_mount_mutex);
+ up(&bdev->bd_mount_sem);
}
EXPORT_SYMBOL(thaw_bdev);
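
For context, the caller pattern this pair supports, as a hedged sketch in the style of a snapshot driver (the function name and elided error handling are illustrative):

static void example_take_snapshot(struct block_device *bdev)
{
	struct super_block *sb;

	sb = freeze_bdev(bdev);	/* takes bd_mount_sem, quiesces the fs */
	/* ... capture the snapshot while no writes are in flight ... */
	thaw_bdev(bdev, sb);	/* drops bd_mount_sem, resumes normal IO */
}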
* We really want to use invalidate_inode_pages2() for
* that, but not until that's cleaned up.
*/
- invalidate_inode_pages(mapping);
+ invalidate_mapping_pages(mapping, 0, -1);
}
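
invalidate_mapping_pages() takes an explicit page-offset range, so the conversion passes (0, -1) to cover the whole mapping. Equivalent ranged calls, with the second range purely illustrative:

/* pgoff_t is unsigned, so an end of -1 means "through the last
 * possible page index". */
invalidate_mapping_pages(mapping, 0, -1);	/* whole mapping */
invalidate_mapping_pages(mapping, 0, 15);	/* first 16 pages only (illustrative) */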
}
spin_unlock(&mapping->private_lock);
- if (!TestSetPageDirty(page)) {
- write_lock_irq(&mapping->tree_lock);
- if (page->mapping) { /* Race with truncate? */
- if (mapping_cap_account_dirty(mapping))
- __inc_zone_page_state(page, NR_FILE_DIRTY);
- radix_tree_tag_set(&mapping->page_tree,
- page_index(page),
- PAGECACHE_TAG_DIRTY);
+ if (TestSetPageDirty(page))
+ return 0;
+
+ write_lock_irq(&mapping->tree_lock);
+ if (page->mapping) { /* Race with truncate? */
+ if (mapping_cap_account_dirty(mapping)) {
+ __inc_zone_page_state(page, NR_FILE_DIRTY);
+ task_io_account_write(PAGE_CACHE_SIZE);
}
- write_unlock_irq(&mapping->tree_lock);
- __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
- return 1;
+ radix_tree_tag_set(&mapping->page_tree,
+ page_index(page), PAGECACHE_TAG_DIRTY);
}
- return 0;
+ write_unlock_irq(&mapping->tree_lock);
+ __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+ return 1;
}
EXPORT_SYMBOL(__set_page_dirty_buffers);
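
task_io_account_write() is the new per-task accounting hook; its definition is roughly the following (from <linux/task_io_accounting_ops.h>, compiling away when CONFIG_TASK_IO_ACCOUNTING is off):

#ifdef CONFIG_TASK_IO_ACCOUNTING
static inline void task_io_account_write(size_t bytes)
{
	/* charge the dirtied bytes to whichever task dirtied the page */
	current->ioac.write_bytes += bytes;
}
#else
static inline void task_io_account_write(size_t bytes)
{
}
#endif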
* Look up the bh in this cpu's LRU. If it's there, move it to the head.
*/
static struct buffer_head *
-lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
+lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
struct buffer_head *ret = NULL;
struct bh_lru *lru;
- int i;
+ unsigned int i;
check_irqs_on();
bh_lru_lock();
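
For reference, the per-CPU LRU walked here is a small fixed array; a condensed sketch of the hit path (BH_LRU_SIZE and struct bh_lru match the in-tree definitions, the loop is abbreviated):

#define BH_LRU_SIZE	8

struct bh_lru {
	struct buffer_head *bhs[BH_LRU_SIZE];
};

/* On a hit, shift the earlier entries down one slot, install the hit
 * at the head, and take a reference before returning it. */
for (i = 0; i < BH_LRU_SIZE; i++) {
	struct buffer_head *bh = lru->bhs[i];

	if (bh && bh->b_bdev == bdev &&
	    bh->b_blocknr == block && bh->b_size == size) {
		while (i) {
			lru->bhs[i] = lru->bhs[i - 1];
			i--;
		}
		lru->bhs[0] = bh;
		get_bh(bh);
		ret = bh;
		break;
	}
}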
* NULL
*/
struct buffer_head *
-__find_get_block(struct block_device *bdev, sector_t block, int size)
+__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
* attempt is failing. FIXME, perhaps?
*/
struct buffer_head *
-__getblk(struct block_device *bdev, sector_t block, int size)
+__getblk(struct block_device *bdev, sector_t block, unsigned size)
{
struct buffer_head *bh = __find_get_block(bdev, block, size);
/*
* Do async read-ahead on a buffer.
*/
-void __breadahead(struct block_device *bdev, sector_t block, int size)
+void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
struct buffer_head *bh = __getblk(bdev, block, size);
if (likely(bh)) {
* It returns NULL if the block was unreadable.
*/
struct buffer_head *
-__bread(struct block_device *bdev, sector_t block, int size)
+__bread(struct block_device *bdev, sector_t block, unsigned size)
{
struct buffer_head *bh = __getblk(bdev, block, size);
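
All five prototypes move the block size from int to unsigned: a size can never be negative, and the unsigned type makes the b_size comparison in lookup_bh_lru() well-defined. An illustrative filesystem-side caller (the function name is hypothetical):

static struct buffer_head *example_read_block(struct block_device *bdev,
					      sector_t block,
					      unsigned blocksize)
{
	struct buffer_head *bh = __bread(bdev, block, blocksize);

	if (!bh)
		return NULL;	/* the block was unreadable */
	/* ... use bh->b_data ... */
	return bh;		/* caller releases with brelse(bh) */
}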
clear_buffer_req(bh);
clear_buffer_new(bh);
clear_buffer_delay(bh);
+ clear_buffer_unwritten(bh);
unlock_buffer(bh);
}
continue;
}
if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
+ !buffer_unwritten(bh) &&
(block_start < from || block_end > to)) {
ll_rw_block(READ, 1, &bh);
*wait_bh++=bh;
if (PageUptodate(page))
set_buffer_uptodate(bh);
- if (!buffer_uptodate(bh) && !buffer_delay(bh)) {
+ if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
+     !buffer_unwritten(bh)) {
err = -EIO;
ll_rw_block(READ, 1, &bh);
wait_on_buffer(bh);
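
The buffer_unwritten()/clear_buffer_unwritten() helpers used above come from the companion <linux/buffer_head.h> change; a sketch of its shape (the surrounding enum entries are elided):

enum bh_state_bits {
	/* ... existing BH_* bits ... */
	BH_Unwritten,	/* space is allocated on disk but not yet written */
};

/* BUFFER_FNS() generates buffer_unwritten(), set_buffer_unwritten()
 * and clear_buffer_unwritten() as test/set/clear of BH_Unwritten. */
BUFFER_FNS(Unwritten, unwritten)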
spin_lock(&mapping->private_lock);
ret = drop_buffers(page, &buffers_to_free);
+
+ /*
+ * If the filesystem writes its buffers by hand (eg ext3)
+ * then we can have clean buffers against a dirty page. We
+ * clean the page here; otherwise the VM will never notice
+ * that the filesystem did any IO at all.
+ *
+ * Also, during truncate, discard_buffer will have marked all
+ * the page's buffers clean. We discover that here and clean
+ * the page also.
+ *
+ * private_lock must be held over this entire operation in order
+ * to synchronise against __set_page_dirty_buffers and prevent the
+ * dirty bit from being lost.
+ */
+ if (ret)
+ cancel_dirty_page(page, PAGE_CACHE_SIZE);
spin_unlock(&mapping->private_lock);
- if (ret) {
- /*
- * If the filesystem writes its buffers by hand (eg ext3)
- * then we can have clean buffers against a dirty page. We
- * clean the page here; otherwise later reattachment of buffers
- * could encounter a non-uptodate page, which is unresolvable.
- * This only applies in the rare case where try_to_free_buffers
- * succeeds but the page is not freed.
- */
- clear_page_dirty(page);
- }
out:
if (buffers_to_free) {
struct buffer_head *bh = buffers_to_free;
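
cancel_dirty_page() replaces the open-coded clear_page_dirty() because it also unwinds the dirty accounting that __set_page_dirty_buffers() performed. Roughly, following the mm/truncate.c definition of this era:

void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;

		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}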