#include "page.h"
#include "btnode.h"
-void nilfs_btnode_cache_init(struct address_space *btnc,
- struct backing_dev_info *bdi)
-{
- nilfs_mapping_init(btnc, bdi);
-}
-
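/*
 * The nilfs_btnode_cache_init() wrapper is removed: it only forwarded to
 * nilfs_mapping_init(), and callers can now call nilfs_mapping_init()
 * directly, passing the owning inode so that mapping->host gets set.
 */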
void nilfs_btnode_cache_clear(struct address_space *btnc)
{
invalidate_mapping_pages(btnc, 0, -1);
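/*
 * The lines that follow are from the btnode header: the tail of a struct
 * declared there (its newbh member), and the removal of the prototype
 * matching the deleted wrapper above.
 */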
struct buffer_head *newbh;
};
-void nilfs_btnode_cache_init(struct address_space *, struct backing_dev_info *);
void nilfs_btnode_cache_clear(struct address_space *);
struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc,
__u64 blocknr);
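/*
 * Shadow-map setup: both frozen caches (frozen_data and frozen_btnodes)
 * now receive the owning inode, so their mapping->host back pointer is
 * valid from the start.
 */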
INIT_LIST_HEAD(&shadow->frozen_buffers);
address_space_init_once(&shadow->frozen_data);
- nilfs_mapping_init(&shadow->frozen_data, bdi);
+ nilfs_mapping_init(&shadow->frozen_data, inode, bdi);
address_space_init_once(&shadow->frozen_btnodes);
- nilfs_mapping_init(&shadow->frozen_btnodes, bdi);
+ nilfs_mapping_init(&shadow->frozen_btnodes, inode, bdi);
mi->mi_shadow = shadow;
return 0;
}
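/*
 * With mapping->host always set, the NILFS_AS_I() helper below, which
 * fell back to container_of() when host was NULL, becomes unnecessary
 * and is deleted; callers read mapping->host directly.
 */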
return &ii->vfs_inode;
}
-static inline struct inode *NILFS_AS_I(struct address_space *mapping)
-{
- return (mapping->host) ? :
- container_of(mapping, struct inode, i_data);
-}
-
/*
* Dynamic state flags of NILFS on-memory inode (i_state)
*/
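/*
 * nilfs_page_bug() no longer needs NILFS_AS_I() to find the inode number
 * for its diagnostic printk; it trusts mapping->host instead.
 */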
void nilfs_page_bug(struct page *page)
{
struct address_space *m;
- unsigned long ino = 0;
+ unsigned long ino;
if (unlikely(!page)) {
	printk(KERN_CRIT "NILFS_PAGE_BUG(NULL)\n");
	return;
}
m = page->mapping;
- if (m) {
- struct inode *inode = NILFS_AS_I(m);
- if (inode != NULL)
- ino = inode->i_ino;
- }
+ ino = m ? m->host->i_ino : 0;
+
printk(KERN_CRIT "NILFS_PAGE_BUG(%p): cnt=%d index#=%llu flags=0x%lx "
"mapping=%p ino=%lu\n",
page, atomic_read(&page->_count),
return nc;
}
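/*
 * nilfs_mapping_init() gains an inode argument and records it as
 * mapping->host instead of leaving the field NULL.  A minimal usage
 * sketch, assuming a mapping embedded in a nilfs inode:
 *
 *	address_space_init_once(mapping);
 *	nilfs_mapping_init(mapping, inode, inode->i_sb->s_bdi);
 *
 * after which mapping->host == inode holds for that cache.
 */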
-void nilfs_mapping_init(struct address_space *mapping,
+void nilfs_mapping_init(struct address_space *mapping, struct inode *inode,
struct backing_dev_info *bdi)
{
- mapping->host = NULL;
+ mapping->host = inode;
mapping->flags = 0;
mapping_set_gfp_mask(mapping, GFP_NOFS);
mapping->assoc_mapping = NULL;
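/*
 * The prototype in the corresponding header is updated to the new
 * three-argument form.
 */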
int nilfs_copy_dirty_pages(struct address_space *, struct address_space *);
void nilfs_copy_back_pages(struct address_space *, struct address_space *);
void nilfs_clear_dirty_pages(struct address_space *);
-void nilfs_mapping_init(struct address_space *mapping,
+void nilfs_mapping_init(struct address_space *mapping, struct inode *inode,
struct backing_dev_info *bdi);
unsigned nilfs_page_count_clean_buffers(struct page *, unsigned, unsigned);
unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
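/*
 * Dirty-data lookup during segment construction: the old code attached
 * buffers only when mapping->host was non-NULL.  Since every mapping now
 * has a host, that test is always true and the create_empty_buffers()
 * block becomes unconditional.
 */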
if (unlikely(page->index > last))
break;
- if (mapping->host) {
- lock_page(page);
- if (!page_has_buffers(page))
- create_empty_buffers(page,
- 1 << inode->i_blkbits, 0);
- unlock_page(page);
- }
+ lock_page(page);
+ if (!page_has_buffers(page))
+ create_empty_buffers(page, 1 << inode->i_blkbits, 0);
+ unlock_page(page);
bh = head = page_buffers(page);
do {
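/*
 * When resolving the inode that owns a collected buffer, the old code
 * branched on buffer_nilfs_node() to pick NILFS_BTNC_I() or NILFS_AS_I();
 * both cases collapse to bh->b_page->mapping->host.
 */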
nblocks = le32_to_cpu(finfo->fi_nblocks);
ndatablk = le32_to_cpu(finfo->fi_ndatablk);
- if (buffer_nilfs_node(bh))
- inode = NILFS_BTNC_I(bh->b_page->mapping);
- else
- inode = NILFS_AS_I(bh->b_page->mapping);
+ inode = bh->b_page->mapping->host;
if (mode == SC_LSEG_DSYNC)
sc_op = &nilfs_sc_dsync_ops;
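/*
 * Inode allocation initializes the btnode cache directly through
 * nilfs_mapping_init(), passing the freshly allocated vfs_inode as the
 * host; this is what makes the removed wrapper and NILFS_AS_I() obsolete.
 */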
ii->i_state = 0;
ii->i_cno = 0;
ii->vfs_inode.i_version = 1;
- nilfs_btnode_cache_init(&ii->i_btnode_cache, sb->s_bdi);
+ nilfs_mapping_init(&ii->i_btnode_cache, &ii->vfs_inode, sb->s_bdi);
return &ii->vfs_inode;
}