/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static void clear_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	unsigned long flags;

	if (PageDirty(page)) {
		spin_lock_irqsave(&mapping->tree_lock, flags);
		radix_tree_tag_clear(&mapping->page_tree,
				page_index(page),
				PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);

		clear_page_dirty_for_io(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}
static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);

	return get_meta_page(sbi, index);
}
static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = get_meta_page(sbi, src_off);

	/* Dirty src_page means that it is already the new target NAT page. */
	if (PageDirty(src_page))
		return src_page;

	dst_page = grab_meta_page(sbi, dst_off);

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}
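/*
 * Editor's note (sketch, not from the original file): NAT blocks are kept in
 * two copies that alternate across checkpoints. current_nat_addr() picks the
 * copy that was valid at the last checkpoint, next_nat_addr() the other one,
 * and set_to_next_nat() presumably flips the bit covering this nid's NAT
 * block in nm_i->nat_bitmap so that subsequent lookups read the freshly
 * written copy. The PageDirty() check above is what makes repeated updates
 * within one checkpoint land in the same destination block.
 */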
static void ra_nat_pages(struct f2fs_sb_info *sbi, int nid)
{
	struct address_space *mapping = sbi->meta_inode->i_mapping;
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct page *page;
	pgoff_t index;
	int i;

	for (i = 0; i < FREE_NID_PAGES; i++, nid += NAT_ENTRY_PER_BLOCK) {
		if (nid >= nm_i->max_nid)
			nid = 0;
		index = current_nat_addr(sbi, nid);

		page = grab_cache_page(mapping, index);
		if (!page)
			continue;
		if (PageUptodate(page)) {
			f2fs_put_page(page, 1);
			continue;
		}
		if (f2fs_readpage(sbi, page, index, READ))
			continue;

		f2fs_put_page(page, 0);
	}
}
static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	kmem_cache_free(nat_entry_slab, e);
}
int is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	int is_cp = 1;

	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !e->checkpointed)
		is_cp = 0;
	read_unlock(&nm_i->nat_tree_lock);
	return is_cp;
}
static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct nat_entry *new;

	new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
	if (!new)
		return NULL;
	if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
		kmem_cache_free(nat_entry_slab, new);
		return NULL;
	}
	memset(new, 0, sizeof(struct nat_entry));
	nat_set_nid(new, nid);
	list_add_tail(&new->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return new;
}
static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e) {
		e = grab_nat_entry(nm_i, nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		nat_set_blkaddr(e, le32_to_cpu(ne->block_addr));
		nat_set_ino(e, le32_to_cpu(ne->ino));
		nat_set_version(e, ne->version);
		e->checkpointed = true;
	}
	write_unlock(&nm_i->nat_tree_lock);
}
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = grab_nat_entry(nm_i, ni->nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		e->ni = *ni;
		e->checkpointed = true;
		BUG_ON(ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * When a nid is reallocated, a stale nat entry may remain
		 * in the nat cache, so reinitialize it with new information.
		 */
		e->ni = *ni;
		BUG_ON(ni->blk_addr != NULL_ADDR);
	}

	if (new_blkaddr == NEW_ADDR)
		e->checkpointed = false;

	/* sanity check */
	BUG_ON(nat_get_blkaddr(e) != ni->blk_addr);
	BUG_ON(nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	BUG_ON(nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	BUG_ON(nat_get_blkaddr(e) != NEW_ADDR &&
			nat_get_blkaddr(e) != NULL_ADDR &&
			new_blkaddr == NEW_ADDR);

	/* increment the version number, as the node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	__set_nat_cache_dirty(nm_i, e);
	write_unlock(&nm_i->nat_tree_lock);
}
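/*
 * Editor's note (sketch, not from the original file): the BUG_ON()s above
 * encode the intended lifecycle of a cached nat entry, roughly
 *
 *	NULL_ADDR --new_node_page()---> NEW_ADDR
 *	NEW_ADDR  --node writeback----> on-disk block address
 *	any valid --truncate_node()---> NULL_ADDR (version incremented)
 *
 * The version bump on removal is presumably what lets later consumers tell
 * a stale node block from a live one after the nid is reused.
 */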
static int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	if (nm_i->nat_cnt < 2 * NM_WOUT_THRESHOLD)
		return 0;

	write_lock(&nm_i->nat_tree_lock);
	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	write_unlock(&nm_i->nat_tree_lock);
	return nr_shrink;
}
/*
 * This function always succeeds.
 */
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	int i;

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
	ni->nid = nid;

	/* Check nat cache */
	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
	}
	read_unlock(&nm_i->nat_tree_lock);
	if (e)
		return;

	/* Check current segment summary */
	mutex_lock(&curseg->curseg_mutex);
	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(sum, i);
		node_info_from_raw_nat(ni, &ne);
	}
	mutex_unlock(&curseg->curseg_mutex);
	if (i >= 0)
		goto cache;

	/* Fill node_info from nat page */
	page = get_current_nat_page(sbi, start_nid);
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	/* cache nat entry */
	cache_nat_entry(NM_I(sbi), nid, &ne);
}
/*
 * The maximum depth is four.
 * Offset[0] will have the raw inode offset.
 */
static int get_node_path(long block, int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE;
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		BUG();
	}
got:
	return level;
}
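/*
 * Worked example (editorial, not from the original file), assuming the usual
 * 4KB-block constants ADDRS_PER_INODE = 923, ADDRS_PER_BLOCK = 1018 and
 * NIDS_PER_BLOCK = 1018. For block == 923 + 1018, i.e. the first slot of the
 * second direct node: after the two subtractions block == 0, so
 * offset[] = { NODE_DIR2_BLOCK, 0 }, noffset[1] = 2 and level == 1. For
 * block == 923 + 2 * 1018, the single-indirect case fires with
 * offset[] = { NODE_IND1_BLOCK, 0, 0 }, noffset[1] = 3, noffset[2] = 4 and
 * level == 2.
 */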
/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a mutex by calling mutex_lock_op() and
 * mutex_unlock_op(), unless the lookup is read-only (RDONLY_NODE).
 * In the read-only case, we don't need to care about the mutex.
 */
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *npage[4];
	struct page *parent;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i;
	int err = 0;

	level = get_node_path(index, offset, noffset);

	nids[0] = dn->inode->i_ino;
	npage[0] = get_node_page(sbi, nids[0]);
	if (IS_ERR(npage[0]))
		return PTR_ERR(npage[0]);

	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = new_node_page(dn, noffset[i]);
			if (IS_ERR(npage[i])) {
				alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	return err;
}
static void truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct node_info ni;

	get_node_info(sbi, dn->nid, &ni);
	if (dn->inode->i_blocks == 0) {
		BUG_ON(ni.blk_addr != NULL_ADDR);
		goto invalidate;
	}
	BUG_ON(ni.blk_addr == NULL_ADDR);

	/* Deallocate node address */
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode, 1);
	set_node_addr(sbi, &ni, NULL_ADDR);

	if (dn->nid == dn->inode->i_ino) {
		remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
	} else {
		sync_inode_page(dn);
	}
invalidate:
	clear_node_page_dirty(dn->node_page);
	F2FS_SET_SB_DIRT(sbi);

	f2fs_put_page(dn->node_page, 1);
	dn->node_page = NULL;
}
static int truncate_dnode(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *page;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	truncate_data_blocks(dn);
	truncate_node(dn);
	return 1;
}
static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page))
		return PTR_ERR(page);

	rn = (struct f2fs_node *)page_address(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			set_nid(page, i, 0, false);
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				set_nid(page, i, 0, false);
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		truncate_node(dn);
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	return freed;

out_err:
	f2fs_put_page(page, 1);
	return ret;
}
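/*
 * Editor's note (not from the original file): the return value counts node
 * blocks freed, and NIDS_PER_BLOCK + 1 is the "whole subtree gone" amount
 * for a depth-2 child: its NIDS_PER_BLOCK direct-node slots plus the
 * indirect node itself. With NIDS_PER_BLOCK = 1018 (4KB blocks) that is
 * 1019, which is also what a hole (dn->nid == 0) reports above so that the
 * caller can safely clear the parent slot whenever ret == NIDS_PER_BLOCK + 1.
 */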
static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < depth - 1; i++) {
		/* the reference count will be increased */
		pages[i] = get_node_page(sbi, nid[i]);
		if (IS_ERR(pages[i])) {
			depth = i + 1;
			err = PTR_ERR(pages[i]);
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[depth - 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		set_nid(pages[idx], i, 0, false);
	}

	if (offset[depth - 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		truncate_node(dn);
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[depth - 1] = 0;
fail:
	for (i = depth - 3; i >= 0; i--)
		f2fs_put_page(pages[i], 1);
	return err;
}
/*
 * All the block addresses of data and nodes should be nullified.
 */
int truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_node *rn;
	struct dnode_of_data dn;
	struct page *page;

	level = get_node_path(from, offset, noffset);

	page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page))
		return PTR_ERR(page);

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	rn = page_address(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, &rn->i, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, &rn->i, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			wait_on_page_writeback(page);
			rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	return err > 0 ? 0 : err;
}
/*
 * Caller should grab and release a mutex by calling mutex_lock_op() and
 * mutex_unlock_op().
 */
int remove_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	nid_t ino = inode->i_ino;
	struct dnode_of_data dn;

	page = get_node_page(sbi, ino);
	if (IS_ERR(page))
		return PTR_ERR(page);

	if (F2FS_I(inode)->i_xattr_nid) {
		nid_t nid = F2FS_I(inode)->i_xattr_nid;
		struct page *npage = get_node_page(sbi, nid);

		if (IS_ERR(npage))
			return PTR_ERR(npage);

		F2FS_I(inode)->i_xattr_nid = 0;
		set_new_dnode(&dn, inode, page, npage, nid);
		dn.inode_page_locked = 1;
		truncate_node(&dn);
	}

	/* i_blocks == 0 is possible if f2fs_new_inode() failed */
	BUG_ON(inode->i_blocks != 0 && inode->i_blocks != 1);
	set_new_dnode(&dn, inode, page, page, ino);
	truncate_node(&dn);
	return 0;
}
int new_inode_page(struct inode *inode, const struct qstr *name)
{
	struct page *page;
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	page = new_node_page(&dn, 0);
	if (IS_ERR(page))
		return PTR_ERR(page);
	init_dent_inode(name, page);
	f2fs_put_page(page, 1);
	return 0;
}
struct page *new_node_page(struct dnode_of_data *dn, unsigned int ofs)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct node_info old_ni, new_ni;
	struct page *page;
	int err;

	if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
		return ERR_PTR(-EPERM);

	page = grab_cache_page(mapping, dn->nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	get_node_info(sbi, dn->nid, &old_ni);

	SetPageUptodate(page);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);

	/* Reinitialize old_ni with new node page */
	BUG_ON(old_ni.blk_addr != NULL_ADDR);
	new_ni = old_ni;
	new_ni.ino = dn->inode->i_ino;

	if (!inc_valid_node_count(sbi, dn->inode, 1)) {
		err = -ENOSPC;
		goto fail;
	}
	set_node_addr(sbi, &new_ni, NEW_ADDR);
	set_cold_node(dn->inode, page);

	dn->node_page = page;
	sync_inode_page(dn);
	set_page_dirty(page);
	if (ofs == 0)
		inc_valid_inode_count(sbi);

	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}
/*
 * Callers should put the page according to the return value:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE: f2fs_put_page(page, 1)
 * error: nothing to put
 */
static int read_node_page(struct page *page, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	struct node_info ni;

	get_node_info(sbi, page->index, &ni);

	if (ni.blk_addr == NULL_ADDR) {
		f2fs_put_page(page, 1);
		return -ENOENT;
	}

	if (PageUptodate(page))
		return LOCKED_PAGE;

	return f2fs_readpage(sbi, page, ni.blk_addr, type);
}
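/*
 * Editorial sketch of the contract above (not from the original file):
 *
 *	err = read_node_page(page, READ_SYNC);
 *	if (err < 0)
 *		return ERR_PTR(err);	read_node_page already put the page
 *	else if (err == LOCKED_PAGE)
 *		goto got_it;		the page is uptodate and still locked
 *	lock_page(page);		err == 0: wait for the submitted read
 *
 * get_node_page() and get_node_page_ra() below both follow this pattern.
 */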
/*
 * Readahead a node page
 */
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct page *apage;
	int err;

	apage = find_get_page(mapping, nid);
	if (apage && PageUptodate(apage)) {
		f2fs_put_page(apage, 0);
		return;
	}
	f2fs_put_page(apage, 0);

	apage = grab_cache_page(mapping, nid);
	if (!apage)
		return;

	err = read_node_page(apage, READA);
	if (err == 0)
		f2fs_put_page(apage, 0);
	else if (err == LOCKED_PAGE)
		f2fs_put_page(apage, 1);
}
struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct page *page;
	int err;

	page = grab_cache_page(mapping, nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto got_it;

	lock_page(page);
	if (!PageUptodate(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
got_it:
	BUG_ON(nid != nid_of_node(page));
	mark_page_accessed(page);
	return page;
}
/*
 * Return a locked page for the desired node page,
 * and read ahead up to MAX_RA_NODE of its sibling node pages.
 */
struct page *get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb);
	struct address_space *mapping = sbi->node_inode->i_mapping;
	int i, end;
	int err = 0;
	nid_t nid;
	struct page *page;

	/* First, try getting the desired direct node. */
	nid = get_nid(parent, start, false);
	if (!nid)
		return ERR_PTR(-ENOENT);

	page = grab_cache_page(mapping, nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto page_hit;

	/* Then, try readahead for siblings of the desired node */
	end = start + MAX_RA_NODE;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start + 1; i < end; i++) {
		nid = get_nid(parent, i, false);
		if (!nid)
			continue;
		ra_node_page(sbi, nid);
	}

	lock_page(page);

page_hit:
	if (!PageUptodate(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	mark_page_accessed(page);
	return page;
}
void sync_inode_page(struct dnode_of_data *dn)
{
	if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
		update_inode(dn->inode, dn->node_page);
	} else if (dn->inode_page) {
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		update_inode(dn->inode, dn->inode_page);
		if (!dn->inode_page_locked)
			unlock_page(dn->inode_page);
	} else {
		update_inode_page(dn->inode);
	}
}
int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
					struct writeback_control *wbc)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	pgoff_t index, end;
	struct pagevec pvec;
	int step = ino ? 2 : 0;
	int nwritten = 0, wrote = 0;

	pagevec_init(&pvec, 0);

next_step:
	index = 0;
	end = LONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;

			/*
			 * In fsync mode (ino is given),
			 * we should not skip writing node pages.
			 */
			if (ino && ino_of_node(page) == ino)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino && ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			/* called by fsync() */
			if (ino && IS_DNODE(page)) {
				int mark = !is_checkpointed_node(sbi, ino);
				set_fsync_mark(page, 1);
				if (IS_INODE(page))
					set_dentry_mark(page, mark);
				nwritten++;
			} else {
				set_fsync_mark(page, 0);
				set_dentry_mark(page, 0);
			}
			mapping->a_ops->writepage(page, wbc);
			wrote++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		step++;
		goto next_step;
	}

	if (wrote)
		f2fs_submit_bio(sbi, NODE, wbc->sync_mode == WB_SYNC_ALL);

	return nwritten;
}
static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	nid_t nid;
	block_t new_addr;
	struct node_info ni;

	wait_on_page_writeback(page);

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	BUG_ON(page->index != nid);

	get_node_info(sbi, nid, &ni);

	/* This page is already truncated */
	if (ni.blk_addr == NULL_ADDR) {
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		unlock_page(page);
		return 0;
	}

	if (wbc->for_reclaim) {
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		wbc->pages_skipped++;
		set_page_dirty(page);
		return AOP_WRITEPAGE_ACTIVATE;
	}

	mutex_lock(&sbi->node_write);
	set_page_writeback(page);
	write_node_page(sbi, page, nid, ni.blk_addr, &new_addr);
	set_node_addr(sbi, &ni, new_addr);
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	mutex_unlock(&sbi->node_write);
	unlock_page(page);
	return 0;
}
/*
 * It is very important to gather dirty pages and write them at once, so that
 * we can submit a big bio without interfering with other data writes.
 * By default, 512 pages (2MB), one segment's worth, is quite reasonable.
 */
#define COLLECT_DIRTY_NODES	512
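/*
 * Editor's note (not from the original file): assuming 4KB pages,
 * 512 pages * 4KB = 2MB, which matches the f2fs segment size, so one flush
 * can fill a whole segment's worth of node blocks in a single pass.
 */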
static int f2fs_write_node_pages(struct address_space *mapping,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	struct block_device *bdev = sbi->sb->s_bdev;
	long nr_to_write = wbc->nr_to_write;

	/* First check balancing cached NAT entries */
	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) {
		f2fs_sync_fs(sbi->sb, true);
		return 0;
	}

	/* collect a number of dirty node pages and write together */
	if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES)
		return 0;

	/* if mounting failed, skip writing node pages */
	wbc->nr_to_write = bio_get_nr_vecs(bdev);
	sync_node_pages(sbi, 0, wbc);
	wbc->nr_to_write = nr_to_write -
		(bio_get_nr_vecs(bdev) - wbc->nr_to_write);
	return 0;
}
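/*
 * Editor's note (not from the original file): the arithmetic above charges
 * the caller only for what was actually written. E.g. if bio_get_nr_vecs()
 * is 256 and sync_node_pages() leaves wbc->nr_to_write at 56, then
 * 256 - 56 = 200 pages were written and the caller's budget becomes
 * nr_to_write - 200.
 */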
static int f2fs_set_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(sbi, F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		return 1;
	}
	return 0;
}
static void f2fs_invalidate_node_page(struct page *page, unsigned long offset)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	if (PageDirty(page))
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	ClearPagePrivate(page);
}

static int f2fs_release_node_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}
/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_node_page,
	.releasepage	= f2fs_release_node_page,
};
static struct free_nid *__lookup_free_nid_list(nid_t n, struct list_head *head)
{
	struct list_head *this;
	struct free_nid *i;

	list_for_each(this, head) {
		i = list_entry(this, struct free_nid, list);
		if (i->nid == n)
			return i;
	}
	return NULL;
}

static void __del_from_free_nid_list(struct free_nid *i)
{
	list_del(&i->list);
	kmem_cache_free(free_nid_slab, i);
}
static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;

	if (nm_i->fcnt > 2 * MAX_FREE_NIDS)
		return 0;
retry:
	i = kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	if (!i) {
		cond_resched();
		goto retry;
	}
	i->nid = nid;
	i->state = NID_NEW;

	spin_lock(&nm_i->free_nid_list_lock);
	if (__lookup_free_nid_list(nid, &nm_i->free_nid_list)) {
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}
	list_add_tail(&i->list, &nm_i->free_nid_list);
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
	return 1;
}
static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	if (i && i->state == NID_NEW) {
		__del_from_free_nid_list(i);
		nm_i->fcnt--;
	}
	spin_unlock(&nm_i->free_nid_list_lock);
}
static int scan_nat_page(struct f2fs_nm_info *nm_i,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	int fcnt = 0;
	int i;

	/* 0 nid should not be used */
	if (start_nid == 0)
		++start_nid;

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
		if (start_nid >= nm_i->max_nid)
			break;
		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
		BUG_ON(blk_addr == NEW_ADDR);
		if (blk_addr == NULL_ADDR)
			fcnt += add_free_nid(nm_i, start_nid);
	}
	return fcnt;
}
static void build_free_nids(struct f2fs_sb_info *sbi)
{
	struct free_nid *fnid, *next_fnid;
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t nid = 0;
	bool is_cycled = false;
	int fcnt = 0;
	int i;

	nid = nm_i->next_scan_nid;
	nm_i->init_scan_nid = nid;

	ra_nat_pages(sbi, nid);

	while (1) {
		struct page *page = get_current_nat_page(sbi, nid);

		fcnt += scan_nat_page(nm_i, page, nid);
		f2fs_put_page(page, 1);

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));

		if (nid >= nm_i->max_nid) {
			nid = 0;
			is_cycled = true;
		}
		if (fcnt > MAX_FREE_NIDS)
			break;
		if (is_cycled && nm_i->init_scan_nid <= nid)
			break;
	}

	/* go to the next nat page in order to reuse free nids first */
	nm_i->next_scan_nid = nm_i->init_scan_nid + NAT_ENTRY_PER_BLOCK;

	/* find free nids from current sum_pages */
	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(sum, i));
		if (addr == NULL_ADDR)
			add_free_nid(nm_i, nid);
		else
			remove_free_nid(nm_i, nid);
	}
	mutex_unlock(&curseg->curseg_mutex);

	/* remove the free nids from current allocated nids */
	list_for_each_entry_safe(fnid, next_fnid, &nm_i->free_nid_list, list) {
		struct nat_entry *ne;

		read_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, fnid->nid);
		if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
			remove_free_nid(nm_i, fnid->nid);
		read_unlock(&nm_i->nat_tree_lock);
	}
}
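/*
 * Editorial summary (not from the original file): build_free_nids() merges
 * three views of nid allocation state, in order: (1) the on-disk NAT blocks
 * via scan_nat_page(), (2) the not-yet-flushed NAT entries journaled in the
 * hot-data current segment, and (3) the in-memory nat cache, which drops any
 * candidate whose cached block address shows the nid is actually in use.
 */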
/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid can be used as an ino as well as a nid when an inode
 * is created.
 */
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
	struct list_head *this;
retry:
	mutex_lock(&nm_i->build_lock);
	if (!nm_i->fcnt) {
		/* scan NAT in order to build free nid list */
		build_free_nids(sbi);
		if (!nm_i->fcnt) {
			mutex_unlock(&nm_i->build_lock);
			return false;
		}
	}
	mutex_unlock(&nm_i->build_lock);

	/*
	 * We check fcnt again because the previous check is racy:
	 * we didn't hold free_nid_list_lock, so another thread
	 * could have consumed all of the free nids.
	 */
	spin_lock(&nm_i->free_nid_list_lock);
	if (!nm_i->fcnt) {
		spin_unlock(&nm_i->free_nid_list_lock);
		goto retry;
	}

	BUG_ON(list_empty(&nm_i->free_nid_list));
	list_for_each(this, &nm_i->free_nid_list) {
		i = list_entry(this, struct free_nid, list);
		if (i->state == NID_NEW)
			break;
	}

	BUG_ON(i->state != NID_NEW);
	*nid = i->nid;
	i->state = NID_ALLOC;
	nm_i->fcnt--;
	spin_unlock(&nm_i->free_nid_list_lock);
	return true;
}
/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	BUG_ON(!i || i->state != NID_ALLOC);
	__del_from_free_nid_list(i);
	spin_unlock(&nm_i->free_nid_list_lock);
}
/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	BUG_ON(!i || i->state != NID_ALLOC);
	i->state = NID_NEW;
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
}
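/*
 * Editorial sketch of the allocation protocol (not from the original file),
 * mirroring how get_dnode_of_data() uses it above:
 *
 *	nid_t nid;
 *	if (!alloc_nid(sbi, &nid))
 *		return -ENOSPC;			no free nid available
 *	page = new_node_page(dn, noffset);
 *	if (IS_ERR(page))
 *		alloc_nid_failed(sbi, nid);	put the nid back as NID_NEW
 *	else
 *		alloc_nid_done(sbi, nid);	drop it from the free list
 */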
void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
		struct f2fs_summary *sum, struct node_info *ni,
		block_t new_blkaddr)
{
	rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr);
	set_node_addr(sbi, ni, new_blkaddr);
	clear_node_page_dirty(page);
}
int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct f2fs_node *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;

	ipage = grab_cache_page(mapping, ino);
	if (!ipage)
		return -ENOMEM;

	/* Should not use this inode from free nid list */
	remove_free_nid(NM_I(sbi), ino);

	get_node_info(sbi, ino, &old_ni);
	SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);

	src = (struct f2fs_node *)page_address(page);
	dst = (struct f2fs_node *)page_address(ipage);

	memcpy(dst, src, (unsigned long)&src->i.i_ext - (unsigned long)&src->i);
	dst->i.i_size = 0;
	dst->i.i_blocks = cpu_to_le64(1);
	dst->i.i_links = cpu_to_le32(1);
	dst->i.i_xattr_nid = 0;

	new_ni = old_ni;
	new_ni.ino = ino;

	set_node_addr(sbi, &new_ni, NEW_ADDR);
	inc_valid_inode_count(sbi);

	f2fs_put_page(ipage, 1);
	return 0;
}
int restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	struct page *page;
	block_t addr;
	int i, last_offset;

	/* allocate a temporary page for reading node blocks */
	page = alloc_page(GFP_NOFS | __GFP_ZERO);
	if (!page)
		return -ENOMEM;
	lock_page(page);

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; i < last_offset; i++, sum_entry++) {
		/*
		 * In order to read the next node page,
		 * we must clear the PageUptodate flag.
		 */
		ClearPageUptodate(page);

		if (f2fs_readpage(sbi, page, addr, READ_SYNC))
			goto out;

		lock_page(page);
		rn = (struct f2fs_node *)page_address(page);
		sum_entry->nid = rn->footer.nid;
		sum_entry->version = 0;
		sum_entry->ofs_in_node = 0;
		addr++;
	}
out:
	unlock_page(page);
	__free_pages(page, 0);
	return 0;
}
static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i;

	mutex_lock(&curseg->curseg_mutex);

	if (nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) {
		mutex_unlock(&curseg->curseg_mutex);
		return false;
	}

	for (i = 0; i < nats_in_cursum(sum); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(sum, i));

		raw_ne = nat_in_journal(sum, i);
retry:
		write_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne) {
			__set_nat_cache_dirty(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);
			continue;
		}
		ne = grab_nat_entry(nm_i, nid);
		if (!ne) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		nat_set_blkaddr(ne, le32_to_cpu(raw_ne.block_addr));
		nat_set_ino(ne, le32_to_cpu(raw_ne.ino));
		nat_set_version(ne, raw_ne.version);
		__set_nat_cache_dirty(nm_i, ne);
		write_unlock(&nm_i->nat_tree_lock);
	}
	update_nats_in_cursum(sum, -i);
	mutex_unlock(&curseg->curseg_mutex);
	return true;
}
/*
 * This function is called during the checkpointing process.
 */
void flush_nat_entries(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	struct list_head *cur, *n;
	struct page *page = NULL;
	struct f2fs_nat_block *nat_blk = NULL;
	nid_t start_nid = 0, end_nid = 0;
	bool flushed;

	flushed = flush_nats_in_journal(sbi);

	if (!flushed)
		mutex_lock(&curseg->curseg_mutex);

	/* 1) flush dirty nat caches */
	list_for_each_safe(cur, n, &nm_i->dirty_nat_entries) {
		struct nat_entry *ne;
		nid_t nid;
		struct f2fs_nat_entry raw_ne;
		int offset = -1;
		block_t new_blkaddr;

		ne = list_entry(cur, struct nat_entry, list);
		nid = nat_get_nid(ne);

		if (nat_get_blkaddr(ne) == NEW_ADDR)
			continue;

		if (flushed)
			goto to_nat_page;

		/* if there is room for nat entries in the current summary page */
		offset = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 1);
		if (offset >= 0) {
			raw_ne = nat_in_journal(sum, offset);
			goto flush_now;
		}
to_nat_page:
		if (!page || (start_nid > nid || nid > end_nid)) {
			if (page) {
				f2fs_put_page(page, 1);
				page = NULL;
			}
			start_nid = START_NID(nid);
			end_nid = start_nid + NAT_ENTRY_PER_BLOCK - 1;

			/*
			 * get the nat block page: dirtied, with an increased
			 * reference count, mapped and locked
			 */
			page = get_next_nat_page(sbi, start_nid);
			nat_blk = page_address(page);
		}

		BUG_ON(!nat_blk);
		raw_ne = nat_blk->entries[nid - start_nid];
flush_now:
		new_blkaddr = nat_get_blkaddr(ne);

		raw_ne.ino = cpu_to_le32(nat_get_ino(ne));
		raw_ne.block_addr = cpu_to_le32(new_blkaddr);
		raw_ne.version = nat_get_version(ne);

		if (offset < 0) {
			nat_blk->entries[nid - start_nid] = raw_ne;
		} else {
			nat_in_journal(sum, offset) = raw_ne;
			nid_in_journal(sum, offset) = cpu_to_le32(nid);
		}

		if (nat_get_blkaddr(ne) == NULL_ADDR &&
				!add_free_nid(NM_I(sbi), nid)) {
			write_lock(&nm_i->nat_tree_lock);
			__del_from_nat_cache(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);
		} else {
			write_lock(&nm_i->nat_tree_lock);
			__clear_nat_cache_dirty(nm_i, ne);
			ne->checkpointed = true;
			write_unlock(&nm_i->nat_tree_lock);
		}
	}
	if (!flushed)
		mutex_unlock(&curseg->curseg_mutex);
	f2fs_put_page(page, 1);

	/* 2) shrink nat caches if necessary */
	try_to_free_nats(sbi, nm_i->nat_cnt - NM_WOUT_THRESHOLD);
}
static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs, nat_blocks;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes the pair segment, so divide by 2 */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
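	/*
	 * Editorial arithmetic (not from the original file): each NAT area
	 * stores two alternating copies (see get_next_nat_page() above),
	 * hence the >> 1. With 4KB blocks an f2fs_nat_entry is 9 bytes, so
	 * NAT_ENTRY_PER_BLOCK = 455; e.g. one NAT segment pair (512 blocks
	 * per copy) addresses 455 * 512 = 232,960 nids.
	 */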
	nm_i->fcnt = 0;
	nm_i->nat_cnt = 0;

	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->nat_entries);
	INIT_LIST_HEAD(&nm_i->dirty_nat_entries);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->free_nid_list_lock);
	rwlock_init(&nm_i->nat_tree_lock);

	nm_i->init_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
	return 0;
}
int build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	build_free_nids(sbi);
	return 0;
}
void destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->free_nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		BUG_ON(i->state == NID_ALLOC);
		__del_from_free_nid_list(i);
		nm_i->fcnt--;
	}
	BUG_ON(nm_i->fcnt);
	spin_unlock(&nm_i->free_nid_list_lock);

	/* destroy nat cache */
	write_lock(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
					nid, NATVEC_SIZE, natvec))) {
		unsigned idx;
		for (idx = 0; idx < found; idx++) {
			struct nat_entry *e = natvec[idx];
			nid = nat_get_nid(e) + 1;
			__del_from_nat_cache(nm_i, e);
		}
	}
	BUG_ON(nm_i->nat_cnt);
	write_unlock(&nm_i->nat_tree_lock);

	kfree(nm_i->nat_bitmap);
	sbi->nm_info = NULL;
	kfree(nm_i);
}
int __init create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
			sizeof(struct nat_entry), NULL);
	if (!nat_entry_slab)
		return -ENOMEM;

	free_nid_slab = f2fs_kmem_cache_create("free_nid",
			sizeof(struct free_nid), NULL);
	if (!free_nid_slab) {
		kmem_cache_destroy(nat_entry_slab);
		return -ENOMEM;
	}
	return 0;
}

void destroy_node_manager_caches(void)
{
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}