ext4: collapse ext4_convert_initialized_extents()
[firefly-linux-kernel-4.4.55.git] fs/ext4/inode.c
index 8a064734e6eb3ed06461e9954d036da6ff1e8147..cc95dca5cb8a2f3f19532d78eac8975c47224273 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -324,18 +324,6 @@ qsize_t *ext4_get_reserved_space(struct inode *inode)
 }
 #endif
 
-/*
- * Calculate the number of metadata blocks need to reserve
- * to allocate a block located at @lblock
- */
-static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
-{
-       if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
-               return ext4_ext_calc_metadata_amount(inode, lblock);
-
-       return ext4_ind_calc_metadata_amount(inode, lblock);
-}
-
 /*
  * Called with i_data_sem down, which is important since we can call
  * ext4_discard_preallocations() from here.
@@ -357,35 +345,10 @@ void ext4_da_update_reserve_space(struct inode *inode,
                used = ei->i_reserved_data_blocks;
        }
 
-       if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
-               ext4_warning(inode->i_sb, "ino %lu, allocated %d "
-                       "with only %d reserved metadata blocks "
-                       "(releasing %d blocks with reserved %d data blocks)",
-                       inode->i_ino, ei->i_allocated_meta_blocks,
-                            ei->i_reserved_meta_blocks, used,
-                            ei->i_reserved_data_blocks);
-               WARN_ON(1);
-               ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
-       }
-
        /* Update per-inode reservations */
        ei->i_reserved_data_blocks -= used;
-       ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
-       percpu_counter_sub(&sbi->s_dirtyclusters_counter,
-                          used + ei->i_allocated_meta_blocks);
-       ei->i_allocated_meta_blocks = 0;
+       percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);
 
-       if (ei->i_reserved_data_blocks == 0) {
-               /*
-                * We can release all of the reserved metadata blocks
-                * only when we have written all of the delayed
-                * allocation blocks.
-                */
-               percpu_counter_sub(&sbi->s_dirtyclusters_counter,
-                                  ei->i_reserved_meta_blocks);
-               ei->i_reserved_meta_blocks = 0;
-               ei->i_da_metadata_calc_len = 0;
-       }
        spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 
        /* Update quota subsystem for data blocks */
@@ -771,11 +734,11 @@ int ext4_get_block(struct inode *inode, sector_t iblock,
  * `handle' can be NULL if create is zero
  */
 struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
-                               ext4_lblk_t block, int create, int *errp)
+                               ext4_lblk_t block, int create)
 {
        struct ext4_map_blocks map;
        struct buffer_head *bh;
-       int fatal = 0, err;
+       int err;
 
        J_ASSERT(handle != NULL || create == 0);
 
@@ -784,21 +747,14 @@ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
        err = ext4_map_blocks(handle, inode, &map,
                              create ? EXT4_GET_BLOCKS_CREATE : 0);
 
-       /* ensure we send some value back into *errp */
-       *errp = 0;
-
-       if (create && err == 0)
-               err = -ENOSPC;  /* should never happen */
+       if (err == 0)
+               return create ? ERR_PTR(-ENOSPC) : NULL;
        if (err < 0)
-               *errp = err;
-       if (err <= 0)
-               return NULL;
+               return ERR_PTR(err);
 
        bh = sb_getblk(inode->i_sb, map.m_pblk);
-       if (unlikely(!bh)) {
-               *errp = -ENOMEM;
-               return NULL;
-       }
+       if (unlikely(!bh))
+               return ERR_PTR(-ENOMEM);
        if (map.m_flags & EXT4_MAP_NEW) {
                J_ASSERT(create != 0);
                J_ASSERT(handle != NULL);
@@ -812,44 +768,44 @@ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
                 */
                lock_buffer(bh);
                BUFFER_TRACE(bh, "call get_create_access");
-               fatal = ext4_journal_get_create_access(handle, bh);
-               if (!fatal && !buffer_uptodate(bh)) {
+               err = ext4_journal_get_create_access(handle, bh);
+               if (unlikely(err)) {
+                       unlock_buffer(bh);
+                       goto errout;
+               }
+               if (!buffer_uptodate(bh)) {
                        memset(bh->b_data, 0, inode->i_sb->s_blocksize);
                        set_buffer_uptodate(bh);
                }
                unlock_buffer(bh);
                BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
                err = ext4_handle_dirty_metadata(handle, inode, bh);
-               if (!fatal)
-                       fatal = err;
-       } else {
+               if (unlikely(err))
+                       goto errout;
+       } else
                BUFFER_TRACE(bh, "not a new buffer");
-       }
-       if (fatal) {
-               *errp = fatal;
-               brelse(bh);
-               bh = NULL;
-       }
        return bh;
+errout:
+       brelse(bh);
+       return ERR_PTR(err);
 }
 
 struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
-                              ext4_lblk_t block, int create, int *err)
+                              ext4_lblk_t block, int create)
 {
        struct buffer_head *bh;
 
-       bh = ext4_getblk(handle, inode, block, create, err);
-       if (!bh)
+       bh = ext4_getblk(handle, inode, block, create);
+       if (IS_ERR(bh))
                return bh;
-       if (buffer_uptodate(bh))
+       if (!bh || buffer_uptodate(bh))
                return bh;
        ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
        wait_on_buffer(bh);
        if (buffer_uptodate(bh))
                return bh;
        put_bh(bh);
-       *err = -EIO;
-       return NULL;
+       return ERR_PTR(-EIO);
 }
 
 int ext4_walk_page_buffers(handle_t *handle,
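
The hunks above drop the "int *errp" out-parameter: ext4_getblk() and
ext4_bread() now encode failure in the returned pointer itself, using the
kernel's ERR_PTR convention from include/linux/err.h. A minimal user-space
sketch of that convention — the three helper names match the kernel's, but
the scaffolding around them is illustrative only:

    #include <stdio.h>
    #include <stdint.h>

    #define MAX_ERRNO 4095

    /* Encode a negative errno value into a pointer. */
    static inline void *ERR_PTR(long error)
    {
            return (void *)error;
    }

    /* Recover the errno value from an encoded pointer. */
    static inline long PTR_ERR(const void *ptr)
    {
            return (long)ptr;
    }

    /* True if the pointer is really an encoded errno, i.e. it falls
     * within the top MAX_ERRNO values of the address space. */
    static inline int IS_ERR(const void *ptr)
    {
            return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
    }

    int main(void)
    {
            void *bh = ERR_PTR(-28);        /* -ENOSPC */

            if (IS_ERR(bh))
                    printf("error: %ld\n", PTR_ERR(bh));
            return 0;
    }

Callers now distinguish three outcomes: a valid buffer_head, NULL for an
unmapped block when create == 0, and an encoded errno — which is why the
new ext4_bread() tests IS_ERR(bh) before the "!bh || buffer_uptodate(bh)"
check.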
@@ -1092,27 +1048,11 @@ static int ext4_write_end(struct file *file,
        } else
                copied = block_write_end(file, mapping, pos,
                                         len, copied, page, fsdata);
-
        /*
-        * No need to use i_size_read() here, the i_size
-        * cannot change under us because we hole i_mutex.
-        *
-        * But it's important to update i_size while still holding page lock:
+        * it's important to update i_size while still holding page lock:
         * page writeout could otherwise come in and zero beyond i_size.
         */
-       if (pos + copied > inode->i_size) {
-               i_size_write(inode, pos + copied);
-               i_size_changed = 1;
-       }
-
-       if (pos + copied > EXT4_I(inode)->i_disksize) {
-               /* We need to mark inode dirty even if
-                * new_i_size is less that inode->i_size
-                * but greater than i_disksize. (hint delalloc)
-                */
-               ext4_update_i_disksize(inode, (pos + copied));
-               i_size_changed = 1;
-       }
+       i_size_changed = ext4_update_inode_size(inode, pos + copied);
        unlock_page(page);
        page_cache_release(page);
 
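Both ext4_write_end() and ext4_journalled_write_end() now funnel their size
bookkeeping through ext4_update_inode_size(). That helper is not defined in
this file's diff; the sketch below is inferred from the two branches it
replaces in the hunk above (a user-space model with stand-in structs, not
the kernel definition):

    #include <stdio.h>

    /* Simplified stand-ins for the kernel structures (illustration only;
     * the kernel embeds vfs_inode inside ext4_inode_info and uses
     * container_of() rather than this first-member cast). */
    struct inode { long long i_size; };
    struct ext4_inode_info { struct inode vfs_inode; long long i_disksize; };

    #define EXT4_I(inode) ((struct ext4_inode_info *)(inode))

    /*
     * Inferred shape of ext4_update_inode_size(): raise i_size and/or
     * i_disksize to newsize, and report whether the caller must then
     * call ext4_mark_inode_dirty().
     */
    static int update_inode_size(struct inode *inode, long long newsize)
    {
            int changed = 0;

            if (newsize > inode->i_size) {
                    inode->i_size = newsize;  /* i_size_write() in-kernel */
                    changed = 1;
            }
            /* i_disksize can lag i_size under delalloc, so it is checked
             * independently; the inode is dirty even if only it moves. */
            if (newsize > EXT4_I(inode)->i_disksize) {
                    EXT4_I(inode)->i_disksize = newsize;
                    changed = 1;
            }
            return changed;
    }

    int main(void)
    {
            struct ext4_inode_info ei = { { 100 }, 50 };

            /* Extending write: both sizes move, inode must be dirtied. */
            printf("changed=%d\n", update_inode_size(&ei.vfs_inode, 200));
            /* Overwrite below i_size: nothing to update. */
            printf("changed=%d\n", update_inode_size(&ei.vfs_inode, 80));
            return 0;
    }

The returned flag is also what lets the journalled path in the next hunks
unlock and release the page before deciding whether to mark the inode
dirty.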
@@ -1160,7 +1100,7 @@ static int ext4_journalled_write_end(struct file *file,
        int ret = 0, ret2;
        int partial = 0;
        unsigned from, to;
-       loff_t new_i_size;
+       int size_changed = 0;
 
        trace_ext4_journalled_write_end(inode, pos, len, copied);
        from = pos & (PAGE_CACHE_SIZE - 1);
@@ -1183,20 +1123,18 @@ static int ext4_journalled_write_end(struct file *file,
                if (!partial)
                        SetPageUptodate(page);
        }
-       new_i_size = pos + copied;
-       if (new_i_size > inode->i_size)
-               i_size_write(inode, pos+copied);
+       size_changed = ext4_update_inode_size(inode, pos + copied);
        ext4_set_inode_state(inode, EXT4_STATE_JDATA);
        EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
-       if (new_i_size > EXT4_I(inode)->i_disksize) {
-               ext4_update_i_disksize(inode, new_i_size);
+       unlock_page(page);
+       page_cache_release(page);
+
+       if (size_changed) {
                ret2 = ext4_mark_inode_dirty(handle, inode);
                if (!ret)
                        ret = ret2;
        }
 
-       unlock_page(page);
-       page_cache_release(page);
        if (pos + len > inode->i_size && ext4_can_truncate(inode))
                /* if we have allocated more blocks and copied
                 * less. We will have blocks allocated outside
@@ -1221,49 +1159,6 @@ static int ext4_journalled_write_end(struct file *file,
        return ret ? ret : copied;
 }
 
-/*
- * Reserve a metadata for a single block located at lblock
- */
-static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock)
-{
-       struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
-       struct ext4_inode_info *ei = EXT4_I(inode);
-       unsigned int md_needed;
-       ext4_lblk_t save_last_lblock;
-       int save_len;
-
-       /*
-        * recalculate the amount of metadata blocks to reserve
-        * in order to allocate nrblocks
-        * worse case is one extent per block
-        */
-       spin_lock(&ei->i_block_reservation_lock);
-       /*
-        * ext4_calc_metadata_amount() has side effects, which we have
-        * to be prepared undo if we fail to claim space.
-        */
-       save_len = ei->i_da_metadata_calc_len;
-       save_last_lblock = ei->i_da_metadata_calc_last_lblock;
-       md_needed = EXT4_NUM_B2C(sbi,
-                                ext4_calc_metadata_amount(inode, lblock));
-       trace_ext4_da_reserve_space(inode, md_needed);
-
-       /*
-        * We do still charge estimated metadata to the sb though;
-        * we cannot afford to run out of free blocks.
-        */
-       if (ext4_claim_free_clusters(sbi, md_needed, 0)) {
-               ei->i_da_metadata_calc_len = save_len;
-               ei->i_da_metadata_calc_last_lblock = save_last_lblock;
-               spin_unlock(&ei->i_block_reservation_lock);
-               return -ENOSPC;
-       }
-       ei->i_reserved_meta_blocks += md_needed;
-       spin_unlock(&ei->i_block_reservation_lock);
-
-       return 0;       /* success */
-}
-
 /*
  * Reserve a single cluster located at lblock
  */
@@ -1273,8 +1168,6 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
        struct ext4_inode_info *ei = EXT4_I(inode);
        unsigned int md_needed;
        int ret;
-       ext4_lblk_t save_last_lblock;
-       int save_len;
 
        /*
         * We will charge metadata quota at writeout time; this saves
@@ -1295,25 +1188,15 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
         * ext4_calc_metadata_amount() has side effects, which we have
         * to be prepared to undo if we fail to claim space.
         */
-       save_len = ei->i_da_metadata_calc_len;
-       save_last_lblock = ei->i_da_metadata_calc_last_lblock;
-       md_needed = EXT4_NUM_B2C(sbi,
-                                ext4_calc_metadata_amount(inode, lblock));
-       trace_ext4_da_reserve_space(inode, md_needed);
+       md_needed = 0;
+       trace_ext4_da_reserve_space(inode, 0);
 
-       /*
-        * We do still charge estimated metadata to the sb though;
-        * we cannot afford to run out of free blocks.
-        */
-       if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
-               ei->i_da_metadata_calc_len = save_len;
-               ei->i_da_metadata_calc_last_lblock = save_last_lblock;
+       if (ext4_claim_free_clusters(sbi, 1, 0)) {
                spin_unlock(&ei->i_block_reservation_lock);
                dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
                return -ENOSPC;
        }
        ei->i_reserved_data_blocks++;
-       ei->i_reserved_meta_blocks += md_needed;
        spin_unlock(&ei->i_block_reservation_lock);
 
        return 0;       /* success */
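
With the metadata pool gone, delalloc accounting reduces to a single rule:
every reserved data cluster is mirrored one-for-one in the filesystem-wide
s_dirtyclusters_counter (ext4_claim_free_clusters() adds to it on reserve;
the update and release paths above and below subtract exactly what was used
or freed, with no separate metadata term). A toy model of that lifecycle,
with simplified names — a sketch of the invariant, not ext4 code:

    #include <assert.h>

    struct fs_counters { long dirty_clusters; }; /* s_dirtyclusters_counter */
    struct inode_rsv   { long reserved_data; };  /* i_reserved_data_blocks */

    /* ext4_da_reserve_space(): claim one cluster for one delayed block. */
    static void da_reserve(struct fs_counters *fs, struct inode_rsv *ei)
    {
            fs->dirty_clusters++;   /* ext4_claim_free_clusters(sbi, 1, 0) */
            ei->reserved_data++;
    }

    /* ext4_da_update_reserve_space(): delayed blocks finally allocated. */
    static void da_update(struct fs_counters *fs, struct inode_rsv *ei,
                          long used)
    {
            ei->reserved_data -= used;
            fs->dirty_clusters -= used;   /* exactly `used`, no meta term */
    }

    int main(void)
    {
            struct fs_counters fs = { 0 };
            struct inode_rsv ei = { 0 };

            da_reserve(&fs, &ei);
            da_reserve(&fs, &ei);
            da_update(&fs, &ei, 2);

            /* Both counters drain together once all delayed blocks are
             * written; there is no leftover metadata reservation. */
            assert(ei.reserved_data == 0 && fs.dirty_clusters == 0);
            return 0;
    }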
@@ -1346,20 +1229,6 @@ static void ext4_da_release_space(struct inode *inode, int to_free)
        }
        ei->i_reserved_data_blocks -= to_free;
 
-       if (ei->i_reserved_data_blocks == 0) {
-               /*
-                * We can release all of the reserved metadata blocks
-                * only when we have written all of the delayed
-                * allocation blocks.
-                * Note that in case of bigalloc, i_reserved_meta_blocks,
-                * i_reserved_data_blocks, etc. refer to number of clusters.
-                */
-               percpu_counter_sub(&sbi->s_dirtyclusters_counter,
-                                  ei->i_reserved_meta_blocks);
-               ei->i_reserved_meta_blocks = 0;
-               ei->i_da_metadata_calc_len = 0;
-       }
-
        /* update fs dirty data blocks counter */
        percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);
 
@@ -1500,10 +1369,6 @@ static void ext4_print_free_blocks(struct inode *inode)
        ext4_msg(sb, KERN_CRIT, "Block reservation details");
        ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
                 ei->i_reserved_data_blocks);
-       ext4_msg(sb, KERN_CRIT, "i_reserved_meta_blocks=%u",
-              ei->i_reserved_meta_blocks);
-       ext4_msg(sb, KERN_CRIT, "i_allocated_meta_blocks=%u",
-              ei->i_allocated_meta_blocks);
        return;
 }
 
@@ -1620,13 +1485,6 @@ add_delayed:
                                retval = ret;
                                goto out_unlock;
                        }
-               } else {
-                       ret = ext4_da_reserve_metadata(inode, iblock);
-                       if (ret) {
-                               /* not enough space to reserve */
-                               retval = ret;
-                               goto out_unlock;
-                       }
                }
 
                ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
@@ -2212,6 +2070,7 @@ static int mpage_map_and_submit_extent(handle_t *handle,
        struct ext4_map_blocks *map = &mpd->map;
        int err;
        loff_t disksize;
+       int progress = 0;
 
        mpd->io_submit.io_end->offset =
                                ((loff_t)map->m_lblk) << inode->i_blkbits;
@@ -2228,8 +2087,11 @@ static int mpage_map_and_submit_extent(handle_t *handle,
                         * is non-zero, a commit should free up blocks.
                         */
                        if ((err == -ENOMEM) ||
-                           (err == -ENOSPC && ext4_count_free_clusters(sb)))
+                           (err == -ENOSPC && ext4_count_free_clusters(sb))) {
+                               if (progress)
+                                       goto update_disksize;
                                return err;
+                       }
                        ext4_msg(sb, KERN_CRIT,
                                 "Delayed block allocation failed for "
                                 "inode %lu at logical offset %llu with"
@@ -2246,15 +2108,17 @@ static int mpage_map_and_submit_extent(handle_t *handle,
                        *give_up_on_write = true;
                        return err;
                }
+               progress = 1;
                /*
                 * Update buffer state, submit mapped pages, and get us new
                 * extent to map
                 */
                err = mpage_map_and_submit_buffers(mpd);
                if (err < 0)
-                       return err;
+                       goto update_disksize;
        } while (map->m_len);
 
+update_disksize:
        /*
         * Update on-disk size after IO is submitted.  Races with
         * truncate are avoided by checking i_size under i_data_sem.
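
The new "progress" flag changes the error policy of the map-and-submit
loop: once any extent has been mapped and its pages submitted, a transient
-ENOMEM or -ENOSPC no longer returns immediately but falls through to the
i_disksize update, so work already on its way to disk is accounted for.
A compilable sketch of that control-flow pattern — the names and the
callback are illustrative, not ext4's:

    /* Map extents one at a time; keep partial progress on failure. */
    static int map_loop_sketch(int (*map_one)(void *ctx), void *ctx,
                               int rounds)
    {
            int progress = 0;
            int err = 0;

            while (rounds-- > 0) {
                    err = map_one(ctx);
                    if (err) {
                            if (progress)
                                    goto update_disksize; /* keep partial work */
                            return err;           /* nothing submitted yet */
                    }
                    progress = 1;
            }

    update_disksize:
            /* The on-disk size update covering whatever was submitted
             * would go here, mirroring mpage_map_and_submit_extent(). */
            return err;
    }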
@@ -2787,10 +2651,7 @@ static int ext4_da_write_end(struct file *file,
        if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
                if (ext4_has_inline_data(inode) ||
                    ext4_da_should_update_i_disksize(page, end)) {
-                       down_write(&EXT4_I(inode)->i_data_sem);
-                       if (new_i_size > EXT4_I(inode)->i_disksize)
-                               EXT4_I(inode)->i_disksize = new_i_size;
-                       up_write(&EXT4_I(inode)->i_data_sem);
+                       ext4_update_i_disksize(inode, new_i_size);
                        /* We need to mark inode dirty even if
                        * new_i_size is less than inode->i_size
                        * but greater than i_disksize. (hint delalloc)
@@ -2843,8 +2704,7 @@ int ext4_alloc_da_blocks(struct inode *inode)
 {
        trace_ext4_alloc_da_blocks(inode);
 
-       if (!EXT4_I(inode)->i_reserved_data_blocks &&
-           !EXT4_I(inode)->i_reserved_meta_blocks)
+       if (!EXT4_I(inode)->i_reserved_data_blocks)
                return 0;
 
        /*
@@ -3624,7 +3484,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
                ret = ext4_ext_remove_space(inode, first_block,
                                            stop_block - 1);
        else
-               ret = ext4_free_hole_blocks(handle, inode, first_block,
+               ret = ext4_ind_remove_space(handle, inode, first_block,
                                            stop_block);
 
        up_write(&EXT4_I(inode)->i_data_sem);