ext4: convert ext4_bread() to use the ERR_PTR convention
[firefly-linux-kernel-4.4.55.git]
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 367a60c07cf034e6983d8a51cf0eb321394b6209..8aa241a000c5cb7cb0e88780e2041df5a32d78e6 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -734,11 +734,11 @@ int ext4_get_block(struct inode *inode, sector_t iblock,
  * `handle' can be NULL if create is zero
  */
 struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
-                               ext4_lblk_t block, int create, int *errp)
+                               ext4_lblk_t block, int create)
 {
        struct ext4_map_blocks map;
        struct buffer_head *bh;
-       int fatal = 0, err;
+       int err;
 
        J_ASSERT(handle != NULL || create == 0);
 
@@ -747,21 +747,14 @@ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
        err = ext4_map_blocks(handle, inode, &map,
                              create ? EXT4_GET_BLOCKS_CREATE : 0);
 
-       /* ensure we send some value back into *errp */
-       *errp = 0;
-
-       if (create && err == 0)
-               err = -ENOSPC;  /* should never happen */
+       if (err == 0)
+               return create ? ERR_PTR(-ENOSPC) : NULL;
        if (err < 0)
-               *errp = err;
-       if (err <= 0)
-               return NULL;
+               return ERR_PTR(err);
 
        bh = sb_getblk(inode->i_sb, map.m_pblk);
-       if (unlikely(!bh)) {
-               *errp = -ENOMEM;
-               return NULL;
-       }
+       if (unlikely(!bh))
+               return ERR_PTR(-ENOMEM);
        if (map.m_flags & EXT4_MAP_NEW) {
                J_ASSERT(create != 0);
                J_ASSERT(handle != NULL);
@@ -775,44 +768,44 @@ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
                 */
                lock_buffer(bh);
                BUFFER_TRACE(bh, "call get_create_access");
-               fatal = ext4_journal_get_create_access(handle, bh);
-               if (!fatal && !buffer_uptodate(bh)) {
+               err = ext4_journal_get_create_access(handle, bh);
+               if (unlikely(err)) {
+                       unlock_buffer(bh);
+                       goto errout;
+               }
+               if (!buffer_uptodate(bh)) {
                        memset(bh->b_data, 0, inode->i_sb->s_blocksize);
                        set_buffer_uptodate(bh);
                }
                unlock_buffer(bh);
                BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
                err = ext4_handle_dirty_metadata(handle, inode, bh);
-               if (!fatal)
-                       fatal = err;
-       } else {
+               if (unlikely(err))
+                       goto errout;
+       } else
                BUFFER_TRACE(bh, "not a new buffer");
-       }
-       if (fatal) {
-               *errp = fatal;
-               brelse(bh);
-               bh = NULL;
-       }
        return bh;
+errout:
+       brelse(bh);
+       return ERR_PTR(err);
 }
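
With this change ext4_getblk() has a three-way return contract: a valid buffer_head on success, NULL for an unmapped hole when create == 0, and an ERR_PTR-encoded errno on failure. For reference, a simplified sketch of the pointer-errno helpers used above (the real definitions live in include/linux/err.h and carry __must_check/__force annotations): errno values are at most 4095, so they map into the last page of the address space, which is never a valid kernel pointer.

	#define MAX_ERRNO	4095

	static inline void *ERR_PTR(long error)
	{
		return (void *)error;		/* smuggle -errno in a pointer */
	}

	static inline long PTR_ERR(const void *ptr)
	{
		return (long)ptr;		/* recover the -errno */
	}

	static inline bool IS_ERR(const void *ptr)
	{
		/* true only for the top 4095 addresses, never a real pointer */
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}
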
 
 struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
-                              ext4_lblk_t block, int create, int *err)
+                              ext4_lblk_t block, int create)
 {
        struct buffer_head *bh;
 
-       bh = ext4_getblk(handle, inode, block, create, err);
-       if (!bh)
+       bh = ext4_getblk(handle, inode, block, create);
+       if (IS_ERR(bh))
                return bh;
-       if (buffer_uptodate(bh))
+       if (!bh || buffer_uptodate(bh))
                return bh;
        ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
        wait_on_buffer(bh);
        if (buffer_uptodate(bh))
                return bh;
        put_bh(bh);
-       *err = -EIO;
-       return NULL;
+       return ERR_PTR(-EIO);
 }
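
Call sites migrate accordingly. A hedged before/after sketch at a hypothetical read-only caller (the blk variable and hole label are illustrative, not from this patch): under the old convention a NULL return was ambiguous and had to be disambiguated through *err, while under the new one the return value alone tells the whole story.

	/* old convention: NULL for both hole and failure, errno via *err */
	bh = ext4_bread(handle, inode, blk, 0, &err);
	if (!bh) {
		if (err)
			return err;	/* real failure */
		goto hole;		/* block simply not mapped */
	}

	/* new convention: error, hole, and success are all distinct */
	bh = ext4_bread(handle, inode, blk, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);	/* -EIO, -ENOMEM, -ENOSPC, ... */
	if (!bh)
		goto hole;		/* block simply not mapped */
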
 
 int ext4_walk_page_buffers(handle_t *handle,
@@ -1055,27 +1048,11 @@ static int ext4_write_end(struct file *file,
        } else
                copied = block_write_end(file, mapping, pos,
                                         len, copied, page, fsdata);
-
        /*
-        * No need to use i_size_read() here, the i_size
-        * cannot change under us because we hold i_mutex.
-        *
-        * But it's important to update i_size while still holding page lock:
+        * it's important to update i_size while still holding page lock:
         * page writeout could otherwise come in and zero beyond i_size.
         */
-       if (pos + copied > inode->i_size) {
-               i_size_write(inode, pos + copied);
-               i_size_changed = 1;
-       }
-
-       if (pos + copied > EXT4_I(inode)->i_disksize) {
-               /* We need to mark inode dirty even if
-                * new_i_size is less than inode->i_size
-                * but greater than i_disksize. (hint delalloc)
-                */
-               ext4_update_i_disksize(inode, (pos + copied));
-               i_size_changed = 1;
-       }
+       i_size_changed = ext4_update_inode_size(inode, pos + copied);
        unlock_page(page);
        page_cache_release(page);
 
@@ -1123,7 +1100,7 @@ static int ext4_journalled_write_end(struct file *file,
        int ret = 0, ret2;
        int partial = 0;
        unsigned from, to;
-       loff_t new_i_size;
+       int size_changed = 0;
 
        trace_ext4_journalled_write_end(inode, pos, len, copied);
        from = pos & (PAGE_CACHE_SIZE - 1);
@@ -1146,20 +1123,18 @@ static int ext4_journalled_write_end(struct file *file,
                if (!partial)
                        SetPageUptodate(page);
        }
-       new_i_size = pos + copied;
-       if (new_i_size > inode->i_size)
-               i_size_write(inode, pos+copied);
+       size_changed = ext4_update_inode_size(inode, pos + copied);
        ext4_set_inode_state(inode, EXT4_STATE_JDATA);
        EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
-       if (new_i_size > EXT4_I(inode)->i_disksize) {
-               ext4_update_i_disksize(inode, new_i_size);
+       unlock_page(page);
+       page_cache_release(page);
+
+       if (size_changed) {
                ret2 = ext4_mark_inode_dirty(handle, inode);
                if (!ret)
                        ret = ret2;
        }
 
-       unlock_page(page);
-       page_cache_release(page);
        if (pos + len > inode->i_size && ext4_can_truncate(inode))
                /* if we have allocated more blocks and copied
                 * less. We will have blocks allocated outside
@@ -2095,6 +2070,7 @@ static int mpage_map_and_submit_extent(handle_t *handle,
        struct ext4_map_blocks *map = &mpd->map;
        int err;
        loff_t disksize;
+       int progress = 0;
 
        mpd->io_submit.io_end->offset =
                                ((loff_t)map->m_lblk) << inode->i_blkbits;
@@ -2111,8 +2087,11 @@ static int mpage_map_and_submit_extent(handle_t *handle,
                         * is non-zero, a commit should free up blocks.
                         */
                        if ((err == -ENOMEM) ||
-                           (err == -ENOSPC && ext4_count_free_clusters(sb)))
+                           (err == -ENOSPC && ext4_count_free_clusters(sb))) {
+                               if (progress)
+                                       goto update_disksize;
                                return err;
+                       }
                        ext4_msg(sb, KERN_CRIT,
                                 "Delayed block allocation failed for "
                                 "inode %lu at logical offset %llu with"
@@ -2129,15 +2108,17 @@ static int mpage_map_and_submit_extent(handle_t *handle,
                        *give_up_on_write = true;
                        return err;
                }
+               progress = 1;
                /*
                 * Update buffer state, submit mapped pages, and get us new
                 * extent to map
                 */
                err = mpage_map_and_submit_buffers(mpd);
                if (err < 0)
-                       return err;
+                       goto update_disksize;
        } while (map->m_len);
 
+update_disksize:
        /*
         * Update on-disk size after IO is submitted.  Races with
         * truncate are avoided by checking i_size under i_data_sem.
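
Stitched together across the three hunks above, the retry loop now looks roughly like the skeleton below (a simplified sketch, not the full function: retryable() stands in for the inline -ENOMEM / -ENOSPC-with-free-clusters test, and mpage_map_one_extent() is the mapping call the surrounding context implies). The point of the new progress flag: once at least one extent has been mapped and its pages submitted, an error no longer returns directly but falls through to the i_disksize update, so blocks already on their way to disk get accounted for.

	int progress = 0;

	do {
		err = mpage_map_one_extent(handle, mpd);
		if (err < 0) {
			if (retryable(sb, err)) {
				if (progress)
					goto update_disksize; /* keep partial work */
				return err;	/* nothing mapped yet; retry */
			}
			/* hard error: log, invalidate remaining pages, give up */
			*give_up_on_write = true;
			return err;
		}
		progress = 1;
		/* submit what was just mapped and get the next extent */
		err = mpage_map_and_submit_buffers(mpd);
		if (err < 0)
			goto update_disksize;	/* was: return err */
	} while (map->m_len);

update_disksize:
	/* i_disksize update under i_data_sem follows, as in the context above */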