ext4: quiet sparse noise about plain integer as NULL pointer
[firefly-linux-kernel-4.4.55.git] fs/ext4/extents.c
index c4e0058645345f1b1655431fc28826f31006ffe6..2fc6cc0a51ca71b2b199accb993405f270bea508 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -42,7 +42,6 @@
 #include <asm/uaccess.h>
 #include <linux/fiemap.h>
 #include "ext4_jbd2.h"
-#include "ext4_extents.h"
 
 #include <trace/events/ext4.h>
 
@@ -780,6 +779,11 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
                ix = curp->p_idx;
        }
 
+       if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
+               EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
+               return -EIO;
+       }
+
        ix->ei_block = cpu_to_le32(logical);
        ext4_idx_store_pblock(ix, ptr);
        le16_add_cpu(&curp->p_hdr->eh_entries, 1);
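
The new check guards ext4_ext_insert_index() against writing past the last index slot of the node before bumping eh_entries. For reference, a paraphrased sketch of the macros involved (they live in ext4_extents.h; treat this as illustrative, not a verbatim copy):

    #define EXT_FIRST_INDEX(__hdr__) \
            ((struct ext4_extent_idx *)(((char *)(__hdr__)) + \
                                        sizeof(struct ext4_extent_header)))
    #define EXT_MAX_INDEX(__hdr__) \
            (EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)

So the test fires only when ix would land beyond slot eh_max - 1, i.e. the node is already full or the insert position is corrupt.
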
@@ -1239,9 +1243,9 @@ static int ext4_ext_search_left(struct inode *inode,
                        if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
                                EXT4_ERROR_INODE(inode,
                                  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
-                                 ix != NULL ? ix->ei_block : 0,
+                                 ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
                                  EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
-                                   EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block : 0,
+               le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,
                                  depth);
                                return -EIO;
                        }
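
This hunk is an endianness fix in the error path: ei_block is stored little-endian on disk (__le32), so printing it raw yields a byte-swapped value on big-endian hosts. A minimal sketch of the pattern, with a hypothetical helper name:

    /* Hypothetical helper: return the logical block of an index entry
     * in host byte order.  le32_to_cpu() is a no-op on little-endian
     * CPUs and a byte swap on big-endian ones. */
    static inline u32 ix_logical(const struct ext4_extent_idx *ix)
    {
            return le32_to_cpu(ix->ei_block);
    }
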
@@ -1264,7 +1268,7 @@ static int ext4_ext_search_left(struct inode *inode,
 /*
  * search the closest allocated block to the right for *logical
  * and returns it at @logical + its physical address at @phys
- * if *logical is the smallest allocated block, the function
+ * if *logical is the largest allocated block, the function
  * returns 0 at @phys
  * return value contains 0 (success) or error code
  */
@@ -1969,6 +1973,7 @@ ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
        struct ext4_ext_cache *cex;
        BUG_ON(len == 0);
        spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
+       trace_ext4_ext_put_in_cache(inode, block, len, start);
        cex = &EXT4_I(inode)->i_cached_extent;
        cex->ec_block = block;
        cex->ec_len = len;
@@ -2070,6 +2075,7 @@ errout:
                sbi->extent_cache_misses++;
        else
                sbi->extent_cache_hits++;
+       trace_ext4_ext_in_cache(inode, block, ret);
        spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
        return ret;
 }
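
The trace_ext4_ext_put_in_cache()/trace_ext4_ext_in_cache() calls, like the other hooks added in this patch, are generated from TRACE_EVENT() definitions in include/trace/events/ext4.h. A sketch of roughly what such a definition looks like (field layout is illustrative, not copied from that header):

    TRACE_EVENT(ext4_ext_put_in_cache,
            TP_PROTO(struct inode *inode, ext4_lblk_t lblk, unsigned int len,
                     ext4_fsblk_t start),
            TP_ARGS(inode, lblk, len, start),
            TP_STRUCT__entry(
                    __field(dev_t,          dev)
                    __field(ino_t,          ino)
                    __field(ext4_lblk_t,    lblk)
                    __field(unsigned int,   len)
                    __field(ext4_fsblk_t,   start)
            ),
            TP_fast_assign(
                    __entry->dev   = inode->i_sb->s_dev;
                    __entry->ino   = inode->i_ino;
                    __entry->lblk  = lblk;
                    __entry->len   = len;
                    __entry->start = start;
            ),
            TP_printk("dev %d,%d ino %lu lblk %u len %u start %llu",
                      MAJOR(__entry->dev), MINOR(__entry->dev),
                      (unsigned long) __entry->ino, __entry->lblk,
                      __entry->len, (unsigned long long) __entry->start)
    );

Note that the call sits inside i_block_reservation_lock; a disabled tracepoint is essentially a patched-out branch, so this costs nothing in the common case.
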
@@ -2137,6 +2143,8 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
        if (err)
                return err;
        ext_debug("index is empty, remove it, free block %llu\n", leaf);
+       trace_ext4_ext_rm_idx(inode, leaf);
+
        ext4_free_blocks(handle, inode, NULL, leaf, 1,
                         EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
        return err;
@@ -2165,7 +2173,7 @@ int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
                         *  need to account for leaf block credit
                         *
                         *  bitmaps and block group descriptor blocks
-                        *  and other metadat blocks still need to be
+                        *  and other metadata blocks still need to be
                         *  accounted.
                         */
                        /* 1 bitmap, 1 block group descriptor */
@@ -2222,6 +2230,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
         */
        flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
 
+       trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster);
        /*
         * If we have a partial cluster, and it's different from the
         * cluster of the last block, we need to explicitly free the
@@ -2282,7 +2291,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
                start = ext4_ext_pblock(ex);
 
                ext_debug("free first %u blocks starting %llu\n", num, start);
-               ext4_free_blocks(handle, inode, 0, start, num, flags);
+               ext4_free_blocks(handle, inode, NULL, start, num, flags);
 
        } else {
                printk(KERN_INFO "strange request: removal(2) "
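
This hunk is what the subject line refers to: the third argument of ext4_free_blocks() is a struct buffer_head *, and sparse warns when a plain 0 is passed where a pointer is expected. A minimal reproducer, using hypothetical function names (run sparse over the file with "make C=1"):

    static void takes_bh(struct buffer_head *bh);

    static void demo(void)
    {
            takes_bh(0);    /* sparse: warning: Using plain integer as NULL pointer */
            takes_bh(NULL); /* clean */
    }

gcc accepts both forms, which is why this class of noise accumulates until a sparse run flushes it out.
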
@@ -2336,6 +2345,8 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
        ex_ee_block = le32_to_cpu(ex->ee_block);
        ex_ee_len = ext4_ext_get_actual_len(ex);
 
+       trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster);
+
        while (ex >= EXT_FIRST_EXTENT(eh) &&
                        ex_ee_block + ex_ee_len > start) {
 
@@ -2591,6 +2602,8 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
 again:
        ext4_ext_invalidate_cache(inode);
 
+       trace_ext4_ext_remove_space(inode, start, depth);
+
        /*
         * We start scanning from right side, freeing all the blocks
         * after i_size and walking into the tree depth-wise.
@@ -2686,6 +2699,9 @@ again:
                }
        }
 
+       trace_ext4_ext_remove_space_done(inode, start, depth, partial_cluster,
+                       path->p_hdr->eh_entries);
+
        /* If we still have something in the partial cluster and we have removed
         * even the first extent, then we should free the blocks in the partial
         * cluster as well. */
@@ -3285,24 +3301,9 @@ static int ext4_find_delalloc_range(struct inode *inode,
 
        while ((i >= lblk_start) && (i <= lblk_end)) {
                page = find_get_page(mapping, index);
-               if (!page || !PageDirty(page))
+               if (!page)
                        goto nextpage;
 
-               if (PageWriteback(page)) {
-                       /*
-                        * This might be a race with allocation and writeout. In
-                        * this case we just assume that the rest of the range
-                        * will eventually be written and there wont be any
-                        * delalloc blocks left.
-                        * TODO: the above assumption is troublesome, but might
-                        * work better in practice. other option could be note
-                        * somewhere that the cluster is getting written out and
-                        * detect that here.
-                        */
-                       page_cache_release(page);
-                       return 0;
-               }
-
                if (!page_has_buffers(page))
                        goto nextpage;
 
@@ -3325,8 +3326,16 @@ static int ext4_find_delalloc_range(struct inode *inode,
                                continue;
                        }
 
-                       if (buffer_delay(bh)) {
+                       /* Check whether the buffer is delayed-allocated and
+                        * not yet mapped (when da buffers are mapped during
+                        * writeout, their da_mapped bit is set).
+                        */
+                       if (buffer_delay(bh) && !buffer_da_mapped(bh)) {
                                page_cache_release(page);
+                               trace_ext4_find_delalloc_range(inode,
+                                               lblk_start, lblk_end,
+                                               search_hint_reverse,
+                                               1, i);
                                return 1;
                        }
                        if (search_hint_reverse)
@@ -3349,6 +3358,8 @@ nextpage:
                i = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
        }
 
+       trace_ext4_find_delalloc_range(inode, lblk_start, lblk_end,
+                                       search_hint_reverse, 0, 0);
        return 0;
 }
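
Two related changes in this function: the racy PageWriteback early-return is dropped, and the buffer_delay() test is tightened with buffer_da_mapped(), which tracks exactly the state the old heuristic guessed at (a delalloc buffer that has since been mapped for writeout). buffer_da_mapped() is not a generic buffer-head predicate; ext4 generates it with BUFFER_FNS() from a private state bit. A paraphrased sketch of how that looks in fs/ext4/ext4.h of this era:

    enum ext4_state_bits {
            BH_Uninit = BH_JBDPrivateStart, /* allocated but uninitialized */
            BH_AllocFromCluster,            /* part of an already-allocated cluster */
            BH_Da_Mapped,                   /* delayed-allocated block that now
                                             * has a real mapping (set during
                                             * writeout) */
    };

    BUFFER_FNS(Da_Mapped, da_mapped)        /* emits buffer_da_mapped(bh),
                                             * set_buffer_da_mapped(bh), ... */
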
 
@@ -3414,6 +3425,8 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
        /* max possible clusters for this allocation */
        allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1;
 
+       trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
+
        /* Check towards left side */
        c_offset = lblk_start & (sbi->s_cluster_ratio - 1);
        if (c_offset) {
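
s_cluster_ratio (blocks per cluster) is always a power of two, so the masking above is an inexpensive modulo; a worked example with assumed values:

    ext4_lblk_t ratio      = 16;                       /* 16 blocks per cluster */
    ext4_lblk_t lblk_start = 35;
    ext4_lblk_t c_offset   = lblk_start & (ratio - 1); /* 35 & 15 == 3 == 35 % 16 */

A nonzero c_offset means the allocation does not start on a cluster boundary, hence the "Check towards left side" branch for the partially covered cluster.
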
@@ -3453,6 +3466,9 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
                  flags, allocated);
        ext4_ext_show_leaf(inode, path);
 
+       trace_ext4_ext_handle_uninitialized_extents(inode, map, allocated,
+                                                   newblock);
+
        /* get_block() before submit the IO, split the extent */
        if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
                ret = ext4_split_unwritten_extents(handle, inode, map,
@@ -3572,7 +3588,7 @@ out2:
  * get_implied_cluster_alloc - check to see if the requested
  * allocation (in the map structure) overlaps with a cluster already
  * allocated in an extent.
- *     @sbi    The ext4-specific superblock structure
+ *     @sb     The filesystem superblock structure
  *     @map    The requested lblk->pblk mapping
  *     @ex     The extent structure which might contain an implied
  *                     cluster allocation
@@ -3609,11 +3625,12 @@ out2:
  * ext4_ext_map_blocks() will then allocate one or more new clusters
  * by calling ext4_mb_new_blocks().
  */
-static int get_implied_cluster_alloc(struct ext4_sb_info *sbi,
+static int get_implied_cluster_alloc(struct super_block *sb,
                                     struct ext4_map_blocks *map,
                                     struct ext4_extent *ex,
                                     struct ext4_ext_path *path)
 {
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
        ext4_lblk_t ex_cluster_start, ex_cluster_end;
        ext4_lblk_t rr_cluster_start, rr_cluster_end;
@@ -3662,8 +3679,12 @@ static int get_implied_cluster_alloc(struct ext4_sb_info *sbi,
                        ext4_lblk_t next = ext4_ext_next_allocated_block(path);
                        map->m_len = min(map->m_len, next - map->m_lblk);
                }
+
+               trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
                return 1;
        }
+
+       trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
        return 0;
 }
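
Widening the first parameter from ext4_sb_info to super_block costs one EXT4_SB() call inside the function but gives the new exit tracepoints the sb they need (the device number lives there); the call sites below now pass inode->i_sb instead. EXT4_SB() is just the s_fs_info accessor; a sketch of its usual (non-debug) definition:

    static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
    {
            return sb->s_fs_info;
    }
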
 
@@ -3772,6 +3793,9 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
                 * we split out initialized portions during a write.
                 */
                ee_len = ext4_ext_get_actual_len(ex);
+
+               trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);
+
                /* if found extent covers block, simply return it */
                if (in_range(map->m_lblk, ee_block, ee_len)) {
                        ext4_fsblk_t partial_cluster = 0;
@@ -3912,7 +3936,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
         * by ext4_ext_find_extent() implies a cluster we can use.
         */
        if (cluster_offset && ex &&
-           get_implied_cluster_alloc(sbi, map, ex, path)) {
+           get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
                ar.len = allocated = map->m_len;
                newblock = map->m_pblk;
                map->m_flags |= EXT4_MAP_FROM_CLUSTER;
@@ -3933,7 +3957,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
        /* Check if the extent after searching to the right implies a
         * cluster we can use. */
        if ((sbi->s_cluster_ratio > 1) && ex2 &&
-           get_implied_cluster_alloc(sbi, map, ex2, path)) {
+           get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) {
                ar.len = allocated = map->m_len;
                newblock = map->m_pblk;
                map->m_flags |= EXT4_MAP_FROM_CLUSTER;
@@ -4072,6 +4096,7 @@ got_allocated_blocks:
                        ext4_da_update_reserve_space(inode, allocated_clusters,
                                                        1);
                        if (reserved_clusters < allocated_clusters) {
+                               struct ext4_inode_info *ei = EXT4_I(inode);
                                int reservation = allocated_clusters -
                                                  reserved_clusters;
                                /*
@@ -4114,11 +4139,11 @@ got_allocated_blocks:
                                 *   remaining blocks finally gets written, we
                                 *   could claim them.
                                 */
-                               while (reservation) {
-                                       ext4_da_reserve_space(inode,
-                                                             map->m_lblk);
-                                       reservation--;
-                               }
+                               dquot_reserve_block(inode,
+                                               EXT4_C2B(sbi, reservation));
+                               spin_lock(&ei->i_block_reservation_lock);
+                               ei->i_reserved_data_blocks += reservation;
+                               spin_unlock(&ei->i_block_reservation_lock);
                        }
                }
        }
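
The replaced loop called ext4_da_reserve_space() once per missing cluster, re-taking the reservation lock and redoing the quota reservation on every iteration. The new code charges the quota once for the whole shortfall, converting clusters to blocks with EXT4_C2B(), then bumps i_reserved_data_blocks directly under i_block_reservation_lock. For reference, a paraphrased sketch of the conversion macros from bigalloc-era ext4.h:

    /* With bigalloc, a cluster is 2^s_cluster_bits blocks (otherwise 1). */
    #define EXT4_C2B(sbi, cluster)  ((cluster) << (sbi)->s_cluster_bits)
    #define EXT4_B2C(sbi, block)    ((block) >> (sbi)->s_cluster_bits)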