diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 6edefde23722b436641166f5b678291d6648c6b2..f2a73bd739fb94b750e66e1ee2c931e3c971a52a 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -183,62 +183,25 @@ static inline int is_partial_io(struct bio_vec *bvec)
        return bvec->bv_len != PAGE_SIZE;
 }
 
-static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
-                         u32 index, int offset, struct bio *bio)
+static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 {
-       int ret;
-       size_t clen;
-       struct page *page;
-       unsigned char *user_mem, *cmem, *uncmem = NULL;
-
-       page = bvec->bv_page;
-
-       if (zram_test_flag(zram, index, ZRAM_ZERO)) {
-               handle_zero_page(bvec);
-               return 0;
-       }
+       int ret = LZO_E_OK;
+       size_t clen = PAGE_SIZE;
+       unsigned char *cmem;
+       unsigned long handle = zram->table[index].handle;
 
-       /* Requested page is not present in compressed area */
-       if (unlikely(!zram->table[index].handle)) {
-               pr_debug("Read before write: sector=%lu, size=%u",
-                        (ulong)(bio->bi_sector), bio->bi_size);
-               handle_zero_page(bvec);
+       if (!handle || zram_test_flag(zram, index, ZRAM_ZERO)) {
+               memset(mem, 0, PAGE_SIZE);
                return 0;
        }
 
-       if (is_partial_io(bvec)) {
-               /* Use  a temporary buffer to decompress the page */
-               uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
-               if (!uncmem) {
-                       pr_info("Error allocating temp memory!\n");
-                       return -ENOMEM;
-               }
-       }
-
-       user_mem = kmap_atomic(page);
-       if (!is_partial_io(bvec))
-               uncmem = user_mem;
-       clen = PAGE_SIZE;
-
-       cmem = zs_map_object(zram->mem_pool, zram->table[index].handle,
-                               ZS_MM_RO);
-
-       if (zram->table[index].size == PAGE_SIZE) {
-               memcpy(uncmem, cmem, PAGE_SIZE);
-               ret = LZO_E_OK;
-       } else {
+       cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
+       if (zram->table[index].size == PAGE_SIZE)
+               memcpy(mem, cmem, PAGE_SIZE);
+       else
                ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
-                                   uncmem, &clen);
-       }
-
-       if (is_partial_io(bvec)) {
-               memcpy(user_mem + bvec->bv_offset, uncmem + offset,
-                      bvec->bv_len);
-               kfree(uncmem);
-       }
-
-       zs_unmap_object(zram->mem_pool, zram->table[index].handle);
-       kunmap_atomic(user_mem);
+                                               mem, &clen);
+       zs_unmap_object(zram->mem_pool, handle);
 
        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret != LZO_E_OK)) {
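The hunk above extracts a single helper, zram_decompress_page(), with a three-way dispatch: an unwritten or ZRAM_ZERO page becomes a memset, an incompressible page stored at full PAGE_SIZE is a plain memcpy, and everything else goes through lzo1x_decompress_safe(). Below is a minimal userspace model of that dispatch; struct entry and the toy rle_decode() are stand-ins invented for the sketch (the kernel uses zram->table[], zs_map_object() and LZO), so only the control flow carries over.

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

struct entry {
	unsigned char *data;	/* NULL: page was never written */
	size_t size;		/* == PAGE_SIZE: stored uncompressed */
	int zero;		/* models the ZRAM_ZERO flag */
};

/* Toy (count, byte) run-length decoder standing in for LZO. */
static int rle_decode(const unsigned char *src, size_t slen,
		      unsigned char *dst, size_t dmax)
{
	size_t out = 0;

	for (size_t i = 0; i + 1 < slen; i += 2)
		for (unsigned char n = 0; n < src[i]; n++) {
			if (out == dmax)
				return -1;	/* would overrun dst */
			dst[out++] = src[i + 1];
		}
	return 0;
}

/* The same dispatch as zram_decompress_page(). */
int decompress_page(const struct entry *e, unsigned char *mem)
{
	if (!e->data || e->zero) {
		memset(mem, 0, PAGE_SIZE);	/* unwritten or all-zero */
		return 0;
	}
	if (e->size == PAGE_SIZE) {
		memcpy(mem, e->data, PAGE_SIZE);	/* stored raw */
		return 0;
	}
	return rle_decode(e->data, e->size, mem, PAGE_SIZE);
}

int main(void)
{
	unsigned char packed[] = { 200, 'A', 55, 'B' };	/* 255 bytes */
	struct entry e = { packed, sizeof(packed), 0 };
	unsigned char mem[PAGE_SIZE];

	if (decompress_page(&e, mem) == 0)
		printf("%c ... %c\n", mem[0], mem[254]);	/* A ... B */
	return 0;
}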
@@ -247,42 +210,62 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
                return ret;
        }
 
-       flush_dcache_page(page);
-
        return 0;
 }
 
-static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
+static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
+                         u32 index, int offset, struct bio *bio)
 {
        int ret;
-       size_t clen = PAGE_SIZE;
-       unsigned char *cmem;
-       unsigned long handle = zram->table[index].handle;
+       struct page *page;
+       unsigned char *user_mem, *uncmem = NULL;
 
-       if (zram_test_flag(zram, index, ZRAM_ZERO) || !handle) {
-               memset(mem, 0, PAGE_SIZE);
+       page = bvec->bv_page;
+
+       if (unlikely(!zram->table[index].handle) ||
+                       zram_test_flag(zram, index, ZRAM_ZERO)) {
+               handle_zero_page(bvec);
                return 0;
        }
 
-       cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
-       ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
-                                   mem, &clen);
-       zs_unmap_object(zram->mem_pool, handle);
+       if (is_partial_io(bvec)) {
+               /* Use a temporary buffer to decompress the page */
+               uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
+               if (!uncmem) {
+                       pr_info("Unable to allocate temp memory\n");
+                       return -ENOMEM;
+               }
+       }
+
+       user_mem = kmap_atomic(page);
+       if (!is_partial_io(bvec))
+               uncmem = user_mem;
 
+       ret = zram_decompress_page(zram, uncmem, index);
        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret != LZO_E_OK)) {
                pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
                zram_stat64_inc(zram, &zram->stats.failed_reads);
-               return ret;
+               goto out_cleanup;
        }
 
-       return 0;
+       if (is_partial_io(bvec))
+               memcpy(user_mem + bvec->bv_offset, uncmem + offset,
+                               bvec->bv_len);
+
+       flush_dcache_page(page);
+       ret = 0;
+out_cleanup:
+       kunmap_atomic(user_mem);
+       if (is_partial_io(bvec))
+               kfree(uncmem);
+       return ret;
 }
 
 static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                           int offset)
 {
-       int ret;
+       int ret = 0;
        size_t clen;
        unsigned long handle;
        struct page *page;
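In the rewritten zram_bvec_read() above, a partial request decompresses the whole page into a kmalloc'ed bounce buffer and then copies only bvec->bv_len bytes into the caller's page, while a full-page request decompresses straight into the mapped page. Continuing the userspace model (read_range() is a hypothetical name; decompress_page() is the sketch after the first hunk):

#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

struct entry;
int decompress_page(const struct entry *e, unsigned char *mem);

/* Partial read: decompress the full page into a bounce buffer, then
 * copy out just the requested [offset, offset + len) range. A full
 * read (len == PAGE_SIZE) decompresses directly into dst. */
int read_range(const struct entry *e, unsigned char *dst,
	       size_t offset, size_t len)
{
	unsigned char *buf = dst;
	int ret;

	if (len != PAGE_SIZE) {			/* is_partial_io() */
		buf = malloc(PAGE_SIZE);
		if (!buf)
			return -1;
	}
	ret = decompress_page(e, buf);
	if (ret == 0 && buf != dst)
		memcpy(dst, buf + offset, len);
	if (buf != dst)
		free(buf);
	return ret;
}

One ordering detail the model glosses over: in the kernel the bounce buffer has to be allocated before kmap_atomic(), because a sleeping allocation is not allowed while an atomic mapping is held, and GFP_NOIO keeps the allocation from recursing into the block layer from its own write path.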
@@ -302,11 +285,9 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                        ret = -ENOMEM;
                        goto out;
                }
-               ret = zram_read_before_write(zram, uncmem, index);
-               if (ret) {
-                       kfree(uncmem);
+               ret = zram_decompress_page(zram, uncmem, index);
+               if (ret)
                        goto out;
-               }
        }
 
        /*
@@ -319,16 +300,18 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 
        user_mem = kmap_atomic(page);
 
-       if (is_partial_io(bvec))
+       if (is_partial_io(bvec)) {
                memcpy(uncmem + offset, user_mem + bvec->bv_offset,
                       bvec->bv_len);
-       else
+               kunmap_atomic(user_mem);
+               user_mem = NULL;
+       } else {
                uncmem = user_mem;
+       }
 
        if (page_zero_filled(uncmem)) {
-               kunmap_atomic(user_mem);
-               if (is_partial_io(bvec))
-                       kfree(uncmem);
+               if (!is_partial_io(bvec))
+                       kunmap_atomic(user_mem);
                zram_stat_inc(&zram->stats.pages_zero);
                zram_set_flag(zram, index, ZRAM_ZERO);
                ret = 0;
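The two hunks above rework the partial-write path: the old page contents are decompressed first (read-modify-write), the new bytes are overlaid at the bvec offset, and a page that ends up entirely zero is only flagged ZRAM_ZERO, never compressed or stored. Sketching both steps in the same userspace model (merge_partial_write() and page_all_zero() are hypothetical names; the latter mirrors the driver's page_zero_filled()):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

#define PAGE_SIZE 4096

struct entry;
int decompress_page(const struct entry *e, unsigned char *mem);

/* Read-modify-write: fetch the old page, overlay the new range; the
 * caller then compresses page_buf, or just flags it if all zero. */
int merge_partial_write(const struct entry *old, unsigned char *page_buf,
			const unsigned char *src, size_t offset, size_t len)
{
	int ret = decompress_page(old, page_buf);

	if (ret == 0)
		memcpy(page_buf + offset, src, len);
	return ret;
}

/* Scan a long at a time, as page_zero_filled() does. */
bool page_all_zero(const void *mem)
{
	const unsigned long *p = mem;

	for (size_t i = 0; i < PAGE_SIZE / sizeof(*p); i++)
		if (p[i])
			return false;
	return true;
}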
@@ -338,9 +321,11 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
        ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
                               zram->compress_workmem);
 
-       kunmap_atomic(user_mem);
-       if (is_partial_io(bvec))
-                       kfree(uncmem);
+       if (!is_partial_io(bvec)) {
+               kunmap_atomic(user_mem);
+               user_mem = NULL;
+               uncmem = NULL;
+       }
 
        if (unlikely(ret != LZO_E_OK)) {
                pr_err("Compression failed! err=%d\n", ret);
@@ -349,8 +334,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 
        if (unlikely(clen > max_zpage_size)) {
                zram_stat_inc(&zram->stats.bad_compress);
-               src = uncmem;
                clen = PAGE_SIZE;
+               src = NULL;
+               if (is_partial_io(bvec))
+                       src = uncmem;
        }
 
        handle = zs_malloc(zram->mem_pool, clen);
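This hunk handles incompressible data: when the compressed length exceeds the threshold, the page is stored raw at PAGE_SIZE, so later reads take the memcpy fast path in zram_decompress_page() instead of decompressing. The new twist is that for a full-page write the raw source is the page itself, which is only mapped further down, hence src = NULL here. A rough model of the size decision, assuming the usual definition of max_zpage_size from zram_drv.h of this era (three quarters of a page):

#include <stddef.h>

#define PAGE_SIZE 4096

/* Assumption: zram_drv.h defines the threshold as 3/4 of a page;
 * above it, compression is judged not worth the read-side cost. */
static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;

/* Bytes actually handed to zs_malloc() for a given compressed size. */
size_t stored_size(size_t clen)
{
	return clen > max_zpage_size ? PAGE_SIZE : clen;
}

With 4 KiB pages that threshold is 3072 bytes: a page that compresses to, say, 3100 bytes is stored as all 4096.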
@@ -362,7 +349,11 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
        }
        cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
 
+       if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
+               src = kmap_atomic(page);
        memcpy(cmem, src, clen);
+       if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
+               kunmap_atomic(src);
 
        zs_unmap_object(zram->mem_pool, handle);
 
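The copy into the zsmalloc object now has to pick its source: the compression buffer when the data compressed, the bounce buffer for an incompressible partial write, or the page itself for an incompressible full-page write, which the hunk maps with kmap_atomic() only around the memcpy. A hypothetical helper making that selection explicit (the names are invented; the kernel keeps this logic inline):

#include <stddef.h>

#define PAGE_SIZE 4096

/* Pick the memcpy source for storing one page, mirroring the hunk:
 * compressed data comes from czbuf; otherwise the uncompressed bytes
 * live in the bounce buffer (partial write) or the mapped page. */
const unsigned char *copy_source(const unsigned char *czbuf,
				 const unsigned char *bounce,
				 const unsigned char *page_mem,
				 size_t clen, int partial)
{
	if (clen != PAGE_SIZE)
		return czbuf;		/* compressed form is stored */
	return partial ? bounce : page_mem;
}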
@@ -375,9 +366,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
        if (clen <= PAGE_SIZE / 2)
                zram_stat_inc(&zram->stats.good_compress);
 
-       return 0;
-
 out:
+       if (is_partial_io(bvec))
+               kfree(uncmem);
+
        if (ret)
                zram_stat64_inc(zram, &zram->stats.failed_writes);
        return ret;
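Finally, success no longer returns early: both outcomes fall through the out: label, so the partial-I/O bounce buffer is freed on exactly one path. The same single-exit idiom in a compact, self-contained form (write_page() and do_work() are placeholders, not driver functions):

#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

static int do_work(const unsigned char *buf) { (void)buf; return 0; }

/* Single-exit cleanup: every error path jumps to out, and the bounce
 * buffer is released exactly once; free(NULL) is a safe no-op, much
 * like kfree(NULL) in the kernel. */
int write_page(const unsigned char *src, size_t len, size_t offset)
{
	unsigned char *bounce = NULL;
	const unsigned char *data = src;
	int ret;

	if (len != PAGE_SIZE) {			/* partial write */
		bounce = calloc(1, PAGE_SIZE);
		if (!bounce) {
			ret = -1;
			goto out;
		}
		memcpy(bounce + offset, src, len);
		data = bounce;
	}
	ret = do_work(data);
out:
	free(bounce);
	return ret;
}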