Merge tag 'mmc-updates-for-3.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel...
[firefly-linux-kernel-4.4.55.git] / fs / nilfs2 / sufile.c
index 3127e9f438a7c22e7e00d54288209bf0c8c02659..2a869c35c3622386ac0fdc774681df65c59f7093 100644 (file)
@@ -869,6 +869,289 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
        return ret;
 }
 
+/**
+ * nilfs_sufile_set_suinfo - sets segment usage info
+ * @sufile: inode of segment usage file
+ * @buf: array of suinfo_update
+ * @supsz: byte size of suinfo_update
+ * @nsup: size of suinfo_update array
+ *
+ * Description: Takes an array of nilfs_suinfo_update structs and updates
+ * segment usage accordingly. Only the fields indicated by the sup_flags
+ * are updated.  The whole request is validated before any change is
+ * applied, and the sufile metadata semaphore is held for writing while
+ * the updates are carried out.
+ *
+ * Return Value: On success, 0 is returned. On error, one of the
+ * following negative error codes is returned.
+ *
+ * %-EIO - I/O error.
+ *
+ * %-ENOMEM - Insufficient amount of memory available.
+ *
+ * %-EINVAL - Invalid values in input (segment number, flags or nblocks)
+ */
+ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf,
+                               unsigned supsz, size_t nsup)
+{
+       struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
+       struct buffer_head *header_bh, *bh;
+       struct nilfs_suinfo_update *sup, *supend = buf + supsz * nsup;
+       struct nilfs_segment_usage *su;
+       void *kaddr;
+       unsigned long blkoff, prev_blkoff;
+       int cleansi, cleansu, dirtysi, dirtysu;
+       /* net change in clean/dirty segment counts, applied once at the end */
+       long ncleaned = 0, ndirtied = 0;
+       int ret = 0;
+
+       if (unlikely(nsup == 0))
+               return ret;
+
+       /*
+        * Validate every entry up front so an invalid request is rejected
+        * with -EINVAL before any segment usage has been modified.
+        */
+       for (sup = buf; sup < supend; sup = (void *)sup + supsz) {
+               if (sup->sup_segnum >= nilfs->ns_nsegments
+                       || (sup->sup_flags &
+                               (~0UL << __NR_NILFS_SUINFO_UPDATE_FIELDS))
+                       || (nilfs_suinfo_update_nblocks(sup) &&
+                               sup->sup_sui.sui_nblocks >
+                               nilfs->ns_blocks_per_segment))
+                       return -EINVAL;
+       }
+
+       down_write(&NILFS_MDT(sufile)->mi_sem);
+
+       ret = nilfs_sufile_get_header_block(sufile, &header_bh);
+       if (ret < 0)
+               goto out_sem;
+
+       /*
+        * Read the sufile block holding the first entry; consecutive
+        * entries that map to the same block reuse this buffer head.
+        */
+       sup = buf;
+       blkoff = nilfs_sufile_get_blkoff(sufile, sup->sup_segnum);
+       ret = nilfs_mdt_get_block(sufile, blkoff, 1, NULL, &bh);
+       if (ret < 0)
+               goto out_header;
+
+       for (;;) {
+               kaddr = kmap_atomic(bh->b_page);
+               su = nilfs_sufile_block_get_segment_usage(
+                       sufile, sup->sup_segnum, bh, kaddr);
+
+               /* copy only the fields selected by sup_flags */
+               if (nilfs_suinfo_update_lastmod(sup))
+                       su->su_lastmod = cpu_to_le64(sup->sup_sui.sui_lastmod);
+
+               if (nilfs_suinfo_update_nblocks(sup))
+                       su->su_nblocks = cpu_to_le32(sup->sup_sui.sui_nblocks);
+
+               if (nilfs_suinfo_update_flags(sup)) {
+                       /*
+                        * Active flag is a virtual flag projected by running
+                        * nilfs kernel code - drop it not to write it to
+                        * disk.
+                        */
+                       sup->sup_sui.sui_flags &=
+                                       ~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
+
+                       /*
+                        * Track clean/dirty transitions so the header's
+                        * counters can be fixed up with one call below.
+                        */
+                       cleansi = nilfs_suinfo_clean(&sup->sup_sui);
+                       cleansu = nilfs_segment_usage_clean(su);
+                       dirtysi = nilfs_suinfo_dirty(&sup->sup_sui);
+                       dirtysu = nilfs_segment_usage_dirty(su);
+
+                       if (cleansi && !cleansu)
+                               ++ncleaned;
+                       else if (!cleansi && cleansu)
+                               --ncleaned;
+
+                       if (dirtysi && !dirtysu)
+                               ++ndirtied;
+                       else if (!dirtysi && dirtysu)
+                               --ndirtied;
+
+                       su->su_flags = cpu_to_le32(sup->sup_sui.sui_flags);
+               }
+
+               kunmap_atomic(kaddr);
+
+               sup = (void *)sup + supsz;
+               if (sup >= supend)
+                       break;
+
+               prev_blkoff = blkoff;
+               blkoff = nilfs_sufile_get_blkoff(sufile, sup->sup_segnum);
+               if (blkoff == prev_blkoff)
+                       continue;
+
+               /* get different block */
+               mark_buffer_dirty(bh);
+               put_bh(bh);
+               ret = nilfs_mdt_get_block(sufile, blkoff, 1, NULL, &bh);
+               if (unlikely(ret < 0))
+                       goto out_mark;
+       }
+       mark_buffer_dirty(bh);
+       put_bh(bh);
+
+ out_mark:
+       /*
+        * Even on a mid-loop failure, the counter changes accumulated so
+        * far correspond to updates already written and must be applied.
+        */
+       if (ncleaned || ndirtied) {
+               nilfs_sufile_mod_counter(header_bh, (u64)ncleaned,
+                               (u64)ndirtied);
+               NILFS_SUI(sufile)->ncleansegs += ncleaned;
+       }
+       nilfs_mdt_mark_dirty(sufile);
+ out_header:
+       put_bh(header_bh);
+ out_sem:
+       up_write(&NILFS_MDT(sufile)->mi_sem);
+       return ret;
+}
+
+/**
+ * nilfs_sufile_trim_fs() - trim ioctl handle function
+ * @sufile: inode of segment usage file
+ * @range: fstrim_range structure
+ *
+ * start:      First Byte to trim
+ * len:                number of Bytes to trim from start
+ * minlen:     minimum extent length in Bytes
+ *
+ * Description: nilfs_sufile_trim_fs goes through all segments containing bytes
+ * from start to start+len. start is rounded up to the next block boundary
+ * and start+len is rounded down. For each clean segment blkdev_issue_discard
+ * function is invoked.  Adjacent clean segments are merged into one extent
+ * before being discarded, and extents shorter than minlen are skipped.
+ * On return, range->len is set to the total number of discarded bytes.
+ *
+ * Return Value: On success, 0 is returned, otherwise a negative error code.
+ */
+int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)
+{
+       struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
+       struct buffer_head *su_bh;
+       struct nilfs_segment_usage *su;
+       void *kaddr;
+       size_t n, i, susz = NILFS_MDT(sufile)->mi_entry_size;
+       sector_t seg_start, seg_end, start_block, end_block;
+       /* current extent of contiguous clean segments being accumulated */
+       sector_t start = 0, nblocks = 0;
+       u64 segnum, segnum_end, minlen, len, max_blocks, ndiscarded = 0;
+       int ret = 0;
+       unsigned int sects_per_block;
+
+       /* device sectors per filesystem block, for blkdev_issue_discard */
+       sects_per_block = (1 << nilfs->ns_blocksize_bits) /
+                       bdev_logical_block_size(nilfs->ns_bdev);
+       /* convert the byte-based request into filesystem blocks */
+       len = range->len >> nilfs->ns_blocksize_bits;
+       minlen = range->minlen >> nilfs->ns_blocksize_bits;
+       max_blocks = ((u64)nilfs->ns_nsegments * nilfs->ns_blocks_per_segment);
+
+       if (!len || range->start >= max_blocks << nilfs->ns_blocksize_bits)
+               return -EINVAL;
+
+       /* round the start up to the next block boundary */
+       start_block = (range->start + nilfs->ns_blocksize - 1) >>
+                       nilfs->ns_blocksize_bits;
+
+       /*
+        * range->len can be very large (actually, it is set to
+        * ULLONG_MAX by default) - truncate upper end of the range
+        * carefully so as not to overflow.
+        */
+       if (max_blocks - start_block < len)
+               end_block = max_blocks - 1;
+       else
+               end_block = start_block + len - 1;
+
+       segnum = nilfs_get_segnum_of_block(nilfs, start_block);
+       segnum_end = nilfs_get_segnum_of_block(nilfs, end_block);
+
+       down_read(&NILFS_MDT(sufile)->mi_sem);
+
+       while (segnum <= segnum_end) {
+               /* number of usage entries of this block within the range */
+               n = nilfs_sufile_segment_usages_in_block(sufile, segnum,
+                               segnum_end);
+
+               ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
+                                                          &su_bh);
+               if (ret < 0) {
+                       if (ret != -ENOENT)
+                               goto out_sem;
+                       /* hole */
+                       segnum += n;
+                       continue;
+               }
+
+               kaddr = kmap_atomic(su_bh->b_page);
+               su = nilfs_sufile_block_get_segment_usage(sufile, segnum,
+                               su_bh, kaddr);
+               for (i = 0; i < n; ++i, ++segnum, su = (void *)su + susz) {
+                       if (!nilfs_segment_usage_clean(su))
+                               continue;
+
+                       nilfs_get_segment_range(nilfs, segnum, &seg_start,
+                                               &seg_end);
+
+                       if (!nblocks) {
+                               /* start new extent */
+                               start = seg_start;
+                               nblocks = seg_end - seg_start + 1;
+                               continue;
+                       }
+
+                       if (start + nblocks == seg_start) {
+                               /* add to previous extent */
+                               nblocks += seg_end - seg_start + 1;
+                               continue;
+                       }
+
+                       /* discard previous extent */
+                       if (start < start_block) {
+                               /* clip the extent to the requested range */
+                               nblocks -= start_block - start;
+                               start = start_block;
+                       }
+
+                       if (nblocks >= minlen) {
+                               /*
+                                * Release the atomic kmap before issuing the
+                                * discard (which may sleep), and re-establish
+                                * it afterwards.
+                                */
+                               kunmap_atomic(kaddr);
+
+                               ret = blkdev_issue_discard(nilfs->ns_bdev,
+                                               start * sects_per_block,
+                                               nblocks * sects_per_block,
+                                               GFP_NOFS, 0);
+                               if (ret < 0) {
+                                       put_bh(su_bh);
+                                       goto out_sem;
+                               }
+
+                               ndiscarded += nblocks;
+                               kaddr = kmap_atomic(su_bh->b_page);
+                               su = nilfs_sufile_block_get_segment_usage(
+                                       sufile, segnum, su_bh, kaddr);
+                       }
+
+                       /* start new extent */
+                       start = seg_start;
+                       nblocks = seg_end - seg_start + 1;
+               }
+               kunmap_atomic(kaddr);
+               put_bh(su_bh);
+       }
+
+
+       if (nblocks) {
+               /* discard last extent */
+               if (start < start_block) {
+                       nblocks -= start_block - start;
+                       start = start_block;
+               }
+               if (start + nblocks > end_block + 1)
+                       nblocks = end_block - start + 1;
+
+               if (nblocks >= minlen) {
+                       ret = blkdev_issue_discard(nilfs->ns_bdev,
+                                       start * sects_per_block,
+                                       nblocks * sects_per_block,
+                                       GFP_NOFS, 0);
+                       if (!ret)
+                               ndiscarded += nblocks;
+               }
+       }
+
+out_sem:
+       up_read(&NILFS_MDT(sufile)->mi_sem);
+
+       /* report the total number of discarded bytes back to the caller */
+       range->len = ndiscarded << nilfs->ns_blocksize_bits;
+       return ret;
+}
+
 /**
  * nilfs_sufile_read - read or get sufile inode
  * @sb: super block instance
@@ -886,6 +1169,18 @@ int nilfs_sufile_read(struct super_block *sb, size_t susize,
        void *kaddr;
        int err;
 
+       if (susize > sb->s_blocksize) {
+               printk(KERN_ERR
+                      "NILFS: too large segment usage size: %zu bytes.\n",
+                      susize);
+               return -EINVAL;
+       } else if (susize < NILFS_MIN_SEGMENT_USAGE_SIZE) {
+               printk(KERN_ERR
+                      "NILFS: too small segment usage size: %zu bytes.\n",
+                      susize);
+               return -EINVAL;
+       }
+
        sufile = nilfs_iget_locked(sb, NULL, NILFS_SUFILE_INO);
        if (unlikely(!sufile))
                return -ENOMEM;