dm thin metadata: add dm_thin_find_mapped_range()
author: Joe Thornber <ejt@redhat.com>
Thu, 16 Apr 2015 11:47:21 +0000 (12:47 +0100)
committer: Mike Snitzer <snitzer@redhat.com>
Thu, 11 Jun 2015 21:13:03 +0000 (17:13 -0400)
Retrieve the next run of contiguously mapped blocks.  Useful for working
out where to break up IO.

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
drivers/md/dm-thin-metadata.c
drivers/md/dm-thin-metadata.h

index cb6dd055053d59da06e1e50ebe597522046d6e0e..94cf0db8a22ed59f000ac77d75b039330d4c6e7b 100644 (file)
@@ -1417,6 +1417,79 @@ int dm_thin_find_block(struct dm_thin_device *td, dm_block_t block,
        return r;
 }
 
+/*
+ * dm_thin_find_mapped_range - retrieve the next run of contiguously
+ * mapped blocks within [begin, end).  Useful for working out where to
+ * break up IO.
+ *
+ * On success (0), [*thin_begin, *thin_end) is the run of virtual blocks,
+ * *pool_begin is the pool block backing *thin_begin, and *maybe_shared
+ * reports whether the mappings in the run may be shared.
+ * Returns -ENODATA if no mapped block exists in [begin, end), or a
+ * negative error from dm_thin_find_block().
+ *
+ * FIXME: write a more efficient one in btree
+ */
+int dm_thin_find_mapped_range(struct dm_thin_device *td,
+                             dm_block_t begin, dm_block_t end,
+                             dm_block_t *thin_begin, dm_block_t *thin_end,
+                             dm_block_t *pool_begin, bool *maybe_shared)
+{
+       int r;
+       dm_block_t pool_end;
+       struct dm_thin_lookup_result lookup;
+
+       if (end < begin)
+               return -ENODATA;
+
+       /*
+        * Find first mapped block.
+        */
+       while (begin < end) {
+               r = dm_thin_find_block(td, begin, true, &lookup);
+               if (r) {
+                       if (r != -ENODATA)
+                               return r;
+               } else
+                       break;
+
+               begin++;
+       }
+
+       if (begin == end)
+               return -ENODATA;
+
+       *thin_begin = begin;
+       *pool_begin = lookup.block;
+       *maybe_shared = lookup.shared;
+
+       /*
+        * Extend the run: stop at the first unmapped block (-ENODATA),
+        * a non-contiguous pool block, or a change in sharing status.
+        */
+       begin++;
+       pool_end = *pool_begin + 1;
+       while (begin != end) {
+               r = dm_thin_find_block(td, begin, true, &lookup);
+               if (r) {
+                       if (r == -ENODATA)
+                               break;
+                       else
+                               return r;
+               }
+
+               if ((lookup.block != pool_end) ||
+                   (lookup.shared != *maybe_shared))
+                       break;
+
+               pool_end++;
+               begin++;
+       }
+
+       *thin_end = begin;
+       return 0;
+}
+
 static int __insert(struct dm_thin_device *td, dm_block_t block,
                    dm_block_t data_block)
 {
index fac01a96d303113b3036071ed8f359c7e5f10224..f11f14095b93501ff64f275adbc44723cbee61f6 100644 (file)
@@ -146,6 +146,15 @@ struct dm_thin_lookup_result {
 int dm_thin_find_block(struct dm_thin_device *td, dm_block_t block,
                       int can_issue_io, struct dm_thin_lookup_result *result);
 
+/*
+ * Retrieve the next run of contiguously mapped blocks.  Useful for working
+ * out where to break up IO.  Returns 0 on success, < 0 on error.
+ */
+int dm_thin_find_mapped_range(struct dm_thin_device *td,
+                             dm_block_t begin, dm_block_t end,
+                             dm_block_t *thin_begin, dm_block_t *thin_end,
+                             dm_block_t *pool_begin, bool *maybe_shared);
+
 /*
  * Obtain an unused block.
  */