/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>

#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>

#include "rbd_types.h"
#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value, -EINVAL is
 * returned without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}
/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}
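/*
 * Illustration (a hedged sketch, not driver code): how callers are
 * expected to pair the two saturating helpers above.  The example_*
 * names are hypothetical.  The increment fails both when the counter
 * has already dropped to zero (the object is being torn down) and on
 * overflow, so a successful "get" always implies a live reference.
 */
static inline bool example_get_ref(atomic_t *refcount)
{
	/* > 0 means the count was live and was safely bumped */
	return atomic_inc_return_safe(refcount) > 0;
}

static inline void example_put_ref(atomic_t *refcount)
{
	if (atomic_dec_return_safe(refcount) < 0)
		pr_warn("reference count underflow\n");
}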
#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by the OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64
#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
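/*
 * Aside (illustrative, not part of the driver): the bound works
 * because each byte of an integer contributes at most log10(256),
 * about 2.41, decimal digits, and 5/2 = 2.5 over-approximates that;
 * the +1 leaves room for a sign.  For 4-byte ints this yields 11,
 * enough for "-2147483648".  A compile-time check of the claim:
 */
static inline void example_check_dev_name_len(void)
{
	/* "rbd" plus a formatted int must fit in DEV_NAME_LEN */
	BUILD_BUG_ON(sizeof (RBD_DRV_NAME) - 1 + MAX_INT_FORMAT_WIDTH
			> DEV_NAME_LEN);
}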
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;
	u64 stripe_unit;
	u64 stripe_count;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the ids in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the ids associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};
/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};
struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};
struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request	*obj_request;	/* STAT op */
		struct {
			struct rbd_img_request	*img_request;
			u64			img_offset;
			/* links for img_request->obj_requests list */
			struct list_head	links;
		};
	};
	u32			which;		/* position in image request list */

	enum obj_request_type	type;
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};
enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
};

struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};
#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
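/*
 * Usage sketch for the iterators above (illustration only, not driver
 * code): walk an image request's object requests and total up what has
 * transferred so far.  The example_* name is hypothetical.
 */
static inline u64 example_total_xferred(struct rbd_img_request *ireq)
{
	struct rbd_obj_request *oreq;
	u64 total = 0;

	for_each_obj_request(ireq, oreq)
		total += oreq->xferred;

	return total;
}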
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;

	struct ceph_file_layout	layout;

	struct ceph_osd_event   *watch_event;
	struct rbd_obj_request	*watch_request;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* protects updating the header */
	struct rw_semaphore     header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};
static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;
static struct kmem_cache	*rbd_segment_name_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

/*
 * Default to false for now, as single-major requires version 0.75 or
 * later of the userspace rbd utility.
 */
static bool single_major = false;
module_param(single_major, bool, S_IRUGO);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");
static int rbd_img_request_submit(struct rbd_img_request *img_request);

static void rbd_dev_device_release(struct device *dev);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
static void rbd_spec_put(struct rbd_spec *spec);
static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
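/*
 * Worked example (illustrative): with RBD_SINGLE_MAJOR_PART_SHIFT == 4,
 * each device id owns 16 consecutive minors -- the whole disk plus up
 * to 15 partitions.  So dev_id 3 covers minors 48..63:
 *
 *	rbd_dev_id_to_minor(3)       == 3 << 4   == 48
 *	minor_to_rbd_dev_id(48 + 15) == 63 >> 4  == 3
 */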
static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}
#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
		u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);
	set_device_ro(bdev, rbd_dev->mapping.read_only);

	return 0;
}

static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}
static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ret = 0;
	int val;
	bool ro;
	bool ro_changed = false;

	/* get_user() may sleep, so call it before taking rbd_dev->lock */
	if (get_user(val, (int __user *)(arg)))
		return -EFAULT;

	ro = val ? true : false;
	/* Snapshots can't be marked read-write */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	/* prevent others from opening this device */
	if (rbd_dev->open_count > 1) {
		ret = -EBUSY;
		goto out;
	}

	if (rbd_dev->mapping.read_only != ro) {
		rbd_dev->mapping.read_only = ro;
		ro_changed = true;
	}

out:
	spin_unlock_irq(&rbd_dev->lock);
	/* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
	if (ret == 0 && ro_changed)
		set_disk_ro(rbd_dev->disk, ro ? 1 : 0);

	return ret;
}
static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret = 0;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};
/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}
static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}
enum {
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	/* Boolean args above */
	Opt_last_bool,
};

static match_table_t rbd_opts_tokens = {
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	/* Boolean args above */
	{-1, NULL}
};

struct rbd_options {
	bool	read_only;
};

#define RBD_READ_ONLY_DEFAULT	false
static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < 0)
		return -EINVAL;

	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token,
		     argstr[0].from);
	} else if (token > Opt_last_string && token < Opt_last_bool) {
		dout("got Boolean token %d\n", token);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	default:
		rbd_assert(false);
		break;
	}
	return 0;
}
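/*
 * Example (illustrative; see Documentation/ABI/testing/sysfs-bus-rbd
 * for the authoritative format): the option string parsed above is
 * part of what userspace writes to the "add" interface, roughly
 *
 *	echo "1.2.3.4:6789 name=admin,ro rbd foo -" > /sys/bus/rbd/add
 *
 * where "ro" toggles rbd_opts->read_only exactly like "read_only".
 */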
/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);
	mutex_unlock(&client_mutex);

	return rbdc;
}
/*
 * Destroy ceph client.  Takes rbd_client_list_lock itself to unlink
 * the client from the list, so the caller must not hold that lock.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}
static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}
/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	size_t size;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		size_t len;

		len = strnlen(ondisk->object_prefix,
				sizeof (ondisk->object_prefix));
		object_prefix = kmalloc(len + 1, GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
		memcpy(object_prefix, ondisk->object_prefix, len);
		object_prefix[len] = '\0';
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */

		size = snap_count * sizeof (*header->snap_sizes);
		snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		header->crypt_type = ondisk->options.crypt_type;
		header->comp_type = ondisk->options.comp_type;
		/* The rest aren't used for format 1 images */
		header->stripe_unit = 0;
		header->stripe_count = 0;
		header->features = 0;
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	/* Make sure mapping size is consistent with header info */

	if (rbd_dev->spec->snap_id == CEPH_NOSNAP || first_time)
		if (rbd_dev->mapping.size != header->image_size)
			rbd_dev->mapping.size = header->image_size;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}
/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
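/*
 * Illustration (a standalone sketch, not driver code): bsearch() only
 * requires that the comparator be consistent with the array's order,
 * so a descending array works fine with the reversed comparator above.
 * For snaps[] = { 9, 7, 4, 2 }, searching for 4 finds index 2.
 */
static inline u32 example_descending_bsearch(void)
{
	u64 snaps[] = { 9, 7, 4, 2 };	/* highest snapshot id first */
	u64 key = 4;
	u64 *found;

	found = bsearch(&key, snaps, ARRAY_SIZE(snaps), sizeof (key),
			snapid_compare_reverse);

	return found ? (u32)(found - snaps) : BAD_SNAP_INDEX;	/* 2 */
}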
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}
static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}
static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}
static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;
	char *name_format;

	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	name_format = "%s.%012llx";
	if (rbd_dev->image_format == 2)
		name_format = "%s.%016llx";
	ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		kmem_cache_free(rbd_segment_name_cache, name);
		name = NULL;
	}

	return name;
}

static void rbd_segment_name_free(const char *name)
{
	/* The explicit cast here is needed to drop the const qualifier */
	kmem_cache_free(rbd_segment_name_cache, (void *)name);
}
static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}
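/*
 * Worked example (illustrative): with obj_order == 22 (4 MiB objects),
 * an image offset of 13 MiB (0xd00000) and a 6 MiB request give
 *
 *	segment        = 13 MiB >> 22              = 3
 *	segment offset = 13 MiB & (4 MiB - 1)      = 1 MiB
 *	segment length = min(6 MiB, 4 MiB - 1 MiB) = 3 MiB
 *
 * so the first object request covers bytes 1..4 MiB of object 3, and
 * the remaining 3 MiB spill into subsequent objects.
 */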
/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}

static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}
/*
 * zeros a bio chain, starting at a specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned long flags;
	void *buf;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, iter) {
			if (pos + bv.bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(&bv, &flags);
				memset(buf + remainder, 0,
				       bv.bv_len - remainder);
				flush_dcache_page(bv.bv_page);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv.bv_len;
		}

		chain = chain->bi_next;
	}
}
/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

		page_offset = offset & ~PAGE_MASK;
		length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
		flush_dcache_page(*page);
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}
/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bio;

	bio = bio_clone(bio_src, gfpmask);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio_advance(bio, offset);
	bio->bi_iter.bi_size = len;

	return bio;
}
/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_iter.bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_iter.bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}
/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
		struct rbd_device *rbd_dev;

		rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked img_data\n",
			obj_request);
	}
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
		struct rbd_device *rbd_dev = NULL;

		if (obj_request_img_data_test(obj_request))
			rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked done\n",
			obj_request);
	}
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}

/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the responses from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
				bool exists)
{
	if (exists)
		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
	smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
	     atomic_read(&img_request->kref.refcount));
	kref_get(&img_request->kref);
}

static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	if (img_request_child_test(img_request))
		kref_put(&img_request->kref, rbd_parent_request_destroy);
	else
		kref_put(&img_request->kref, rbd_img_request_destroy);
}
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}
static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}

static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
				struct rbd_obj_request *obj_request)
{
	dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);

	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}
static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
	dout("%s: img %p\n", __func__, img_request);

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; it is not clear offhand which approach is better.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;
		u64 xferred = 0;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;
	}

	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}

/* Caller is responsible for rbd_obj_request_destroy(obj_request) */

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);

	return wait_for_completion_interruptible(&obj_request->completion);
}
/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_WRITE, &img_request->flags);
	smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static void img_request_child_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}
static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
	u64 xferred = obj_request->xferred;
	u64 length = obj_request->length;

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
		xferred, length);
	/*
	 * ENOENT means a hole in the image.  We zero-fill the entire
	 * length of the request.  A short read also implies zero-fill
	 * to the end of the request.  An error requires the whole
	 * length of the request to be reported finished with an error
	 * to the block layer.  In each case we update the xferred
	 * count to indicate the whole request was satisfied.
	 */
	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
	if (obj_request->result == -ENOENT) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, 0);
		else
			zero_pages(obj_request->pages, 0, length);
		obj_request->result = 0;
	} else if (xferred < length && !obj_request->result) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, xferred);
		else
			zero_pages(obj_request->pages, xferred, length);
	}
	obj_request->xferred = length;
	obj_request_done_set(obj_request);
}
static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}

static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}

static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_device *rbd_dev = NULL;
	bool layered = false;

	if (obj_request_img_data_test(obj_request)) {
		img_request = obj_request->img_request;
		layered = img_request && img_request_layered_test(img_request);
		rbd_dev = img_request->rbd_dev;
	}

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
	if (layered && obj_request->result == -ENOENT &&
			obj_request->img_offset < rbd_dev->parent_overlap)
		rbd_img_parent_read(obj_request);
	else if (img_request)
		rbd_img_obj_request_read_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}

static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short write.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	obj_request_done_set(obj_request);
}

/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
				struct ceph_msg *msg)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	u16 opcode;

	dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
	rbd_assert(osd_req == obj_request->osd_req);
	if (obj_request_img_data_test(obj_request)) {
		rbd_assert(obj_request->img_request);
		rbd_assert(obj_request->which != BAD_WHICH);
	} else {
		rbd_assert(obj_request->which == BAD_WHICH);
	}

	if (osd_req->r_result < 0)
		obj_request->result = osd_req->r_result;

	rbd_assert(osd_req->r_num_ops <= CEPH_OSD_MAX_OP);

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to blk_end_request(), which takes an unsigned int.
	 */
	obj_request->xferred = osd_req->r_reply_op_len[0];
	rbd_assert(obj_request->xferred < (u64)UINT_MAX);

	opcode = osd_req->r_ops[0].op;
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request);
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE);
		/* fall through */
	case CEPH_OSD_OP_WRITE:
		rbd_osd_write_callback(obj_request);
		break;
	case CEPH_OSD_OP_STAT:
		rbd_osd_stat_callback(obj_request);
		break;
	case CEPH_OSD_OP_CALL:
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		rbd_osd_trivial_callback(obj_request);
		break;
	default:
		rbd_warn(NULL, "%s: unsupported op %hu\n",
			obj_request->object_name, (unsigned short) opcode);
		break;
	}

	if (obj_request_done_test(obj_request))
		rbd_obj_request_complete(obj_request);
}
static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	u64 snap_id;

	rbd_assert(osd_req != NULL);

	snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			NULL, snap_id, NULL);
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	struct ceph_snap_context *snapc;
	struct timespec mtime = CURRENT_TIME;

	rbd_assert(osd_req != NULL);

	snapc = img_request ? img_request->snapc : NULL;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			snapc, CEPH_NOSNAP, &mtime);
}
/*
 * Create an osd request.  A read request has one osd op (read).
 * A write request has either one (watch) or two (hint+write) osd ops.
 * (All rbd data writes are prefixed with an allocation hint op, but
 * technically osd watch is a write request, hence this distinction.)
 */
static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					bool write_request,
					unsigned int num_ops,
					struct rbd_obj_request *obj_request)
{
	struct ceph_snap_context *snapc = NULL;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	if (obj_request_img_data_test(obj_request)) {
		struct rbd_img_request *img_request = obj_request->img_request;

		rbd_assert(write_request ==
				img_request_write_test(img_request));
		if (write_request)
			snapc = img_request->snapc;
	}

	rbd_assert(num_ops == 1 || (write_request && num_ops == 2));

	/* Allocate and initialize the request, for the num_ops ops */

	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
					  GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	if (write_request)
		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	else
		osd_req->r_flags = CEPH_OSD_FLAG_READ;

	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
	ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);

	return osd_req;
}
/*
 * Create a copyup osd request based on the information in the
 * object request supplied.  A copyup request has three osd ops,
 * a copyup method call, a hint op, and a write op.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_assert(img_request_write_test(img_request));

	/* Allocate and initialize the request, for the three ops */

	snapc = img_request->snapc;
	rbd_dev = img_request->rbd_dev;
	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, 3, false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
	ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);

	return osd_req;
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}
/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
						u64 offset, u64 length,
						enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;
	size_t size;
	char *name;

	rbd_assert(obj_request_type_valid(type));

	size = strlen(object_name) + 1;
	name = kmalloc(size, GFP_KERNEL);
	if (!name)
		return NULL;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
	if (!obj_request) {
		kfree(name);
		return NULL;
	}

	obj_request->object_name = memcpy(name, object_name, size);
	obj_request->offset = offset;
	obj_request->length = length;
	obj_request->flags = 0;
	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
		offset, length, (int)type, obj_request);

	return obj_request;
}
static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
	case OBJ_REQUEST_PAGES:
		if (obj_request->pages)
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
	}

	kfree(obj_request->object_name);
	obj_request->object_name = NULL;
	kmem_cache_free(rbd_obj_request_cache, obj_request);
}
/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}

/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow\n");
}

/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * We must get the reference before checking for the overlap to
 * coordinate properly with zeroing the parent overlap in
 * rbd_dev_v2_parent_info() when an image gets flattened.  We
 * drop it again if there is no overlap.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return false;

	counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	if (counter > 0 && rbd_dev->parent_overlap)
		return true;

	/* Image was flattened, but parent is not yet torn down */

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow\n");

	return false;
}
/*
 * Caller is responsible for filling in the list of object requests
 * that make up the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					u64 offset, u64 length,
					bool write_request)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_ATOMIC);
	if (!img_request)
		return NULL;

	if (write_request) {
		down_read(&rbd_dev->header_rwsem);
		ceph_get_snap_context(rbd_dev->header.snapc);
		up_read(&rbd_dev->header_rwsem);
	}

	img_request->rq = NULL;
	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
	img_request->flags = 0;
	if (write_request) {
		img_request_write_set(img_request);
		img_request->snapc = rbd_dev->header.snapc;
	} else {
		img_request->snap_id = rbd_dev->spec->snap_id;
	}
	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);
	spin_lock_init(&img_request->completion_lock);
	img_request->next_completion = 0;
	img_request->callback = NULL;
	img_request->result = 0;
	img_request->obj_request_count = 0;
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
		write_request ? "write" : "read", offset, length,
		img_request);

	return img_request;
}
static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (img_request_write_test(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}
static struct rbd_img_request *rbd_parent_request_create(
					struct rbd_obj_request *obj_request,
					u64 img_offset, u64 length)
{
	struct rbd_img_request *parent_request;
	struct rbd_device *rbd_dev;

	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;

	parent_request = rbd_img_request_create(rbd_dev->parent,
						img_offset, length, false);
	if (!parent_request)
		return NULL;

	img_request_child_set(parent_request);
	rbd_obj_request_get(obj_request);
	parent_request->obj_request = obj_request;

	return parent_request;
}

static void rbd_parent_request_destroy(struct kref *kref)
{
	struct rbd_img_request *parent_request;
	struct rbd_obj_request *orig_request;

	parent_request = container_of(kref, struct rbd_img_request, kref);
	orig_request = parent_request->obj_request;

	parent_request->obj_request = NULL;
	rbd_obj_request_put(orig_request);
	img_request_child_clear(parent_request);

	rbd_img_request_destroy(kref);
}
static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	unsigned int xferred;
	int result;
	bool more;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
	xferred = (unsigned int)obj_request->xferred;
	result = obj_request->result;
	if (result) {
		struct rbd_device *rbd_dev = img_request->rbd_dev;

		rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n",
			img_request_write_test(img_request) ? "write" : "read",
			obj_request->length, obj_request->img_offset,
			obj_request->offset);
		rbd_warn(rbd_dev, "  result %d xferred %x\n",
			result, xferred);
		if (!img_request->result)
			img_request->result = result;
	}

	/* Image object requests don't own their page array */

	if (obj_request->type == OBJ_REQUEST_PAGES) {
		obj_request->pages = NULL;
		obj_request->page_count = 0;
	}

	if (img_request_child_test(img_request)) {
		rbd_assert(img_request->obj_request != NULL);
		more = obj_request->which < img_request->obj_request_count - 1;
	} else {
		rbd_assert(img_request->rq != NULL);
		more = blk_end_request(img_request->rq, result, xferred);
	}

	return more;
}
static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	u32 which = obj_request->which;
	bool more = true;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	rbd_assert(img_request != NULL);
	rbd_assert(img_request->obj_request_count > 0);
	rbd_assert(which != BAD_WHICH);
	rbd_assert(which < img_request->obj_request_count);

	spin_lock_irq(&img_request->completion_lock);
	if (which != img_request->next_completion)
		goto out;

	for_each_obj_request_from(img_request, obj_request) {
		rbd_assert(more);
		rbd_assert(which < img_request->obj_request_count);

		if (!obj_request_done_test(obj_request))
			break;
		more = rbd_img_obj_end_request(obj_request);
		which++;
	}

	rbd_assert(more ^ (which == img_request->obj_request_count));
	img_request->next_completion = which;
out:
	spin_unlock_irq(&img_request->completion_lock);
	rbd_img_request_put(img_request);

	if (!more)
		rbd_img_request_complete(img_request);
}
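/*
 * Illustration (a hedged sketch, not driver code) of the in-order
 * completion scheme used above: object requests may finish in any
 * order, but results are handed back strictly in list order.  Each
 * completion only advances next_completion across the contiguous
 * prefix of requests that are already done; a request that completes
 * "early" simply marks itself done and returns.
 *
 *	done:            [D][D][D][ ][D][ ]
 *	                           ^
 *	next_completion stops at the first not-done entry, and is
 *	advanced later by that entry's own completion callback.
 */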
2225 * Split up an image request into one or more object requests, each
2226 * to a different object. The "type" parameter indicates whether
2227 * "data_desc" is the pointer to the head of a list of bio
2228 * structures, or the base of a page array. In either case this
2229 * function assumes data_desc describes memory sufficient to hold
2230 * all data described by the image request.
2232 static int rbd_img_request_fill(struct rbd_img_request *img_request,
2233 enum obj_request_type type,
2236 struct rbd_device *rbd_dev = img_request->rbd_dev;
2237 struct rbd_obj_request *obj_request = NULL;
2238 struct rbd_obj_request *next_obj_request;
2239 bool write_request = img_request_write_test(img_request);
2240 struct bio *bio_list = NULL;
2241 unsigned int bio_offset = 0;
2242 struct page **pages = NULL;
2247 dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2248 (int)type, data_desc);
2250 opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ;
2251 img_offset = img_request->offset;
2252 resid = img_request->length;
2253 rbd_assert(resid > 0);
2255 if (type == OBJ_REQUEST_BIO) {
2256 bio_list = data_desc;
2257 rbd_assert(img_offset ==
2258 bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
2260 rbd_assert(type == OBJ_REQUEST_PAGES);
2265 struct ceph_osd_request *osd_req;
2266 const char *object_name;
2269 unsigned int which = 0;
2271 object_name = rbd_segment_name(rbd_dev, img_offset);
2274 offset = rbd_segment_offset(rbd_dev, img_offset);
2275 length = rbd_segment_length(rbd_dev, img_offset, resid);
2276 obj_request = rbd_obj_request_create(object_name,
2277 offset, length, type);
2278 /* object request has its own copy of the object name */
2279 rbd_segment_name_free(object_name);
2284 * set obj_request->img_request before creating the
2285 * osd_request so that it gets the right snapc
2287 rbd_img_obj_request_add(img_request, obj_request);
2289 if (type == OBJ_REQUEST_BIO) {
2290 unsigned int clone_size;
2292 rbd_assert(length <= (u64)UINT_MAX);
2293 clone_size = (unsigned int)length;
2294 obj_request->bio_list =
2295 bio_chain_clone_range(&bio_list,
2299 if (!obj_request->bio_list)
2302 unsigned int page_count;
2304 obj_request->pages = pages;
2305 page_count = (u32)calc_pages_for(offset, length);
2306 obj_request->page_count = page_count;
2307 if ((offset + length) & ~PAGE_MASK)
2308 page_count--; /* more on last page */
2309 pages += page_count;
2312 osd_req = rbd_osd_req_create(rbd_dev, write_request,
2313 (write_request ? 2 : 1),
2317 obj_request->osd_req = osd_req;
2318 obj_request->callback = rbd_img_obj_callback;
2319 rbd_img_request_get(img_request);
2321 if (write_request) {
2322 osd_req_op_alloc_hint_init(osd_req, which,
2323 rbd_obj_bytes(&rbd_dev->header),
2324 rbd_obj_bytes(&rbd_dev->header));
2328 osd_req_op_extent_init(osd_req, which, opcode, offset, length,
2330 if (type == OBJ_REQUEST_BIO)
2331 osd_req_op_extent_osd_data_bio(osd_req, which,
2332 obj_request->bio_list, length);
2334 osd_req_op_extent_osd_data_pages(osd_req, which,
2335 obj_request->pages, length,
2336 offset & ~PAGE_MASK, false, false);
2339 rbd_osd_req_format_write(obj_request);
2341 rbd_osd_req_format_read(obj_request);
2343 obj_request->img_offset = img_offset;
2345 img_offset += length;
2352 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2353 rbd_img_obj_request_del(img_request, obj_request);
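/*
 * Worked example (editorial, values illustrative): with 4 MiB objects
 * (obj_order == 22), a 6 MiB image request starting at image offset
 * 3 MiB is split by the loop above into three object requests:
 *
 *   object 0: offset 3 MiB, length 1 MiB (tail of the object)
 *   object 1: offset 0, length 4 MiB (the whole object)
 *   object 2: offset 0, length 1 MiB (head of the object)
 *
 * rbd_segment_offset() and rbd_segment_length() yield the per-object
 * offset and length; img_offset and resid advance by "length" on each
 * iteration until resid reaches 0.
 */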
2359 rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
2361 struct rbd_img_request *img_request;
2362 struct rbd_device *rbd_dev;
2363 struct page **pages;
2366 rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2367 rbd_assert(obj_request_img_data_test(obj_request));
2368 img_request = obj_request->img_request;
2369 rbd_assert(img_request);
2371 rbd_dev = img_request->rbd_dev;
2372 rbd_assert(rbd_dev);
2374 pages = obj_request->copyup_pages;
2375 rbd_assert(pages != NULL);
2376 obj_request->copyup_pages = NULL;
2377 page_count = obj_request->copyup_page_count;
2378 rbd_assert(page_count);
2379 obj_request->copyup_page_count = 0;
2380 ceph_release_page_vector(pages, page_count);
2383 * We want the transfer count to reflect the size of the
2384 * original write request. There is no such thing as a
2385 * successful short write, so if the request was successful
2386 * we can just set it to the originally-requested length.
2388 if (!obj_request->result)
2389 obj_request->xferred = obj_request->length;
2391 /* Finish up with the normal image object callback */
2393 rbd_img_obj_callback(obj_request);
2397 rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2399 struct rbd_obj_request *orig_request;
2400 struct ceph_osd_request *osd_req;
2401 struct ceph_osd_client *osdc;
2402 struct rbd_device *rbd_dev;
2403 struct page **pages;
2410 rbd_assert(img_request_child_test(img_request));
2412 /* First get what we need from the image request */
2414 pages = img_request->copyup_pages;
2415 rbd_assert(pages != NULL);
2416 img_request->copyup_pages = NULL;
2417 page_count = img_request->copyup_page_count;
2418 rbd_assert(page_count);
2419 img_request->copyup_page_count = 0;
2421 orig_request = img_request->obj_request;
2422 rbd_assert(orig_request != NULL);
2423 rbd_assert(obj_request_type_valid(orig_request->type));
2424 img_result = img_request->result;
2425 parent_length = img_request->length;
2426 rbd_assert(parent_length == img_request->xferred);
2427 rbd_img_request_put(img_request);
2429 rbd_assert(orig_request->img_request);
2430 rbd_dev = orig_request->img_request->rbd_dev;
2431 rbd_assert(rbd_dev);
2434 * If the overlap has become 0 (most likely because the
2435 * image has been flattened) we need to free the pages
2436 * and re-submit the original write request.
2438 if (!rbd_dev->parent_overlap) {
2439 struct ceph_osd_client *osdc;
2441 ceph_release_page_vector(pages, page_count);
2442 osdc = &rbd_dev->rbd_client->client->osdc;
2443 img_result = rbd_obj_request_submit(osdc, orig_request);
2452 * The original osd request is of no use to us any more.
2453 * We need a new one that can hold the three ops in a copyup
2454 * request. Allocate the new copyup osd request for the
2455 * original request, and release the old one.
2457 img_result = -ENOMEM;
2458 osd_req = rbd_osd_req_create_copyup(orig_request);
2461 rbd_osd_req_destroy(orig_request->osd_req);
2462 orig_request->osd_req = osd_req;
2463 orig_request->copyup_pages = pages;
2464 orig_request->copyup_page_count = page_count;
2466 /* Initialize the copyup op */
2468 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2469 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
2472 /* Then the hint op */
2474 osd_req_op_alloc_hint_init(osd_req, 1, rbd_obj_bytes(&rbd_dev->header),
2475 rbd_obj_bytes(&rbd_dev->header));
2477 /* And the original write request op */
2479 offset = orig_request->offset;
2480 length = orig_request->length;
2481 osd_req_op_extent_init(osd_req, 2, CEPH_OSD_OP_WRITE,
2482 offset, length, 0, 0);
2483 if (orig_request->type == OBJ_REQUEST_BIO)
2484 osd_req_op_extent_osd_data_bio(osd_req, 2,
2485 orig_request->bio_list, length);
2487 osd_req_op_extent_osd_data_pages(osd_req, 2,
2488 orig_request->pages, length,
2489 offset & ~PAGE_MASK, false, false);
2491 rbd_osd_req_format_write(orig_request);
2493 /* All set, send it off. */
2495 orig_request->callback = rbd_img_obj_copyup_callback;
2496 osdc = &rbd_dev->rbd_client->client->osdc;
2497 img_result = rbd_obj_request_submit(osdc, orig_request);
2501 /* Record the error code and complete the request */
2503 orig_request->result = img_result;
2504 orig_request->xferred = 0;
2505 obj_request_done_set(orig_request);
2506 rbd_obj_request_complete(orig_request);
2510 * Read from the parent image the range of data that covers the
2511 * entire target of the given object request. This is used for
2512 * satisfying a layered image write request when the target of an
2513 * object request from the image request does not exist.
2515 * A page array big enough to hold the returned data is allocated
2516 * and supplied to rbd_img_request_fill() as the "data descriptor."
2517 * When the read completes, this page array will be transferred to
2518 * the original object request for the copyup operation.
2520 * If an error occurs, record it as the result of the original
2521 * object request and mark it done so it gets completed.
2523 static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2525 struct rbd_img_request *img_request = NULL;
2526 struct rbd_img_request *parent_request = NULL;
2527 struct rbd_device *rbd_dev;
2530 struct page **pages = NULL;
2534 rbd_assert(obj_request_img_data_test(obj_request));
2535 rbd_assert(obj_request_type_valid(obj_request->type));
2537 img_request = obj_request->img_request;
2538 rbd_assert(img_request != NULL);
2539 rbd_dev = img_request->rbd_dev;
2540 rbd_assert(rbd_dev->parent != NULL);
2543 * Determine the byte range covered by the object in the
2544 * child image to which the original request was to be sent.
2546 img_offset = obj_request->img_offset - obj_request->offset;
2547 length = (u64)1 << rbd_dev->header.obj_order;
2550 * There is no defined parent data beyond the parent
2551 * overlap, so limit what we read at that boundary if necessary.
2554 if (img_offset + length > rbd_dev->parent_overlap) {
2555 rbd_assert(img_offset < rbd_dev->parent_overlap);
2556 length = rbd_dev->parent_overlap - img_offset;
2560 * Allocate a page array big enough to receive the data read
2563 page_count = (u32)calc_pages_for(0, length);
2564 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2565 if (IS_ERR(pages)) {
2566 result = PTR_ERR(pages);
2572 parent_request = rbd_parent_request_create(obj_request,
2573 img_offset, length);
2574 if (!parent_request)
2577 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2580 parent_request->copyup_pages = pages;
2581 parent_request->copyup_page_count = page_count;
2583 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2584 result = rbd_img_request_submit(parent_request);
2588 parent_request->copyup_pages = NULL;
2589 parent_request->copyup_page_count = 0;
2590 parent_request->obj_request = NULL;
2591 rbd_obj_request_put(obj_request);
2594 ceph_release_page_vector(pages, page_count);
2596 rbd_img_request_put(parent_request);
2597 obj_request->result = result;
2598 obj_request->xferred = 0;
2599 obj_request_done_set(obj_request);
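/*
 * Worked example (editorial, values illustrative): with 4 MiB objects,
 * a layered write to the object covering image range [4 MiB, 8 MiB) on
 * a clone whose parent_overlap is 6 MiB computes img_offset = 4 MiB and
 * length = 4 MiB, then clamps length to parent_overlap - img_offset =
 * 2 MiB. Only [4 MiB, 6 MiB) is read from the parent; beyond the
 * overlap there is no defined parent data to copy up.
 */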
2604 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2606 struct rbd_obj_request *orig_request;
2607 struct rbd_device *rbd_dev;
2610 rbd_assert(!obj_request_img_data_test(obj_request));
2613 * All we need from the object request is the original
2614 * request and the result of the STAT op. Grab those, then
2615 * we're done with the request.
2617 orig_request = obj_request->obj_request;
2618 obj_request->obj_request = NULL;
2619 rbd_obj_request_put(orig_request);
2620 rbd_assert(orig_request);
2621 rbd_assert(orig_request->img_request);
2623 result = obj_request->result;
2624 obj_request->result = 0;
2626 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2627 obj_request, orig_request, result,
2628 obj_request->xferred, obj_request->length);
2629 rbd_obj_request_put(obj_request);
2632 * If the overlap has become 0 (most likely because the
2633 * image has been flattened) we need to re-submit the
2634 * original write request.
2636 rbd_dev = orig_request->img_request->rbd_dev;
2637 if (!rbd_dev->parent_overlap) {
2638 struct ceph_osd_client *osdc;
2640 osdc = &rbd_dev->rbd_client->client->osdc;
2641 result = rbd_obj_request_submit(osdc, orig_request);
2647 * Our only purpose here is to determine whether the object
2648 * exists, and we don't want to treat the non-existence as
2649 * an error. If something else comes back, transfer the
2650 * error to the original request and complete it now.
2653 obj_request_existence_set(orig_request, true);
2654 } else if (result == -ENOENT) {
2655 obj_request_existence_set(orig_request, false);
2656 } else if (result) {
2657 orig_request->result = result;
2662 * Resubmit the original request now that we have recorded
2663 * whether the target object exists.
2665 orig_request->result = rbd_img_obj_request_submit(orig_request);
2667 if (orig_request->result)
2668 rbd_obj_request_complete(orig_request);
2671 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2673 struct rbd_obj_request *stat_request;
2674 struct rbd_device *rbd_dev;
2675 struct ceph_osd_client *osdc;
2676 struct page **pages = NULL;
2682 * The response data for a STAT call consists of a le64 length followed by an mtime pair (le32 tv_sec, le32 tv_nsec), which determines the size computed below.
2689 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2690 page_count = (u32)calc_pages_for(0, size);
2691 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2693 return PTR_ERR(pages);
2696 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2701 rbd_obj_request_get(obj_request);
2702 stat_request->obj_request = obj_request;
2703 stat_request->pages = pages;
2704 stat_request->page_count = page_count;
2706 rbd_assert(obj_request->img_request);
2707 rbd_dev = obj_request->img_request->rbd_dev;
2708 stat_request->osd_req = rbd_osd_req_create(rbd_dev, false, 1,
2710 if (!stat_request->osd_req)
2712 stat_request->callback = rbd_img_obj_exists_callback;
2714 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
2715 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2717 rbd_osd_req_format_read(stat_request);
2719 osdc = &rbd_dev->rbd_client->client->osdc;
2720 ret = rbd_obj_request_submit(osdc, stat_request);
2723 rbd_obj_request_put(obj_request);
2728 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2730 struct rbd_img_request *img_request;
2731 struct rbd_device *rbd_dev;
2734 rbd_assert(obj_request_img_data_test(obj_request));
2736 img_request = obj_request->img_request;
2737 rbd_assert(img_request);
2738 rbd_dev = img_request->rbd_dev;
2741 * Only writes to layered images need special handling.
2742 * Reads and non-layered writes are simple object requests.
2743 * Layered writes that start beyond the end of the overlap
2744 * with the parent have no parent data, so they too are
2745 * simple object requests. Finally, if the target object is
2746 * known to already exist, its parent data has already been
2747 * copied, so a write to the object can also be handled as a
2748 * simple object request.
2750 if (!img_request_write_test(img_request) ||
2751 !img_request_layered_test(img_request) ||
2752 rbd_dev->parent_overlap <= obj_request->img_offset ||
2753 ((known = obj_request_known_test(obj_request)) &&
2754 obj_request_exists_test(obj_request))) {
2756 struct rbd_device *rbd_dev;
2757 struct ceph_osd_client *osdc;
2759 rbd_dev = obj_request->img_request->rbd_dev;
2760 osdc = &rbd_dev->rbd_client->client->osdc;
2762 return rbd_obj_request_submit(osdc, obj_request);
2766 * It's a layered write. The target object might exist but
2767 * we may not know that yet. If we know it doesn't exist,
2768 * start by reading the data for the full target object from
2769 * the parent so we can use it for a copyup to the target.
2772 return rbd_img_obj_parent_read_full(obj_request);
2774 /* We don't know whether the target exists. Go find out. */
2776 return rbd_img_obj_exists_submit(obj_request);
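/*
 * Summary of the dispatch above (editorial, for clarity):
 *
 *   read, non-layered write, write entirely beyond
 *   the parent overlap, or target known to exist  -> plain osd submit
 *   target known not to exist                     -> rbd_img_obj_parent_read_full()
 *   target existence unknown                      -> rbd_img_obj_exists_submit()
 */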
2779 static int rbd_img_request_submit(struct rbd_img_request *img_request)
2781 struct rbd_obj_request *obj_request;
2782 struct rbd_obj_request *next_obj_request;
2784 dout("%s: img %p\n", __func__, img_request);
2785 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
2788 ret = rbd_img_obj_request_submit(obj_request);
2796 static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2798 struct rbd_obj_request *obj_request;
2799 struct rbd_device *rbd_dev;
2804 rbd_assert(img_request_child_test(img_request));
2806 /* First get what we need from the image request and release it */
2808 obj_request = img_request->obj_request;
2809 img_xferred = img_request->xferred;
2810 img_result = img_request->result;
2811 rbd_img_request_put(img_request);
2814 * If the overlap has become 0 (most likely because the
2815 * image has been flattened) we need to re-submit the original request.
2818 rbd_assert(obj_request);
2819 rbd_assert(obj_request->img_request);
2820 rbd_dev = obj_request->img_request->rbd_dev;
2821 if (!rbd_dev->parent_overlap) {
2822 struct ceph_osd_client *osdc;
2824 osdc = &rbd_dev->rbd_client->client->osdc;
2825 img_result = rbd_obj_request_submit(osdc, obj_request);
2830 obj_request->result = img_result;
2831 if (obj_request->result)
2835 * We need to zero anything beyond the parent overlap
2836 * boundary. Since rbd_img_obj_request_read_callback()
2837 * will zero anything beyond the end of a short read, an
2838 * easy way to do this is to pretend the data from the
2839 * parent came up short--ending at the overlap boundary.
2841 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
2842 obj_end = obj_request->img_offset + obj_request->length;
2843 if (obj_end > rbd_dev->parent_overlap) {
2846 if (obj_request->img_offset < rbd_dev->parent_overlap)
2847 xferred = rbd_dev->parent_overlap -
2848 obj_request->img_offset;
2850 obj_request->xferred = min(img_xferred, xferred);
2852 obj_request->xferred = img_xferred;
2855 rbd_img_obj_request_read_callback(obj_request);
2856 rbd_obj_request_complete(obj_request);
2859 static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
2861 struct rbd_img_request *img_request;
2864 rbd_assert(obj_request_img_data_test(obj_request));
2865 rbd_assert(obj_request->img_request != NULL);
2866 rbd_assert(obj_request->result == (s32) -ENOENT);
2867 rbd_assert(obj_request_type_valid(obj_request->type));
2869 /* rbd_read_finish(obj_request, obj_request->length); */
2870 img_request = rbd_parent_request_create(obj_request,
2871 obj_request->img_offset,
2872 obj_request->length);
2877 if (obj_request->type == OBJ_REQUEST_BIO)
2878 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2879 obj_request->bio_list);
2881 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
2882 obj_request->pages);
2886 img_request->callback = rbd_img_parent_read_callback;
2887 result = rbd_img_request_submit(img_request);
2894 rbd_img_request_put(img_request);
2895 obj_request->result = result;
2896 obj_request->xferred = 0;
2897 obj_request_done_set(obj_request);
2900 static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
2902 struct rbd_obj_request *obj_request;
2903 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2906 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2907 OBJ_REQUEST_NODATA);
2912 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, 1,
2914 if (!obj_request->osd_req)
2917 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
2919 rbd_osd_req_format_read(obj_request);
2921 ret = rbd_obj_request_submit(osdc, obj_request);
2924 ret = rbd_obj_request_wait(obj_request);
2926 rbd_obj_request_put(obj_request);
2931 static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2933 struct rbd_device *rbd_dev = (struct rbd_device *)data;
2939 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
2940 rbd_dev->header_name, (unsigned long long)notify_id,
2941 (unsigned int)opcode);
2942 ret = rbd_dev_refresh(rbd_dev);
2944 rbd_warn(rbd_dev, "header refresh error (%d)\n", ret);
2946 rbd_obj_notify_ack_sync(rbd_dev, notify_id);
2950 * Initiate a watch request, synchronously.
2952 static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
2954 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2955 struct rbd_obj_request *obj_request;
2958 rbd_assert(!rbd_dev->watch_event);
2959 rbd_assert(!rbd_dev->watch_request);
2961 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
2962 &rbd_dev->watch_event);
2966 rbd_assert(rbd_dev->watch_event);
2968 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2969 OBJ_REQUEST_NODATA);
2975 obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, 1,
2977 if (!obj_request->osd_req) {
2982 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
2984 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
2985 rbd_dev->watch_event->cookie, 0, 1);
2986 rbd_osd_req_format_write(obj_request);
2988 ret = rbd_obj_request_submit(osdc, obj_request);
2992 ret = rbd_obj_request_wait(obj_request);
2996 ret = obj_request->result;
3001 * A watch request is set to linger, so the underlying osd
3002 * request won't go away until we unregister it. We retain
3003 * a pointer to the object request during that time (in
3004 * rbd_dev->watch_request), so we'll keep a reference to
3005 * it. We'll drop that reference (below) after we've unregistered it.
3008 rbd_dev->watch_request = obj_request;
3013 ceph_osdc_unregister_linger_request(osdc, obj_request->osd_req);
3015 rbd_obj_request_put(obj_request);
3017 ceph_osdc_cancel_event(rbd_dev->watch_event);
3018 rbd_dev->watch_event = NULL;
3024 * Tear down a watch request, synchronously.
3026 static int __rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
3028 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3029 struct rbd_obj_request *obj_request;
3032 rbd_assert(rbd_dev->watch_event);
3033 rbd_assert(rbd_dev->watch_request);
3035 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
3036 OBJ_REQUEST_NODATA);
3042 obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, 1,
3044 if (!obj_request->osd_req) {
3049 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
3050 rbd_dev->watch_event->cookie, 0, 0);
3051 rbd_osd_req_format_write(obj_request);
3053 ret = rbd_obj_request_submit(osdc, obj_request);
3057 ret = rbd_obj_request_wait(obj_request);
3061 ret = obj_request->result;
3065 /* We have successfully torn down the watch request */
3067 ceph_osdc_unregister_linger_request(osdc,
3068 rbd_dev->watch_request->osd_req);
3069 rbd_obj_request_put(rbd_dev->watch_request);
3070 rbd_dev->watch_request = NULL;
3073 rbd_obj_request_put(obj_request);
3075 ceph_osdc_cancel_event(rbd_dev->watch_event);
3076 rbd_dev->watch_event = NULL;
3081 static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
3085 ret = __rbd_dev_header_unwatch_sync(rbd_dev);
3087 rbd_warn(rbd_dev, "unable to tear down watch request: %d\n",
3093 * Synchronous osd object method call. Returns the number of bytes
3094 * returned in the inbound buffer, or a negative error code.
3096 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
3097 const char *object_name,
3098 const char *class_name,
3099 const char *method_name,
3100 const void *outbound,
3101 size_t outbound_size,
3103 size_t inbound_size)
3105 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3106 struct rbd_obj_request *obj_request;
3107 struct page **pages;
3112 * Method calls are ultimately read operations. The result
3113 * should be placed into the inbound buffer provided. They
3114 * also supply outbound data--parameters for the object
3115 * method. Currently if this is present it will be a snapshot id.
3118 page_count = (u32)calc_pages_for(0, inbound_size);
3119 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3121 return PTR_ERR(pages);
3124 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
3129 obj_request->pages = pages;
3130 obj_request->page_count = page_count;
3132 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, 1,
3134 if (!obj_request->osd_req)
3137 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
3138 class_name, method_name);
3139 if (outbound_size) {
3140 struct ceph_pagelist *pagelist;
3142 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
3146 ceph_pagelist_init(pagelist);
3147 ceph_pagelist_append(pagelist, outbound, outbound_size);
3148 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
3151 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
3152 obj_request->pages, inbound_size,
3154 rbd_osd_req_format_read(obj_request);
3156 ret = rbd_obj_request_submit(osdc, obj_request);
3159 ret = rbd_obj_request_wait(obj_request);
3163 ret = obj_request->result;
3167 rbd_assert(obj_request->xferred < (u64)INT_MAX);
3168 ret = (int)obj_request->xferred;
3169 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
3172 rbd_obj_request_put(obj_request);
3174 ceph_release_page_vector(pages, page_count);
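/*
 * Usage sketch (editorial, illustrative only): callers name a class
 * method and receive the decoded reply in the inbound buffer, e.g.
 * roughly what _rbd_dev_v2_snap_size() below does:
 *
 *   __le64 snapid = cpu_to_le64(CEPH_NOSNAP);
 *   struct {
 *       u8 order;
 *       __le64 size;
 *   } __attribute__ ((packed)) size_buf = { 0 };
 *
 *   ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
 *                             "rbd", "get_size",
 *                             &snapid, sizeof (snapid),
 *                             &size_buf, sizeof (size_buf));
 *
 * On success, ret is the number of bytes copied into size_buf.
 */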
3179 static void rbd_request_fn(struct request_queue *q)
3180 __releases(q->queue_lock) __acquires(q->queue_lock)
3182 struct rbd_device *rbd_dev = q->queuedata;
3186 while ((rq = blk_fetch_request(q))) {
3187 bool write_request = rq_data_dir(rq) == WRITE;
3188 struct rbd_img_request *img_request;
3192 /* Ignore any non-FS requests that filter through. */
3194 if (rq->cmd_type != REQ_TYPE_FS) {
3195 dout("%s: non-fs request type %d\n", __func__,
3196 (int) rq->cmd_type);
3197 __blk_end_request_all(rq, 0);
3201 /* Ignore/skip any zero-length requests */
3203 offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
3204 length = (u64) blk_rq_bytes(rq);
3207 dout("%s: zero-length request\n", __func__);
3208 __blk_end_request_all(rq, 0);
3212 spin_unlock_irq(q->queue_lock);
3214 /* Disallow writes to a read-only device */
3216 if (write_request) {
3218 if (rbd_dev->mapping.read_only)
3220 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
3224 * Quit early if the mapped snapshot no longer
3225 * exists. It's still possible the snapshot will
3226 * have disappeared by the time our request arrives
3227 * at the osd, but there's no sense in sending it if we already know.
3230 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
3231 dout("request for non-existent snapshot");
3232 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
3238 if (offset && length > U64_MAX - offset + 1) {
3239 rbd_warn(rbd_dev, "bad request range (%llu~%llu)\n",
3241 goto end_request; /* Shouldn't happen */
3245 if (offset + length > rbd_dev->mapping.size) {
3246 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)\n",
3247 offset, length, rbd_dev->mapping.size);
3252 img_request = rbd_img_request_create(rbd_dev, offset, length,
3257 img_request->rq = rq;
3259 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3262 result = rbd_img_request_submit(img_request);
3264 rbd_img_request_put(img_request);
3266 spin_lock_irq(q->queue_lock);
3268 rbd_warn(rbd_dev, "%s %llx at %llx result %d\n",
3269 write_request ? "write" : "read",
3270 length, offset, result);
3272 __blk_end_request_all(rq, result);
3278 * A queue callback. Makes sure that we don't create a bio that spans across
3279 * multiple osd objects. One exception would be with single-page bios,
3280 * which we handle later at bio_chain_clone_range().
3282 static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
3283 struct bio_vec *bvec)
3285 struct rbd_device *rbd_dev = q->queuedata;
3286 sector_t sector_offset;
3287 sector_t sectors_per_obj;
3288 sector_t obj_sector_offset;
3292 * Find how far into its rbd object the partition-relative
3293 * bio start sector falls, relative to the enclosing device.
3296 sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
3297 sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
3298 obj_sector_offset = sector_offset & (sectors_per_obj - 1);
3301 * Compute the number of bytes from that offset to the end
3302 * of the object. Account for what's already used by the bio.
3304 ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
3305 if (ret > bmd->bi_size)
3306 ret -= bmd->bi_size;
3311 * Don't send back more than was asked for. And if the bio
3312 * was empty, let the whole thing through because: "Note
3313 * that a block device *must* allow a single page to be
3314 * added to an empty bio."
3316 rbd_assert(bvec->bv_len <= PAGE_SIZE);
3317 if (ret > (int) bvec->bv_len || !bmd->bi_size)
3318 ret = (int) bvec->bv_len;
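/*
 * Worked example (editorial, values illustrative): with obj_order == 22
 * (4 MiB objects), sectors_per_obj = 1 << (22 - 9) = 8192. A bio whose
 * absolute start sector is 12288 has obj_sector_offset = 12288 & 8191 =
 * 4096, so at most (8192 - 4096) << 9 = 2 MiB, minus whatever the bio
 * already holds, may be merged before hitting the object boundary.
 */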
3323 static void rbd_free_disk(struct rbd_device *rbd_dev)
3325 struct gendisk *disk = rbd_dev->disk;
3330 rbd_dev->disk = NULL;
3331 if (disk->flags & GENHD_FL_UP) {
3334 blk_cleanup_queue(disk->queue);
3339 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3340 const char *object_name,
3341 u64 offset, u64 length, void *buf)
3344 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3345 struct rbd_obj_request *obj_request;
3346 struct page **pages = NULL;
3351 page_count = (u32) calc_pages_for(offset, length);
3352 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3354 ret = PTR_ERR(pages);
3357 obj_request = rbd_obj_request_create(object_name, offset, length,
3362 obj_request->pages = pages;
3363 obj_request->page_count = page_count;
3365 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, 1,
3367 if (!obj_request->osd_req)
3370 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
3371 offset, length, 0, 0);
3372 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
3374 obj_request->length,
3375 obj_request->offset & ~PAGE_MASK,
3377 rbd_osd_req_format_read(obj_request);
3379 ret = rbd_obj_request_submit(osdc, obj_request);
3382 ret = rbd_obj_request_wait(obj_request);
3386 ret = obj_request->result;
3390 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
3391 size = (size_t) obj_request->xferred;
3392 ceph_copy_from_page_vector(pages, buf, 0, size);
3393 rbd_assert(size <= (size_t)INT_MAX);
3397 rbd_obj_request_put(obj_request);
3399 ceph_release_page_vector(pages, page_count);
3405 * Read the complete header for the given rbd device. On successful
3406 * return, the rbd_dev->header field will contain up-to-date
3407 * information about the image.
3409 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
3411 struct rbd_image_header_ondisk *ondisk = NULL;
3418 * The complete header will include an array of its 64-bit
3419 * snapshot ids, followed by the names of those snapshots as
3420 * a contiguous block of NUL-terminated strings. Note that
3421 * the number of snapshots could change by the time we read
3422 * it in, in which case we re-read it.
3429 size = sizeof (*ondisk);
3430 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3432 ondisk = kmalloc(size, GFP_KERNEL);
3436 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
3440 if ((size_t)ret < size) {
3442 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3446 if (!rbd_dev_ondisk_valid(ondisk)) {
3448 rbd_warn(rbd_dev, "invalid header");
3452 names_size = le64_to_cpu(ondisk->snap_names_len);
3453 want_count = snap_count;
3454 snap_count = le32_to_cpu(ondisk->snap_count);
3455 } while (snap_count != want_count);
3457 ret = rbd_header_from_disk(rbd_dev, ondisk);
3465 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3466 * has disappeared from the (just updated) snapshot context.
3468 static void rbd_exists_validate(struct rbd_device *rbd_dev)
3472 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3475 snap_id = rbd_dev->spec->snap_id;
3476 if (snap_id == CEPH_NOSNAP)
3479 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3480 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3483 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
3489 * Don't hold the lock while doing disk operations,
3490 * or lock ordering will conflict with the bdev mutex via:
3491 * rbd_add() -> blkdev_get() -> rbd_open()
3493 spin_lock_irq(&rbd_dev->lock);
3494 removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
3495 spin_unlock_irq(&rbd_dev->lock);
3497 * If the device is being removed, rbd_dev->disk has
3498 * been destroyed, so don't try to update its size
3501 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3502 dout("setting size to %llu sectors", (unsigned long long)size);
3503 set_capacity(rbd_dev->disk, size);
3504 revalidate_disk(rbd_dev->disk);
3508 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3513 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
3514 down_write(&rbd_dev->header_rwsem);
3515 mapping_size = rbd_dev->mapping.size;
3516 if (rbd_dev->image_format == 1)
3517 ret = rbd_dev_v1_header_info(rbd_dev);
3519 ret = rbd_dev_v2_header_info(rbd_dev);
3521 /* If it's a mapped snapshot, validate its EXISTS flag */
3523 rbd_exists_validate(rbd_dev);
3524 up_write(&rbd_dev->header_rwsem);
3526 if (mapping_size != rbd_dev->mapping.size) {
3527 rbd_dev_update_size(rbd_dev);
3533 static int rbd_init_disk(struct rbd_device *rbd_dev)
3535 struct gendisk *disk;
3536 struct request_queue *q;
3539 /* create gendisk info */
3540 disk = alloc_disk(single_major ?
3541 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
3542 RBD_MINORS_PER_MAJOR);
3546 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
3548 disk->major = rbd_dev->major;
3549 disk->first_minor = rbd_dev->minor;
3551 disk->flags |= GENHD_FL_EXT_DEVT;
3552 disk->fops = &rbd_bd_ops;
3553 disk->private_data = rbd_dev;
3555 q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
3559 /* We use the default size, but let's be explicit about it. */
3560 blk_queue_physical_block_size(q, SECTOR_SIZE);
3562 /* set io sizes to object size */
3563 segment_size = rbd_obj_bytes(&rbd_dev->header);
3564 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
3565 blk_queue_max_segment_size(q, segment_size);
3566 blk_queue_io_min(q, segment_size);
3567 blk_queue_io_opt(q, segment_size);
3569 blk_queue_merge_bvec(q, rbd_merge_bvec);
3572 q->queuedata = rbd_dev;
3574 rbd_dev->disk = disk;
3587 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3589 return container_of(dev, struct rbd_device, dev);
3592 static ssize_t rbd_size_show(struct device *dev,
3593 struct device_attribute *attr, char *buf)
3595 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3597 return sprintf(buf, "%llu\n",
3598 (unsigned long long)rbd_dev->mapping.size);
3602 * Note this shows the features for whatever's mapped, which is not
3603 * necessarily the base image.
3605 static ssize_t rbd_features_show(struct device *dev,
3606 struct device_attribute *attr, char *buf)
3608 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3610 return sprintf(buf, "0x%016llx\n",
3611 (unsigned long long)rbd_dev->mapping.features);
3614 static ssize_t rbd_major_show(struct device *dev,
3615 struct device_attribute *attr, char *buf)
3617 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3620 return sprintf(buf, "%d\n", rbd_dev->major);
3622 return sprintf(buf, "(none)\n");
3625 static ssize_t rbd_minor_show(struct device *dev,
3626 struct device_attribute *attr, char *buf)
3628 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3630 return sprintf(buf, "%d\n", rbd_dev->minor);
3633 static ssize_t rbd_client_id_show(struct device *dev,
3634 struct device_attribute *attr, char *buf)
3636 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3638 return sprintf(buf, "client%lld\n",
3639 ceph_client_id(rbd_dev->rbd_client->client));
3642 static ssize_t rbd_pool_show(struct device *dev,
3643 struct device_attribute *attr, char *buf)
3645 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3647 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
3650 static ssize_t rbd_pool_id_show(struct device *dev,
3651 struct device_attribute *attr, char *buf)
3653 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3655 return sprintf(buf, "%llu\n",
3656 (unsigned long long) rbd_dev->spec->pool_id);
3659 static ssize_t rbd_name_show(struct device *dev,
3660 struct device_attribute *attr, char *buf)
3662 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3664 if (rbd_dev->spec->image_name)
3665 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3667 return sprintf(buf, "(unknown)\n");
3670 static ssize_t rbd_image_id_show(struct device *dev,
3671 struct device_attribute *attr, char *buf)
3673 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3675 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
3679 * Shows the name of the currently-mapped snapshot (or
3680 * RBD_SNAP_HEAD_NAME for the base image).
3682 static ssize_t rbd_snap_show(struct device *dev,
3683 struct device_attribute *attr,
3686 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3688 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
3692 * For an rbd v2 image, shows the pool, image, and snapshot ids (and
3693 * names, where known) and the parent overlap for the parent image.
3694 * If there is no parent, simply shows "(no parent image)".
3696 static ssize_t rbd_parent_show(struct device *dev,
3697 struct device_attribute *attr,
3700 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3701 struct rbd_spec *spec = rbd_dev->parent_spec;
3706 return sprintf(buf, "(no parent image)\n");
3708 count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
3709 (unsigned long long) spec->pool_id, spec->pool_name);
3714 count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
3715 spec->image_name ? spec->image_name : "(unknown)");
3720 count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
3721 (unsigned long long) spec->snap_id, spec->snap_name);
3726 count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
3731 return (ssize_t) (bufp - buf);
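/*
 * Example output (editorial; values illustrative) for a mapped clone,
 * read via /sys/bus/rbd/devices/<id>/parent:
 *
 *   pool_id 2
 *   pool_name rbd
 *   image_id 1014b76b8b4567
 *   image_name parent-image
 *   snap_id 4
 *   snap_name base
 *   overlap 10737418240
 */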
3734 static ssize_t rbd_image_refresh(struct device *dev,
3735 struct device_attribute *attr,
3739 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3742 ret = rbd_dev_refresh(rbd_dev);
3744 rbd_warn(rbd_dev, ": manual header refresh error (%d)\n", ret);
3746 return ret < 0 ? ret : size;
3749 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
3750 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
3751 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
3752 static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
3753 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3754 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
3755 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
3756 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
3757 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
3758 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3759 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
3760 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
3762 static struct attribute *rbd_attrs[] = {
3763 &dev_attr_size.attr,
3764 &dev_attr_features.attr,
3765 &dev_attr_major.attr,
3766 &dev_attr_minor.attr,
3767 &dev_attr_client_id.attr,
3768 &dev_attr_pool.attr,
3769 &dev_attr_pool_id.attr,
3770 &dev_attr_name.attr,
3771 &dev_attr_image_id.attr,
3772 &dev_attr_current_snap.attr,
3773 &dev_attr_parent.attr,
3774 &dev_attr_refresh.attr,
3778 static struct attribute_group rbd_attr_group = {
3782 static const struct attribute_group *rbd_attr_groups[] = {
3787 static void rbd_sysfs_dev_release(struct device *dev)
3791 static struct device_type rbd_device_type = {
3793 .groups = rbd_attr_groups,
3794 .release = rbd_sysfs_dev_release,
3797 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
3799 kref_get(&spec->kref);
3804 static void rbd_spec_free(struct kref *kref);
3805 static void rbd_spec_put(struct rbd_spec *spec)
3808 kref_put(&spec->kref, rbd_spec_free);
3811 static struct rbd_spec *rbd_spec_alloc(void)
3813 struct rbd_spec *spec;
3815 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
3818 kref_init(&spec->kref);
3823 static void rbd_spec_free(struct kref *kref)
3825 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
3827 kfree(spec->pool_name);
3828 kfree(spec->image_id);
3829 kfree(spec->image_name);
3830 kfree(spec->snap_name);
3834 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
3835 struct rbd_spec *spec)
3837 struct rbd_device *rbd_dev;
3839 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
3843 spin_lock_init(&rbd_dev->lock);
3845 atomic_set(&rbd_dev->parent_ref, 0);
3846 INIT_LIST_HEAD(&rbd_dev->node);
3847 init_rwsem(&rbd_dev->header_rwsem);
3849 rbd_dev->spec = spec;
3850 rbd_dev->rbd_client = rbdc;
3852 /* Initialize the layout used for all rbd requests */
3854 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3855 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
3856 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3857 rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
3862 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
3864 rbd_put_client(rbd_dev->rbd_client);
3865 rbd_spec_put(rbd_dev->spec);
3870 * Get the size and object order for an image snapshot, or if
3871 * snap_id is CEPH_NOSNAP, get this information for the base image.
3874 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
3875 u8 *order, u64 *snap_size)
3877 __le64 snapid = cpu_to_le64(snap_id);
3882 } __attribute__ ((packed)) size_buf = { 0 };
3884 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3886 &snapid, sizeof (snapid),
3887 &size_buf, sizeof (size_buf));
3888 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3891 if (ret < sizeof (size_buf))
3895 *order = size_buf.order;
3896 dout(" order %u", (unsigned int)*order);
3898 *snap_size = le64_to_cpu(size_buf.size);
3900 dout(" snap_id 0x%016llx snap_size = %llu\n",
3901 (unsigned long long)snap_id,
3902 (unsigned long long)*snap_size);
3907 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
3909 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
3910 &rbd_dev->header.obj_order,
3911 &rbd_dev->header.image_size);
3914 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
3920 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
3924 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3925 "rbd", "get_object_prefix", NULL, 0,
3926 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
3927 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3932 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
3933 p + ret, NULL, GFP_NOIO);
3936 if (IS_ERR(rbd_dev->header.object_prefix)) {
3937 ret = PTR_ERR(rbd_dev->header.object_prefix);
3938 rbd_dev->header.object_prefix = NULL;
3940 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
3948 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
3951 __le64 snapid = cpu_to_le64(snap_id);
3955 } __attribute__ ((packed)) features_buf = { 0 };
3959 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3960 "rbd", "get_features",
3961 &snapid, sizeof (snapid),
3962 &features_buf, sizeof (features_buf));
3963 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3966 if (ret < sizeof (features_buf))
3969 incompat = le64_to_cpu(features_buf.incompat);
3970 if (incompat & ~RBD_FEATURES_SUPPORTED)
3973 *snap_features = le64_to_cpu(features_buf.features);
3975 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
3976 (unsigned long long)snap_id,
3977 (unsigned long long)*snap_features,
3978 (unsigned long long)le64_to_cpu(features_buf.incompat));
3983 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
3985 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
3986 &rbd_dev->header.features);
3989 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
3991 struct rbd_spec *parent_spec;
3993 void *reply_buf = NULL;
4003 parent_spec = rbd_spec_alloc();
4007 size = sizeof (__le64) + /* pool_id */
4008 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
4009 sizeof (__le64) + /* snap_id */
4010 sizeof (__le64); /* overlap */
4011 reply_buf = kmalloc(size, GFP_KERNEL);
4017 snapid = cpu_to_le64(CEPH_NOSNAP);
4018 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4019 "rbd", "get_parent",
4020 &snapid, sizeof (snapid),
4022 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4027 end = reply_buf + ret;
4029 ceph_decode_64_safe(&p, end, pool_id, out_err);
4030 if (pool_id == CEPH_NOPOOL) {
4032 * Either the parent never existed, or we have
4033 * record of it but the image got flattened so it no
4034 * longer has a parent. When the parent of a
4035 * layered image disappears we immediately set the
4036 * overlap to 0. The effect of this is that all new
4037 * requests will be treated as if the image had no parent.
4040 if (rbd_dev->parent_overlap) {
4041 rbd_dev->parent_overlap = 0;
4043 rbd_dev_parent_put(rbd_dev);
4044 pr_info("%s: clone image has been flattened\n",
4045 rbd_dev->disk->disk_name);
4048 goto out; /* No parent? No problem. */
4051 /* The ceph file layout needs to fit pool id in 32 bits */
4054 if (pool_id > (u64)U32_MAX) {
4055 rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
4056 (unsigned long long)pool_id, U32_MAX);
4060 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4061 if (IS_ERR(image_id)) {
4062 ret = PTR_ERR(image_id);
4065 ceph_decode_64_safe(&p, end, snap_id, out_err);
4066 ceph_decode_64_safe(&p, end, overlap, out_err);
4069 * The parent won't change (except when the clone is
4070 * flattened, which was already handled above). So we only need
4071 * to record the parent spec if we have not already done so.
4073 if (!rbd_dev->parent_spec) {
4074 parent_spec->pool_id = pool_id;
4075 parent_spec->image_id = image_id;
4076 parent_spec->snap_id = snap_id;
4077 rbd_dev->parent_spec = parent_spec;
4078 parent_spec = NULL; /* rbd_dev now owns this */
4082 * We always update the parent overlap. If it's zero we
4083 * treat it specially.
4085 rbd_dev->parent_overlap = overlap;
4089 /* A null parent_spec indicates it's the initial probe */
4093 * The overlap has become zero, so the clone
4094 * must have been resized down to 0 at some
4095 * point. Treat this the same as a flatten.
4097 rbd_dev_parent_put(rbd_dev);
4098 pr_info("%s: clone image now standalone\n",
4099 rbd_dev->disk->disk_name);
4102 * For the initial probe, if we find the
4103 * overlap is zero we just pretend there was no parent image.
4106 rbd_warn(rbd_dev, "ignoring parent of "
4107 "clone with overlap 0\n");
4114 rbd_spec_put(parent_spec);
4119 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
4123 __le64 stripe_count;
4124 } __attribute__ ((packed)) striping_info_buf = { 0 };
4125 size_t size = sizeof (striping_info_buf);
4132 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4133 "rbd", "get_stripe_unit_count", NULL, 0,
4134 (char *)&striping_info_buf, size);
4135 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4142 * We don't actually support the "fancy striping" feature
4143 * (STRIPINGV2) yet, but if the striping sizes are the
4144 * defaults the behavior is the same as before. So find
4145 * out, and only fail if the image has non-default values.
4148 obj_size = (u64)1 << rbd_dev->header.obj_order;
4149 p = &striping_info_buf;
4150 stripe_unit = ceph_decode_64(&p);
4151 if (stripe_unit != obj_size) {
4152 rbd_warn(rbd_dev, "unsupported stripe unit "
4153 "(got %llu want %llu)",
4154 stripe_unit, obj_size);
4157 stripe_count = ceph_decode_64(&p);
4158 if (stripe_count != 1) {
4159 rbd_warn(rbd_dev, "unsupported stripe count "
4160 "(got %llu want 1)", stripe_count);
4163 rbd_dev->header.stripe_unit = stripe_unit;
4164 rbd_dev->header.stripe_count = stripe_count;
4169 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
4171 size_t image_id_size;
4176 void *reply_buf = NULL;
4178 char *image_name = NULL;
4181 rbd_assert(!rbd_dev->spec->image_name);
4183 len = strlen(rbd_dev->spec->image_id);
4184 image_id_size = sizeof (__le32) + len;
4185 image_id = kmalloc(image_id_size, GFP_KERNEL);
4190 end = image_id + image_id_size;
4191 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
4193 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
4194 reply_buf = kmalloc(size, GFP_KERNEL);
4198 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
4199 "rbd", "dir_get_name",
4200 image_id, image_id_size,
4205 end = reply_buf + ret;
4207 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
4208 if (IS_ERR(image_name))
4211 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
4219 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4221 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4222 const char *snap_name;
4225 /* Skip over names until we find the one we are looking for */
4227 snap_name = rbd_dev->header.snap_names;
4228 while (which < snapc->num_snaps) {
4229 if (!strcmp(name, snap_name))
4230 return snapc->snaps[which];
4231 snap_name += strlen(snap_name) + 1;
4237 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4239 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4244 for (which = 0; !found && which < snapc->num_snaps; which++) {
4245 const char *snap_name;
4247 snap_id = snapc->snaps[which];
4248 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
4249 if (IS_ERR(snap_name)) {
4250 /* ignore no-longer existing snapshots */
4251 if (PTR_ERR(snap_name) == -ENOENT)
4256 found = !strcmp(name, snap_name);
4259 return found ? snap_id : CEPH_NOSNAP;
4263 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
4264 * no snapshot by that name is found, or if an error occurs.
4266 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4268 if (rbd_dev->image_format == 1)
4269 return rbd_v1_snap_id_by_name(rbd_dev, name);
4271 return rbd_v2_snap_id_by_name(rbd_dev, name);
4275 * When an rbd image has a parent image, it is identified by the
4276 * pool, image, and snapshot ids (not names). This function fills
4277 * in the names for those ids. (It's OK if we can't figure out the
4278 * name for an image id, but the pool and snapshot ids should always
4279 * exist and have names.) All names in an rbd spec are dynamically allocated.
4282 * When an image being mapped (not a parent) is probed, we have the
4283 * pool name and pool id, image name and image id, and the snapshot
4284 * name. The only thing we're missing is the snapshot id.
4286 static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
4288 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4289 struct rbd_spec *spec = rbd_dev->spec;
4290 const char *pool_name;
4291 const char *image_name;
4292 const char *snap_name;
4296 * An image being mapped will have the pool name (etc.), but
4297 * we need to look up the snapshot id.
4299 if (spec->pool_name) {
4300 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
4303 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
4304 if (snap_id == CEPH_NOSNAP)
4306 spec->snap_id = snap_id;
4308 spec->snap_id = CEPH_NOSNAP;
4314 /* Get the pool name; we have to make our own copy of this */
4316 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
4318 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
4321 pool_name = kstrdup(pool_name, GFP_KERNEL);
4325 /* Fetch the image name; tolerate failure here */
4327 image_name = rbd_dev_image_name(rbd_dev);
4329 rbd_warn(rbd_dev, "unable to get image name");
4331 /* Look up the snapshot name, and make a copy */
4333 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
4334 if (IS_ERR(snap_name)) {
4335 ret = PTR_ERR(snap_name);
4339 spec->pool_name = pool_name;
4340 spec->image_name = image_name;
4341 spec->snap_name = snap_name;
4351 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
4360 struct ceph_snap_context *snapc;
4364 * We'll need room for the seq value (maximum snapshot id),
4365 * snapshot count, and array of that many snapshot ids.
4366 * For now we have a fixed upper limit on the number we're
4367 * prepared to receive.
4369 size = sizeof (__le64) + sizeof (__le32) +
4370 RBD_MAX_SNAP_COUNT * sizeof (__le64);
4371 reply_buf = kzalloc(size, GFP_KERNEL);
4375 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4376 "rbd", "get_snapcontext", NULL, 0,
4378 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4383 end = reply_buf + ret;
4385 ceph_decode_64_safe(&p, end, seq, out);
4386 ceph_decode_32_safe(&p, end, snap_count, out);
4389 * Make sure the reported number of snapshot ids wouldn't go
4390 * beyond the end of our buffer. But before checking that,
4391 * make sure the computed size of the snapshot context we
4392 * allocate is representable in a size_t.
4394 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
4399 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
4403 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
4409 for (i = 0; i < snap_count; i++)
4410 snapc->snaps[i] = ceph_decode_64(&p);
4412 ceph_put_snap_context(rbd_dev->header.snapc);
4413 rbd_dev->header.snapc = snapc;
4415 dout(" snap context seq = %llu, snap_count = %u\n",
4416 (unsigned long long)seq, (unsigned int)snap_count);
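/*
 * Reply layout implied by the decode above (editorial summary):
 *
 *   __le64 seq;                   maximum snapshot id
 *   __le32 snap_count;
 *   __le64 snaps[snap_count];
 *
 * With at most RBD_MAX_SNAP_COUNT (510) ids, the reply occupies at
 * most 8 + 4 + 510 * 8 = 4092 bytes, so the buffer allocated above
 * fits in a single 4 KiB page.
 */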
4423 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
4434 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
4435 reply_buf = kmalloc(size, GFP_KERNEL);
4437 return ERR_PTR(-ENOMEM);
4439 snapid = cpu_to_le64(snap_id);
4440 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4441 "rbd", "get_snapshot_name",
4442 &snapid, sizeof (snapid),
4444 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4446 snap_name = ERR_PTR(ret);
4451 end = reply_buf + ret;
4452 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4453 if (IS_ERR(snap_name))
4456 dout(" snap_id 0x%016llx snap_name = %s\n",
4457 (unsigned long long)snap_id, snap_name);
4464 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
4466 bool first_time = rbd_dev->header.object_prefix == NULL;
4469 ret = rbd_dev_v2_image_size(rbd_dev);
4474 ret = rbd_dev_v2_header_onetime(rbd_dev);
4480 * If the image supports layering, get the parent info. We
4481 * need to probe the first time regardless. Thereafter we
4482 * only need to do so if there's a parent, to see if it has
4483 * disappeared due to the mapped image getting flattened.
4485 if (rbd_dev->header.features & RBD_FEATURE_LAYERING &&
4486 (first_time || rbd_dev->parent_spec)) {
4489 ret = rbd_dev_v2_parent_info(rbd_dev);
4494 * Print a warning if this is the initial probe and
4495 * the image has a parent. Don't print it if the
4496 * image now being probed is itself a parent. We
4497 * can tell at this point because we won't know its
4498 * pool name yet (just its pool id).
4500 warn = rbd_dev->parent_spec && rbd_dev->spec->pool_name;
4501 if (first_time && warn)
4502 rbd_warn(rbd_dev, "WARNING: kernel layering "
4503 "is EXPERIMENTAL!");
4506 if (rbd_dev->spec->snap_id == CEPH_NOSNAP)
4507 if (rbd_dev->mapping.size != rbd_dev->header.image_size)
4508 rbd_dev->mapping.size = rbd_dev->header.image_size;
4510 ret = rbd_dev_v2_snap_context(rbd_dev);
4511 dout("rbd_dev_v2_snap_context returned %d\n", ret);
4516 static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
4521 dev = &rbd_dev->dev;
4522 dev->bus = &rbd_bus_type;
4523 dev->type = &rbd_device_type;
4524 dev->parent = &rbd_root_dev;
4525 dev->release = rbd_dev_device_release;
4526 dev_set_name(dev, "%d", rbd_dev->dev_id);
4527 ret = device_register(dev);
4532 static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
4534 device_unregister(&rbd_dev->dev);
4538 * Get a unique rbd identifier for the given new rbd_dev, and add
4539 * the rbd_dev to the global list.
4541 static int rbd_dev_id_get(struct rbd_device *rbd_dev)
4545 new_dev_id = ida_simple_get(&rbd_dev_id_ida,
4546 0, minor_to_rbd_dev_id(1 << MINORBITS),
4551 rbd_dev->dev_id = new_dev_id;
4553 spin_lock(&rbd_dev_list_lock);
4554 list_add_tail(&rbd_dev->node, &rbd_dev_list);
4555 spin_unlock(&rbd_dev_list_lock);
4557 dout("rbd_dev %p given dev id %d\n", rbd_dev, rbd_dev->dev_id);
4563 * Remove an rbd_dev from the global list, and record that its
4564 * identifier is no longer in use.
4566 static void rbd_dev_id_put(struct rbd_device *rbd_dev)
4568 spin_lock(&rbd_dev_list_lock);
4569 list_del_init(&rbd_dev->node);
4570 spin_unlock(&rbd_dev_list_lock);
4572 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
4574 dout("rbd_dev %p released dev id %d\n", rbd_dev, rbd_dev->dev_id);
4578 * Skips over white space at *buf, and updates *buf to point to the
4579 * first found non-space character (if any). Returns the length of
4580 * the token (string of non-white space characters) found. Note
4581 * that *buf must be terminated with '\0'.
4583 static inline size_t next_token(const char **buf)
4586 * These are the characters that produce nonzero for
4587 * isspace() in the "C" and "POSIX" locales.
4589 const char *spaces = " \f\n\r\t\v";
4591 *buf += strspn(*buf, spaces); /* Find start of token */
4593 return strcspn(*buf, spaces); /* Return token length */
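/*
 * Example (editorial): if *buf points at "  rbd image1", next_token()
 * advances *buf past the leading spaces to point at "rbd image1" and
 * returns 3, the length of the first token ("rbd"). Note that it does
 * not consume the token itself; callers advance *buf past it.
 */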
4597 * Finds the next token in *buf, and if the provided token buffer is
4598 * big enough, copies the found token into it. The result, if
4599 * copied, is guaranteed to be terminated with '\0'. Note that *buf
4600 * must be terminated with '\0' on entry.
4602 * Returns the length of the token found (not including the '\0').
4603 * Return value will be 0 if no token is found, and it will be >=
4604 * token_size if the token would not fit.
4606 * The *buf pointer will be updated to point beyond the end of the
4607 * found token. Note that this occurs even if the token buffer is
4608 * too small to hold it.
4610 static inline size_t copy_token(const char **buf,
4616 len = next_token(buf);
4617 if (len < token_size) {
4618 memcpy(token, *buf, len);
4619 *(token + len) = '\0';
/*
 * Finds the next token in *buf, dynamically allocates a buffer big
 * enough to hold a copy of it, and copies the token into the new
 * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
 * that a duplicate buffer is created even for a zero-length token.
 *
 * Returns a pointer to the newly-allocated duplicate, or a null
 * pointer if memory for the duplicate was not available.  If
 * the lenp argument is a non-null pointer, the length of the token
 * (not including the '\0') is returned in *lenp.
 *
 * If successful, the *buf pointer will be updated to point beyond
 * the end of the found token.
 *
 * Note: uses GFP_KERNEL for allocation.
 */
static inline char *dup_token(const char **buf, size_t *lenp)
{
	char *dup;
	size_t len;

	len = next_token(buf);
	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
	if (!dup)
		return NULL;
	*(dup + len) = '\0';
	*buf += len;

	if (lenp)
		*lenp = len;

	return dup;
}
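/*
 * Illustrative usage sketch (not part of the driver, compiled out):
 * pulling two whitespace-separated tokens from a buffer the way the
 * "rbd add" parsing below does.  The token values are made up for
 * the example.
 */
#if 0
static void dup_token_example(void)
{
	const char *buf = "rbd myimage";
	size_t len;
	char *pool;
	char *image;

	pool = dup_token(&buf, &len);	/* pool == "rbd", len == 3 */
	image = dup_token(&buf, NULL);	/* image == "myimage" */

	kfree(image);
	kfree(pool);
}
#endif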
/*
 * Parse the options provided for an "rbd add" (i.e., rbd image
 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
 * and the data written is passed here via a NUL-terminated buffer.
 * Returns 0 if successful or an error code otherwise.
 *
 * The information extracted from these options is recorded in
 * the other parameters which return dynamically-allocated
 * structures:
 *  ceph_opts
 *	The address of a pointer that will refer to a ceph options
 *	structure.  Caller must release the returned pointer using
 *	ceph_destroy_options() when it is no longer needed.
 *  rbd_opts
 *	Address of an rbd options pointer.  Fully initialized by
 *	this function; caller must release with kfree().
 *  rbd_spec
 *	Address of an rbd image specification pointer.  Fully
 *	initialized by this function based on parsed options.
 *	Caller must release with rbd_spec_put().
 *
 * The options passed take this form:
 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
 * where:
 *  <mon_addrs>
 *	A comma-separated list of one or more monitor addresses.
 *	A monitor address is an ip address, optionally followed
 *	by a port number (separated by a colon).
 *	  I.e.:  ip1[:port1][,ip2[:port2]...]
 *  <options>
 *	A comma-separated list of ceph and/or rbd options.
 *  <pool_name>
 *	The name of the rados pool containing the rbd image.
 *  <image_name>
 *	The name of the image in that pool to map.
 *  <snap_id>
 *	An optional snapshot id.  If provided, the mapping will
 *	present data from the image at the time that snapshot was
 *	created.  The image head is used if no snapshot id is
 *	provided.  Snapshot mappings are always read-only.
 */
static int rbd_add_parse_args(const char *buf,
				struct ceph_options **ceph_opts,
				struct rbd_options **opts,
				struct rbd_spec **rbd_spec)
{
	size_t len;
	char *options;
	const char *mon_addrs;
	char *snap_name;
	size_t mon_addrs_size;
	struct rbd_spec *spec = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct ceph_options *copts;
	int ret;

	/* The first four tokens are required */

	len = next_token(&buf);
	if (!len) {
		rbd_warn(NULL, "no monitor address(es) provided");
		return -EINVAL;
	}
	mon_addrs = buf;
	mon_addrs_size = len + 1;
	buf += len;

	ret = -EINVAL;
	options = dup_token(&buf, NULL);
	if (!options)
		return -ENOMEM;
	if (!*options) {
		rbd_warn(NULL, "no options provided");
		goto out_err;
	}

	spec = rbd_spec_alloc();
	if (!spec)
		goto out_mem;

	spec->pool_name = dup_token(&buf, NULL);
	if (!spec->pool_name)
		goto out_mem;
	if (!*spec->pool_name) {
		rbd_warn(NULL, "no pool name provided");
		goto out_err;
	}

	spec->image_name = dup_token(&buf, NULL);
	if (!spec->image_name)
		goto out_mem;
	if (!*spec->image_name) {
		rbd_warn(NULL, "no image name provided");
		goto out_err;
	}

	/*
	 * Snapshot name is optional; default is to use "-"
	 * (indicating the head/no snapshot).
	 */
	len = next_token(&buf);
	if (!len) {
		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
		ret = -ENAMETOOLONG;
		goto out_err;
	}
	snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!snap_name)
		goto out_mem;
	*(snap_name + len) = '\0';
	spec->snap_name = snap_name;

	/* Initialize all rbd options to the defaults */

	rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
	if (!rbd_opts)
		goto out_mem;

	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;

	copts = ceph_parse_options(options, mon_addrs,
					mon_addrs + mon_addrs_size - 1,
					parse_rbd_opts_token, rbd_opts);
	if (IS_ERR(copts)) {
		ret = PTR_ERR(copts);
		goto out_err;
	}
	kfree(options);

	*ceph_opts = copts;
	*opts = rbd_opts;
	*rbd_spec = spec;

	return 0;
out_mem:
	ret = -ENOMEM;
out_err:
	kfree(rbd_opts);
	rbd_spec_put(spec);
	kfree(options);

	return ret;
}
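/*
 * Example (hypothetical names and addresses, shown only to make the
 * token layout concrete):
 *
 *   # echo "1.2.3.4:6789 name=admin mypool myimage" > /sys/bus/rbd/add
 *
 * parses as mon_addrs "1.2.3.4:6789", options "name=admin",
 * pool_name "mypool", and image_name "myimage", with snap_name
 * defaulting to RBD_SNAP_HEAD_NAME ("-") since none was supplied.
 */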
/*
 * Return pool id (>= 0) or a negative error code.
 */
static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
{
	u64 newest_epoch;
	unsigned long timeout = rbdc->client->options->mount_timeout * HZ;
	int tries = 0;
	int ret;

again:
	ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
	if (ret == -ENOENT && tries++ < 1) {
		ret = ceph_monc_do_get_version(&rbdc->client->monc, "osdmap",
					       &newest_epoch);
		if (ret < 0)
			return ret;

		if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
			ceph_monc_request_next_osdmap(&rbdc->client->monc);
			(void) ceph_monc_wait_osdmap(&rbdc->client->monc,
						     newest_epoch, timeout);
			goto again;
		} else {
			/* the osdmap we have is new enough */
			return -ENOENT;
		}
	}

	return ret;
}
/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	char *object_name;
	void *response;
	char *image_id;

	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
	 * need to fetch the image id again in this case.  We
	 * do still need to set the image format though.
	 */
	if (rbd_dev->spec->image_id) {
		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;

		return 0;
	}

	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
	size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
	object_name = kmalloc(size, GFP_NOIO);
	if (!object_name)
		return -ENOMEM;
	sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
	dout("rbd id object name is %s\n", object_name);

	/* Response will be an encoded string, which includes a length */

	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

	/* If it doesn't exist we'll assume it's a format 1 image */

	ret = rbd_obj_method_sync(rbd_dev, object_name,
				"rbd", "get_id", NULL, 0,
				response, RBD_IMAGE_ID_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret == -ENOENT) {
		image_id = kstrdup("", GFP_KERNEL);
		ret = image_id ? 0 : -ENOMEM;
		if (!ret)
			rbd_dev->image_format = 1;
	} else if (ret > sizeof (__le32)) {
		void *p = response;

		image_id = ceph_extract_encoded_string(&p, p + ret,
						NULL, GFP_NOIO);
		ret = PTR_ERR_OR_ZERO(image_id);
		if (!ret)
			rbd_dev->image_format = 2;
	} else {
		ret = -EINVAL;
	}

	if (!ret) {
		rbd_dev->spec->image_id = image_id;
		dout("image_id is %s\n", image_id);
	}
out:
	kfree(response);
	kfree(object_name);

	return ret;
}
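/*
 * Example (hypothetical image name, using the RBD_ID_PREFIX defined
 * in rbd_types.h): for an image named "myimage" the id object is
 * "rbd_id.myimage".  A format 1 image has no such object, which is
 * why -ENOENT above selects format 1 with an empty image id.
 */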
/*
 * Undo whatever state changes are made by v1 or v2 header info
 * calls.
 */
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
	struct rbd_image_header *header;

	/* Drop parent reference unless it's already been done (or none) */

	if (rbd_dev->parent_overlap)
		rbd_dev_parent_put(rbd_dev);

	/* Free dynamic fields from the header, then zero it out */

	header = &rbd_dev->header;
	ceph_put_snap_context(header->snapc);
	kfree(header->snap_sizes);
	kfree(header->snap_names);
	kfree(header->object_prefix);
	memset(header, 0, sizeof (*header));
}
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret)
		goto out_err;

	/*
	 * Get and check the features for the image.  Currently the
	 * features are assumed to never change.
	 */
	ret = rbd_dev_v2_features(rbd_dev);
	if (ret)
		goto out_err;

	/* If the image supports fancy striping, get its parameters */

	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}
	/* No support for crypto and compression type format 2 images */

	return 0;
out_err:
	rbd_dev->header.features = 0;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;

	return ret;
}
static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
{
	struct rbd_device *parent = NULL;
	struct rbd_spec *parent_spec;
	struct rbd_client *rbdc;
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;
	/*
	 * We need to pass a reference to the client and the parent
	 * spec when creating the parent rbd_dev.  Images related by
	 * parent/child relationships always share both.
	 */
	parent_spec = rbd_spec_get(rbd_dev->parent_spec);
	rbdc = __rbd_get_client(rbd_dev->rbd_client);

	ret = -ENOMEM;
	parent = rbd_dev_create(rbdc, parent_spec);
	if (!parent)
		goto out_err;

	ret = rbd_dev_image_probe(parent, false);
	if (ret < 0)
		goto out_err;
	rbd_dev->parent = parent;
	atomic_set(&rbd_dev->parent_ref, 1);

	return 0;
out_err:
	if (parent) {
		rbd_dev_unparent(rbd_dev);
		kfree(rbd_dev->header_name);
		rbd_dev_destroy(parent);
	} else {
		rbd_put_client(rbdc);
		rbd_spec_put(parent_spec);
	}

	return ret;
}
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;

	/* Get an id and fill in device name. */

	ret = rbd_dev_id_get(rbd_dev);
	if (ret)
		return ret;

	BUILD_BUG_ON(DEV_NAME_LEN
			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);

	/* Record our major and minor device numbers. */

	if (!single_major) {
		ret = register_blkdev(0, rbd_dev->name);
		if (ret < 0)
			goto err_out_id;

		rbd_dev->major = ret;
		rbd_dev->minor = 0;
	} else {
		rbd_dev->major = rbd_major;
		rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
	}

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		goto err_out_disk;

	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
	set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);

	ret = rbd_bus_add_dev(rbd_dev);
	if (ret)
		goto err_out_mapping;

	/* Everything's ready.  Announce the disk to the world. */

	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	add_disk(rbd_dev->disk);

	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long) rbd_dev->mapping.size);

	return ret;

err_out_mapping:
	rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);

	return ret;
}
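/*
 * For example, dev_id 0 yields the name "rbd0" and hence /dev/rbd0.
 * With single_major=Y every mapping shares rbd_major and the minor
 * is the dev id shifted by RBD_SINGLE_MAJOR_PART_SHIFT to leave room
 * for partition minors; otherwise each mapping registers a major of
 * its own and uses minor 0.
 */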
static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	size_t size;

	/* Record the header object name for this rbd image. */

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
	else
		size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);

	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name)
		return -ENOMEM;

	if (rbd_dev->image_format == 1)
		sprintf(rbd_dev->header_name, "%s%s",
			spec->image_name, RBD_SUFFIX);
	else
		sprintf(rbd_dev->header_name, "%s%s",
			RBD_HEADER_PREFIX, spec->image_id);

	return 0;
}
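/*
 * Example (hypothetical names, using the RBD_SUFFIX and
 * RBD_HEADER_PREFIX definitions from rbd_types.h): a format 1 image
 * named "myimage" gets header object "myimage.rbd", while a format 2
 * image with id "1234abcd" gets header object "rbd_header.1234abcd".
 */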
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	rbd_dev_unprobe(rbd_dev);
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	rbd_dev_destroy(rbd_dev);
}
/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
{
	int ret;

	/*
	 * Get the id from the image id object.  Unless there's an
	 * error, rbd_dev->spec->image_id will be filled in with
	 * a dynamically-allocated string, and rbd_dev->image_format
	 * will be set to either 1 or 2.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;
	rbd_assert(rbd_dev->spec->image_id);
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	if (mapping) {
		ret = rbd_dev_header_watch_sync(rbd_dev);
		if (ret)
			goto out_header_name;
	}

	if (rbd_dev->image_format == 1)
		ret = rbd_dev_v1_header_info(rbd_dev);
	else
		ret = rbd_dev_v2_header_info(rbd_dev);
	if (ret)
		goto err_out_watch;

	ret = rbd_dev_spec_update(rbd_dev);
	if (ret)
		goto err_out_probe;

	ret = rbd_dev_probe_parent(rbd_dev);
	if (ret)
		goto err_out_probe;

	dout("discovered format %u image, header name is %s\n",
		rbd_dev->image_format, rbd_dev->header_name);

	return 0;
err_out_probe:
	rbd_dev_unprobe(rbd_dev);
err_out_watch:
	if (mapping)
		rbd_dev_header_unwatch_sync(rbd_dev);
out_header_name:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	dout("probe failed, returning %d\n", ret);

	return ret;
}
static ssize_t do_rbd_add(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	bool read_only;
	int rc = -ENOMEM;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto err_out_module;
	read_only = rbd_opts->read_only;
	kfree(rbd_opts);
	rbd_opts = NULL;	/* done with this */

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}

	/* pick the pool */
	rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
	if (rc < 0)
		goto err_out_client;
	spec->pool_id = (u64)rc;

	/* The ceph file layout needs to fit pool id in 32 bits */

	if (spec->pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "pool id too large (%llu > %u)\n",
				(unsigned long long)spec->pool_id, U32_MAX);
		rc = -EIO;
		goto err_out_client;
	}

	rc = -ENOMEM;
	rbd_dev = rbd_dev_create(rbdc, spec);
	if (!rbd_dev)
		goto err_out_client;
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */

	rc = rbd_dev_image_probe(rbd_dev, true);
	if (rc < 0)
		goto err_out_rbd_dev;

	/* If we are mapping a snapshot it must be marked read-only */

	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		read_only = true;
	rbd_dev->mapping.read_only = read_only;

	rc = rbd_dev_device_setup(rbd_dev);
	if (rc) {
		/*
		 * rbd_dev_header_unwatch_sync() can't be moved into
		 * rbd_dev_image_release() without refactoring, see
		 * commit 1f3ef78861ac.
		 */
		rbd_dev_header_unwatch_sync(rbd_dev);
		rbd_dev_image_release(rbd_dev);
		goto err_out_module;
	}

	return count;

err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	rbd_spec_put(spec);
err_out_module:
	module_put(THIS_MODULE);

	dout("Error adding device %s\n", buf);

	return (ssize_t)rc;
}
static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_add(bus, buf, count);
}

static ssize_t rbd_add_single_major(struct bus_type *bus,
				    const char *buf,
				    size_t count)
{
	return do_rbd_add(bus, buf, count);
}
static void rbd_dev_device_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	rbd_free_disk(rbd_dev);
	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	rbd_dev_mapping_clear(rbd_dev);
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);
}
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		first->parent = NULL;
		first->parent_overlap = 0;
		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}
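/*
 * For a layered chain mapped -> parent1 -> parent2 (where parent2
 * has no parent of its own), the inner loop above stops with
 * first == parent1 and second == parent2, so the deepest ancestor
 * is released first; the outer loop then repeats until the mapped
 * device itself has no parent left.
 */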
static ssize_t do_rbd_remove(struct bus_type *bus,
			     const char *buf,
			     size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct list_head *tmp;
	int dev_id;
	unsigned long ul;
	bool already = false;
	int ret;

	ret = kstrtoul(buf, 10, &ul);
	if (ret)
		return ret;

	/* convert to int; abort if we lost anything in the conversion */
	dev_id = (int)ul;
	if (dev_id != ul)
		return -EINVAL;

	ret = -ENOENT;
	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			ret = 0;
			break;
		}
	}
	if (!ret) {
		spin_lock_irq(&rbd_dev->lock);
		if (rbd_dev->open_count)
			ret = -EBUSY;
		else
			already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
							&rbd_dev->flags);
		spin_unlock_irq(&rbd_dev->lock);
	}
	spin_unlock(&rbd_dev_list_lock);
	if (ret < 0 || already)
		return ret;

	rbd_dev_header_unwatch_sync(rbd_dev);
	/*
	 * flush remaining watch callbacks - these must be complete
	 * before the osd_client is shutdown
	 */
	dout("%s: flushing notifies", __func__);
	ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);

	/*
	 * Don't free anything from rbd_dev->disk until after all
	 * notifies are completely processed. Otherwise
	 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
	 * in a potential use after free of rbd_dev->disk or rbd_dev.
	 */
	rbd_bus_del_dev(rbd_dev);
	rbd_dev_image_release(rbd_dev);
	module_put(THIS_MODULE);

	return count;
}
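/*
 * Example (see Documentation/ABI/testing/sysfs-bus-rbd): writing a
 * device id unmaps that device, so
 *
 *   # echo 0 > /sys/bus/rbd/remove
 *
 * removes /dev/rbd0, failing with -EBUSY while it is still open.
 */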
static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_remove(bus, buf, count);
}

static ssize_t rbd_remove_single_major(struct bus_type *bus,
				       const char *buf,
				       size_t count)
{
	return do_rbd_remove(bus, buf, count);
}
/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}
static int rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = kmem_cache_create("rbd_img_request",
					sizeof (struct rbd_img_request),
					__alignof__(struct rbd_img_request),
					0, NULL);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
					sizeof (struct rbd_obj_request),
					__alignof__(struct rbd_obj_request),
					0, NULL);
	if (!rbd_obj_request_cache)
		goto out_err;

	rbd_assert(!rbd_segment_name_cache);
	rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
					CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
	if (rbd_segment_name_cache)
		return 0;
out_err:
	if (rbd_obj_request_cache) {
		kmem_cache_destroy(rbd_obj_request_cache);
		rbd_obj_request_cache = NULL;
	}

	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;

	return -ENOMEM;
}
static void rbd_slab_exit(void)
{
	rbd_assert(rbd_segment_name_cache);
	kmem_cache_destroy(rbd_segment_name_cache);
	rbd_segment_name_cache = NULL;

	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}
static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}

	rc = rbd_slab_init();
	if (rc)
		return rc;

	if (single_major) {
		rbd_major = register_blkdev(0, RBD_DRV_NAME);
		if (rbd_major < 0) {
			rc = rbd_major;
			goto err_out_slab;
		}
	}

	rc = rbd_sysfs_init();
	if (rc)
		goto err_out_blkdev;

	if (single_major)
		pr_info("loaded (major %d)\n", rbd_major);
	else
		pr_info("loaded\n");

	return 0;

err_out_blkdev:
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
err_out_slab:
	rbd_slab_exit();

	return rc;
}
static void __exit rbd_exit(void)
{
	ida_destroy(&rbd_dev_id_ida);
	rbd_sysfs_cleanup();
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
	rbd_slab_exit();
}
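/*
 * Usage sketch (assuming the driver is built as the rbd.ko module):
 *
 *   # modprobe rbd single_major=Y
 *
 * makes all mapped images share one major number, with minors derived
 * from device ids; add/remove requests then go through
 * rbd_add_single_major() and rbd_remove_single_major() above.
 */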
module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
MODULE_LICENSE("GPL");