/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

                 Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>

#include "rbd_types.h"
#define RBD_DEBUG	/* Activate rbd_assert() calls */
/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
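
/*
 * Editorial example (illustrative only, not used by the driver):
 * converting between sectors and bytes with the symbols above.  A
 * request starting at sector 8 and spanning 16 sectors covers bytes
 * [4096, 12288):
 *
 *	u64 byte_offset = (u64)8 << SECTOR_SHIFT;	// 4096
 *	u64 byte_length = 16 * SECTOR_SIZE;		// 8192
 */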
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value, returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}
/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}
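
/*
 * Editorial sketch (illustrative, not part of the driver): these two
 * helpers implement a saturating reference count around an atomic_t.
 * A typical pairing, in the style of parent_ref below, with a
 * hypothetical counter "ref":
 *
 *	static atomic_t ref = ATOMIC_INIT(1);
 *
 *	if (atomic_inc_return_safe(&ref) > 0) {
 *		// ... use the referenced object ...
 *		if (atomic_dec_return_safe(&ref) < 0)
 *			pr_warn("reference underflow\n");
 *	}
 *
 * Once the counter has been driven to 0, further increments fail, so
 * a stale user cannot revive the object it protects.
 */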
#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN \
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64
#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
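
/*
 * Editorial note: MAX_INT_FORMAT_WIDTH is a conservative bound on the
 * decimal digits in an int.  Each byte contributes at most
 * log10(256) ~= 2.41 digits, and (5 * sizeof (int)) / 2 rounds that
 * up to 2.5 digits per byte; the +1 leaves room for a sign.  For a
 * 4-byte int this evaluates to 11, which covers "-2147483648"
 * (11 characters), so DEV_NAME_LEN is comfortably large enough.
 */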
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;
	u64 stripe_unit;
	u64 stripe_count;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};
/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};
struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);
enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};
struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request	*obj_request;	/* STAT op */
		struct {
			struct rbd_img_request	*img_request;
			u64			img_offset;
			/* links for img_request->obj_requests list */
			struct list_head	links;
		};
	};
	u32			which;		/* posn image request list */

	enum obj_request_type	type;
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};
enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
};
struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};
#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
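
/*
 * Editorial sketch (illustrative, not part of the driver): a typical
 * traversal of an image request's object requests with the macros
 * above, e.g. summing transfer counts the way
 * rbd_img_request_complete() does:
 *
 *	struct rbd_obj_request *obj_request;
 *	u64 xferred = 0;
 *
 *	for_each_obj_request(img_request, obj_request)
 *		xferred += obj_request->xferred;
 *
 * The _safe variant walks the list in reverse and tolerates deletion
 * of the current entry, which is what teardown paths need.
 */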
struct rbd_mapping {
	u64			size;
	u64			features;
	bool			read_only;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;

	struct ceph_file_layout	layout;

	struct ceph_osd_event	*watch_event;
	struct rbd_obj_request	*watch_request;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* protects updating the header */
	struct rw_semaphore	header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};
/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};
static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;
static struct kmem_cache	*rbd_segment_name_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);
/*
 * Default to false for now, as single-major requires >= 0.75 version of
 * userspace rbd utility.
 */
static bool single_major = false;
module_param(single_major, bool, S_IRUGO);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");
static int rbd_img_request_submit(struct rbd_img_request *img_request);

static void rbd_dev_device_release(struct device *dev);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
static void rbd_spec_put(struct rbd_spec *spec);
static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
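
/*
 * Editorial example: with RBD_SINGLE_MAJOR_PART_SHIFT == 4, each
 * device owns 16 minors.  Device id 3 maps to minor 3 << 4 == 48,
 * and any of minors 48..63 (the whole device plus up to 15
 * partitions) maps back to device id 48 >> 4 == 3.
 */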
static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};
static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =	"rbd",
	.release =	rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt if we haven't yet gotten an image name */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}
#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
		u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}
static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}
static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ret = 0;
	int val;
	bool ro;
	bool ro_changed = false;

	/* get_user() may sleep, so call it before taking rbd_dev->lock */
	if (get_user(val, (int __user *)(arg)))
		return -EFAULT;

	ro = val ? true : false;
	/* Snapshots can't be written to */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	/* prevent others from opening this device */
	if (rbd_dev->open_count > 1) {
		ret = -EBUSY;
		goto out;
	}

	if (rbd_dev->mapping.read_only != ro) {
		rbd_dev->mapping.read_only = ro;
		ro_changed = true;
	}

out:
	spin_unlock_irq(&rbd_dev->lock);
	/* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
	if (ret == 0 && ro_changed)
		set_disk_ro(rbd_dev->disk, ro ? 1 : 0);

	return ret;
}
static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret = 0;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};
/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}
static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}
/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}
/*
 * mount options
 */
enum {
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	/* Boolean args above */
	Opt_last_bool,
};

static match_table_t rbd_opts_tokens = {
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	/* Boolean args above */
	{-1, NULL}
};

struct rbd_options {
	bool	read_only;
};

#define RBD_READ_ONLY_DEFAULT	false
static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < 0)
		return -EINVAL;

	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) "
			       "at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token,
		     argstr[0].from);
	} else if (token > Opt_last_string && token < Opt_last_bool) {
		dout("got Boolean token %d\n", token);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	default:
		rbd_assert(false);
		break;
	}
	return 0;
}
/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);
	mutex_unlock(&client_mutex);

	return rbdc;
}
/*
 * Destroy ceph client.  Takes rbd_client_list_lock itself in order
 * to remove the client from the client list.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore,
 * release it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}
static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}
static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}
/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	size_t size;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		size_t len;

		len = strnlen(ondisk->object_prefix,
				sizeof (ondisk->object_prefix));
		object_prefix = kmalloc(len + 1, GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
		memcpy(object_prefix, ondisk->object_prefix, len);
		object_prefix[len] = '\0';
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */

		size = snap_count * sizeof (*header->snap_sizes);
		snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		header->crypt_type = ondisk->options.crypt_type;
		header->comp_type = ondisk->options.comp_type;
		/* The rest aren't used for format 1 images */
		header->stripe_unit = 0;
		header->stripe_count = 0;
		header->features = 0;
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	/* Make sure mapping size is consistent with header info */

	if (rbd_dev->spec->snap_id == CEPH_NOSNAP || first_time)
		if (rbd_dev->mapping.size != header->image_size)
			rbd_dev->mapping.size = header->image_size;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}
/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}
/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
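
/*
 * Editorial sketch (illustrative only): looking up a snapshot id in a
 * context whose snaps[] array is {10, 7, 4} (descending, as the osd
 * keeps it):
 *
 *	u32 which = rbd_dev_snap_index(rbd_dev, 7);	// -> 1
 *	which = rbd_dev_snap_index(rbd_dev, 5);		// -> BAD_SNAP_INDEX
 *
 * bsearch() works here only because snapid_compare_reverse() inverts
 * the usual ordering to match the descending array.
 */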
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}
static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}
static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}
static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}
static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}
static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;
	char *name_format;

	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	name_format = "%s.%012llx";
	if (rbd_dev->image_format == 2)
		name_format = "%s.%016llx";
	ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		/* slab memory must go back to its cache, not kfree() */
		kmem_cache_free(rbd_segment_name_cache, name);
		name = NULL;
	}

	return name;
}

static void rbd_segment_name_free(const char *name)
{
	/* The explicit cast here is needed to drop the const qualifier */
	kmem_cache_free(rbd_segment_name_cache, (void *)name);
}
static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}
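
/*
 * Editorial example: with the default object order of 22 (4 MiB
 * objects), an image byte offset of 0x9eb000 falls in segment
 * 0x9eb000 >> 22 == 2, at offset 0x9eb000 & 0x3fffff == 0x1eb000
 * within that object.  A 4 MiB request starting there is truncated
 * by rbd_segment_length() to 0x400000 - 0x1eb000 == 0x215000 bytes
 * so that it ends on the object boundary; the caller issues the
 * remainder against the next object.
 */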
/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}
static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}
/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned long flags;
	void *buf;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, iter) {
			if (pos + bv.bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(&bv, &flags);
				memset(buf + remainder, 0,
				       bv.bv_len - remainder);
				flush_dcache_page(bv.bv_page);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv.bv_len;
		}

		chain = chain->bi_next;
	}
}
/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

		page_offset = offset & ~PAGE_MASK;
		length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
		flush_dcache_page(*page);
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}
/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bio;

	bio = bio_clone(bio_src, gfpmask);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio_advance(bio, offset);
	bio->bi_iter.bi_size = len;

	return bio;
}
/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_iter.bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_iter.bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}
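
/*
 * Editorial sketch (illustrative, not part of the driver): carving
 * two object-sized pieces off one source bio chain, the way
 * rbd_img_request_fill() does for OBJ_REQUEST_BIO requests.  The
 * names original_chain, seg_len1 and seg_len2 are hypothetical:
 *
 *	struct bio *bi = original_chain;
 *	unsigned int off = 0;
 *	struct bio *first, *second;
 *
 *	first = bio_chain_clone_range(&bi, &off, seg_len1, GFP_NOIO);
 *	second = bio_chain_clone_range(&bi, &off, seg_len2, GFP_NOIO);
 *
 * After each call, bi/off have advanced to the first un-cloned byte,
 * so consecutive calls peel off consecutive byte ranges.
 */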
/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
		struct rbd_device *rbd_dev;

		rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked img_data\n",
			obj_request);
	}
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
		struct rbd_device *rbd_dev = NULL;

		if (obj_request_img_data_test(obj_request))
			rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked done\n",
			obj_request);
	}
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}

/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the responses from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
				bool exists)
{
	if (exists)
		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
	smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
	     atomic_read(&img_request->kref.refcount));
	kref_get(&img_request->kref);
}

static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	if (img_request_child_test(img_request))
		kref_put(&img_request->kref, rbd_parent_request_destroy);
	else
		kref_put(&img_request->kref, rbd_img_request_destroy);
}
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}
static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}
static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
				struct rbd_obj_request *obj_request)
{
	dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);

	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}
static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
	dout("%s: img %p\n", __func__, img_request);

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; it's not clear offhand which way is better.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;
		u64 xferred = 0;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;
	}

	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}
/* Caller is responsible for rbd_obj_request_destroy(obj_request) */

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);

	return wait_for_completion_interruptible(&obj_request->completion);
}
/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_WRITE, &img_request->flags);
	smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static void img_request_child_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}
static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
	u64 xferred = obj_request->xferred;
	u64 length = obj_request->length;

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
		xferred, length);
	/*
	 * ENOENT means a hole in the image.  We zero-fill the entire
	 * length of the request.  A short read also implies zero-fill
	 * to the end of the request.  An error requires the whole
	 * length of the request to be reported finished with an error
	 * to the block layer.  In each case we update the xferred
	 * count to indicate the whole request was satisfied.
	 */
	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
	if (obj_request->result == -ENOENT) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, 0);
		else
			zero_pages(obj_request->pages, 0, length);
		obj_request->result = 0;
	} else if (xferred < length && !obj_request->result) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, xferred);
		else
			zero_pages(obj_request->pages, xferred, length);
	}
	obj_request->xferred = length;
	obj_request_done_set(obj_request);
}
static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}

static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}
static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_device *rbd_dev = NULL;
	bool layered = false;

	if (obj_request_img_data_test(obj_request)) {
		img_request = obj_request->img_request;
		layered = img_request && img_request_layered_test(img_request);
		rbd_dev = img_request->rbd_dev;
	}

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
	if (layered && obj_request->result == -ENOENT &&
			obj_request->img_offset < rbd_dev->parent_overlap)
		rbd_img_parent_read(obj_request);
	else if (img_request)
		rbd_img_obj_request_read_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}
static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short write.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	obj_request_done_set(obj_request);
}

/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
				struct ceph_msg *msg)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	u16 opcode;

	dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
	rbd_assert(osd_req == obj_request->osd_req);
	if (obj_request_img_data_test(obj_request)) {
		rbd_assert(obj_request->img_request);
		rbd_assert(obj_request->which != BAD_WHICH);
	} else {
		rbd_assert(obj_request->which == BAD_WHICH);
	}

	if (osd_req->r_result < 0)
		obj_request->result = osd_req->r_result;

	rbd_assert(osd_req->r_num_ops <= CEPH_OSD_MAX_OP);

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to blk_end_request(), which takes an unsigned int.
	 */
	obj_request->xferred = osd_req->r_reply_op_len[0];
	rbd_assert(obj_request->xferred < (u64)UINT_MAX);

	opcode = osd_req->r_ops[0].op;
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request);
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE);
		/* fall through */
	case CEPH_OSD_OP_WRITE:
		rbd_osd_write_callback(obj_request);
		break;
	case CEPH_OSD_OP_STAT:
		rbd_osd_stat_callback(obj_request);
		break;
	case CEPH_OSD_OP_CALL:
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		rbd_osd_trivial_callback(obj_request);
		break;
	default:
		rbd_warn(NULL, "%s: unsupported op %hu\n",
			obj_request->object_name, (unsigned short) opcode);
		break;
	}

	if (obj_request_done_test(obj_request))
		rbd_obj_request_complete(obj_request);
}
static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	u64 snap_id;

	rbd_assert(osd_req != NULL);

	snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			NULL, snap_id, NULL);
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	struct ceph_snap_context *snapc;
	struct timespec mtime = CURRENT_TIME;

	rbd_assert(osd_req != NULL);

	snapc = img_request ? img_request->snapc : NULL;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			snapc, CEPH_NOSNAP, &mtime);
}
/*
 * Create an osd request.  A read request has one osd op (read).
 * A write request has either one (watch) or two (hint+write) osd ops.
 * (All rbd data writes are prefixed with an allocation hint op, but
 * technically osd watch is a write request, hence this distinction.)
 */
static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					bool write_request,
					unsigned int num_ops,
					struct rbd_obj_request *obj_request)
{
	struct ceph_snap_context *snapc = NULL;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	if (obj_request_img_data_test(obj_request)) {
		struct rbd_img_request *img_request = obj_request->img_request;

		rbd_assert(write_request ==
				img_request_write_test(img_request));
		if (write_request)
			snapc = img_request->snapc;
	}

	rbd_assert(num_ops == 1 || (write_request && num_ops == 2));

	/* Allocate and initialize the request, for the num_ops ops */

	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
					  GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	if (write_request)
		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	else
		osd_req->r_flags = CEPH_OSD_FLAG_READ;

	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
	ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);

	return osd_req;
}
/*
 * Create a copyup osd request based on the information in the
 * object request supplied.  A copyup request has three osd ops,
 * a copyup method call, a hint op, and a write op.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_assert(img_request_write_test(img_request));

	/* Allocate and initialize the request, for the three ops */

	snapc = img_request->snapc;
	rbd_dev = img_request->rbd_dev;
	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, 3, false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
	ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);

	return osd_req;
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}
/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
						u64 offset, u64 length,
						enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;
	size_t size;
	char *name;

	rbd_assert(obj_request_type_valid(type));

	size = strlen(object_name) + 1;
	name = kmalloc(size, GFP_KERNEL);
	if (!name)
		return NULL;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
	if (!obj_request) {
		kfree(name);
		return NULL;
	}

	obj_request->object_name = memcpy(name, object_name, size);
	obj_request->offset = offset;
	obj_request->length = length;
	obj_request->flags = 0;
	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
		offset, length, (int)type, obj_request);

	return obj_request;
}
static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
	case OBJ_REQUEST_PAGES:
		if (obj_request->pages)
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
	}

	kfree(obj_request->object_name);
	obj_request->object_name = NULL;
	kmem_cache_free(rbd_obj_request_cache, obj_request);
}
/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}

/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow\n");
}
/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * We must get the reference before checking for the overlap to
 * coordinate properly with zeroing the parent overlap in
 * rbd_dev_v2_parent_info() when an image gets flattened.  We
 * drop it again if there is no overlap.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return false;

	counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	if (counter > 0 && rbd_dev->parent_overlap)
		return true;

	/* Image was flattened, but parent is not yet torn down */

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow\n");

	return false;
}
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					u64 offset, u64 length,
					bool write_request)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_ATOMIC);
	if (!img_request)
		return NULL;

	if (write_request) {
		down_read(&rbd_dev->header_rwsem);
		ceph_get_snap_context(rbd_dev->header.snapc);
		up_read(&rbd_dev->header_rwsem);
	}

	img_request->rq = NULL;
	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
	img_request->flags = 0;
	if (write_request) {
		img_request_write_set(img_request);
		img_request->snapc = rbd_dev->header.snapc;
	} else {
		img_request->snap_id = rbd_dev->spec->snap_id;
	}
	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);
	spin_lock_init(&img_request->completion_lock);
	img_request->next_completion = 0;
	img_request->callback = NULL;
	img_request->result = 0;
	img_request->obj_request_count = 0;
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
		write_request ? "write" : "read", offset, length,
		img_request);

	return img_request;
}
static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (img_request_write_test(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}
static struct rbd_img_request *rbd_parent_request_create(
					struct rbd_obj_request *obj_request,
					u64 img_offset, u64 length)
{
	struct rbd_img_request *parent_request;
	struct rbd_device *rbd_dev;

	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;

	parent_request = rbd_img_request_create(rbd_dev->parent,
						img_offset, length, false);
	if (!parent_request)
		return NULL;

	img_request_child_set(parent_request);
	rbd_obj_request_get(obj_request);
	parent_request->obj_request = obj_request;

	return parent_request;
}

static void rbd_parent_request_destroy(struct kref *kref)
{
	struct rbd_img_request *parent_request;
	struct rbd_obj_request *orig_request;

	parent_request = container_of(kref, struct rbd_img_request, kref);
	orig_request = parent_request->obj_request;

	parent_request->obj_request = NULL;
	rbd_obj_request_put(orig_request);
	img_request_child_clear(parent_request);

	rbd_img_request_destroy(kref);
}
static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	unsigned int xferred;
	int result;
	bool more;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
	xferred = (unsigned int)obj_request->xferred;
	result = obj_request->result;
	if (result) {
		struct rbd_device *rbd_dev = img_request->rbd_dev;

		rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n",
			img_request_write_test(img_request) ? "write" : "read",
			obj_request->length, obj_request->img_offset,
			obj_request->offset);
		rbd_warn(rbd_dev, "  result %d xferred %x\n",
			result, xferred);
		if (!img_request->result)
			img_request->result = result;
	}

	/* Image object requests don't own their page array */

	if (obj_request->type == OBJ_REQUEST_PAGES) {
		obj_request->pages = NULL;
		obj_request->page_count = 0;
	}

	if (img_request_child_test(img_request)) {
		rbd_assert(img_request->obj_request != NULL);
		more = obj_request->which < img_request->obj_request_count - 1;
	} else {
		rbd_assert(img_request->rq != NULL);
		more = blk_end_request(img_request->rq, result, xferred);
	}

	return more;
}
static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	u32 which = obj_request->which;
	bool more = true;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	rbd_assert(img_request != NULL);
	rbd_assert(img_request->obj_request_count > 0);
	rbd_assert(which != BAD_WHICH);
	rbd_assert(which < img_request->obj_request_count);

	spin_lock_irq(&img_request->completion_lock);
	if (which != img_request->next_completion)
		goto out;

	for_each_obj_request_from(img_request, obj_request) {
		rbd_assert(more);
		rbd_assert(which < img_request->obj_request_count);

		if (!obj_request_done_test(obj_request))
			break;
		more = rbd_img_obj_end_request(obj_request);
		which++;
	}

	rbd_assert(more ^ (which == img_request->obj_request_count));
	img_request->next_completion = which;
out:
	spin_unlock_irq(&img_request->completion_lock);
	rbd_img_request_put(img_request);

	if (!more)
		rbd_img_request_complete(img_request);
}
2224 * Split up an image request into one or more object requests, each
2225 * to a different object. The "type" parameter indicates whether
2226 * "data_desc" is the pointer to the head of a list of bio
2227 * structures, or the base of a page array. In either case this
2228 * function assumes data_desc describes memory sufficient to hold
2229 * all data described by the image request.
2231 static int rbd_img_request_fill(struct rbd_img_request *img_request,
2232 enum obj_request_type type,
2235 struct rbd_device *rbd_dev = img_request->rbd_dev;
2236 struct rbd_obj_request *obj_request = NULL;
2237 struct rbd_obj_request *next_obj_request;
2238 bool write_request = img_request_write_test(img_request);
2239 struct bio *bio_list = NULL;
2240 unsigned int bio_offset = 0;
2241 struct page **pages = NULL;
2246 dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2247 (int)type, data_desc);
2249 opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ;
2250 img_offset = img_request->offset;
2251 resid = img_request->length;
2252 rbd_assert(resid > 0);
2254 if (type == OBJ_REQUEST_BIO) {
2255 bio_list = data_desc;
2256 rbd_assert(img_offset ==
2257 bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
2259 rbd_assert(type == OBJ_REQUEST_PAGES);
2264 struct ceph_osd_request *osd_req;
2265 const char *object_name;
2268 unsigned int which = 0;
2270 object_name = rbd_segment_name(rbd_dev, img_offset);
2273 offset = rbd_segment_offset(rbd_dev, img_offset);
2274 length = rbd_segment_length(rbd_dev, img_offset, resid);
2275 obj_request = rbd_obj_request_create(object_name,
2276 offset, length, type);
2277 /* object request has its own copy of the object name */
2278 rbd_segment_name_free(object_name);
2283 * set obj_request->img_request before creating the
2284 * osd_request so that it gets the right snapc
2286 rbd_img_obj_request_add(img_request, obj_request);
2288 if (type == OBJ_REQUEST_BIO) {
2289 unsigned int clone_size;
2291 rbd_assert(length <= (u64)UINT_MAX);
2292 clone_size = (unsigned int)length;
2293 obj_request->bio_list =
2294 bio_chain_clone_range(&bio_list,
2298 if (!obj_request->bio_list)
2301 unsigned int page_count;
2303 obj_request->pages = pages;
2304 page_count = (u32)calc_pages_for(offset, length);
2305 obj_request->page_count = page_count;
2306 if ((offset + length) & ~PAGE_MASK)
2307 page_count--; /* more on last page */
2308 pages += page_count;
2311 osd_req = rbd_osd_req_create(rbd_dev, write_request,
2312 (write_request ? 2 : 1),
2316 obj_request->osd_req = osd_req;
2317 obj_request->callback = rbd_img_obj_callback;
2318 rbd_img_request_get(img_request);
2320 if (write_request) {
2321 osd_req_op_alloc_hint_init(osd_req, which,
2322 rbd_obj_bytes(&rbd_dev->header),
2323 rbd_obj_bytes(&rbd_dev->header));
2327 osd_req_op_extent_init(osd_req, which, opcode, offset, length,
2329 if (type == OBJ_REQUEST_BIO)
2330 osd_req_op_extent_osd_data_bio(osd_req, which,
2331 obj_request->bio_list, length);
2333 osd_req_op_extent_osd_data_pages(osd_req, which,
2334 obj_request->pages, length,
2335 offset & ~PAGE_MASK, false, false);
2338 rbd_osd_req_format_write(obj_request);
2340 rbd_osd_req_format_read(obj_request);
2342 obj_request->img_offset = img_offset;
2344 img_offset += length;
2351 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2352 rbd_img_obj_request_del(img_request, obj_request);
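/*
 * Illustrative sketch (not driver code) of the per-object arithmetic
 * that rbd_img_request_fill() relies on via the rbd_segment_offset()
 * and rbd_segment_length() calls in the loop above; obj_order defines
 * the object size:
 *
 *	u64 obj_size = 1ULL << rbd_dev->header.obj_order;
 *	u64 offset = img_offset & (obj_size - 1);    (offset in object)
 *	u64 length = min(resid, obj_size - offset);  (clip at object end)
 *
 * With 4 MB objects (order 22), a 2 MB request at image offset 3 MB
 * becomes two object requests: 1 MB at offset 3 MB within the first
 * object, then 1 MB at offset 0 within the next.
 */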
2358 rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
2360 struct rbd_img_request *img_request;
2361 struct rbd_device *rbd_dev;
2362 struct page **pages;
2365 rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2366 rbd_assert(obj_request_img_data_test(obj_request));
2367 img_request = obj_request->img_request;
2368 rbd_assert(img_request);
2370 rbd_dev = img_request->rbd_dev;
2371 rbd_assert(rbd_dev);
2373 pages = obj_request->copyup_pages;
2374 rbd_assert(pages != NULL);
2375 obj_request->copyup_pages = NULL;
2376 page_count = obj_request->copyup_page_count;
2377 rbd_assert(page_count);
2378 obj_request->copyup_page_count = 0;
2379 ceph_release_page_vector(pages, page_count);
2382 * We want the transfer count to reflect the size of the
2383 * original write request. There is no such thing as a
2384 * successful short write, so if the request was successful
2385 * we can just set it to the originally-requested length.
2387 if (!obj_request->result)
2388 obj_request->xferred = obj_request->length;
2390 /* Finish up with the normal image object callback */
2392 rbd_img_obj_callback(obj_request);
2396 rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2398 struct rbd_obj_request *orig_request;
2399 struct ceph_osd_request *osd_req;
2400 struct ceph_osd_client *osdc;
2401 struct rbd_device *rbd_dev;
2402 struct page **pages;
2409 rbd_assert(img_request_child_test(img_request));
2411 /* First get what we need from the image request */
2413 pages = img_request->copyup_pages;
2414 rbd_assert(pages != NULL);
2415 img_request->copyup_pages = NULL;
2416 page_count = img_request->copyup_page_count;
2417 rbd_assert(page_count);
2418 img_request->copyup_page_count = 0;
2420 orig_request = img_request->obj_request;
2421 rbd_assert(orig_request != NULL);
2422 rbd_assert(obj_request_type_valid(orig_request->type));
2423 img_result = img_request->result;
2424 parent_length = img_request->length;
2425 rbd_assert(parent_length == img_request->xferred);
2426 rbd_img_request_put(img_request);
2428 rbd_assert(orig_request->img_request);
2429 rbd_dev = orig_request->img_request->rbd_dev;
2430 rbd_assert(rbd_dev);
2433 * If the overlap has become 0 (most likely because the
2434 * image has been flattened) we need to free the pages
2435 * and re-submit the original write request.
2437 if (!rbd_dev->parent_overlap) {
2438 struct ceph_osd_client *osdc;
2440 ceph_release_page_vector(pages, page_count);
2441 osdc = &rbd_dev->rbd_client->client->osdc;
2442 img_result = rbd_obj_request_submit(osdc, orig_request);
2451 * The original osd request is of no use to us any more.
2452 * We need a new one that can hold the three ops in a copyup
2453 * request. Allocate the new copyup osd request for the
2454 * original request, and release the old one.
2456 img_result = -ENOMEM;
2457 osd_req = rbd_osd_req_create_copyup(orig_request);
2460 rbd_osd_req_destroy(orig_request->osd_req);
2461 orig_request->osd_req = osd_req;
2462 orig_request->copyup_pages = pages;
2463 orig_request->copyup_page_count = page_count;
2465 /* Initialize the copyup op */
2467 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2468 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
2471 /* Then the hint op */
2473 osd_req_op_alloc_hint_init(osd_req, 1, rbd_obj_bytes(&rbd_dev->header),
2474 rbd_obj_bytes(&rbd_dev->header));
2476 /* And the original write request op */
2478 offset = orig_request->offset;
2479 length = orig_request->length;
2480 osd_req_op_extent_init(osd_req, 2, CEPH_OSD_OP_WRITE,
2481 offset, length, 0, 0);
2482 if (orig_request->type == OBJ_REQUEST_BIO)
2483 osd_req_op_extent_osd_data_bio(osd_req, 2,
2484 orig_request->bio_list, length);
2486 osd_req_op_extent_osd_data_pages(osd_req, 2,
2487 orig_request->pages, length,
2488 offset & ~PAGE_MASK, false, false);
2490 rbd_osd_req_format_write(orig_request);
2492 /* All set, send it off. */
2494 orig_request->callback = rbd_img_obj_copyup_callback;
2495 osdc = &rbd_dev->rbd_client->client->osdc;
2496 img_result = rbd_obj_request_submit(osdc, orig_request);
2500 /* Record the error code and complete the request */
2502 orig_request->result = img_result;
2503 orig_request->xferred = 0;
2504 obj_request_done_set(orig_request);
2505 rbd_obj_request_complete(orig_request);
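/*
 * Shape of the copyup request assembled above, for reference (the osd
 * applies the ops in order):
 *
 *	op 0: CEPH_OSD_OP_CALL "rbd" "copyup"  - parent data as payload
 *	op 1: allocation hint                  - expected object/write size
 *	op 2: CEPH_OSD_OP_WRITE                - the original client write
 *
 * The object is thus populated with the parent's data before the
 * client write lands on top of it; the osd-side copyup method is
 * meant to fill in only an object that has no data yet, so racing
 * with another writer is harmless.
 */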
2509 * Read from the parent image the range of data that covers the
2510 * entire target of the given object request. This is used for
2511 * satisfying a layered image write request when the target of an
2512 * object request from the image request does not exist.
2514 * A page array big enough to hold the returned data is allocated
2515 * and supplied to rbd_img_request_fill() as the "data descriptor."
2516 * When the read completes, this page array will be transferred to
2517 * the original object request for the copyup operation.
2519 * If an error occurs, record it as the result of the original
2520 * object request and mark it done so it gets completed.
2522 static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2524 struct rbd_img_request *img_request = NULL;
2525 struct rbd_img_request *parent_request = NULL;
2526 struct rbd_device *rbd_dev;
2529 struct page **pages = NULL;
2533 rbd_assert(obj_request_img_data_test(obj_request));
2534 rbd_assert(obj_request_type_valid(obj_request->type));
2536 img_request = obj_request->img_request;
2537 rbd_assert(img_request != NULL);
2538 rbd_dev = img_request->rbd_dev;
2539 rbd_assert(rbd_dev->parent != NULL);
2542 * Determine the byte range covered by the object in the
2543 * child image to which the original request was to be sent.
2545 img_offset = obj_request->img_offset - obj_request->offset;
2546 length = (u64)1 << rbd_dev->header.obj_order;
2549 * There is no defined parent data beyond the parent
2550 * overlap, so limit what we read at that boundary if necessary.
2553 if (img_offset + length > rbd_dev->parent_overlap) {
2554 rbd_assert(img_offset < rbd_dev->parent_overlap);
2555 length = rbd_dev->parent_overlap - img_offset;
2559 * Allocate a page array big enough to receive the data read
2562 page_count = (u32)calc_pages_for(0, length);
2563 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2564 if (IS_ERR(pages)) {
2565 result = PTR_ERR(pages);
2571 parent_request = rbd_parent_request_create(obj_request,
2572 img_offset, length);
2573 if (!parent_request)
2576 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2579 parent_request->copyup_pages = pages;
2580 parent_request->copyup_page_count = page_count;
2582 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2583 result = rbd_img_request_submit(parent_request);
2587 parent_request->copyup_pages = NULL;
2588 parent_request->copyup_page_count = 0;
2589 parent_request->obj_request = NULL;
2590 rbd_obj_request_put(obj_request);
2593 ceph_release_page_vector(pages, page_count);
2595 rbd_img_request_put(parent_request);
2596 obj_request->result = result;
2597 obj_request->xferred = 0;
2598 obj_request_done_set(obj_request);
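/*
 * Worked example of the overlap clipping above: with 4 MB objects and
 * a parent_overlap of 6 MB, a write to the object covering image
 * range [4 MB, 8 MB) reads only [4 MB, 6 MB) from the parent; the
 * parent defines no data past the overlap, so the copyup payload need
 * not cover it either.
 */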
2603 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2605 struct rbd_obj_request *orig_request;
2606 struct rbd_device *rbd_dev;
2609 rbd_assert(!obj_request_img_data_test(obj_request));
2612 * All we need from the object request is the original
2613 * request and the result of the STAT op. Grab those, then
2614 * we're done with the request.
2616 orig_request = obj_request->obj_request;
2617 obj_request->obj_request = NULL;
2618 rbd_obj_request_put(orig_request);
2619 rbd_assert(orig_request);
2620 rbd_assert(orig_request->img_request);
2622 result = obj_request->result;
2623 obj_request->result = 0;
2625 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2626 obj_request, orig_request, result,
2627 obj_request->xferred, obj_request->length);
2628 rbd_obj_request_put(obj_request);
2631 * If the overlap has become 0 (most likely because the
2632 * image has been flattened) we need to free the pages
2633 * and re-submit the original write request.
2635 rbd_dev = orig_request->img_request->rbd_dev;
2636 if (!rbd_dev->parent_overlap) {
2637 struct ceph_osd_client *osdc;
2639 osdc = &rbd_dev->rbd_client->client->osdc;
2640 result = rbd_obj_request_submit(osdc, orig_request);
2646 * Our only purpose here is to determine whether the object
2647 * exists, and we don't want to treat the non-existence as
2648 * an error. If something else comes back, transfer the
2649 * error to the original request and complete it now.
2652 obj_request_existence_set(orig_request, true);
2653 } else if (result == -ENOENT) {
2654 obj_request_existence_set(orig_request, false);
2655 } else if (result) {
2656 orig_request->result = result;
2661 * Resubmit the original request now that we have recorded
2662 * whether the target object exists.
2664 orig_request->result = rbd_img_obj_request_submit(orig_request);
2666 if (orig_request->result)
2667 rbd_obj_request_complete(orig_request);
2670 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2672 struct rbd_obj_request *stat_request;
2673 struct rbd_device *rbd_dev;
2674 struct ceph_osd_client *osdc;
2675 struct page **pages = NULL;
2681 * The response data for a STAT call consists of:
 *     le64 length;
 *     struct {
 *         le32 tv_sec;
 *         le32 tv_nsec;
 *     } mtime;
 */
2688 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2689 page_count = (u32)calc_pages_for(0, size);
2690 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2692 return PTR_ERR(pages);
2695 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2700 rbd_obj_request_get(obj_request);
2701 stat_request->obj_request = obj_request;
2702 stat_request->pages = pages;
2703 stat_request->page_count = page_count;
2705 rbd_assert(obj_request->img_request);
2706 rbd_dev = obj_request->img_request->rbd_dev;
2707 stat_request->osd_req = rbd_osd_req_create(rbd_dev, false, 1,
2709 if (!stat_request->osd_req)
2711 stat_request->callback = rbd_img_obj_exists_callback;
2713 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
2714 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2716 rbd_osd_req_format_read(stat_request);
2718 osdc = &rbd_dev->rbd_client->client->osdc;
2719 ret = rbd_obj_request_submit(osdc, stat_request);
2722 rbd_obj_request_put(obj_request);
2727 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2729 struct rbd_img_request *img_request;
2730 struct rbd_device *rbd_dev;
2733 rbd_assert(obj_request_img_data_test(obj_request));
2735 img_request = obj_request->img_request;
2736 rbd_assert(img_request);
2737 rbd_dev = img_request->rbd_dev;
2740 * Only writes to layered images need special handling.
2741 * Reads and non-layered writes are simple object requests.
2742 * Layered writes that start beyond the end of the overlap
2743 * with the parent have no parent data, so they too are
2744 * simple object requests. Finally, if the target object is
2745 * known to already exist, its parent data has already been
2746 * copied, so a write to the object can also be handled as a
2747 * simple object request.
2749 if (!img_request_write_test(img_request) ||
2750 !img_request_layered_test(img_request) ||
2751 rbd_dev->parent_overlap <= obj_request->img_offset ||
2752 ((known = obj_request_known_test(obj_request)) &&
2753 obj_request_exists_test(obj_request))) {
2755 struct rbd_device *rbd_dev;
2756 struct ceph_osd_client *osdc;
2758 rbd_dev = obj_request->img_request->rbd_dev;
2759 osdc = &rbd_dev->rbd_client->client->osdc;
2761 return rbd_obj_request_submit(osdc, obj_request);
2765 * It's a layered write. The target object might exist but
2766 * we may not know that yet. If we know it doesn't exist,
2767 * start by reading the data for the full target object from
2768 * the parent so we can use it for a copyup to the target.
2771 return rbd_img_obj_parent_read_full(obj_request);
2773 /* We don't know whether the target exists. Go find out. */
2775 return rbd_img_obj_exists_submit(obj_request);
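/*
 * Dispatch summary for the function above:
 *
 *	- read, non-layered write, write starting at or past the
 *	  overlap, or target known to exist: submit directly to the osd
 *	- target known not to exist: rbd_img_obj_parent_read_full()
 *	- existence unknown: rbd_img_obj_exists_submit(), which STATs
 *	  the object and resubmits from its callback
 */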
2778 static int rbd_img_request_submit(struct rbd_img_request *img_request)
2780 struct rbd_obj_request *obj_request;
2781 struct rbd_obj_request *next_obj_request;
2783 dout("%s: img %p\n", __func__, img_request);
2784 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
2787 ret = rbd_img_obj_request_submit(obj_request);
2795 static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2797 struct rbd_obj_request *obj_request;
2798 struct rbd_device *rbd_dev;
2803 rbd_assert(img_request_child_test(img_request));
2805 /* First get what we need from the image request and release it */
2807 obj_request = img_request->obj_request;
2808 img_xferred = img_request->xferred;
2809 img_result = img_request->result;
2810 rbd_img_request_put(img_request);
2813 * If the overlap has become 0 (most likely because the
2814 * image has been flattened) we need to re-submit the original request.
2817 rbd_assert(obj_request);
2818 rbd_assert(obj_request->img_request);
2819 rbd_dev = obj_request->img_request->rbd_dev;
2820 if (!rbd_dev->parent_overlap) {
2821 struct ceph_osd_client *osdc;
2823 osdc = &rbd_dev->rbd_client->client->osdc;
2824 img_result = rbd_obj_request_submit(osdc, obj_request);
2829 obj_request->result = img_result;
2830 if (obj_request->result)
2834 * We need to zero anything beyond the parent overlap
2835 * boundary. Since rbd_img_obj_request_read_callback()
2836 * will zero anything beyond the end of a short read, an
2837 * easy way to do this is to pretend the data from the
2838 * parent came up short--ending at the overlap boundary.
2840 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
2841 obj_end = obj_request->img_offset + obj_request->length;
2842 if (obj_end > rbd_dev->parent_overlap) {
2845 if (obj_request->img_offset < rbd_dev->parent_overlap)
2846 xferred = rbd_dev->parent_overlap -
2847 obj_request->img_offset;
2849 obj_request->xferred = min(img_xferred, xferred);
2851 obj_request->xferred = img_xferred;
2854 rbd_img_obj_request_read_callback(obj_request);
2855 rbd_obj_request_complete(obj_request);
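/*
 * Example of the short-read trick above: with a parent_overlap of
 * 6 MB, an object read covering image range [4 MB, 8 MB) can be
 * satisfied by at most 2 MB of parent data, so xferred is clamped to
 * 2 MB and rbd_img_obj_request_read_callback() zero-fills the
 * remaining 2 MB.
 */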
2858 static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
2860 struct rbd_img_request *img_request;
2863 rbd_assert(obj_request_img_data_test(obj_request));
2864 rbd_assert(obj_request->img_request != NULL);
2865 rbd_assert(obj_request->result == (s32) -ENOENT);
2866 rbd_assert(obj_request_type_valid(obj_request->type));
2868 /* rbd_read_finish(obj_request, obj_request->length); */
2869 img_request = rbd_parent_request_create(obj_request,
2870 obj_request->img_offset,
2871 obj_request->length);
2876 if (obj_request->type == OBJ_REQUEST_BIO)
2877 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2878 obj_request->bio_list);
2880 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
2881 obj_request->pages);
2885 img_request->callback = rbd_img_parent_read_callback;
2886 result = rbd_img_request_submit(img_request);
2893 rbd_img_request_put(img_request);
2894 obj_request->result = result;
2895 obj_request->xferred = 0;
2896 obj_request_done_set(obj_request);
2899 static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
2901 struct rbd_obj_request *obj_request;
2902 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2905 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2906 OBJ_REQUEST_NODATA);
2911 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, 1,
2913 if (!obj_request->osd_req)
2916 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
2918 rbd_osd_req_format_read(obj_request);
2920 ret = rbd_obj_request_submit(osdc, obj_request);
2923 ret = rbd_obj_request_wait(obj_request);
2925 rbd_obj_request_put(obj_request);
2930 static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2932 struct rbd_device *rbd_dev = (struct rbd_device *)data;
2938 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
2939 rbd_dev->header_name, (unsigned long long)notify_id,
2940 (unsigned int)opcode);
2941 ret = rbd_dev_refresh(rbd_dev);
2943 rbd_warn(rbd_dev, "header refresh error (%d)", ret);
2945 rbd_obj_notify_ack_sync(rbd_dev, notify_id);
2949 * Initiate a watch request, synchronously.
2951 static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
2953 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2954 struct rbd_obj_request *obj_request;
2957 rbd_assert(!rbd_dev->watch_event);
2958 rbd_assert(!rbd_dev->watch_request);
2960 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
2961 &rbd_dev->watch_event);
2965 rbd_assert(rbd_dev->watch_event);
2967 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2968 OBJ_REQUEST_NODATA);
2974 obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, 1,
2976 if (!obj_request->osd_req) {
2981 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
2983 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
2984 rbd_dev->watch_event->cookie, 0, 1);
2985 rbd_osd_req_format_write(obj_request);
2987 ret = rbd_obj_request_submit(osdc, obj_request);
2991 ret = rbd_obj_request_wait(obj_request);
2995 ret = obj_request->result;
3000 * A watch request is set to linger, so the underlying osd
3001 * request won't go away until we unregister it. We retain
3002 * a pointer to the object request during that time (in
3003 * rbd_dev->watch_request), so we'll keep a reference to
3004 * it. We'll drop that reference (below) after we've unregistered it.
3007 rbd_dev->watch_request = obj_request;
3012 ceph_osdc_unregister_linger_request(osdc, obj_request->osd_req);
3014 rbd_obj_request_put(obj_request);
3016 ceph_osdc_cancel_event(rbd_dev->watch_event);
3017 rbd_dev->watch_event = NULL;
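/*
 * Sketch of the watch machinery above and below, for reference:
 *
 *	rbd_dev_header_watch_sync()	register a lingering WATCH op on
 *					the header object
 *	rbd_watch_cb()			on notification: refresh the
 *					header, then send NOTIFY_ACK
 *	rbd_dev_header_unwatch_sync()	send WATCH with flag 0 to
 *					unregister, then clean up
 *
 * The linger flag keeps the underlying osd request registered (and
 * re-sent as needed) until it is explicitly unregistered.
 */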
3023 * Tear down a watch request, synchronously.
3025 static int __rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
3027 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3028 struct rbd_obj_request *obj_request;
3031 rbd_assert(rbd_dev->watch_event);
3032 rbd_assert(rbd_dev->watch_request);
3034 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
3035 OBJ_REQUEST_NODATA);
3041 obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, 1,
3043 if (!obj_request->osd_req) {
3048 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
3049 rbd_dev->watch_event->cookie, 0, 0);
3050 rbd_osd_req_format_write(obj_request);
3052 ret = rbd_obj_request_submit(osdc, obj_request);
3056 ret = rbd_obj_request_wait(obj_request);
3060 ret = obj_request->result;
3064 /* We have successfully torn down the watch request */
3066 ceph_osdc_unregister_linger_request(osdc,
3067 rbd_dev->watch_request->osd_req);
3068 rbd_obj_request_put(rbd_dev->watch_request);
3069 rbd_dev->watch_request = NULL;
3072 rbd_obj_request_put(obj_request);
3074 ceph_osdc_cancel_event(rbd_dev->watch_event);
3075 rbd_dev->watch_event = NULL;
3080 static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
3084 ret = __rbd_dev_header_unwatch_sync(rbd_dev);
3086 rbd_warn(rbd_dev, "unable to tear down watch request: %d",
3092 * Synchronous osd object method call. Returns the number of bytes
3093 * returned in the outbound buffer, or a negative error code.
3095 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
3096 const char *object_name,
3097 const char *class_name,
3098 const char *method_name,
3099 const void *outbound,
3100 size_t outbound_size,
3102 size_t inbound_size)
3104 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3105 struct rbd_obj_request *obj_request;
3106 struct page **pages;
3111 * Method calls are ultimately read operations. The result
3112 * should be placed into the inbound buffer provided. They
3113 * also supply outbound data--parameters for the object
3114 * method. Currently if this is present it will be a snapshot id.
3117 page_count = (u32)calc_pages_for(0, inbound_size);
3118 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3120 return PTR_ERR(pages);
3123 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
3128 obj_request->pages = pages;
3129 obj_request->page_count = page_count;
3131 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, 1,
3133 if (!obj_request->osd_req)
3136 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
3137 class_name, method_name);
3138 if (outbound_size) {
3139 struct ceph_pagelist *pagelist;
3141 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
3145 ceph_pagelist_init(pagelist);
3146 ceph_pagelist_append(pagelist, outbound, outbound_size);
3147 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
3150 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
3151 obj_request->pages, inbound_size,
3153 rbd_osd_req_format_read(obj_request);
3155 ret = rbd_obj_request_submit(osdc, obj_request);
3158 ret = rbd_obj_request_wait(obj_request);
3162 ret = obj_request->result;
3166 rbd_assert(obj_request->xferred < (u64)INT_MAX);
3167 ret = (int)obj_request->xferred;
3168 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
3171 rbd_obj_request_put(obj_request);
3173 ceph_release_page_vector(pages, page_count);
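/*
 * Typical use of the helper above, mirroring the v2 metadata getters
 * later in this file: encode a small argument, call an "rbd" class
 * method on the header object, then decode the reply, e.g.:
 *
 *	__le64 snapid = cpu_to_le64(CEPH_NOSNAP);
 *
 *	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
 *				  "rbd", "get_size",
 *				  &snapid, sizeof (snapid),
 *				  &size_buf, sizeof (size_buf));
 */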
3178 static void rbd_request_fn(struct request_queue *q)
3179 __releases(q->queue_lock) __acquires(q->queue_lock)
3181 struct rbd_device *rbd_dev = q->queuedata;
3185 while ((rq = blk_fetch_request(q))) {
3186 bool write_request = rq_data_dir(rq) == WRITE;
3187 struct rbd_img_request *img_request;
3191 /* Ignore any non-FS requests that filter through. */
3193 if (rq->cmd_type != REQ_TYPE_FS) {
3194 dout("%s: non-fs request type %d\n", __func__,
3195 (int) rq->cmd_type);
3196 __blk_end_request_all(rq, 0);
3200 /* Ignore/skip any zero-length requests */
3202 offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
3203 length = (u64) blk_rq_bytes(rq);
3206 dout("%s: zero-length request\n", __func__);
3207 __blk_end_request_all(rq, 0);
3211 spin_unlock_irq(q->queue_lock);
3213 /* Disallow writes to a read-only device */
3215 if (write_request) {
3217 if (rbd_dev->mapping.read_only)
3219 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
3223 * Quit early if the mapped snapshot no longer
3224 * exists. It's still possible the snapshot will
3225 * have disappeared by the time our request arrives
3226 * at the osd, but there's no sense in sending it if
3229 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
3230 dout("request for non-existent snapshot");
3231 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
3237 if (offset && length > U64_MAX - offset + 1) {
3238 rbd_warn(rbd_dev, "bad request range (%llu~%llu)",
3240 goto end_request; /* Shouldn't happen */
3244 if (offset + length > rbd_dev->mapping.size) {
3245 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)",
3246 offset, length, rbd_dev->mapping.size);
3251 img_request = rbd_img_request_create(rbd_dev, offset, length,
3256 img_request->rq = rq;
3258 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3261 result = rbd_img_request_submit(img_request);
3263 rbd_img_request_put(img_request);
3265 spin_lock_irq(q->queue_lock);
3267 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
3268 write_request ? "write" : "read",
3269 length, offset, result);
3271 __blk_end_request_all(rq, result);
3277 * a queue merge_bvec callback. Makes sure we don't create a bio that
3278 * spans multiple osd objects. The one exception is a single-page bio,
3279 * which we handle later in bio_chain_clone_range().
3281 static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
3282 struct bio_vec *bvec)
3284 struct rbd_device *rbd_dev = q->queuedata;
3285 sector_t sector_offset;
3286 sector_t sectors_per_obj;
3287 sector_t obj_sector_offset;
3291 * Find how far into its rbd object the partition-relative
3292 * bio start sector falls (its sector offset within that object).
3295 sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
3296 sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
3297 obj_sector_offset = sector_offset & (sectors_per_obj - 1);
3300 * Compute the number of bytes from that offset to the end
3301 * of the object. Account for what's already used by the bio.
3303 ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
3304 if (ret > bmd->bi_size)
3305 ret -= bmd->bi_size;
3310 * Don't send back more than was asked for. And if the bio
3311 * was empty, let the whole thing through because: "Note
3312 * that a block device *must* allow a single page to be
3313 * added to an empty bio."
3315 rbd_assert(bvec->bv_len <= PAGE_SIZE);
3316 if (ret > (int) bvec->bv_len || !bmd->bi_size)
3317 ret = (int) bvec->bv_len;
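/*
 * Worked example: with 4 MB objects, sectors_per_obj is 8192. A bio
 * whose mapped start sector lands at sector 8190 of an object has
 * 1024 bytes left before the object boundary; once bi_size reaches
 * 1024 the computed room drops to zero and further segments are
 * refused, so the bio never crosses into the next object.
 */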
3322 static void rbd_free_disk(struct rbd_device *rbd_dev)
3324 struct gendisk *disk = rbd_dev->disk;
3329 rbd_dev->disk = NULL;
3330 if (disk->flags & GENHD_FL_UP) {
3333 blk_cleanup_queue(disk->queue);
3338 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3339 const char *object_name,
3340 u64 offset, u64 length, void *buf)
3343 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3344 struct rbd_obj_request *obj_request;
3345 struct page **pages = NULL;
3350 page_count = (u32) calc_pages_for(offset, length);
3351 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3353 ret = PTR_ERR(pages);
3356 obj_request = rbd_obj_request_create(object_name, offset, length,
3361 obj_request->pages = pages;
3362 obj_request->page_count = page_count;
3364 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, 1,
3366 if (!obj_request->osd_req)
3369 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
3370 offset, length, 0, 0);
3371 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
3373 obj_request->length,
3374 obj_request->offset & ~PAGE_MASK,
3376 rbd_osd_req_format_read(obj_request);
3378 ret = rbd_obj_request_submit(osdc, obj_request);
3381 ret = rbd_obj_request_wait(obj_request);
3385 ret = obj_request->result;
3389 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
3390 size = (size_t) obj_request->xferred;
3391 ceph_copy_from_page_vector(pages, buf, 0, size);
3392 rbd_assert(size <= (size_t)INT_MAX);
3396 rbd_obj_request_put(obj_request);
3398 ceph_release_page_vector(pages, page_count);
3404 * Read the complete header for the given rbd device. On successful
3405 * return, the rbd_dev->header field will contain up-to-date
3406 * information about the image.
3408 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
3410 struct rbd_image_header_ondisk *ondisk = NULL;
3417 * The complete header will include an array of its 64-bit
3418 * snapshot ids, followed by the names of those snapshots as
3419 * a contiguous block of NUL-terminated strings. Note that
3420 * the number of snapshots could change by the time we read
3421 * it in, in which case we re-read it.
3428 size = sizeof (*ondisk);
3429 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3431 ondisk = kmalloc(size, GFP_KERNEL);
3435 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
3439 if ((size_t)ret < size) {
3441 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3445 if (!rbd_dev_ondisk_valid(ondisk)) {
3447 rbd_warn(rbd_dev, "invalid header");
3451 names_size = le64_to_cpu(ondisk->snap_names_len);
3452 want_count = snap_count;
3453 snap_count = le32_to_cpu(ondisk->snap_count);
3454 } while (snap_count != want_count);
3456 ret = rbd_header_from_disk(rbd_dev, ondisk);
3464 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3465 * has disappeared from the (just updated) snapshot context.
3467 static void rbd_exists_validate(struct rbd_device *rbd_dev)
3471 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3474 snap_id = rbd_dev->spec->snap_id;
3475 if (snap_id == CEPH_NOSNAP)
3478 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3479 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3482 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
3488 * Don't hold the lock while doing disk operations,
3489 * or lock ordering will conflict with the bdev mutex via:
3490 * rbd_add() -> blkdev_get() -> rbd_open()
3492 spin_lock_irq(&rbd_dev->lock);
3493 removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
3494 spin_unlock_irq(&rbd_dev->lock);
3496 * If the device is being removed, rbd_dev->disk has
3497 * been destroyed, so don't try to update its size
3500 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3501 dout("setting size to %llu sectors", (unsigned long long)size);
3502 set_capacity(rbd_dev->disk, size);
3503 revalidate_disk(rbd_dev->disk);
3507 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3512 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
3513 down_write(&rbd_dev->header_rwsem);
3514 mapping_size = rbd_dev->mapping.size;
3515 if (rbd_dev->image_format == 1)
3516 ret = rbd_dev_v1_header_info(rbd_dev);
3518 ret = rbd_dev_v2_header_info(rbd_dev);
3520 /* If it's a mapped snapshot, validate its EXISTS flag */
3522 rbd_exists_validate(rbd_dev);
3523 up_write(&rbd_dev->header_rwsem);
3525 if (mapping_size != rbd_dev->mapping.size) {
3526 rbd_dev_update_size(rbd_dev);
3532 static int rbd_init_disk(struct rbd_device *rbd_dev)
3534 struct gendisk *disk;
3535 struct request_queue *q;
3538 /* create gendisk info */
3539 disk = alloc_disk(single_major ?
3540 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
3541 RBD_MINORS_PER_MAJOR);
3545 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
3547 disk->major = rbd_dev->major;
3548 disk->first_minor = rbd_dev->minor;
3550 disk->flags |= GENHD_FL_EXT_DEVT;
3551 disk->fops = &rbd_bd_ops;
3552 disk->private_data = rbd_dev;
3554 q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
3558 /* We use the default size, but let's be explicit about it. */
3559 blk_queue_physical_block_size(q, SECTOR_SIZE);
3561 /* set io sizes to object size */
3562 segment_size = rbd_obj_bytes(&rbd_dev->header);
3563 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
3564 blk_queue_max_segment_size(q, segment_size);
3565 blk_queue_io_min(q, segment_size);
3566 blk_queue_io_opt(q, segment_size);
3568 blk_queue_merge_bvec(q, rbd_merge_bvec);
3571 q->queuedata = rbd_dev;
3573 rbd_dev->disk = disk;
3586 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3588 return container_of(dev, struct rbd_device, dev);
3591 static ssize_t rbd_size_show(struct device *dev,
3592 struct device_attribute *attr, char *buf)
3594 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3596 return sprintf(buf, "%llu\n",
3597 (unsigned long long)rbd_dev->mapping.size);
3601 * Note this shows the features for whatever's mapped, which is not
3602 * necessarily the base image.
3604 static ssize_t rbd_features_show(struct device *dev,
3605 struct device_attribute *attr, char *buf)
3607 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3609 return sprintf(buf, "0x%016llx\n",
3610 (unsigned long long)rbd_dev->mapping.features);
3613 static ssize_t rbd_major_show(struct device *dev,
3614 struct device_attribute *attr, char *buf)
3616 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3619 return sprintf(buf, "%d\n", rbd_dev->major);
3621 return sprintf(buf, "(none)\n");
3624 static ssize_t rbd_minor_show(struct device *dev,
3625 struct device_attribute *attr, char *buf)
3627 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3629 return sprintf(buf, "%d\n", rbd_dev->minor);
3632 static ssize_t rbd_client_id_show(struct device *dev,
3633 struct device_attribute *attr, char *buf)
3635 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3637 return sprintf(buf, "client%lld\n",
3638 ceph_client_id(rbd_dev->rbd_client->client));
3641 static ssize_t rbd_pool_show(struct device *dev,
3642 struct device_attribute *attr, char *buf)
3644 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3646 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
3649 static ssize_t rbd_pool_id_show(struct device *dev,
3650 struct device_attribute *attr, char *buf)
3652 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3654 return sprintf(buf, "%llu\n",
3655 (unsigned long long) rbd_dev->spec->pool_id);
3658 static ssize_t rbd_name_show(struct device *dev,
3659 struct device_attribute *attr, char *buf)
3661 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3663 if (rbd_dev->spec->image_name)
3664 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3666 return sprintf(buf, "(unknown)\n");
3669 static ssize_t rbd_image_id_show(struct device *dev,
3670 struct device_attribute *attr, char *buf)
3672 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3674 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
3678 * Shows the name of the currently-mapped snapshot (or
3679 * RBD_SNAP_HEAD_NAME for the base image).
3681 static ssize_t rbd_snap_show(struct device *dev,
3682 struct device_attribute *attr,
3685 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3687 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
3691 * For an rbd v2 image, shows the pool id, image id, and snapshot id
3692 * for the parent image. If there is no parent, simply shows
3693 * "(no parent image)".
3695 static ssize_t rbd_parent_show(struct device *dev,
3696 struct device_attribute *attr,
3699 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3700 struct rbd_spec *spec = rbd_dev->parent_spec;
3705 return sprintf(buf, "(no parent image)\n");
3707 count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
3708 (unsigned long long) spec->pool_id, spec->pool_name);
3713 count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
3714 spec->image_name ? spec->image_name : "(unknown)");
3719 count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
3720 (unsigned long long) spec->snap_id, spec->snap_name);
3725 count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
3730 return (ssize_t) (bufp - buf);
3733 static ssize_t rbd_image_refresh(struct device *dev,
3734 struct device_attribute *attr,
3738 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3741 ret = rbd_dev_refresh(rbd_dev);
3743 rbd_warn(rbd_dev, "manual header refresh error (%d)", ret);
3745 return ret < 0 ? ret : size;
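/*
 * The attributes below appear under /sys/bus/rbd/devices/<id>/. For
 * example (illustrative path), a header refresh can be forced with:
 *
 *	# echo 1 > /sys/bus/rbd/devices/0/refresh
 */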
3748 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
3749 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
3750 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
3751 static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
3752 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3753 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
3754 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
3755 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
3756 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
3757 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3758 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
3759 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
3761 static struct attribute *rbd_attrs[] = {
3762 &dev_attr_size.attr,
3763 &dev_attr_features.attr,
3764 &dev_attr_major.attr,
3765 &dev_attr_minor.attr,
3766 &dev_attr_client_id.attr,
3767 &dev_attr_pool.attr,
3768 &dev_attr_pool_id.attr,
3769 &dev_attr_name.attr,
3770 &dev_attr_image_id.attr,
3771 &dev_attr_current_snap.attr,
3772 &dev_attr_parent.attr,
3773 &dev_attr_refresh.attr,
3777 static struct attribute_group rbd_attr_group = {
3781 static const struct attribute_group *rbd_attr_groups[] = {
3786 static void rbd_sysfs_dev_release(struct device *dev)
3790 static struct device_type rbd_device_type = {
3792 .groups = rbd_attr_groups,
3793 .release = rbd_sysfs_dev_release,
3796 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
3798 kref_get(&spec->kref);
3803 static void rbd_spec_free(struct kref *kref);
3804 static void rbd_spec_put(struct rbd_spec *spec)
3807 kref_put(&spec->kref, rbd_spec_free);
3810 static struct rbd_spec *rbd_spec_alloc(void)
3812 struct rbd_spec *spec;
3814 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
3817 kref_init(&spec->kref);
3822 static void rbd_spec_free(struct kref *kref)
3824 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
3826 kfree(spec->pool_name);
3827 kfree(spec->image_id);
3828 kfree(spec->image_name);
3829 kfree(spec->snap_name);
3833 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
3834 struct rbd_spec *spec)
3836 struct rbd_device *rbd_dev;
3838 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
3842 spin_lock_init(&rbd_dev->lock);
3844 atomic_set(&rbd_dev->parent_ref, 0);
3845 INIT_LIST_HEAD(&rbd_dev->node);
3846 init_rwsem(&rbd_dev->header_rwsem);
3848 rbd_dev->spec = spec;
3849 rbd_dev->rbd_client = rbdc;
3851 /* Initialize the layout used for all rbd requests */
3853 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3854 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
3855 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3856 rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
3861 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
3863 rbd_put_client(rbd_dev->rbd_client);
3864 rbd_spec_put(rbd_dev->spec);
3869 * Get the size and object order for an image snapshot, or if
3870 * snap_id is CEPH_NOSNAP, gets this information for the base
3873 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
3874 u8 *order, u64 *snap_size)
3876 __le64 snapid = cpu_to_le64(snap_id);
3881 } __attribute__ ((packed)) size_buf = { 0 };
3883 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3885 &snapid, sizeof (snapid),
3886 &size_buf, sizeof (size_buf));
3887 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3890 if (ret < sizeof (size_buf))
3894 *order = size_buf.order;
3895 dout(" order %u", (unsigned int)*order);
3897 *snap_size = le64_to_cpu(size_buf.size);
3899 dout(" snap_id 0x%016llx snap_size = %llu\n",
3900 (unsigned long long)snap_id,
3901 (unsigned long long)*snap_size);
3906 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
3908 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
3909 &rbd_dev->header.obj_order,
3910 &rbd_dev->header.image_size);
3913 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
3919 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
3923 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3924 "rbd", "get_object_prefix", NULL, 0,
3925 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
3926 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3931 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
3932 p + ret, NULL, GFP_NOIO);
3935 if (IS_ERR(rbd_dev->header.object_prefix)) {
3936 ret = PTR_ERR(rbd_dev->header.object_prefix);
3937 rbd_dev->header.object_prefix = NULL;
3939 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
3947 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
3950 __le64 snapid = cpu_to_le64(snap_id);
3954 } __attribute__ ((packed)) features_buf = { 0 };
3958 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3959 "rbd", "get_features",
3960 &snapid, sizeof (snapid),
3961 &features_buf, sizeof (features_buf));
3962 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3965 if (ret < sizeof (features_buf))
3968 incompat = le64_to_cpu(features_buf.incompat);
3969 if (incompat & ~RBD_FEATURES_SUPPORTED)
3972 *snap_features = le64_to_cpu(features_buf.features);
3974 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
3975 (unsigned long long)snap_id,
3976 (unsigned long long)*snap_features,
3977 (unsigned long long)le64_to_cpu(features_buf.incompat));
3982 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
3984 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
3985 &rbd_dev->header.features);
3988 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
3990 struct rbd_spec *parent_spec;
3992 void *reply_buf = NULL;
4002 parent_spec = rbd_spec_alloc();
4006 size = sizeof (__le64) + /* pool_id */
4007 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
4008 sizeof (__le64) + /* snap_id */
4009 sizeof (__le64); /* overlap */
4010 reply_buf = kmalloc(size, GFP_KERNEL);
4016 snapid = cpu_to_le64(CEPH_NOSNAP);
4017 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4018 "rbd", "get_parent",
4019 &snapid, sizeof (snapid),
4021 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4026 end = reply_buf + ret;
4028 ceph_decode_64_safe(&p, end, pool_id, out_err);
4029 if (pool_id == CEPH_NOPOOL) {
4031 * Either the parent never existed, or we have
4032 * record of it but the image got flattened so it no
4033 * longer has a parent. When the parent of a
4034 * layered image disappears we immediately set the
4035 * overlap to 0. The effect of this is that all new
4036 * requests will be treated as if the image had no
4039 if (rbd_dev->parent_overlap) {
4040 rbd_dev->parent_overlap = 0;
4042 rbd_dev_parent_put(rbd_dev);
4043 pr_info("%s: clone image has been flattened\n",
4044 rbd_dev->disk->disk_name);
4047 goto out; /* No parent? No problem. */
4050 /* The ceph file layout needs to fit pool id in 32 bits */
4053 if (pool_id > (u64)U32_MAX) {
4054 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
4055 (unsigned long long)pool_id, U32_MAX);
4059 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4060 if (IS_ERR(image_id)) {
4061 ret = PTR_ERR(image_id);
4064 ceph_decode_64_safe(&p, end, snap_id, out_err);
4065 ceph_decode_64_safe(&p, end, overlap, out_err);
4068 * The parent won't change (except when the clone is
4069 * flattened, which we've already handled). So we only need to
4070 * record the parent spec if we have not already done so.
4072 if (!rbd_dev->parent_spec) {
4073 parent_spec->pool_id = pool_id;
4074 parent_spec->image_id = image_id;
4075 parent_spec->snap_id = snap_id;
4076 rbd_dev->parent_spec = parent_spec;
4077 parent_spec = NULL; /* rbd_dev now owns this */
4081 * We always update the parent overlap. If it's zero we
4082 * treat it specially.
4084 rbd_dev->parent_overlap = overlap;
4088 /* A null parent_spec indicates it's the initial probe */
4092 * The overlap has become zero, so the clone
4093 * must have been resized down to 0 at some
4094 * point. Treat this the same as a flatten.
4096 rbd_dev_parent_put(rbd_dev);
4097 pr_info("%s: clone image now standalone\n",
4098 rbd_dev->disk->disk_name);
4101 * For the initial probe, if we find the
4102 * overlap is zero we just pretend there was no parent image.
4105 rbd_warn(rbd_dev, "ignoring parent of clone with overlap 0");
4113 rbd_spec_put(parent_spec);
4118 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
4122 __le64 stripe_count;
4123 } __attribute__ ((packed)) striping_info_buf = { 0 };
4124 size_t size = sizeof (striping_info_buf);
4131 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4132 "rbd", "get_stripe_unit_count", NULL, 0,
4133 (char *)&striping_info_buf, size);
4134 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4141 * We don't actually support the "fancy striping" feature
4142 * (STRIPINGV2) yet, but if the striping sizes are the
4143 * defaults the behavior is the same as before. So find
4144 * out, and only fail if the image has non-default values.
4147 obj_size = (u64)1 << rbd_dev->header.obj_order;
4148 p = &striping_info_buf;
4149 stripe_unit = ceph_decode_64(&p);
4150 if (stripe_unit != obj_size) {
4151 rbd_warn(rbd_dev, "unsupported stripe unit (got %llu want %llu)",
4153 stripe_unit, obj_size);
4156 stripe_count = ceph_decode_64(&p);
4157 if (stripe_count != 1) {
4158 rbd_warn(rbd_dev, "unsupported stripe count (got %llu want 1)",
4159 stripe_count);
4162 rbd_dev->header.stripe_unit = stripe_unit;
4163 rbd_dev->header.stripe_count = stripe_count;
4168 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
4170 size_t image_id_size;
4175 void *reply_buf = NULL;
4177 char *image_name = NULL;
4180 rbd_assert(!rbd_dev->spec->image_name);
4182 len = strlen(rbd_dev->spec->image_id);
4183 image_id_size = sizeof (__le32) + len;
4184 image_id = kmalloc(image_id_size, GFP_KERNEL);
4189 end = image_id + image_id_size;
4190 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
4192 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
4193 reply_buf = kmalloc(size, GFP_KERNEL);
4197 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
4198 "rbd", "dir_get_name",
4199 image_id, image_id_size,
4204 end = reply_buf + ret;
4206 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
4207 if (IS_ERR(image_name))
4210 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
4218 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4220 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4221 const char *snap_name;
4224 /* Skip over names until we find the one we are looking for */
4226 snap_name = rbd_dev->header.snap_names;
4227 while (which < snapc->num_snaps) {
4228 if (!strcmp(name, snap_name))
4229 return snapc->snaps[which];
4230 snap_name += strlen(snap_name) + 1;
4236 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4238 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4243 for (which = 0; !found && which < snapc->num_snaps; which++) {
4244 const char *snap_name;
4246 snap_id = snapc->snaps[which];
4247 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
4248 if (IS_ERR(snap_name)) {
4249 /* ignore no-longer existing snapshots */
4250 if (PTR_ERR(snap_name) == -ENOENT)
4255 found = !strcmp(name, snap_name);
4258 return found ? snap_id : CEPH_NOSNAP;
4262 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
4263 * no snapshot by that name is found, or if an error occurs.
4265 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4267 if (rbd_dev->image_format == 1)
4268 return rbd_v1_snap_id_by_name(rbd_dev, name);
4270 return rbd_v2_snap_id_by_name(rbd_dev, name);
4274 * When an rbd image has a parent image, it is identified by the
4275 * pool, image, and snapshot ids (not names). This function fills
4276 * in the names for those ids. (It's OK if we can't figure out the
4277 * name for an image id, but the pool and snapshot ids should always
4278 * exist and have names.) All names in an rbd spec are dynamically allocated.
4281 * When an image being mapped (not a parent) is probed, we have the
4282 * pool name and pool id, image name and image id, and the snapshot
4283 * name. The only thing we're missing is the snapshot id.
4285 static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
4287 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4288 struct rbd_spec *spec = rbd_dev->spec;
4289 const char *pool_name;
4290 const char *image_name;
4291 const char *snap_name;
4295 * An image being mapped will have the pool name (etc.), but
4296 * we need to look up the snapshot id.
4298 if (spec->pool_name) {
4299 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
4302 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
4303 if (snap_id == CEPH_NOSNAP)
4305 spec->snap_id = snap_id;
4307 spec->snap_id = CEPH_NOSNAP;
4313 /* Get the pool name; we have to make our own copy of this */
4315 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
4317 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
4320 pool_name = kstrdup(pool_name, GFP_KERNEL);
4324 /* Fetch the image name; tolerate failure here */
4326 image_name = rbd_dev_image_name(rbd_dev);
4328 rbd_warn(rbd_dev, "unable to get image name");
4330 /* Look up the snapshot name, and make a copy */
4332 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
4333 if (IS_ERR(snap_name)) {
4334 ret = PTR_ERR(snap_name);
4338 spec->pool_name = pool_name;
4339 spec->image_name = image_name;
4340 spec->snap_name = snap_name;
4350 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
4359 struct ceph_snap_context *snapc;
4363 * We'll need room for the seq value (maximum snapshot id),
4364 * snapshot count, and array of that many snapshot ids.
4365 * For now we have a fixed upper limit on the number we're
4366 * prepared to receive.
4368 size = sizeof (__le64) + sizeof (__le32) +
4369 RBD_MAX_SNAP_COUNT * sizeof (__le64);
4370 reply_buf = kzalloc(size, GFP_KERNEL);
4374 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4375 "rbd", "get_snapcontext", NULL, 0,
4377 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4382 end = reply_buf + ret;
4384 ceph_decode_64_safe(&p, end, seq, out);
4385 ceph_decode_32_safe(&p, end, snap_count, out);
4388 * Make sure the reported number of snapshot ids wouldn't go
4389 * beyond the end of our buffer. But before checking that,
4390 * make sure the computed size of the snapshot context we
4391 * allocate is representable in a size_t.
4393 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
4398 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
4402 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
4408 for (i = 0; i < snap_count; i++)
4409 snapc->snaps[i] = ceph_decode_64(&p);
4411 ceph_put_snap_context(rbd_dev->header.snapc);
4412 rbd_dev->header.snapc = snapc;
4414 dout(" snap context seq = %llu, snap_count = %u\n",
4415 (unsigned long long)seq, (unsigned int)snap_count);
4422 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
4433 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
4434 reply_buf = kmalloc(size, GFP_KERNEL);
4436 return ERR_PTR(-ENOMEM);
4438 snapid = cpu_to_le64(snap_id);
4439 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4440 "rbd", "get_snapshot_name",
4441 &snapid, sizeof (snapid),
4443 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4445 snap_name = ERR_PTR(ret);
4450 end = reply_buf + ret;
4451 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4452 if (IS_ERR(snap_name))
4455 dout(" snap_id 0x%016llx snap_name = %s\n",
4456 (unsigned long long)snap_id, snap_name);
4463 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
4465 bool first_time = rbd_dev->header.object_prefix == NULL;
4468 ret = rbd_dev_v2_image_size(rbd_dev);
4473 ret = rbd_dev_v2_header_onetime(rbd_dev);
4479 * If the image supports layering, get the parent info. We
4480 * need to probe the first time regardless. Thereafter we
4481 * only need to do so if there's a parent, to see if it has
4482 * disappeared due to the mapped image getting flattened.
4484 if (rbd_dev->header.features & RBD_FEATURE_LAYERING &&
4485 (first_time || rbd_dev->parent_spec)) {
4488 ret = rbd_dev_v2_parent_info(rbd_dev);
4493 * Print a warning if this is the initial probe and
4494 * the image has a parent. Don't print it if the
4495 * image now being probed is itself a parent. We
4496 * can tell at this point because we won't know its
4497 * pool name yet (just its pool id).
4499 warn = rbd_dev->parent_spec && rbd_dev->spec->pool_name;
4500 if (first_time && warn)
4501 rbd_warn(rbd_dev, "WARNING: kernel layering is EXPERIMENTAL!");
4505 if (rbd_dev->spec->snap_id == CEPH_NOSNAP)
4506 if (rbd_dev->mapping.size != rbd_dev->header.image_size)
4507 rbd_dev->mapping.size = rbd_dev->header.image_size;
4509 ret = rbd_dev_v2_snap_context(rbd_dev);
4510 dout("rbd_dev_v2_snap_context returned %d\n", ret);
4515 static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
4520 dev = &rbd_dev->dev;
4521 dev->bus = &rbd_bus_type;
4522 dev->type = &rbd_device_type;
4523 dev->parent = &rbd_root_dev;
4524 dev->release = rbd_dev_device_release;
4525 dev_set_name(dev, "%d", rbd_dev->dev_id);
4526 ret = device_register(dev);
4531 static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
4533 device_unregister(&rbd_dev->dev);
4537 * Get a unique rbd identifier for the given new rbd_dev, and add
4538 * the rbd_dev to the global list.
4540 static int rbd_dev_id_get(struct rbd_device *rbd_dev)
4544 new_dev_id = ida_simple_get(&rbd_dev_id_ida,
4545 0, minor_to_rbd_dev_id(1 << MINORBITS),
4550 rbd_dev->dev_id = new_dev_id;
4552 spin_lock(&rbd_dev_list_lock);
4553 list_add_tail(&rbd_dev->node, &rbd_dev_list);
4554 spin_unlock(&rbd_dev_list_lock);
4556 dout("rbd_dev %p given dev id %d\n", rbd_dev, rbd_dev->dev_id);
4562 * Remove an rbd_dev from the global list, and record that its
4563 * identifier is no longer in use.
4565 static void rbd_dev_id_put(struct rbd_device *rbd_dev)
4567 spin_lock(&rbd_dev_list_lock);
4568 list_del_init(&rbd_dev->node);
4569 spin_unlock(&rbd_dev_list_lock);
4571 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
4573 dout("rbd_dev %p released dev id %d\n", rbd_dev, rbd_dev->dev_id);
4577 * Skips over white space at *buf, and updates *buf to point to the
4578 * first found non-space character (if any). Returns the length of
4579 * the token (string of non-white space characters) found. Note
4580 * that *buf must be terminated with '\0'.
4582 static inline size_t next_token(const char **buf)
4585 * These are the characters that produce nonzero for
4586 * isspace() in the "C" and "POSIX" locales.
4588 const char *spaces = " \f\n\r\t\v";
4590 *buf += strspn(*buf, spaces); /* Find start of token */
4592 return strcspn(*buf, spaces); /* Return token length */
4596 * Finds the next token in *buf, and if the provided token buffer is
4597 * big enough, copies the found token into it. The result, if
4598 * copied, is guaranteed to be terminated with '\0'. Note that *buf
4599 * must be terminated with '\0' on entry.
4601 * Returns the length of the token found (not including the '\0').
4602 * Return value will be 0 if no token is found, and it will be >=
4603 * token_size if the token would not fit.
4605 * The *buf pointer will be updated to point beyond the end of the
4606 * found token. Note that this occurs even if the token buffer is
4607 * too small to hold it.
4609 static inline size_t copy_token(const char **buf,
4615 len = next_token(buf);
4616 if (len < token_size) {
4617 memcpy(token, *buf, len);
4618 *(token + len) = '\0';

/*
 * Finds the next token in *buf, dynamically allocates a buffer big
 * enough to hold a copy of it, and copies the token into the new
 * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
 * that a duplicate buffer is created even for a zero-length token.
 *
 * Returns a pointer to the newly-allocated duplicate, or a null
 * pointer if memory for the duplicate was not available.  If
 * the lenp argument is a non-null pointer, the length of the token
 * (not including the '\0') is returned in *lenp.
 *
 * If successful, the *buf pointer will be updated to point beyond
 * the end of the found token.
 *
 * Note: uses GFP_KERNEL for allocation.
 */
static inline char *dup_token(const char **buf, size_t *lenp)
{
        char *dup;
        size_t len;

        len = next_token(buf);
        dup = kmemdup(*buf, len + 1, GFP_KERNEL);
        if (!dup)
                return NULL;
        *(dup + len) = '\0';
        *buf += len;

        if (lenp)
                *lenp = len;

        return dup;
}
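
/*
 * For example (illustrative only): with *buf pointing at "rbd foo",
 * dup_token(&buf, &len) returns a newly-allocated copy of "rbd",
 * stores 3 in len, and leaves *buf pointing at " foo".
 */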

/*
 * Parse the options provided for an "rbd add" (i.e., rbd image
 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
 * and the data written is passed here via a NUL-terminated buffer.
 * Returns 0 if successful or an error code otherwise.
 *
 * The information extracted from these options is recorded in
 * the other parameters which return dynamically-allocated
 * structures:
 *  ceph_opts
 *      The address of a pointer that will refer to a ceph options
 *      structure.  Caller must release the returned pointer using
 *      ceph_destroy_options() when it is no longer needed.
 *  rbd_opts
 *      Address of an rbd options pointer.  Fully initialized by
 *      this function; caller must release with kfree().
 *  spec
 *      Address of an rbd image specification pointer.  Fully
 *      initialized by this function based on parsed options.
 *      Caller must release with rbd_spec_put().
 *
 * The options passed take this form:
 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
 * where:
 *  <mon_addrs>
 *      A comma-separated list of one or more monitor addresses.
 *      A monitor address is an ip address, optionally followed
 *      by a port number (separated by a colon).
 *        I.e.:  ip1[:port1][,ip2[:port2]...]
 *  <options>
 *      A comma-separated list of ceph and/or rbd options.
 *  <pool_name>
 *      The name of the rados pool containing the rbd image.
 *  <image_name>
 *      The name of the image in that pool to map.
 *  <snap_id>
 *      An optional snapshot id.  If provided, the mapping will
 *      present data from the image at the time that snapshot was
 *      created.  The image head is used if no snapshot id is
 *      provided.  Snapshot mappings are always read-only.
 */
static int rbd_add_parse_args(const char *buf,
                                struct ceph_options **ceph_opts,
                                struct rbd_options **opts,
                                struct rbd_spec **rbd_spec)
{
        size_t len;
        char *options;
        const char *mon_addrs;
        char *snap_name;
        size_t mon_addrs_size;
        struct rbd_spec *spec = NULL;
        struct rbd_options *rbd_opts = NULL;
        struct ceph_options *copts;
        int ret;

        /* The first four tokens are required */

        len = next_token(&buf);
        if (!len) {
                rbd_warn(NULL, "no monitor address(es) provided");
                return -EINVAL;
        }
        mon_addrs = buf;
        mon_addrs_size = len + 1;
        buf += len;

        ret = -EINVAL;
        options = dup_token(&buf, NULL);
        if (!options)
                return -ENOMEM;
        if (!*options) {
                rbd_warn(NULL, "no options provided");
                goto out_err;
        }

        spec = rbd_spec_alloc();
        if (!spec)
                goto out_mem;

        spec->pool_name = dup_token(&buf, NULL);
        if (!spec->pool_name)
                goto out_mem;
        if (!*spec->pool_name) {
                rbd_warn(NULL, "no pool name provided");
                goto out_err;
        }

        spec->image_name = dup_token(&buf, NULL);
        if (!spec->image_name)
                goto out_mem;
        if (!*spec->image_name) {
                rbd_warn(NULL, "no image name provided");
                goto out_err;
        }

        /*
         * Snapshot name is optional; default is to use "-"
         * (indicating the head/no snapshot).
         */
        len = next_token(&buf);
        if (!len) {
                buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
                len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
        } else if (len > RBD_MAX_SNAP_NAME_LEN) {
                ret = -ENAMETOOLONG;
                goto out_err;
        }
        snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
        if (!snap_name)
                goto out_mem;
        *(snap_name + len) = '\0';
        spec->snap_name = snap_name;

        /* Initialize all rbd options to the defaults */

        rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
        if (!rbd_opts)
                goto out_mem;

        rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;

        copts = ceph_parse_options(options, mon_addrs,
                                        mon_addrs + mon_addrs_size - 1,
                                        parse_rbd_opts_token, rbd_opts);
        if (IS_ERR(copts)) {
                ret = PTR_ERR(copts);
                goto out_err;
        }
        kfree(options);

        *ceph_opts = copts;
        *opts = rbd_opts;
        *rbd_spec = spec;

        return 0;
out_mem:
        ret = -ENOMEM;
out_err:
        kfree(rbd_opts);
        rbd_spec_put(spec);
        kfree(options);

        return ret;
}
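
/*
 * Example (hypothetical values): writing the line
 *
 *      1.2.3.4:6789 name=admin,secret=<base64 key> rbd myimage mysnap
 *
 * to /sys/bus/rbd/add parses into mon_addrs "1.2.3.4:6789", options
 * "name=admin,secret=<base64 key>", pool "rbd", image "myimage" and
 * snapshot "mysnap".
 */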

/*
 * Return pool id (>= 0) or a negative error code.
 */
static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
{
        u64 newest_epoch;
        unsigned long timeout = rbdc->client->options->mount_timeout * HZ;
        int tries = 0;
        int ret;

again:
        ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
        if (ret == -ENOENT && tries++ < 1) {
                ret = ceph_monc_do_get_version(&rbdc->client->monc, "osdmap",
                                               &newest_epoch);
                if (ret < 0)
                        return ret;

                if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
                        ceph_monc_request_next_osdmap(&rbdc->client->monc);
                        (void) ceph_monc_wait_osdmap(&rbdc->client->monc,
                                                     newest_epoch, timeout);
                        goto again;
                } else {
                        /* the osdmap we have is new enough */
                        return -ENOENT;
                }
        }

        return ret;
}

/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
        int ret;
        size_t size;
        char *object_name;
        void *response;
        char *image_id;

        /*
         * When probing a parent image, the image id is already
         * known (and the image name likely is not).  There's no
         * need to fetch the image id again in this case.  We
         * do still need to set the image format though.
         */
        if (rbd_dev->spec->image_id) {
                rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;

                return 0;
        }

        /*
         * First, see if the format 2 image id file exists, and if
         * so, get the image's persistent id from it.
         */
        size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
        object_name = kmalloc(size, GFP_NOIO);
        if (!object_name)
                return -ENOMEM;
        sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
        dout("rbd id object name is %s\n", object_name);

        /* Response will be an encoded string, which includes a length */

        size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
        response = kzalloc(size, GFP_NOIO);
        if (!response) {
                ret = -ENOMEM;
                goto out;
        }

        /* If it doesn't exist we'll assume it's a format 1 image */

        ret = rbd_obj_method_sync(rbd_dev, object_name,
                                "rbd", "get_id", NULL, 0,
                                response, RBD_IMAGE_ID_LEN_MAX);
        dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
        if (ret == -ENOENT) {
                image_id = kstrdup("", GFP_KERNEL);
                ret = image_id ? 0 : -ENOMEM;
                if (!ret)
                        rbd_dev->image_format = 1;
        } else if (ret > sizeof (__le32)) {
                void *p = response;

                image_id = ceph_extract_encoded_string(&p, p + ret,
                                                NULL, GFP_NOIO);
                ret = PTR_ERR_OR_ZERO(image_id);
                if (!ret)
                        rbd_dev->image_format = 2;
        } else {
                ret = -EINVAL;
        }

        if (!ret) {
                rbd_dev->spec->image_id = image_id;
                dout("image_id is %s\n", image_id);
        }
out:
        kfree(response);
        kfree(object_name);

        return ret;
}
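
/*
 * Example (assuming the RBD_ID_PREFIX of "rbd_id." from rbd_types.h):
 * for a format 2 image named "myimage", the id is read from the
 * "rbd_id.myimage" object; if that object is absent the image is
 * treated as format 1.
 */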

/*
 * Undo whatever state changes are made by v1 or v2 header info
 * routines.
 */
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
        struct rbd_image_header *header;

        /* Drop parent reference unless it's already been done (or none) */

        if (rbd_dev->parent_overlap)
                rbd_dev_parent_put(rbd_dev);

        /* Free dynamic fields from the header, then zero it out */

        header = &rbd_dev->header;
        ceph_put_snap_context(header->snapc);
        kfree(header->snap_sizes);
        kfree(header->snap_names);
        kfree(header->object_prefix);
        memset(header, 0, sizeof (*header));
}

static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
        int ret;

        ret = rbd_dev_v2_object_prefix(rbd_dev);
        if (ret)
                goto out_err;

        /*
         * Get and check the features for the image.  Currently the
         * features are assumed to never change.
         */
        ret = rbd_dev_v2_features(rbd_dev);
        if (ret)
                goto out_err;

        /* If the image supports fancy striping, get its parameters */

        if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
                ret = rbd_dev_v2_striping_info(rbd_dev);
                if (ret < 0)
                        goto out_err;
        }

        /* No support for crypto and compression type format 2 images */

        return 0;
out_err:
        rbd_dev->header.features = 0;
        kfree(rbd_dev->header.object_prefix);
        rbd_dev->header.object_prefix = NULL;

        return ret;
}

static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
{
        struct rbd_device *parent = NULL;
        struct rbd_spec *parent_spec;
        struct rbd_client *rbdc;
        int ret;

        if (!rbd_dev->parent_spec)
                return 0;
        /*
         * We need to pass a reference to the client and the parent
         * spec when creating the parent rbd_dev.  Images related by
         * parent/child relationships always share both.
         */
        parent_spec = rbd_spec_get(rbd_dev->parent_spec);
        rbdc = __rbd_get_client(rbd_dev->rbd_client);

        ret = -ENOMEM;
        parent = rbd_dev_create(rbdc, parent_spec);
        if (!parent)
                goto out_err;

        ret = rbd_dev_image_probe(parent, false);
        if (ret < 0)
                goto out_err;
        rbd_dev->parent = parent;
        atomic_set(&rbd_dev->parent_ref, 1);

        return 0;
out_err:
        if (parent) {
                rbd_dev_unparent(rbd_dev);
                kfree(rbd_dev->header_name);
                rbd_dev_destroy(parent);
        } else {
                rbd_put_client(rbdc);
                rbd_spec_put(parent_spec);
        }

        return ret;
}

static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
        int ret;

        /* Get an id and fill in device name. */

        ret = rbd_dev_id_get(rbd_dev);
        if (ret)
                return ret;

        BUILD_BUG_ON(DEV_NAME_LEN
                        < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
        sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);

        /* Record our major and minor device numbers. */

        if (!single_major) {
                ret = register_blkdev(0, rbd_dev->name);
                if (ret < 0)
                        goto err_out_id;
                rbd_dev->major = ret;
                rbd_dev->minor = 0;
        } else {
                rbd_dev->major = rbd_major;
                rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
        }

        /* Set up the blkdev mapping. */

        ret = rbd_init_disk(rbd_dev);
        if (ret)
                goto err_out_blkdev;

        ret = rbd_dev_mapping_set(rbd_dev);
        if (ret)
                goto err_out_disk;

        set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
        set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);

        ret = rbd_bus_add_dev(rbd_dev);
        if (ret)
                goto err_out_mapping;

        /* Everything's ready.  Announce the disk to the world. */

        set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
        add_disk(rbd_dev->disk);

        pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
                (unsigned long long) rbd_dev->mapping.size);

        return ret;

err_out_mapping:
        rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
        rbd_free_disk(rbd_dev);
err_out_blkdev:
        if (!single_major)
                unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
        rbd_dev_id_put(rbd_dev);
        rbd_dev_mapping_clear(rbd_dev);

        return ret;
}

static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
        struct rbd_spec *spec = rbd_dev->spec;
        size_t size;

        /* Record the header object name for this rbd image. */

        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

        if (rbd_dev->image_format == 1)
                size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
        else
                size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);

        rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
        if (!rbd_dev->header_name)
                return -ENOMEM;

        if (rbd_dev->image_format == 1)
                sprintf(rbd_dev->header_name, "%s%s",
                        spec->image_name, RBD_SUFFIX);
        else
                sprintf(rbd_dev->header_name, "%s%s",
                        RBD_HEADER_PREFIX, spec->image_id);

        return 0;
}
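
/*
 * Example (assuming the rbd_types.h values RBD_SUFFIX ".rbd" and
 * RBD_HEADER_PREFIX "rbd_header."): a format 1 image named "myimage"
 * gets header object "myimage.rbd", while a format 2 image with
 * image id "1234abcd" gets "rbd_header.1234abcd".
 */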

static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
        rbd_dev_unprobe(rbd_dev);
        kfree(rbd_dev->header_name);
        rbd_dev->header_name = NULL;
        rbd_dev->image_format = 0;
        kfree(rbd_dev->spec->image_id);
        rbd_dev->spec->image_id = NULL;

        rbd_dev_destroy(rbd_dev);
}

/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
{
        int ret;

        /*
         * Get the id from the image id object.  Unless there's an
         * error, rbd_dev->spec->image_id will be filled in with
         * a dynamically-allocated string, and rbd_dev->image_format
         * will be set to either 1 or 2.
         */
        ret = rbd_dev_image_id(rbd_dev);
        if (ret)
                return ret;
        rbd_assert(rbd_dev->spec->image_id);
        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

        ret = rbd_dev_header_name(rbd_dev);
        if (ret)
                goto err_out_format;

        if (mapping) {
                ret = rbd_dev_header_watch_sync(rbd_dev);
                if (ret)
                        goto out_header_name;
        }

        if (rbd_dev->image_format == 1)
                ret = rbd_dev_v1_header_info(rbd_dev);
        else
                ret = rbd_dev_v2_header_info(rbd_dev);
        if (ret)
                goto err_out_watch;

        ret = rbd_dev_spec_update(rbd_dev);
        if (ret)
                goto err_out_probe;

        ret = rbd_dev_probe_parent(rbd_dev);
        if (ret)
                goto err_out_probe;

        dout("discovered format %u image, header name is %s\n",
                rbd_dev->image_format, rbd_dev->header_name);

        return 0;

err_out_probe:
        rbd_dev_unprobe(rbd_dev);
err_out_watch:
        if (mapping)
                rbd_dev_header_unwatch_sync(rbd_dev);
out_header_name:
        kfree(rbd_dev->header_name);
        rbd_dev->header_name = NULL;
err_out_format:
        rbd_dev->image_format = 0;
        kfree(rbd_dev->spec->image_id);
        rbd_dev->spec->image_id = NULL;

        dout("probe failed, returning %d\n", ret);

        return ret;
}

static ssize_t do_rbd_add(struct bus_type *bus,
                          const char *buf,
                          size_t count)
{
        struct rbd_device *rbd_dev = NULL;
        struct ceph_options *ceph_opts = NULL;
        struct rbd_options *rbd_opts = NULL;
        struct rbd_spec *spec = NULL;
        struct rbd_client *rbdc;
        bool read_only;
        int rc = -ENOMEM;

        if (!try_module_get(THIS_MODULE))
                return -ENODEV;

        /* parse add command */
        rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
        if (rc < 0)
                goto err_out_module;
        read_only = rbd_opts->read_only;
        kfree(rbd_opts);
        rbd_opts = NULL;        /* done with this */

        rbdc = rbd_get_client(ceph_opts);
        if (IS_ERR(rbdc)) {
                rc = PTR_ERR(rbdc);
                goto err_out_args;
        }

        /* pick the pool */
        rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
        if (rc < 0)
                goto err_out_client;
        spec->pool_id = (u64)rc;

        /* The ceph file layout needs to fit pool id in 32 bits */

        if (spec->pool_id > (u64)U32_MAX) {
                rbd_warn(NULL, "pool id too large (%llu > %u)\n",
                                (unsigned long long)spec->pool_id, U32_MAX);
                rc = -EIO;
                goto err_out_client;
        }

        rbd_dev = rbd_dev_create(rbdc, spec);
        if (!rbd_dev)
                goto err_out_client;
        rbdc = NULL;            /* rbd_dev now owns this */
        spec = NULL;            /* rbd_dev now owns this */

        rc = rbd_dev_image_probe(rbd_dev, true);
        if (rc < 0)
                goto err_out_rbd_dev;

        /* If we are mapping a snapshot it must be marked read-only */

        if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
                read_only = true;
        rbd_dev->mapping.read_only = read_only;

        rc = rbd_dev_device_setup(rbd_dev);
        if (rc) {
                /*
                 * rbd_dev_header_unwatch_sync() can't be moved into
                 * rbd_dev_image_release() without refactoring, see
                 * commit 1f3ef78861ac.
                 */
                rbd_dev_header_unwatch_sync(rbd_dev);
                rbd_dev_image_release(rbd_dev);
                goto err_out_module;
        }

        return count;

err_out_rbd_dev:
        rbd_dev_destroy(rbd_dev);
err_out_client:
        rbd_put_client(rbdc);
err_out_args:
        rbd_spec_put(spec);
err_out_module:
        module_put(THIS_MODULE);

        dout("Error adding device %s\n", buf);

        return (ssize_t)rc;
}

static ssize_t rbd_add(struct bus_type *bus,
                       const char *buf,
                       size_t count)
{
        if (single_major)
                return -EINVAL;

        return do_rbd_add(bus, buf, count);
}

static ssize_t rbd_add_single_major(struct bus_type *bus,
                                    const char *buf,
                                    size_t count)
{
        return do_rbd_add(bus, buf, count);
}
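
/*
 * Example usage (hypothetical values), per
 * Documentation/ABI/testing/sysfs-bus-rbd:
 *
 *      # echo "1.2.3.4:6789 name=admin rbd myimage" > /sys/bus/rbd/add
 *
 * With single_major enabled, /sys/bus/rbd/add_single_major is used
 * instead.
 */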

static void rbd_dev_device_release(struct device *dev)
{
        struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

        rbd_free_disk(rbd_dev);
        clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
        rbd_dev_mapping_clear(rbd_dev);
        if (!single_major)
                unregister_blkdev(rbd_dev->major, rbd_dev->name);
        rbd_dev_id_put(rbd_dev);
        rbd_dev_mapping_clear(rbd_dev);
}

static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
        while (rbd_dev->parent) {
                struct rbd_device *first = rbd_dev;
                struct rbd_device *second = first->parent;
                struct rbd_device *third;

                /*
                 * Follow to the parent with no grandparent and
                 * remove it.
                 */
                while (second && (third = second->parent)) {
                        first = second;
                        second = third;
                }
                rbd_assert(second);
                rbd_dev_image_release(second);
                first->parent = NULL;
                first->parent_overlap = 0;

                rbd_assert(first->parent_spec);
                rbd_spec_put(first->parent_spec);
                first->parent_spec = NULL;
        }
}
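
/*
 * Illustrative walk-through: for a chain base <- mid <- child (child
 * being the mapped device), the first pass of the loop above releases
 * "base" and the second releases "mid", so the deepest ancestor is
 * always torn down first.
 */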

static ssize_t do_rbd_remove(struct bus_type *bus,
                             const char *buf,
                             size_t count)
{
        struct rbd_device *rbd_dev = NULL;
        struct list_head *tmp;
        int dev_id;
        unsigned long ul;
        bool already = false;
        int ret;

        ret = kstrtoul(buf, 10, &ul);
        if (ret)
                return ret;

        /* convert to int; abort if we lost anything in the conversion */
        dev_id = (int)ul;
        if (dev_id != ul)
                return -EINVAL;

        ret = -ENOENT;
        spin_lock(&rbd_dev_list_lock);
        list_for_each(tmp, &rbd_dev_list) {
                rbd_dev = list_entry(tmp, struct rbd_device, node);
                if (rbd_dev->dev_id == dev_id) {
                        ret = 0;
                        break;
                }
        }
        if (!ret) {
                spin_lock_irq(&rbd_dev->lock);
                if (rbd_dev->open_count)
                        ret = -EBUSY;
                else
                        already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
                                                        &rbd_dev->flags);
                spin_unlock_irq(&rbd_dev->lock);
        }
        spin_unlock(&rbd_dev_list_lock);
        if (ret < 0 || already)
                return ret;

        rbd_dev_header_unwatch_sync(rbd_dev);
        /*
         * flush remaining watch callbacks - these must be complete
         * before the osd_client is shutdown
         */
        dout("%s: flushing notifies", __func__);
        ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);

        /*
         * Don't free anything from rbd_dev->disk until after all
         * notifies are completely processed.  Otherwise
         * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
         * in a potential use after free of rbd_dev->disk or rbd_dev.
         */
        rbd_bus_del_dev(rbd_dev);
        rbd_dev_image_release(rbd_dev);
        module_put(THIS_MODULE);

        return count;
}

static ssize_t rbd_remove(struct bus_type *bus,
                          const char *buf,
                          size_t count)
{
        if (single_major)
                return -EINVAL;

        return do_rbd_remove(bus, buf, count);
}

static ssize_t rbd_remove_single_major(struct bus_type *bus,
                                       const char *buf,
                                       size_t count)
{
        return do_rbd_remove(bus, buf, count);
}
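
/*
 * Example usage (hypothetical device id 2):
 *
 *      # echo 2 > /sys/bus/rbd/remove
 *
 * With single_major enabled, /sys/bus/rbd/remove_single_major is used
 * instead.
 */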

/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
        int ret;

        ret = device_register(&rbd_root_dev);
        if (ret < 0)
                return ret;

        ret = bus_register(&rbd_bus_type);
        if (ret < 0)
                device_unregister(&rbd_root_dev);

        return ret;
}

static void rbd_sysfs_cleanup(void)
{
        bus_unregister(&rbd_bus_type);
        device_unregister(&rbd_root_dev);
}

static int rbd_slab_init(void)
{
        rbd_assert(!rbd_img_request_cache);
        rbd_img_request_cache = kmem_cache_create("rbd_img_request",
                                        sizeof (struct rbd_img_request),
                                        __alignof__(struct rbd_img_request),
                                        0, NULL);
        if (!rbd_img_request_cache)
                return -ENOMEM;

        rbd_assert(!rbd_obj_request_cache);
        rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
                                        sizeof (struct rbd_obj_request),
                                        __alignof__(struct rbd_obj_request),
                                        0, NULL);
        if (!rbd_obj_request_cache)
                goto out_err;

        rbd_assert(!rbd_segment_name_cache);
        rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
                                        CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
        if (rbd_segment_name_cache)
                return 0;
out_err:
        if (rbd_obj_request_cache) {
                kmem_cache_destroy(rbd_obj_request_cache);
                rbd_obj_request_cache = NULL;
        }

        kmem_cache_destroy(rbd_img_request_cache);
        rbd_img_request_cache = NULL;

        return -ENOMEM;
}

static void rbd_slab_exit(void)
{
        rbd_assert(rbd_segment_name_cache);
        kmem_cache_destroy(rbd_segment_name_cache);
        rbd_segment_name_cache = NULL;

        rbd_assert(rbd_obj_request_cache);
        kmem_cache_destroy(rbd_obj_request_cache);
        rbd_obj_request_cache = NULL;

        rbd_assert(rbd_img_request_cache);
        kmem_cache_destroy(rbd_img_request_cache);
        rbd_img_request_cache = NULL;
}

static int __init rbd_init(void)
{
        int rc;

        if (!libceph_compatible(NULL)) {
                rbd_warn(NULL, "libceph incompatibility (quitting)");
                return -EINVAL;
        }

        rc = rbd_slab_init();
        if (rc)
                return rc;

        if (single_major) {
                rbd_major = register_blkdev(0, RBD_DRV_NAME);
                if (rbd_major < 0) {
                        rc = rbd_major;
                        goto err_out_slab;
                }
        }

        rc = rbd_sysfs_init();
        if (rc)
                goto err_out_blkdev;

        if (single_major)
                pr_info("loaded (major %d)\n", rbd_major);
        else
                pr_info("loaded\n");

        return 0;

err_out_blkdev:
        if (single_major)
                unregister_blkdev(rbd_major, RBD_DRV_NAME);
err_out_slab:
        rbd_slab_exit();
        return rc;
}

static void __exit rbd_exit(void)
{
        ida_destroy(&rbd_dev_id_ida);
        rbd_sysfs_cleanup();
        if (single_major)
                unregister_blkdev(rbd_major, RBD_DRV_NAME);
        rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
MODULE_LICENSE("GPL");