/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

                 Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */
/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define SECTOR_SHIFT	9
#define SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
        unsigned int counter;

        counter = (unsigned int)__atomic_add_unless(v, 1, 0);
        if (counter <= (unsigned int)INT_MAX)
                return (int)counter;

        atomic_dec(v);

        return -EINVAL;
}
/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
        int counter;

        counter = atomic_dec_return(v);
        if (counter >= 0)
                return counter;

        atomic_inc(v);

        return -EINVAL;
}
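/*
 * A usage sketch of the pair above (assuming __atomic_add_unless()
 * returns the counter's previous value): once a counter has been
 * driven to 0, a later atomic_inc_return_safe() leaves it at 0 and
 * returns 0, so a "dead" count can never be revived.  This is the
 * property rbd_dev_parent_get()/rbd_dev_parent_put() below rely on
 * for parent_ref.
 */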
#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN \
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64
/* Feature bits */

#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
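/*
 * Arithmetic behind MAX_INT_FORMAT_WIDTH: each byte of an int
 * contributes log10(256), just under 2.5, decimal digits, so
 * (5 * sizeof (int)) / 2 digits always suffice; the + 1 leaves room
 * for a leading minus sign.  For a 32-bit int that gives 11
 * characters, enough for "-2147483648".
 */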
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;
	u64 stripe_unit;
	u64 stripe_count;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};
/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};
struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};
struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request	*obj_request;	/* STAT op */
		struct {
			struct rbd_img_request	*img_request;
			u64			img_offset;
			/* links for img_request->obj_requests list */
			struct list_head	links;
		};
	};
	u32			which;		/* posn image request list */

	enum obj_request_type	type;
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};
enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
};

struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};
#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
struct rbd_mapping {
	u64			size;
	u64			features;
	bool			read_only;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;

	char			*header_name;

	struct ceph_file_layout	layout;

	struct ceph_osd_event	*watch_event;
	struct rbd_obj_request	*watch_request;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* protects updating the header */
	struct rw_semaphore	header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};
/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};
static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;
static struct kmem_cache	*rbd_segment_name_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

/*
 * Default to false for now, as single-major requires >= 0.75 version of
 * userspace rbd utility.
 */
static bool single_major = false;
module_param(single_major, bool, S_IRUGO);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");
static int rbd_img_request_submit(struct rbd_img_request *img_request);

static void rbd_dev_device_release(struct device *dev);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
static void rbd_spec_put(struct rbd_spec *spec);
static int rbd_dev_id_to_minor(int dev_id)
{
        return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
        return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
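/*
 * Worked example: with RBD_SINGLE_MAJOR_PART_SHIFT == 4, device id 1
 * owns minors 16..31 -- minor 16 is the whole device and minors
 * 17..31 its partitions -- and minor_to_rbd_dev_id() maps any of them
 * back to dev_id 1 by discarding the low 4 partition bits.
 */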
static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);

static struct attribute *rbd_bus_attrs[] = {
        &bus_attr_add.attr,
        &bus_attr_remove.attr,
        &bus_attr_add_single_major.attr,
        &bus_attr_remove_single_major.attr,
        NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
                                  struct attribute *attr, int index)
{
        if (!single_major &&
            (attr == &bus_attr_add_single_major.attr ||
             attr == &bus_attr_remove_single_major.attr))
                return 0;

        return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
        .attrs = rbd_bus_attrs,
        .is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
        .name		= "rbd",
        .bus_groups	= rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
        .init_name =    "rbd",
        .release =      rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;

        if (!rbd_dev)
                printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
        else if (rbd_dev->disk)
                printk(KERN_WARNING "%s: %s: %pV\n",
                        RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
        else if (rbd_dev->spec && rbd_dev->spec->image_name)
                printk(KERN_WARNING "%s: image %s: %pV\n",
                        RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
        else if (rbd_dev->spec && rbd_dev->spec->image_id)
                printk(KERN_WARNING "%s: id %s: %pV\n",
                        RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
        else    /* punt */
                printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
                        RBD_DRV_NAME, rbd_dev, &vaf);
        va_end(args);
}
#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
                                        u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
                u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
                u64 *features);
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
        struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
        bool removing = false;

        if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
                return -EROFS;

        spin_lock_irq(&rbd_dev->lock);
        if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
                removing = true;
        else
                rbd_dev->open_count++;
        spin_unlock_irq(&rbd_dev->lock);
        if (removing)
                return -ENOENT;

        (void) get_device(&rbd_dev->dev);

        return 0;
}
static void rbd_release(struct gendisk *disk, fmode_t mode)
{
        struct rbd_device *rbd_dev = disk->private_data;
        unsigned long open_count_before;

        spin_lock_irq(&rbd_dev->lock);
        open_count_before = rbd_dev->open_count--;
        spin_unlock_irq(&rbd_dev->lock);
        rbd_assert(open_count_before > 0);

        put_device(&rbd_dev->dev);
}
static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
        int ret = 0;
        int val;
        bool ro;
        bool ro_changed = false;

        /* get_user() may sleep, so call it before taking rbd_dev->lock */
        if (get_user(val, (int __user *)(arg)))
                return -EFAULT;

        ro = val ? true : false;
        /* A mapped snapshot can't be written to */
        if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
                return -EROFS;

        spin_lock_irq(&rbd_dev->lock);
        /* prevent others from opening this device */
        if (rbd_dev->open_count > 1) {
                ret = -EBUSY;
                goto out;
        }

        if (rbd_dev->mapping.read_only != ro) {
                rbd_dev->mapping.read_only = ro;
                ro_changed = true;
        }

out:
        spin_unlock_irq(&rbd_dev->lock);
        /* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
        if (ret == 0 && ro_changed)
                set_disk_ro(rbd_dev->disk, ro ? 1 : 0);

        return ret;
}
static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
                        unsigned int cmd, unsigned long arg)
{
        struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
        int ret = 0;

        switch (cmd) {
        case BLKROSET:
                ret = rbd_ioctl_set_ro(rbd_dev, arg);
                break;
        default:
                ret = -ENOTTY;
        }

        return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
                                unsigned int cmd, unsigned long arg)
{
        return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static const struct block_device_operations rbd_bd_ops = {
        .owner			= THIS_MODULE,
        .open			= rbd_open,
        .release		= rbd_release,
        .ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl		= rbd_compat_ioctl,
#endif
};
/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
        struct rbd_client *rbdc;
        int ret = -ENOMEM;

        dout("%s:\n", __func__);
        rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
        if (!rbdc)
                goto out_opt;

        kref_init(&rbdc->kref);
        INIT_LIST_HEAD(&rbdc->node);

        rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
        if (IS_ERR(rbdc->client))
                goto out_rbdc;
        ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

        ret = ceph_open_session(rbdc->client);
        if (ret < 0)
                goto out_client;

        spin_lock(&rbd_client_list_lock);
        list_add_tail(&rbdc->node, &rbd_client_list);
        spin_unlock(&rbd_client_list_lock);

        dout("%s: rbdc %p\n", __func__, rbdc);

        return rbdc;
out_client:
        ceph_destroy_client(rbdc->client);
out_rbdc:
        kfree(rbdc);
out_opt:
        if (ceph_opts)
                ceph_destroy_options(ceph_opts);
        dout("%s: error %d\n", __func__, ret);

        return ERR_PTR(ret);
}
static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
        kref_get(&rbdc->kref);

        return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
        struct rbd_client *client_node;
        bool found = false;

        if (ceph_opts->flags & CEPH_OPT_NOSHARE)
                return NULL;

        spin_lock(&rbd_client_list_lock);
        list_for_each_entry(client_node, &rbd_client_list, node) {
                if (!ceph_compare_options(ceph_opts, client_node->client)) {
                        __rbd_get_client(client_node);

                        found = true;
                        break;
                }
        }
        spin_unlock(&rbd_client_list_lock);

        return found ? client_node : NULL;
}
/*
 * mount options
 */
enum {
        Opt_last_int,
        /* int args above */
        Opt_last_string,
        /* string args above */
        Opt_read_only,
        Opt_read_write,
        /* Boolean args above */
        Opt_last_bool,
};

static match_table_t rbd_opts_tokens = {
        /* int args above */
        /* string args above */
        {Opt_read_only, "read_only"},
        {Opt_read_only, "ro"},		/* Alternate spelling */
        {Opt_read_write, "read_write"},
        {Opt_read_write, "rw"},		/* Alternate spelling */
        /* Boolean args above */
        {-1, NULL}
};

struct rbd_options {
        bool	read_only;
};

#define RBD_READ_ONLY_DEFAULT	false
static int parse_rbd_opts_token(char *c, void *private)
{
        struct rbd_options *rbd_opts = private;
        substring_t argstr[MAX_OPT_ARGS];
        int token, intval, ret;

        token = match_token(c, rbd_opts_tokens, argstr);
        if (token < 0)
                return -EINVAL;

        if (token < Opt_last_int) {
                ret = match_int(&argstr[0], &intval);
                if (ret < 0) {
                        pr_err("bad mount option arg (not int) "
                               "at '%s'\n", c);
                        return ret;
                }
                dout("got int token %d val %d\n", token, intval);
        } else if (token > Opt_last_int && token < Opt_last_string) {
                dout("got string token %d val %s\n", token,
                     argstr[0].from);
        } else if (token > Opt_last_string && token < Opt_last_bool) {
                dout("got Boolean token %d\n", token);
        } else {
                dout("got token %d\n", token);
        }

        switch (token) {
        case Opt_read_only:
                rbd_opts->read_only = true;
                break;
        case Opt_read_write:
                rbd_opts->read_only = false;
                break;
        default:
                rbd_assert(false);
                break;
        }
        return 0;
}
/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
        struct rbd_client *rbdc;

        mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
        rbdc = rbd_client_find(ceph_opts);
        if (rbdc)	/* using an existing client */
                ceph_destroy_options(ceph_opts);
        else
                rbdc = rbd_client_create(ceph_opts);
        mutex_unlock(&client_mutex);

        return rbdc;
}
/*
 * Destroy ceph client
 *
 * Caller must hold rbd_client_list_lock.
 */
static void rbd_client_release(struct kref *kref)
{
        struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

        dout("%s: rbdc %p\n", __func__, rbdc);
        spin_lock(&rbd_client_list_lock);
        list_del(&rbdc->node);
        spin_unlock(&rbd_client_list_lock);

        ceph_destroy_client(rbdc->client);
        kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
        if (rbdc)
                kref_put(&rbdc->kref, rbd_client_release);
}

static bool rbd_image_format_valid(u32 image_format)
{
        return image_format == 1 || image_format == 2;
}
static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
        size_t size;
        u32 snap_count;

        /* The header has to start with the magic rbd header text */
        if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
                return false;

        /* The bio layer requires at least sector-sized I/O */

        if (ondisk->options.order < SECTOR_SHIFT)
                return false;

        /* If we use u64 in a few spots we may be able to loosen this */

        if (ondisk->options.order > 8 * sizeof (int) - 1)
                return false;

        /*
         * The size of a snapshot header has to fit in a size_t, and
         * that limits the number of snapshots.
         */
        snap_count = le32_to_cpu(ondisk->snap_count);
        size = SIZE_MAX - sizeof (struct ceph_snap_context);
        if (snap_count > size / sizeof (__le64))
                return false;

        /*
         * Not only that, but the size of the entire snapshot
         * header must also be representable in a size_t.
         */
        size -= snap_count * sizeof (__le64);
        if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
                return false;

        return true;
}
/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
                                 struct rbd_image_header_ondisk *ondisk)
{
        struct rbd_image_header *header = &rbd_dev->header;
        bool first_time = header->object_prefix == NULL;
        struct ceph_snap_context *snapc;
        char *object_prefix = NULL;
        char *snap_names = NULL;
        u64 *snap_sizes = NULL;
        u32 snap_count;
        size_t size;
        int ret = -ENOMEM;
        u32 i;

        /* Allocate this now to avoid having to handle failure below */

        if (first_time) {
                size_t len;

                len = strnlen(ondisk->object_prefix,
                                sizeof (ondisk->object_prefix));
                object_prefix = kmalloc(len + 1, GFP_KERNEL);
                if (!object_prefix)
                        return -ENOMEM;
                memcpy(object_prefix, ondisk->object_prefix, len);
                object_prefix[len] = '\0';
        }

        /* Allocate the snapshot context and fill it in */

        snap_count = le32_to_cpu(ondisk->snap_count);
        snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
        if (!snapc)
                goto out_err;
        snapc->seq = le64_to_cpu(ondisk->snap_seq);
        if (snap_count) {
                struct rbd_image_snap_ondisk *snaps;
                u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

                /* We'll keep a copy of the snapshot names... */

                if (snap_names_len > (u64)SIZE_MAX)
                        goto out_2big;
                snap_names = kmalloc(snap_names_len, GFP_KERNEL);
                if (!snap_names)
                        goto out_err;

                /* ...as well as the array of their sizes. */

                size = snap_count * sizeof (*header->snap_sizes);
                snap_sizes = kmalloc(size, GFP_KERNEL);
                if (!snap_sizes)
                        goto out_err;

                /*
                 * Copy the names, and fill in each snapshot's id
                 * and size.
                 *
                 * Note that rbd_dev_v1_header_info() guarantees the
                 * ondisk buffer we're working with has
                 * snap_names_len bytes beyond the end of the
                 * snapshot id array, so this memcpy() is safe.
                 */
                memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
                snaps = ondisk->snaps;
                for (i = 0; i < snap_count; i++) {
                        snapc->snaps[i] = le64_to_cpu(snaps[i].id);
                        snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
                }
        }

        /* We won't fail any more, fill in the header */

        if (first_time) {
                header->object_prefix = object_prefix;
                header->obj_order = ondisk->options.order;
                header->crypt_type = ondisk->options.crypt_type;
                header->comp_type = ondisk->options.comp_type;
                /* The rest aren't used for format 1 images */
                header->stripe_unit = 0;
                header->stripe_count = 0;
                header->features = 0;
        } else {
                ceph_put_snap_context(header->snapc);
                kfree(header->snap_names);
                kfree(header->snap_sizes);
        }

        /* The remaining fields always get updated (when we refresh) */

        header->image_size = le64_to_cpu(ondisk->image_size);
        header->snapc = snapc;
        header->snap_names = snap_names;
        header->snap_sizes = snap_sizes;

        /* Make sure mapping size is consistent with header info */

        if (rbd_dev->spec->snap_id == CEPH_NOSNAP || first_time)
                if (rbd_dev->mapping.size != header->image_size)
                        rbd_dev->mapping.size = header->image_size;

        return 0;
out_2big:
        ret = -EIO;
out_err:
        kfree(snap_sizes);
        kfree(snap_names);
        ceph_put_snap_context(snapc);
        kfree(object_prefix);

        return ret;
}
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
        const char *snap_name;

        rbd_assert(which < rbd_dev->header.snapc->num_snaps);

        /* Skip over names until we find the one we are looking for */

        snap_name = rbd_dev->header.snap_names;
        while (which--)
                snap_name += strlen(snap_name) + 1;

        return kstrdup(snap_name, GFP_KERNEL);
}
/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
        u64 snap_id1 = *(u64 *)s1;
        u64 snap_id2 = *(u64 *)s2;

        if (snap_id1 < snap_id2)
                return 1;
        return snap_id1 == snap_id2 ? 0 : -1;
}
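/*
 * Example: for the descending array { 12, 7, 3 }, bsearch() with this
 * comparator finds 7 at index 1; a conventional ascending comparator
 * would never find it, since bsearch() assumes the array is sorted
 * consistently with the comparison function it is given.
 */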
/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
        struct ceph_snap_context *snapc = rbd_dev->header.snapc;
        u64 *found;

        found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
                                sizeof (snap_id), snapid_compare_reverse);

        return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
                                        u64 snap_id)
{
        u32 which;
        const char *snap_name;

        which = rbd_dev_snap_index(rbd_dev, snap_id);
        if (which == BAD_SNAP_INDEX)
                return ERR_PTR(-ENOENT);

        snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
        return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
        if (snap_id == CEPH_NOSNAP)
                return RBD_SNAP_HEAD_NAME;

        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
        if (rbd_dev->image_format == 1)
                return rbd_dev_v1_snap_name(rbd_dev, snap_id);

        return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}
static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
                                u64 *snap_size)
{
        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
        if (snap_id == CEPH_NOSNAP) {
                *snap_size = rbd_dev->header.image_size;
        } else if (rbd_dev->image_format == 1) {
                u32 which;

                which = rbd_dev_snap_index(rbd_dev, snap_id);
                if (which == BAD_SNAP_INDEX)
                        return -ENOENT;

                *snap_size = rbd_dev->header.snap_sizes[which];
        } else {
                u64 size = 0;
                int ret;

                ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
                if (ret)
                        return ret;

                *snap_size = size;
        }
        return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
                        u64 *snap_features)
{
        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
        if (snap_id == CEPH_NOSNAP) {
                *snap_features = rbd_dev->header.features;
        } else if (rbd_dev->image_format == 1) {
                *snap_features = 0;	/* No features for format 1 */
        } else {
                u64 features = 0;
                int ret;

                ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
                if (ret)
                        return ret;

                *snap_features = features;
        }
        return 0;
}
static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
        u64 snap_id = rbd_dev->spec->snap_id;
        u64 size = 0;
        u64 features = 0;
        int ret;

        ret = rbd_snap_size(rbd_dev, snap_id, &size);
        if (ret)
                return ret;
        ret = rbd_snap_features(rbd_dev, snap_id, &features);
        if (ret)
                return ret;

        rbd_dev->mapping.size = size;
        rbd_dev->mapping.features = features;

        return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
        rbd_dev->mapping.size = 0;
        rbd_dev->mapping.features = 0;
}
static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
        char *name;
        u64 segment;
        int ret;
        char *name_format;

        name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
        if (!name)
                return NULL;
        segment = offset >> rbd_dev->header.obj_order;
        name_format = "%s.%012llx";
        if (rbd_dev->image_format == 2)
                name_format = "%s.%016llx";
        ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
                        rbd_dev->header.object_prefix, segment);
        if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
                pr_err("error formatting segment name for #%llu (%d)\n",
                        segment, ret);
                kmem_cache_free(rbd_segment_name_cache, name);
                name = NULL;
        }

        return name;
}

static void rbd_segment_name_free(const char *name)
{
        /* The explicit cast here is needed to drop the const qualifier */

        kmem_cache_free(rbd_segment_name_cache, (void *)name);
}
static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
        u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

        return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
                                u64 offset, u64 length)
{
        u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

        offset &= segment_size - 1;

        rbd_assert(length <= U64_MAX - offset);
        if (offset + length > segment_size)
                length = segment_size - offset;

        return length;
}
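/*
 * Worked example, assuming the default 4 MiB objects (obj_order 22):
 * an image offset of 0x500000 (5 MiB) falls in segment 1 at offset
 * 0x100000 within that object, and a 4 MiB request starting there is
 * clipped to 0x300000 bytes so that it ends at the object boundary.
 */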
/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
        return 1 << header->obj_order;
}
/*
 * bio helpers
 */

static void bio_chain_put(struct bio *chain)
{
        struct bio *tmp;

        while (chain) {
                tmp = chain;
                chain = chain->bi_next;
                bio_put(tmp);
        }
}
/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
        struct bio_vec bv;
        struct bvec_iter iter;
        unsigned long flags;
        void *buf;
        int pos = 0;

        while (chain) {
                bio_for_each_segment(bv, chain, iter) {
                        if (pos + bv.bv_len > start_ofs) {
                                int remainder = max(start_ofs - pos, 0);
                                buf = bvec_kmap_irq(&bv, &flags);
                                memset(buf + remainder, 0,
                                       bv.bv_len - remainder);
                                flush_dcache_page(bv.bv_page);
                                bvec_kunmap_irq(buf, &flags);
                        }
                        pos += bv.bv_len;
                }

                chain = chain->bi_next;
        }
}
/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
        struct page **page = &pages[offset >> PAGE_SHIFT];

        rbd_assert(end > offset);
        rbd_assert(end - offset <= (u64)SIZE_MAX);
        while (offset < end) {
                size_t page_offset;
                size_t length;
                unsigned long flags;
                void *kaddr;

                page_offset = offset & ~PAGE_MASK;
                length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
                local_irq_save(flags);
                kaddr = kmap_atomic(*page);
                memset(kaddr + page_offset, 0, length);
                flush_dcache_page(*page);
                kunmap_atomic(kaddr);
                local_irq_restore(flags);

                offset += length;
                page++;
        }
}
/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
                                        unsigned int offset,
                                        unsigned int len,
                                        gfp_t gfpmask)
{
        struct bio *bio;

        bio = bio_clone(bio_src, gfpmask);
        if (!bio)
                return NULL;	/* ENOMEM */

        bio_advance(bio, offset);
        bio->bi_iter.bi_size = len;

        return bio;
}
/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
                                        unsigned int *offset,
                                        unsigned int len,
                                        gfp_t gfpmask)
{
        struct bio *bi = *bio_src;
        unsigned int off = *offset;
        struct bio *chain = NULL;
        struct bio **end;

        /* Build up a chain of clone bios up to the limit */

        if (!bi || off >= bi->bi_iter.bi_size || !len)
                return NULL;		/* Nothing to clone */

        end = &chain;
        while (len) {
                unsigned int bi_size;
                struct bio *bio;

                if (!bi) {
                        rbd_warn(NULL, "bio_chain exhausted with %u left", len);
                        goto out_err;	/* EINVAL; ran out of bio's */
                }
                bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
                bio = bio_clone_range(bi, off, bi_size, gfpmask);
                if (!bio)
                        goto out_err;	/* ENOMEM */

                *end = bio;
                end = &bio->bi_next;

                off += bi_size;
                if (off == bi->bi_iter.bi_size) {
                        bi = bi->bi_next;
                        off = 0;
                }
                len -= bi_size;
        }
        *bio_src = bi;
        *offset = off;

        return chain;
out_err:
        bio_chain_put(chain);

        return NULL;
}
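/*
 * Because bio_src and offset are advanced past the cloned bytes,
 * successive calls carve consecutive byte ranges out of one source
 * chain; rbd_img_request_fill() below uses this to split a block
 * request at object boundaries without copying any data.
 */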
/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
        if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
                struct rbd_device *rbd_dev;

                rbd_dev = obj_request->img_request->rbd_dev;
                rbd_warn(rbd_dev, "obj_request %p already marked img_data\n",
                        obj_request);
        }
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
        smp_mb();
        return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
        if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
                struct rbd_device *rbd_dev = NULL;

                if (obj_request_img_data_test(obj_request))
                        rbd_dev = obj_request->img_request->rbd_dev;
                rbd_warn(rbd_dev, "obj_request %p already marked done\n",
                        obj_request);
        }
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
        smp_mb();
        return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}
/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the responses from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
                                bool exists)
{
        if (exists)
                set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
        set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
        smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
        smp_mb();
        return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
        smp_mb();
        return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}
static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
{
        struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;

        return obj_request->img_offset <
            round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
}
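/*
 * The overlap is rounded up to a whole object because an object that
 * only partially overlaps the parent still needs parent data for its
 * overlapping prefix; e.g. with 4 MiB objects and a 6 MiB overlap,
 * the object covering bytes 4-8 MiB (img_offset 4 MiB < 8 MiB) still
 * counts as overlapping.
 */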
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p (was %d)\n", __func__, obj_request,
                atomic_read(&obj_request->kref.refcount));
        kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
        rbd_assert(obj_request != NULL);
        dout("%s: obj %p (was %d)\n", __func__, obj_request,
                atomic_read(&obj_request->kref.refcount));
        kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
        dout("%s: img %p (was %d)\n", __func__, img_request,
             atomic_read(&img_request->kref.refcount));
        kref_get(&img_request->kref);
}

static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
        rbd_assert(img_request != NULL);
        dout("%s: img %p (was %d)\n", __func__, img_request,
                atomic_read(&img_request->kref.refcount));
        if (img_request_child_test(img_request))
                kref_put(&img_request->kref, rbd_parent_request_destroy);
        else
                kref_put(&img_request->kref, rbd_img_request_destroy);
}
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
                                        struct rbd_obj_request *obj_request)
{
        rbd_assert(obj_request->img_request == NULL);

        /* Image request now owns object's original reference */
        obj_request->img_request = img_request;
        obj_request->which = img_request->obj_request_count;
        rbd_assert(!obj_request_img_data_test(obj_request));
        obj_request_img_data_set(obj_request);
        rbd_assert(obj_request->which != BAD_WHICH);
        img_request->obj_request_count++;
        list_add_tail(&obj_request->links, &img_request->obj_requests);
        dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
                obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
                                        struct rbd_obj_request *obj_request)
{
        rbd_assert(obj_request->which != BAD_WHICH);

        dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
                obj_request->which);
        list_del(&obj_request->links);
        rbd_assert(img_request->obj_request_count > 0);
        img_request->obj_request_count--;
        rbd_assert(obj_request->which == img_request->obj_request_count);
        obj_request->which = BAD_WHICH;
        rbd_assert(obj_request_img_data_test(obj_request));
        rbd_assert(obj_request->img_request == img_request);
        obj_request->img_request = NULL;
        obj_request->callback = NULL;
        rbd_obj_request_put(obj_request);
}
static bool obj_request_type_valid(enum obj_request_type type)
{
        switch (type) {
        case OBJ_REQUEST_NODATA:
        case OBJ_REQUEST_BIO:
        case OBJ_REQUEST_PAGES:
                return true;
        default:
                return false;
        }
}

static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
                                struct rbd_obj_request *obj_request)
{
        dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);

        return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}
static void rbd_img_request_complete(struct rbd_img_request *img_request)
{

        dout("%s: img %p\n", __func__, img_request);

        /*
         * If no error occurred, compute the aggregate transfer
         * count for the image request.  We could instead use
         * atomic64_cmpxchg() to update it as each object request
         * completes; not clear which way is better off hand.
         */
        if (!img_request->result) {
                struct rbd_obj_request *obj_request;
                u64 xferred = 0;

                for_each_obj_request(img_request, obj_request)
                        xferred += obj_request->xferred;
                img_request->xferred = xferred;
        }

        if (img_request->callback)
                img_request->callback(img_request);
        else
                rbd_img_request_put(img_request);
}
/* Caller is responsible for rbd_obj_request_destroy(obj_request) */

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p\n", __func__, obj_request);

        return wait_for_completion_interruptible(&obj_request->completion);
}
/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
        set_bit(IMG_REQ_WRITE, &img_request->flags);
        smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
        smp_mb();
        return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
        set_bit(IMG_REQ_CHILD, &img_request->flags);
        smp_mb();
}

static void img_request_child_clear(struct rbd_img_request *img_request)
{
        clear_bit(IMG_REQ_CHILD, &img_request->flags);
        smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
        smp_mb();
        return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
        set_bit(IMG_REQ_LAYERED, &img_request->flags);
        smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
        clear_bit(IMG_REQ_LAYERED, &img_request->flags);
        smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
        smp_mb();
        return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}
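/*
 * Note on the helpers above: each set/clear is followed by smp_mb()
 * and each test is preceded by one, so a flag written on one CPU is
 * visible to a subsequent test on another without further locking.
 */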
static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
        u64 xferred = obj_request->xferred;
        u64 length = obj_request->length;

        dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
                obj_request, obj_request->img_request, obj_request->result,
                xferred, length);
        /*
         * ENOENT means a hole in the image.  We zero-fill the entire
         * length of the request.  A short read also implies zero-fill
         * to the end of the request.  An error requires the whole
         * length of the request to be reported finished with an error
         * to the block layer.  In each case we update the xferred
         * count to indicate the whole request was satisfied.
         */
        rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
        if (obj_request->result == -ENOENT) {
                if (obj_request->type == OBJ_REQUEST_BIO)
                        zero_bio_chain(obj_request->bio_list, 0);
                else
                        zero_pages(obj_request->pages, 0, length);
                obj_request->result = 0;
        } else if (xferred < length && !obj_request->result) {
                if (obj_request->type == OBJ_REQUEST_BIO)
                        zero_bio_chain(obj_request->bio_list, xferred);
                else
                        zero_pages(obj_request->pages, xferred, length);
        }
        obj_request->xferred = length;
        obj_request_done_set(obj_request);
}
static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p cb %p\n", __func__, obj_request,
                obj_request->callback);
        if (obj_request->callback)
                obj_request->callback(obj_request);
        else
                complete_all(&obj_request->completion);
}

static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p\n", __func__, obj_request);
        obj_request_done_set(obj_request);
}
static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request = NULL;
        struct rbd_device *rbd_dev = NULL;
        bool layered = false;

        if (obj_request_img_data_test(obj_request)) {
                img_request = obj_request->img_request;
                layered = img_request && img_request_layered_test(img_request);
                rbd_dev = img_request->rbd_dev;
        }

        dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
                obj_request, img_request, obj_request->result,
                obj_request->xferred, obj_request->length);
        if (layered && obj_request->result == -ENOENT &&
                        obj_request->img_offset < rbd_dev->parent_overlap)
                rbd_img_parent_read(obj_request);
        else if (img_request)
                rbd_img_obj_request_read_callback(obj_request);
        else
                obj_request_done_set(obj_request);
}
static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p result %d %llu\n", __func__, obj_request,
                obj_request->result, obj_request->length);
        /*
         * There is no such thing as a successful short write.  Set
         * it to our originally-requested length.
         */
        obj_request->xferred = obj_request->length;
        obj_request_done_set(obj_request);
}

/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p\n", __func__, obj_request);
        obj_request_done_set(obj_request);
}
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
                                struct ceph_msg *msg)
{
        struct rbd_obj_request *obj_request = osd_req->r_priv;
        u16 opcode;

        dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
        rbd_assert(osd_req == obj_request->osd_req);
        if (obj_request_img_data_test(obj_request)) {
                rbd_assert(obj_request->img_request);
                rbd_assert(obj_request->which != BAD_WHICH);
        } else {
                rbd_assert(obj_request->which == BAD_WHICH);
        }

        if (osd_req->r_result < 0)
                obj_request->result = osd_req->r_result;

        rbd_assert(osd_req->r_num_ops <= CEPH_OSD_MAX_OP);

        /*
         * We support a 64-bit length, but ultimately it has to be
         * passed to blk_end_request(), which takes an unsigned int.
         */
        obj_request->xferred = osd_req->r_reply_op_len[0];
        rbd_assert(obj_request->xferred < (u64)UINT_MAX);

        opcode = osd_req->r_ops[0].op;
        switch (opcode) {
        case CEPH_OSD_OP_READ:
                rbd_osd_read_callback(obj_request);
                break;
        case CEPH_OSD_OP_SETALLOCHINT:
                rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE);
                /* fall through */
        case CEPH_OSD_OP_WRITE:
                rbd_osd_write_callback(obj_request);
                break;
        case CEPH_OSD_OP_STAT:
                rbd_osd_stat_callback(obj_request);
                break;
        case CEPH_OSD_OP_CALL:
        case CEPH_OSD_OP_NOTIFY_ACK:
        case CEPH_OSD_OP_WATCH:
                rbd_osd_trivial_callback(obj_request);
                break;
        default:
                rbd_warn(NULL, "%s: unsupported op %hu\n",
                        obj_request->object_name, (unsigned short) opcode);
                break;
        }

        if (obj_request_done_test(obj_request))
                rbd_obj_request_complete(obj_request);
}
static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request = obj_request->img_request;
        struct ceph_osd_request *osd_req = obj_request->osd_req;
        u64 snap_id;

        rbd_assert(osd_req != NULL);

        snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
        ceph_osdc_build_request(osd_req, obj_request->offset,
                        NULL, snap_id, NULL);
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request = obj_request->img_request;
        struct ceph_osd_request *osd_req = obj_request->osd_req;
        struct ceph_snap_context *snapc;
        struct timespec mtime = CURRENT_TIME;

        rbd_assert(osd_req != NULL);

        snapc = img_request ? img_request->snapc : NULL;
        ceph_osdc_build_request(osd_req, obj_request->offset,
                        snapc, CEPH_NOSNAP, &mtime);
}
/*
 * Create an osd request.  A read request has one osd op (read).
 * A write request has either one (watch) or two (hint+write) osd ops.
 * (All rbd data writes are prefixed with an allocation hint op, but
 * technically osd watch is a write request, hence this distinction.)
 */
static struct ceph_osd_request *rbd_osd_req_create(
                                        struct rbd_device *rbd_dev,
                                        bool write_request,
                                        unsigned int num_ops,
                                        struct rbd_obj_request *obj_request)
{
        struct ceph_snap_context *snapc = NULL;
        struct ceph_osd_client *osdc;
        struct ceph_osd_request *osd_req;

        if (obj_request_img_data_test(obj_request)) {
                struct rbd_img_request *img_request = obj_request->img_request;

                rbd_assert(write_request ==
                                img_request_write_test(img_request));
                if (write_request)
                        snapc = img_request->snapc;
        }

        rbd_assert(num_ops == 1 || (write_request && num_ops == 2));

        /* Allocate and initialize the request, for the num_ops ops */

        osdc = &rbd_dev->rbd_client->client->osdc;
        osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
                                          GFP_ATOMIC);
        if (!osd_req)
                return NULL;	/* ENOMEM */

        if (write_request)
                osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
        else
                osd_req->r_flags = CEPH_OSD_FLAG_READ;

        osd_req->r_callback = rbd_osd_req_callback;
        osd_req->r_priv = obj_request;

        osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
        ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);

        return osd_req;
}
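/*
 * Resulting op layouts (per the rules above): a read request carries
 * [ READ ]; a data write carries [ SETALLOCHINT, WRITE ], which is
 * why rbd_osd_req_callback() treats a SETALLOCHINT in r_ops[0] as a
 * write completion; a watch request carries a single watch op.
 */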
/*
 * Create a copyup osd request based on the information in the
 * object request supplied.  A copyup request has three osd ops,
 * a copyup method call, a hint op, and a write op.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request;
        struct ceph_snap_context *snapc;
        struct rbd_device *rbd_dev;
        struct ceph_osd_client *osdc;
        struct ceph_osd_request *osd_req;

        rbd_assert(obj_request_img_data_test(obj_request));
        img_request = obj_request->img_request;
        rbd_assert(img_request);
        rbd_assert(img_request_write_test(img_request));

        /* Allocate and initialize the request, for the three ops */

        snapc = img_request->snapc;
        rbd_dev = img_request->rbd_dev;
        osdc = &rbd_dev->rbd_client->client->osdc;
        osd_req = ceph_osdc_alloc_request(osdc, snapc, 3, false, GFP_ATOMIC);
        if (!osd_req)
                return NULL;	/* ENOMEM */

        osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
        osd_req->r_callback = rbd_osd_req_callback;
        osd_req->r_priv = obj_request;

        osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
        ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);

        return osd_req;
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
        ceph_osdc_put_request(osd_req);
}
/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
                                                u64 offset, u64 length,
                                                enum obj_request_type type)
{
        struct rbd_obj_request *obj_request;
        size_t size;
        char *name;

        rbd_assert(obj_request_type_valid(type));

        size = strlen(object_name) + 1;
        name = kmalloc(size, GFP_KERNEL);
        if (!name)
                return NULL;

        obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
        if (!obj_request) {
                kfree(name);
                return NULL;
        }

        obj_request->object_name = memcpy(name, object_name, size);
        obj_request->offset = offset;
        obj_request->length = length;
        obj_request->flags = 0;
        obj_request->which = BAD_WHICH;
        obj_request->type = type;
        INIT_LIST_HEAD(&obj_request->links);
        init_completion(&obj_request->completion);
        kref_init(&obj_request->kref);

        dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
                offset, length, (int)type, obj_request);

        return obj_request;
}
static void rbd_obj_request_destroy(struct kref *kref)
{
        struct rbd_obj_request *obj_request;

        obj_request = container_of(kref, struct rbd_obj_request, kref);

        dout("%s: obj %p\n", __func__, obj_request);

        rbd_assert(obj_request->img_request == NULL);
        rbd_assert(obj_request->which == BAD_WHICH);

        if (obj_request->osd_req)
                rbd_osd_req_destroy(obj_request->osd_req);

        rbd_assert(obj_request_type_valid(obj_request->type));
        switch (obj_request->type) {
        case OBJ_REQUEST_NODATA:
                break;		/* Nothing to do */
        case OBJ_REQUEST_BIO:
                if (obj_request->bio_list)
                        bio_chain_put(obj_request->bio_list);
                break;
        case OBJ_REQUEST_PAGES:
                if (obj_request->pages)
                        ceph_release_page_vector(obj_request->pages,
                                                obj_request->page_count);
                break;
        }

        kfree(obj_request->object_name);
        obj_request->object_name = NULL;
        kmem_cache_free(rbd_obj_request_cache, obj_request);
}
/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
        rbd_dev_remove_parent(rbd_dev);
        rbd_spec_put(rbd_dev->parent_spec);
        rbd_dev->parent_spec = NULL;
        rbd_dev->parent_overlap = 0;
}
/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
        int counter;

        if (!rbd_dev->parent_spec)
                return;

        counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
        if (counter > 0)
                return;

        /* Last reference; clean up parent data structures */

        if (!counter)
                rbd_dev_unparent(rbd_dev);
        else
                rbd_warn(rbd_dev, "parent reference underflow\n");
}
/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * We must get the reference before checking for the overlap to
 * coordinate properly with zeroing the parent overlap in
 * rbd_dev_v2_parent_info() when an image gets flattened.  We
 * drop it again if there is no overlap.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
        int counter;

        if (!rbd_dev->parent_spec)
                return false;

        counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
        if (counter > 0 && rbd_dev->parent_overlap)
                return true;

        /* Image was flattened, but parent is not yet torn down */

        if (counter < 0)
                rbd_warn(rbd_dev, "parent reference overflow\n");

        return false;
}
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
                                        struct rbd_device *rbd_dev,
                                        u64 offset, u64 length,
                                        bool write_request)
{
        struct rbd_img_request *img_request;

        img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_ATOMIC);
        if (!img_request)
                return NULL;

        if (write_request) {
                down_read(&rbd_dev->header_rwsem);
                ceph_get_snap_context(rbd_dev->header.snapc);
                up_read(&rbd_dev->header_rwsem);
        }

        img_request->rq = NULL;
        img_request->rbd_dev = rbd_dev;
        img_request->offset = offset;
        img_request->length = length;
        img_request->flags = 0;
        if (write_request) {
                img_request_write_set(img_request);
                img_request->snapc = rbd_dev->header.snapc;
        } else {
                img_request->snap_id = rbd_dev->spec->snap_id;
        }
        if (rbd_dev_parent_get(rbd_dev))
                img_request_layered_set(img_request);
        spin_lock_init(&img_request->completion_lock);
        img_request->next_completion = 0;
        img_request->callback = NULL;
        img_request->result = 0;
        img_request->obj_request_count = 0;
        INIT_LIST_HEAD(&img_request->obj_requests);
        kref_init(&img_request->kref);

        dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
                write_request ? "write" : "read", offset, length,
                img_request);

        return img_request;
}
static void rbd_img_request_destroy(struct kref *kref)
{
        struct rbd_img_request *img_request;
        struct rbd_obj_request *obj_request;
        struct rbd_obj_request *next_obj_request;

        img_request = container_of(kref, struct rbd_img_request, kref);

        dout("%s: img %p\n", __func__, img_request);

        for_each_obj_request_safe(img_request, obj_request, next_obj_request)
                rbd_img_obj_request_del(img_request, obj_request);
        rbd_assert(img_request->obj_request_count == 0);

        if (img_request_layered_test(img_request)) {
                img_request_layered_clear(img_request);
                rbd_dev_parent_put(img_request->rbd_dev);
        }

        if (img_request_write_test(img_request))
                ceph_put_snap_context(img_request->snapc);

        kmem_cache_free(rbd_img_request_cache, img_request);
}
static struct rbd_img_request *rbd_parent_request_create(
                                        struct rbd_obj_request *obj_request,
                                        u64 img_offset, u64 length)
{
        struct rbd_img_request *parent_request;
        struct rbd_device *rbd_dev;

        rbd_assert(obj_request->img_request);
        rbd_dev = obj_request->img_request->rbd_dev;

        parent_request = rbd_img_request_create(rbd_dev->parent,
                                                img_offset, length, false);
        if (!parent_request)
                return NULL;

        img_request_child_set(parent_request);
        rbd_obj_request_get(obj_request);
        parent_request->obj_request = obj_request;

        return parent_request;
}

static void rbd_parent_request_destroy(struct kref *kref)
{
        struct rbd_img_request *parent_request;
        struct rbd_obj_request *orig_request;

        parent_request = container_of(kref, struct rbd_img_request, kref);
        orig_request = parent_request->obj_request;

        parent_request->obj_request = NULL;
        rbd_obj_request_put(orig_request);
        img_request_child_clear(parent_request);

        rbd_img_request_destroy(kref);
}
static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request;
        unsigned int xferred;
        int result;
        bool more;

        rbd_assert(obj_request_img_data_test(obj_request));
        img_request = obj_request->img_request;

        rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
        xferred = (unsigned int)obj_request->xferred;
        result = obj_request->result;
        if (result) {
                struct rbd_device *rbd_dev = img_request->rbd_dev;

                rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n",
                        img_request_write_test(img_request) ? "write" : "read",
                        obj_request->length, obj_request->img_offset,
                        obj_request->offset);
                rbd_warn(rbd_dev, "  result %d xferred %x\n",
                        result, xferred);
                if (!img_request->result)
                        img_request->result = result;
        }

        /* Image object requests don't own their page array */

        if (obj_request->type == OBJ_REQUEST_PAGES) {
                obj_request->pages = NULL;
                obj_request->page_count = 0;
        }

        if (img_request_child_test(img_request)) {
                rbd_assert(img_request->obj_request != NULL);
                more = obj_request->which < img_request->obj_request_count - 1;
        } else {
                rbd_assert(img_request->rq != NULL);
                more = blk_end_request(img_request->rq, result, xferred);
        }

        return more;
}
static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request;
        u32 which = obj_request->which;
        bool more = true;

        rbd_assert(obj_request_img_data_test(obj_request));
        img_request = obj_request->img_request;

        dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
        rbd_assert(img_request != NULL);
        rbd_assert(img_request->obj_request_count > 0);
        rbd_assert(which != BAD_WHICH);
        rbd_assert(which < img_request->obj_request_count);

        spin_lock_irq(&img_request->completion_lock);
        if (which != img_request->next_completion)
                goto out;

        for_each_obj_request_from(img_request, obj_request) {
                rbd_assert(more);
                rbd_assert(which < img_request->obj_request_count);

                if (!obj_request_done_test(obj_request))
                        break;
                more = rbd_img_obj_end_request(obj_request);
                which++;
        }

        rbd_assert(more ^ (which == img_request->obj_request_count));
        img_request->next_completion = which;
out:
        spin_unlock_irq(&img_request->completion_lock);
        rbd_img_request_put(img_request);

        if (!more)
                rbd_img_request_complete(img_request);
}
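/*
 * Note on the completion scheme above: object requests may finish in
 * any order, but next_completion only advances over a contiguous
 * prefix of completed requests, so blk_end_request() always sees the
 * image request's bytes completing in offset order.
 */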
2232 * Split up an image request into one or more object requests, each
2233 * to a different object. The "type" parameter indicates whether
2234 * "data_desc" is the pointer to the head of a list of bio
2235 * structures, or the base of a page array. In either case this
2236 * function assumes data_desc describes memory sufficient to hold
2237 * all data described by the image request.
2239 static int rbd_img_request_fill(struct rbd_img_request *img_request,
2240 enum obj_request_type type,
2243 struct rbd_device *rbd_dev = img_request->rbd_dev;
2244 struct rbd_obj_request *obj_request = NULL;
2245 struct rbd_obj_request *next_obj_request;
2246 bool write_request = img_request_write_test(img_request);
2247 struct bio *bio_list = NULL;
2248 unsigned int bio_offset = 0;
2249 struct page **pages = NULL;
2254 dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2255 (int)type, data_desc);
2257 opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ;
2258 img_offset = img_request->offset;
2259 resid = img_request->length;
2260 rbd_assert(resid > 0);
2262 if (type == OBJ_REQUEST_BIO) {
2263 bio_list = data_desc;
2264 rbd_assert(img_offset ==
2265 bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
2267 rbd_assert(type == OBJ_REQUEST_PAGES);
2272 struct ceph_osd_request *osd_req;
2273 const char *object_name;
2276 unsigned int which = 0;
2278 object_name = rbd_segment_name(rbd_dev, img_offset);
2281 offset = rbd_segment_offset(rbd_dev, img_offset);
2282 length = rbd_segment_length(rbd_dev, img_offset, resid);
2283 obj_request = rbd_obj_request_create(object_name,
2284 offset, length, type);
2285 /* object request has its own copy of the object name */
2286 rbd_segment_name_free(object_name);
2291 * set obj_request->img_request before creating the
2292 * osd_request so that it gets the right snapc
2294 rbd_img_obj_request_add(img_request, obj_request);
2296 if (type == OBJ_REQUEST_BIO) {
2297 unsigned int clone_size;
2299 rbd_assert(length <= (u64)UINT_MAX);
2300 clone_size = (unsigned int)length;
2301 obj_request->bio_list =
2302 bio_chain_clone_range(&bio_list,
2306 if (!obj_request->bio_list)
2309 unsigned int page_count;
2311 obj_request->pages = pages;
2312 page_count = (u32)calc_pages_for(offset, length);
2313 obj_request->page_count = page_count;
2314 if ((offset + length) & ~PAGE_MASK)
2315 page_count--; /* more on last page */
2316 pages += page_count;
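/*
 * Note on the page accounting above (illustrative, assuming 4 KiB
 * pages): if offset + length ends mid-page, e.g. offset 0 and
 * length 6144, calc_pages_for() returns 2, but the tail of the
 * second page holds the start of the next object request's data,
 * so the page pointer is advanced by one less and that last page
 * is shared between the two requests.
 */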
2319 osd_req = rbd_osd_req_create(rbd_dev, write_request,
2320 (write_request ? 2 : 1),
2324 obj_request->osd_req = osd_req;
2325 obj_request->callback = rbd_img_obj_callback;
2326 rbd_img_request_get(img_request);
2328 if (write_request) {
2329 osd_req_op_alloc_hint_init(osd_req, which,
2330 rbd_obj_bytes(&rbd_dev->header),
2331 rbd_obj_bytes(&rbd_dev->header));
2335 osd_req_op_extent_init(osd_req, which, opcode, offset, length,
2337 if (type == OBJ_REQUEST_BIO)
2338 osd_req_op_extent_osd_data_bio(osd_req, which,
2339 obj_request->bio_list, length);
2341 osd_req_op_extent_osd_data_pages(osd_req, which,
2342 obj_request->pages, length,
2343 offset & ~PAGE_MASK, false, false);
2346 rbd_osd_req_format_write(obj_request);
2348 rbd_osd_req_format_read(obj_request);
2350 obj_request->img_offset = img_offset;
2352 img_offset += length;
2359 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2360 rbd_img_obj_request_del(img_request, obj_request);
2366 rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
2368 struct rbd_img_request *img_request;
2369 struct rbd_device *rbd_dev;
2370 struct page **pages;
2373 rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2374 rbd_assert(obj_request_img_data_test(obj_request));
2375 img_request = obj_request->img_request;
2376 rbd_assert(img_request);
2378 rbd_dev = img_request->rbd_dev;
2379 rbd_assert(rbd_dev);
2381 pages = obj_request->copyup_pages;
2382 rbd_assert(pages != NULL);
2383 obj_request->copyup_pages = NULL;
2384 page_count = obj_request->copyup_page_count;
2385 rbd_assert(page_count);
2386 obj_request->copyup_page_count = 0;
2387 ceph_release_page_vector(pages, page_count);
2390 * We want the transfer count to reflect the size of the
2391 * original write request. There is no such thing as a
2392 * successful short write, so if the request was successful
2393 * we can just set it to the originally-requested length.
2395 if (!obj_request->result)
2396 obj_request->xferred = obj_request->length;
2398 /* Finish up with the normal image object callback */
2400 rbd_img_obj_callback(obj_request);
2404 rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2406 struct rbd_obj_request *orig_request;
2407 struct ceph_osd_request *osd_req;
2408 struct ceph_osd_client *osdc;
2409 struct rbd_device *rbd_dev;
2410 struct page **pages;
2417 rbd_assert(img_request_child_test(img_request));
2419 /* First get what we need from the image request */
2421 pages = img_request->copyup_pages;
2422 rbd_assert(pages != NULL);
2423 img_request->copyup_pages = NULL;
2424 page_count = img_request->copyup_page_count;
2425 rbd_assert(page_count);
2426 img_request->copyup_page_count = 0;
2428 orig_request = img_request->obj_request;
2429 rbd_assert(orig_request != NULL);
2430 rbd_assert(obj_request_type_valid(orig_request->type));
2431 img_result = img_request->result;
2432 parent_length = img_request->length;
2433 rbd_assert(parent_length == img_request->xferred);
2434 rbd_img_request_put(img_request);
2436 rbd_assert(orig_request->img_request);
2437 rbd_dev = orig_request->img_request->rbd_dev;
2438 rbd_assert(rbd_dev);
2441 * If the overlap has become 0 (most likely because the
2442 * image has been flattened) we need to free the pages
2443 * and re-submit the original write request.
2445 if (!rbd_dev->parent_overlap) {
2446 struct ceph_osd_client *osdc;
2448 ceph_release_page_vector(pages, page_count);
2449 osdc = &rbd_dev->rbd_client->client->osdc;
2450 img_result = rbd_obj_request_submit(osdc, orig_request);
2459 * The original osd request is of no use to us any more.
2460 * We need a new one that can hold the three ops in a copyup
2461 * request. Allocate the new copyup osd request for the
2462 * original request, and release the old one.
2464 img_result = -ENOMEM;
2465 osd_req = rbd_osd_req_create_copyup(orig_request);
2468 rbd_osd_req_destroy(orig_request->osd_req);
2469 orig_request->osd_req = osd_req;
2470 orig_request->copyup_pages = pages;
2471 orig_request->copyup_page_count = page_count;
2473 /* Initialize the copyup op */
2475 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2476 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
2479 /* Then the hint op */
2481 osd_req_op_alloc_hint_init(osd_req, 1, rbd_obj_bytes(&rbd_dev->header),
2482 rbd_obj_bytes(&rbd_dev->header));
2484 /* And the original write request op */
2486 offset = orig_request->offset;
2487 length = orig_request->length;
2488 osd_req_op_extent_init(osd_req, 2, CEPH_OSD_OP_WRITE,
2489 offset, length, 0, 0);
2490 if (orig_request->type == OBJ_REQUEST_BIO)
2491 osd_req_op_extent_osd_data_bio(osd_req, 2,
2492 orig_request->bio_list, length);
2494 osd_req_op_extent_osd_data_pages(osd_req, 2,
2495 orig_request->pages, length,
2496 offset & ~PAGE_MASK, false, false);
2498 rbd_osd_req_format_write(orig_request);
2500 /* All set, send it off. */
2502 orig_request->callback = rbd_img_obj_copyup_callback;
2503 osdc = &rbd_dev->rbd_client->client->osdc;
2504 img_result = rbd_obj_request_submit(osdc, orig_request);
2508 /* Record the error code and complete the request */
2510 orig_request->result = img_result;
2511 orig_request->xferred = 0;
2512 obj_request_done_set(orig_request);
2513 rbd_obj_request_complete(orig_request);
2517 * Read from the parent image the range of data that covers the
2518 * entire target of the given object request. This is used for
2519 * satisfying a layered image write request when the target of an
2520 * object request from the image request does not exist.
2522 * A page array big enough to hold the returned data is allocated
2523 * and supplied to rbd_img_request_fill() as the "data descriptor."
2524 * When the read completes, this page array will be transferred to
2525 * the original object request for the copyup operation.
2527 * If an error occurs, record it as the result of the original
2528 * object request and mark it done so it gets completed.
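/*
 * A sketch of the resulting copyup request, as assembled by
 * rbd_img_obj_parent_read_full_callback() above:
 *
 *	op 0: CEPH_OSD_OP_CALL "rbd" "copyup", data = parent pages
 *	op 1: allocation hint (expected object and write sizes)
 *	op 2: CEPH_OSD_OP_WRITE, the original offset/length and data
 *
 * The copyup method is expected to populate the target object only
 * if it does not already exist, after which the write is applied.
 */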
2530 static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2532 struct rbd_img_request *img_request = NULL;
2533 struct rbd_img_request *parent_request = NULL;
2534 struct rbd_device *rbd_dev;
2537 struct page **pages = NULL;
2541 rbd_assert(obj_request_img_data_test(obj_request));
2542 rbd_assert(obj_request_type_valid(obj_request->type));
2544 img_request = obj_request->img_request;
2545 rbd_assert(img_request != NULL);
2546 rbd_dev = img_request->rbd_dev;
2547 rbd_assert(rbd_dev->parent != NULL);
2550 * Determine the byte range covered by the object in the
2551 * child image to which the original request was to be sent.
2553 img_offset = obj_request->img_offset - obj_request->offset;
2554 length = (u64)1 << rbd_dev->header.obj_order;
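/*
 * Example of the arithmetic above (illustrative): with obj_order 22
 * (4 MiB objects), a request at img_offset 5 MiB that starts 1 MiB
 * into its object maps to the object covering [4 MiB, 8 MiB) of the
 * image, so img_offset becomes 4 MiB and length 4 MiB.
 */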
2557 * There is no defined parent data beyond the parent
2558 * overlap, so limit what we read at that boundary if
2559 * necessary.
2561 if (img_offset + length > rbd_dev->parent_overlap) {
2562 rbd_assert(img_offset < rbd_dev->parent_overlap);
2563 length = rbd_dev->parent_overlap - img_offset;
2567 * Allocate a page array big enough to receive the data read
2568 * from the parent.
2570 page_count = (u32)calc_pages_for(0, length);
2571 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2572 if (IS_ERR(pages)) {
2573 result = PTR_ERR(pages);
2579 parent_request = rbd_parent_request_create(obj_request,
2580 img_offset, length);
2581 if (!parent_request)
2584 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2587 parent_request->copyup_pages = pages;
2588 parent_request->copyup_page_count = page_count;
2590 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2591 result = rbd_img_request_submit(parent_request);
2595 parent_request->copyup_pages = NULL;
2596 parent_request->copyup_page_count = 0;
2597 parent_request->obj_request = NULL;
2598 rbd_obj_request_put(obj_request);
2601 ceph_release_page_vector(pages, page_count);
2603 rbd_img_request_put(parent_request);
2604 obj_request->result = result;
2605 obj_request->xferred = 0;
2606 obj_request_done_set(obj_request);
2611 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2613 struct rbd_obj_request *orig_request;
2614 struct rbd_device *rbd_dev;
2617 rbd_assert(!obj_request_img_data_test(obj_request));
2620 * All we need from the object request is the original
2621 * request and the result of the STAT op. Grab those, then
2622 * we're done with the request.
2624 orig_request = obj_request->obj_request;
2625 obj_request->obj_request = NULL;
2626 rbd_obj_request_put(orig_request);
2627 rbd_assert(orig_request);
2628 rbd_assert(orig_request->img_request);
2630 result = obj_request->result;
2631 obj_request->result = 0;
2633 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2634 obj_request, orig_request, result,
2635 obj_request->xferred, obj_request->length);
2636 rbd_obj_request_put(obj_request);
2639 * If the overlap has become 0 (most likely because the
2640 * image has been flattened) we need to free the pages
2641 * and re-submit the original write request.
2643 rbd_dev = orig_request->img_request->rbd_dev;
2644 if (!rbd_dev->parent_overlap) {
2645 struct ceph_osd_client *osdc;
2647 osdc = &rbd_dev->rbd_client->client->osdc;
2648 result = rbd_obj_request_submit(osdc, orig_request);
2654 * Our only purpose here is to determine whether the object
2655 * exists, and we don't want to treat the non-existence as
2656 * an error. If something else comes back, transfer the
2657 * error to the original request and complete it now.
2660 obj_request_existence_set(orig_request, true);
2661 } else if (result == -ENOENT) {
2662 obj_request_existence_set(orig_request, false);
2663 } else if (result) {
2664 orig_request->result = result;
2669 * Resubmit the original request now that we have recorded
2670 * whether the target object exists.
2672 orig_request->result = rbd_img_obj_request_submit(orig_request);
2674 if (orig_request->result)
2675 rbd_obj_request_complete(orig_request);
2678 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2680 struct rbd_obj_request *stat_request;
2681 struct rbd_device *rbd_dev;
2682 struct ceph_osd_client *osdc;
2683 struct page **pages = NULL;
2689 * The response data for a STAT call consists of:
2690 *     le64 length;
2691 *     struct {
2692 *         le32 tv_sec;
2693 *         le32 tv_nsec;
2694 *     } mtime;
2696 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2697 page_count = (u32)calc_pages_for(0, size);
2698 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2700 return PTR_ERR(pages);
2703 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2708 rbd_obj_request_get(obj_request);
2709 stat_request->obj_request = obj_request;
2710 stat_request->pages = pages;
2711 stat_request->page_count = page_count;
2713 rbd_assert(obj_request->img_request);
2714 rbd_dev = obj_request->img_request->rbd_dev;
2715 stat_request->osd_req = rbd_osd_req_create(rbd_dev, false, 1,
2717 if (!stat_request->osd_req)
2719 stat_request->callback = rbd_img_obj_exists_callback;
2721 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
2722 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2724 rbd_osd_req_format_read(stat_request);
2726 osdc = &rbd_dev->rbd_client->client->osdc;
2727 ret = rbd_obj_request_submit(osdc, stat_request);
2730 rbd_obj_request_put(obj_request);
2735 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2737 struct rbd_img_request *img_request;
2738 struct rbd_device *rbd_dev;
2741 rbd_assert(obj_request_img_data_test(obj_request));
2743 img_request = obj_request->img_request;
2744 rbd_assert(img_request);
2745 rbd_dev = img_request->rbd_dev;
2748 * Only writes to layered images need special handling.
2749 * Reads and non-layered writes are simple object requests.
2750 * Layered writes that start beyond the end of the overlap
2751 * with the parent have no parent data, so they too are
2752 * simple object requests. Finally, if the target object is
2753 * known to already exist, its parent data has already been
2754 * copied, so a write to the object can also be handled as a
2755 * simple object request.
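/*
 * As a decision sketch (summarizing the tests below, not new logic):
 *
 *	read                            -> submit directly
 *	write, image not layered        -> submit directly
 *	write beyond the parent overlap -> submit directly
 *	write, target known to exist    -> submit directly
 *	write, target known missing     -> read parent data, copyup
 *	write, existence unknown        -> issue a STAT first
 */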
2757 if (!img_request_write_test(img_request) ||
2758 !img_request_layered_test(img_request) ||
2759 !obj_request_overlaps_parent(obj_request) ||
2760 ((known = obj_request_known_test(obj_request)) &&
2761 obj_request_exists_test(obj_request))) {
2763 struct rbd_device *rbd_dev;
2764 struct ceph_osd_client *osdc;
2766 rbd_dev = obj_request->img_request->rbd_dev;
2767 osdc = &rbd_dev->rbd_client->client->osdc;
2769 return rbd_obj_request_submit(osdc, obj_request);
2773 * It's a layered write. The target object might exist but
2774 * we may not know that yet. If we know it doesn't exist,
2775 * start by reading the data for the full target object from
2776 * the parent so we can use it for a copyup to the target.
2779 return rbd_img_obj_parent_read_full(obj_request);
2781 /* We don't know whether the target exists. Go find out. */
2783 return rbd_img_obj_exists_submit(obj_request);
2786 static int rbd_img_request_submit(struct rbd_img_request *img_request)
2788 struct rbd_obj_request *obj_request;
2789 struct rbd_obj_request *next_obj_request;
2791 dout("%s: img %p\n", __func__, img_request);
2792 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
2795 ret = rbd_img_obj_request_submit(obj_request);
2803 static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2805 struct rbd_obj_request *obj_request;
2806 struct rbd_device *rbd_dev;
2811 rbd_assert(img_request_child_test(img_request));
2813 /* First get what we need from the image request and release it */
2815 obj_request = img_request->obj_request;
2816 img_xferred = img_request->xferred;
2817 img_result = img_request->result;
2818 rbd_img_request_put(img_request);
2821 * If the overlap has become 0 (most likely because the
2822 * image has been flattened) we need to re-submit the
2823 * original request.
2825 rbd_assert(obj_request);
2826 rbd_assert(obj_request->img_request);
2827 rbd_dev = obj_request->img_request->rbd_dev;
2828 if (!rbd_dev->parent_overlap) {
2829 struct ceph_osd_client *osdc;
2831 osdc = &rbd_dev->rbd_client->client->osdc;
2832 img_result = rbd_obj_request_submit(osdc, obj_request);
2837 obj_request->result = img_result;
2838 if (obj_request->result)
2842 * We need to zero anything beyond the parent overlap
2843 * boundary. Since rbd_img_obj_request_read_callback()
2844 * will zero anything beyond the end of a short read, an
2845 * easy way to do this is to pretend the data from the
2846 * parent came up short--ending at the overlap boundary.
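/*
 * Worked example (illustrative): with parent_overlap at 8 MiB, an
 * object request covering [7 MiB, 9 MiB) has xferred capped below
 * at 1 MiB (8 MiB - 7 MiB); the read callback then zero-fills the
 * final 1 MiB that lies beyond the overlap.
 */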
2848 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
2849 obj_end = obj_request->img_offset + obj_request->length;
2850 if (obj_end > rbd_dev->parent_overlap) {
2853 if (obj_request->img_offset < rbd_dev->parent_overlap)
2854 xferred = rbd_dev->parent_overlap -
2855 obj_request->img_offset;
2857 obj_request->xferred = min(img_xferred, xferred);
2859 obj_request->xferred = img_xferred;
2862 rbd_img_obj_request_read_callback(obj_request);
2863 rbd_obj_request_complete(obj_request);
2866 static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
2868 struct rbd_img_request *img_request;
2871 rbd_assert(obj_request_img_data_test(obj_request));
2872 rbd_assert(obj_request->img_request != NULL);
2873 rbd_assert(obj_request->result == (s32) -ENOENT);
2874 rbd_assert(obj_request_type_valid(obj_request->type));
2876 /* rbd_read_finish(obj_request, obj_request->length); */
2877 img_request = rbd_parent_request_create(obj_request,
2878 obj_request->img_offset,
2879 obj_request->length);
2884 if (obj_request->type == OBJ_REQUEST_BIO)
2885 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2886 obj_request->bio_list);
2888 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
2889 obj_request->pages);
2893 img_request->callback = rbd_img_parent_read_callback;
2894 result = rbd_img_request_submit(img_request);
2901 rbd_img_request_put(img_request);
2902 obj_request->result = result;
2903 obj_request->xferred = 0;
2904 obj_request_done_set(obj_request);
2907 static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
2909 struct rbd_obj_request *obj_request;
2910 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2913 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2914 OBJ_REQUEST_NODATA);
2919 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, 1,
2921 if (!obj_request->osd_req)
2924 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
2926 rbd_osd_req_format_read(obj_request);
2928 ret = rbd_obj_request_submit(osdc, obj_request);
2931 ret = rbd_obj_request_wait(obj_request);
2933 rbd_obj_request_put(obj_request);
2938 static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2940 struct rbd_device *rbd_dev = (struct rbd_device *)data;
2946 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
2947 rbd_dev->header_name, (unsigned long long)notify_id,
2948 (unsigned int)opcode);
2949 ret = rbd_dev_refresh(rbd_dev);
2951 rbd_warn(rbd_dev, "header refresh error (%d)\n", ret);
2953 rbd_obj_notify_ack_sync(rbd_dev, notify_id);
2957 * Initiate a watch request, synchronously.
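/*
 * Sketch of the watch lifecycle (a summary of this and the
 * surrounding functions, not additional steps):
 *
 *	1. ceph_osdc_create_event()   - allocate an event and cookie
 *	2. CEPH_OSD_OP_WATCH, flag 1  - register; the request lingers
 *	3. rbd_watch_cb()             - on notify, refresh the header
 *	                                and send NOTIFY_ACK
 *	4. CEPH_OSD_OP_WATCH, flag 0  - unregister at teardown
 */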
2959 static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
2961 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2962 struct rbd_obj_request *obj_request;
2965 rbd_assert(!rbd_dev->watch_event);
2966 rbd_assert(!rbd_dev->watch_request);
2968 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
2969 &rbd_dev->watch_event);
2973 rbd_assert(rbd_dev->watch_event);
2975 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2976 OBJ_REQUEST_NODATA);
2982 obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, 1,
2984 if (!obj_request->osd_req) {
2989 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
2991 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
2992 rbd_dev->watch_event->cookie, 0, 1);
2993 rbd_osd_req_format_write(obj_request);
2995 ret = rbd_obj_request_submit(osdc, obj_request);
2999 ret = rbd_obj_request_wait(obj_request);
3003 ret = obj_request->result;
3008 * A watch request is set to linger, so the underlying osd
3009 * request won't go away until we unregister it. We retain
3010 * a pointer to the object request during that time (in
3011 * rbd_dev->watch_request), so we'll keep a reference to
3012 * it. We'll drop that reference (below) after we've
3013 * unregistered it.
3015 rbd_dev->watch_request = obj_request;
3020 ceph_osdc_unregister_linger_request(osdc, obj_request->osd_req);
3022 rbd_obj_request_put(obj_request);
3024 ceph_osdc_cancel_event(rbd_dev->watch_event);
3025 rbd_dev->watch_event = NULL;
3031 * Tear down a watch request, synchronously.
3033 static int __rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
3035 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3036 struct rbd_obj_request *obj_request;
3039 rbd_assert(rbd_dev->watch_event);
3040 rbd_assert(rbd_dev->watch_request);
3042 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
3043 OBJ_REQUEST_NODATA);
3049 obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, 1,
3051 if (!obj_request->osd_req) {
3056 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
3057 rbd_dev->watch_event->cookie, 0, 0);
3058 rbd_osd_req_format_write(obj_request);
3060 ret = rbd_obj_request_submit(osdc, obj_request);
3064 ret = rbd_obj_request_wait(obj_request);
3068 ret = obj_request->result;
3072 /* We have successfully torn down the watch request */
3074 ceph_osdc_unregister_linger_request(osdc,
3075 rbd_dev->watch_request->osd_req);
3076 rbd_obj_request_put(rbd_dev->watch_request);
3077 rbd_dev->watch_request = NULL;
3080 rbd_obj_request_put(obj_request);
3082 ceph_osdc_cancel_event(rbd_dev->watch_event);
3083 rbd_dev->watch_event = NULL;
3088 static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
3092 ret = __rbd_dev_header_unwatch_sync(rbd_dev);
3094 rbd_warn(rbd_dev, "unable to tear down watch request: %d\n",
3100 * Synchronous osd object method call. Returns the number of bytes
3101 * returned in the inbound buffer, or a negative error code.
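/*
 * For example (a sketch of how the v2 metadata helpers below use
 * this), fetching the image size for a snapshot amounts to:
 *
 *	__le64 snapid = cpu_to_le64(snap_id);
 *
 *	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
 *				  "rbd", "get_size",
 *				  &snapid, sizeof (snapid),
 *				  &size_buf, sizeof (size_buf));
 */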
3103 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
3104 const char *object_name,
3105 const char *class_name,
3106 const char *method_name,
3107 const void *outbound,
3108 size_t outbound_size,
3110 size_t inbound_size)
3112 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3113 struct rbd_obj_request *obj_request;
3114 struct page **pages;
3119 * Method calls are ultimately read operations. The result
3120 * should be placed into the inbound buffer provided. They
3121 * also supply outbound data--parameters for the object
3122 * method. Currently if this is present it will be a
3123 * snapshot id.
3125 page_count = (u32)calc_pages_for(0, inbound_size);
3126 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3128 return PTR_ERR(pages);
3131 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
3136 obj_request->pages = pages;
3137 obj_request->page_count = page_count;
3139 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, 1,
3141 if (!obj_request->osd_req)
3144 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
3145 class_name, method_name);
3146 if (outbound_size) {
3147 struct ceph_pagelist *pagelist;
3149 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
3153 ceph_pagelist_init(pagelist);
3154 ceph_pagelist_append(pagelist, outbound, outbound_size);
3155 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
3158 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
3159 obj_request->pages, inbound_size,
3161 rbd_osd_req_format_read(obj_request);
3163 ret = rbd_obj_request_submit(osdc, obj_request);
3166 ret = rbd_obj_request_wait(obj_request);
3170 ret = obj_request->result;
3174 rbd_assert(obj_request->xferred < (u64)INT_MAX);
3175 ret = (int)obj_request->xferred;
3176 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
3179 rbd_obj_request_put(obj_request);
3181 ceph_release_page_vector(pages, page_count);
3186 static void rbd_request_fn(struct request_queue *q)
3187 __releases(q->queue_lock) __acquires(q->queue_lock)
3189 struct rbd_device *rbd_dev = q->queuedata;
3193 while ((rq = blk_fetch_request(q))) {
3194 bool write_request = rq_data_dir(rq) == WRITE;
3195 struct rbd_img_request *img_request;
3199 /* Ignore any non-FS requests that filter through. */
3201 if (rq->cmd_type != REQ_TYPE_FS) {
3202 dout("%s: non-fs request type %d\n", __func__,
3203 (int) rq->cmd_type);
3204 __blk_end_request_all(rq, 0);
3208 /* Ignore/skip any zero-length requests */
3210 offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
3211 length = (u64) blk_rq_bytes(rq);
3214 dout("%s: zero-length request\n", __func__);
3215 __blk_end_request_all(rq, 0);
3219 spin_unlock_irq(q->queue_lock);
3221 /* Disallow writes to a read-only device */
3223 if (write_request) {
3225 if (rbd_dev->mapping.read_only)
3227 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
3231 * Quit early if the mapped snapshot no longer
3232 * exists. It's still possible the snapshot will
3233 * have disappeared by the time our request arrives
3234 * at the osd, but there's no sense in sending it if
3235 * we already know the snapshot is gone.
3237 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
3238 dout("request for non-existent snapshot");
3239 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
3245 if (offset && length > U64_MAX - offset + 1) {
3246 rbd_warn(rbd_dev, "bad request range (%llu~%llu)\n",
3248 goto end_request; /* Shouldn't happen */
3252 if (offset + length > rbd_dev->mapping.size) {
3253 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)\n",
3254 offset, length, rbd_dev->mapping.size);
3259 img_request = rbd_img_request_create(rbd_dev, offset, length,
3264 img_request->rq = rq;
3266 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3269 result = rbd_img_request_submit(img_request);
3271 rbd_img_request_put(img_request);
3273 spin_lock_irq(q->queue_lock);
3275 rbd_warn(rbd_dev, "%s %llx at %llx result %d\n",
3276 write_request ? "write" : "read",
3277 length, offset, result);
3279 __blk_end_request_all(rq, result);
3285 * A queue callback. Makes sure that we don't create a bio that spans across
3286 * multiple osd objects. One exception would be single-page bios,
3287 * which we handle later in bio_chain_clone_range().
3289 static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
3290 struct bio_vec *bvec)
3292 struct rbd_device *rbd_dev = q->queuedata;
3293 sector_t sector_offset;
3294 sector_t sectors_per_obj;
3295 sector_t obj_sector_offset;
3299 * Find how far into its rbd object the bio's start sector falls.
3300 * The bio sector is partition-relative, so first offset it
3301 * relative to the enclosing device.
3303 sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
3304 sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
3305 obj_sector_offset = sector_offset & (sectors_per_obj - 1);
3308 * Compute the number of bytes from that offset to the end
3309 * of the object. Account for what's already used by the bio.
3311 ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
3312 if (ret > bmd->bi_size)
3313 ret -= bmd->bi_size;
3318 * Don't send back more than was asked for. And if the bio
3319 * was empty, let the whole thing through because: "Note
3320 * that a block device *must* allow a single page to be
3321 * added to an empty bio."
3323 rbd_assert(bvec->bv_len <= PAGE_SIZE);
3324 if (ret > (int) bvec->bv_len || !bmd->bi_size)
3325 ret = (int) bvec->bv_len;
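/*
 * Worked example for the computation above (illustrative): with
 * obj_order 22, sectors_per_obj = 1 << (22 - 9) = 8192. A bio
 * starting 8000 sectors into its object with nothing queued yet
 * leaves (8192 - 8000) << 9 = 98304 bytes in the object, so a
 * 4 KiB bvec is accepted in full.
 */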
3330 static void rbd_free_disk(struct rbd_device *rbd_dev)
3332 struct gendisk *disk = rbd_dev->disk;
3337 rbd_dev->disk = NULL;
3338 if (disk->flags & GENHD_FL_UP) {
3341 blk_cleanup_queue(disk->queue);
3346 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3347 const char *object_name,
3348 u64 offset, u64 length, void *buf)
3351 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3352 struct rbd_obj_request *obj_request;
3353 struct page **pages = NULL;
3358 page_count = (u32) calc_pages_for(offset, length);
3359 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3361 ret = PTR_ERR(pages);
3364 obj_request = rbd_obj_request_create(object_name, offset, length,
3369 obj_request->pages = pages;
3370 obj_request->page_count = page_count;
3372 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, 1,
3374 if (!obj_request->osd_req)
3377 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
3378 offset, length, 0, 0);
3379 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
3381 obj_request->length,
3382 obj_request->offset & ~PAGE_MASK,
3384 rbd_osd_req_format_read(obj_request);
3386 ret = rbd_obj_request_submit(osdc, obj_request);
3389 ret = rbd_obj_request_wait(obj_request);
3393 ret = obj_request->result;
3397 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
3398 size = (size_t) obj_request->xferred;
3399 ceph_copy_from_page_vector(pages, buf, 0, size);
3400 rbd_assert(size <= (size_t)INT_MAX);
3404 rbd_obj_request_put(obj_request);
3406 ceph_release_page_vector(pages, page_count);
3412 * Read the complete header for the given rbd device. On successful
3413 * return, the rbd_dev->header field will contain up-to-date
3414 * information about the image.
3416 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
3418 struct rbd_image_header_ondisk *ondisk = NULL;
3425 * The complete header will include an array of its 64-bit
3426 * snapshot ids, followed by the names of those snapshots as
3427 * a contiguous block of NUL-terminated strings. Note that
3428 * the number of snapshots could change by the time we read
3429 * it in, in which case we re-read it.
3436 size = sizeof (*ondisk);
3437 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3439 ondisk = kmalloc(size, GFP_KERNEL);
3443 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
3447 if ((size_t)ret < size) {
3449 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3453 if (!rbd_dev_ondisk_valid(ondisk)) {
3455 rbd_warn(rbd_dev, "invalid header");
3459 names_size = le64_to_cpu(ondisk->snap_names_len);
3460 want_count = snap_count;
3461 snap_count = le32_to_cpu(ondisk->snap_count);
3462 } while (snap_count != want_count);
3464 ret = rbd_header_from_disk(rbd_dev, ondisk);
3472 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3473 * has disappeared from the (just updated) snapshot context.
3475 static void rbd_exists_validate(struct rbd_device *rbd_dev)
3479 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3482 snap_id = rbd_dev->spec->snap_id;
3483 if (snap_id == CEPH_NOSNAP)
3486 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3487 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3490 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
3496 * Don't hold the lock while doing disk operations,
3497 * or lock ordering will conflict with the bdev mutex via:
3498 * rbd_add() -> blkdev_get() -> rbd_open()
3500 spin_lock_irq(&rbd_dev->lock);
3501 removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
3502 spin_unlock_irq(&rbd_dev->lock);
3504 * If the device is being removed, rbd_dev->disk has
3505 * been destroyed, so don't try to update its size
3508 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3509 dout("setting size to %llu sectors", (unsigned long long)size);
3510 set_capacity(rbd_dev->disk, size);
3511 revalidate_disk(rbd_dev->disk);
3515 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3520 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
3521 down_write(&rbd_dev->header_rwsem);
3522 mapping_size = rbd_dev->mapping.size;
3523 if (rbd_dev->image_format == 1)
3524 ret = rbd_dev_v1_header_info(rbd_dev);
3526 ret = rbd_dev_v2_header_info(rbd_dev);
3528 /* If it's a mapped snapshot, validate its EXISTS flag */
3530 rbd_exists_validate(rbd_dev);
3531 up_write(&rbd_dev->header_rwsem);
3533 if (mapping_size != rbd_dev->mapping.size) {
3534 rbd_dev_update_size(rbd_dev);
3540 static int rbd_init_disk(struct rbd_device *rbd_dev)
3542 struct gendisk *disk;
3543 struct request_queue *q;
3546 /* create gendisk info */
3547 disk = alloc_disk(single_major ?
3548 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
3549 RBD_MINORS_PER_MAJOR);
3553 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
3555 disk->major = rbd_dev->major;
3556 disk->first_minor = rbd_dev->minor;
3558 disk->flags |= GENHD_FL_EXT_DEVT;
3559 disk->fops = &rbd_bd_ops;
3560 disk->private_data = rbd_dev;
3562 q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
3566 /* We use the default size, but let's be explicit about it. */
3567 blk_queue_physical_block_size(q, SECTOR_SIZE);
3569 /* set io sizes to object size */
3570 segment_size = rbd_obj_bytes(&rbd_dev->header);
3571 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
3572 blk_queue_max_segment_size(q, segment_size);
3573 blk_queue_io_min(q, segment_size);
3574 blk_queue_io_opt(q, segment_size);
3576 blk_queue_merge_bvec(q, rbd_merge_bvec);
3579 q->queuedata = rbd_dev;
3581 rbd_dev->disk = disk;
3594 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3596 return container_of(dev, struct rbd_device, dev);
3599 static ssize_t rbd_size_show(struct device *dev,
3600 struct device_attribute *attr, char *buf)
3602 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3604 return sprintf(buf, "%llu\n",
3605 (unsigned long long)rbd_dev->mapping.size);
3609 * Note this shows the features for whatever's mapped, which is not
3610 * necessarily the base image.
3612 static ssize_t rbd_features_show(struct device *dev,
3613 struct device_attribute *attr, char *buf)
3615 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3617 return sprintf(buf, "0x%016llx\n",
3618 (unsigned long long)rbd_dev->mapping.features);
3621 static ssize_t rbd_major_show(struct device *dev,
3622 struct device_attribute *attr, char *buf)
3624 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3627 return sprintf(buf, "%d\n", rbd_dev->major);
3629 return sprintf(buf, "(none)\n");
3632 static ssize_t rbd_minor_show(struct device *dev,
3633 struct device_attribute *attr, char *buf)
3635 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3637 return sprintf(buf, "%d\n", rbd_dev->minor);
3640 static ssize_t rbd_client_id_show(struct device *dev,
3641 struct device_attribute *attr, char *buf)
3643 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3645 return sprintf(buf, "client%lld\n",
3646 ceph_client_id(rbd_dev->rbd_client->client));
3649 static ssize_t rbd_pool_show(struct device *dev,
3650 struct device_attribute *attr, char *buf)
3652 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3654 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
3657 static ssize_t rbd_pool_id_show(struct device *dev,
3658 struct device_attribute *attr, char *buf)
3660 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3662 return sprintf(buf, "%llu\n",
3663 (unsigned long long) rbd_dev->spec->pool_id);
3666 static ssize_t rbd_name_show(struct device *dev,
3667 struct device_attribute *attr, char *buf)
3669 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3671 if (rbd_dev->spec->image_name)
3672 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3674 return sprintf(buf, "(unknown)\n");
3677 static ssize_t rbd_image_id_show(struct device *dev,
3678 struct device_attribute *attr, char *buf)
3680 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3682 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
3686 * Shows the name of the currently-mapped snapshot (or
3687 * RBD_SNAP_HEAD_NAME for the base image).
3689 static ssize_t rbd_snap_show(struct device *dev,
3690 struct device_attribute *attr,
3693 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3695 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
3699 * For an rbd v2 image, shows the pool id, image id, and snapshot id
3700 * for the parent image. If there is no parent, simply shows
3701 * "(no parent image)".
3703 static ssize_t rbd_parent_show(struct device *dev,
3704 struct device_attribute *attr,
3707 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3708 struct rbd_spec *spec = rbd_dev->parent_spec;
3713 return sprintf(buf, "(no parent image)\n");
3715 count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
3716 (unsigned long long) spec->pool_id, spec->pool_name);
3721 count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
3722 spec->image_name ? spec->image_name : "(unknown)");
3727 count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
3728 (unsigned long long) spec->snap_id, spec->snap_name);
3733 count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
3738 return (ssize_t) (bufp - buf);
3741 static ssize_t rbd_image_refresh(struct device *dev,
3742 struct device_attribute *attr,
3746 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3749 ret = rbd_dev_refresh(rbd_dev);
3751 rbd_warn(rbd_dev, ": manual header refresh error (%d)\n", ret);
3753 return ret < 0 ? ret : size;
3756 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
3757 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
3758 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
3759 static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
3760 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3761 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
3762 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
3763 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
3764 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
3765 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3766 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
3767 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
3769 static struct attribute *rbd_attrs[] = {
3770 &dev_attr_size.attr,
3771 &dev_attr_features.attr,
3772 &dev_attr_major.attr,
3773 &dev_attr_minor.attr,
3774 &dev_attr_client_id.attr,
3775 &dev_attr_pool.attr,
3776 &dev_attr_pool_id.attr,
3777 &dev_attr_name.attr,
3778 &dev_attr_image_id.attr,
3779 &dev_attr_current_snap.attr,
3780 &dev_attr_parent.attr,
3781 &dev_attr_refresh.attr,
3785 static struct attribute_group rbd_attr_group = {
3789 static const struct attribute_group *rbd_attr_groups[] = {
3794 static void rbd_sysfs_dev_release(struct device *dev)
3798 static struct device_type rbd_device_type = {
3800 .groups = rbd_attr_groups,
3801 .release = rbd_sysfs_dev_release,
3804 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
3806 kref_get(&spec->kref);
3811 static void rbd_spec_free(struct kref *kref);
3812 static void rbd_spec_put(struct rbd_spec *spec)
3815 kref_put(&spec->kref, rbd_spec_free);
3818 static struct rbd_spec *rbd_spec_alloc(void)
3820 struct rbd_spec *spec;
3822 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
3825 kref_init(&spec->kref);
3830 static void rbd_spec_free(struct kref *kref)
3832 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
3834 kfree(spec->pool_name);
3835 kfree(spec->image_id);
3836 kfree(spec->image_name);
3837 kfree(spec->snap_name);
3841 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
3842 struct rbd_spec *spec)
3844 struct rbd_device *rbd_dev;
3846 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
3850 spin_lock_init(&rbd_dev->lock);
3852 atomic_set(&rbd_dev->parent_ref, 0);
3853 INIT_LIST_HEAD(&rbd_dev->node);
3854 init_rwsem(&rbd_dev->header_rwsem);
3856 rbd_dev->spec = spec;
3857 rbd_dev->rbd_client = rbdc;
3859 /* Initialize the layout used for all rbd requests */
3861 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3862 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
3863 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3864 rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
3869 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
3871 rbd_put_client(rbd_dev->rbd_client);
3872 rbd_spec_put(rbd_dev->spec);
3877 * Get the size and object order for an image snapshot, or if
3878 * snap_id is CEPH_NOSNAP, get this information for the base
3879 * image.
3881 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
3882 u8 *order, u64 *snap_size)
3884 __le64 snapid = cpu_to_le64(snap_id);
3889 } __attribute__ ((packed)) size_buf = { 0 };
3891 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3893 &snapid, sizeof (snapid),
3894 &size_buf, sizeof (size_buf));
3895 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3898 if (ret < sizeof (size_buf))
3902 *order = size_buf.order;
3903 dout(" order %u", (unsigned int)*order);
3905 *snap_size = le64_to_cpu(size_buf.size);
3907 dout(" snap_id 0x%016llx snap_size = %llu\n",
3908 (unsigned long long)snap_id,
3909 (unsigned long long)*snap_size);
3914 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
3916 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
3917 &rbd_dev->header.obj_order,
3918 &rbd_dev->header.image_size);
3921 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
3927 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
3931 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3932 "rbd", "get_object_prefix", NULL, 0,
3933 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
3934 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3939 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
3940 p + ret, NULL, GFP_NOIO);
3943 if (IS_ERR(rbd_dev->header.object_prefix)) {
3944 ret = PTR_ERR(rbd_dev->header.object_prefix);
3945 rbd_dev->header.object_prefix = NULL;
3947 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
3955 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
3958 __le64 snapid = cpu_to_le64(snap_id);
3962 } __attribute__ ((packed)) features_buf = { 0 };
3966 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3967 "rbd", "get_features",
3968 &snapid, sizeof (snapid),
3969 &features_buf, sizeof (features_buf));
3970 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3973 if (ret < sizeof (features_buf))
3976 incompat = le64_to_cpu(features_buf.incompat);
3977 if (incompat & ~RBD_FEATURES_SUPPORTED)
3980 *snap_features = le64_to_cpu(features_buf.features);
3982 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
3983 (unsigned long long)snap_id,
3984 (unsigned long long)*snap_features,
3985 (unsigned long long)le64_to_cpu(features_buf.incompat));
3990 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
3992 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
3993 &rbd_dev->header.features);
3996 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
3998 struct rbd_spec *parent_spec;
4000 void *reply_buf = NULL;
4010 parent_spec = rbd_spec_alloc();
4014 size = sizeof (__le64) + /* pool_id */
4015 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
4016 sizeof (__le64) + /* snap_id */
4017 sizeof (__le64); /* overlap */
4018 reply_buf = kmalloc(size, GFP_KERNEL);
4024 snapid = cpu_to_le64(CEPH_NOSNAP);
4025 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4026 "rbd", "get_parent",
4027 &snapid, sizeof (snapid),
4029 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4034 end = reply_buf + ret;
4036 ceph_decode_64_safe(&p, end, pool_id, out_err);
4037 if (pool_id == CEPH_NOPOOL) {
4039 * Either the parent never existed, or we have
4040 * record of it but the image got flattened so it no
4041 * longer has a parent. When the parent of a
4042 * layered image disappears we immediately set the
4043 * overlap to 0. The effect of this is that all new
4044 * requests will be treated as if the image had no
4047 if (rbd_dev->parent_overlap) {
4048 rbd_dev->parent_overlap = 0;
4050 rbd_dev_parent_put(rbd_dev);
4051 pr_info("%s: clone image has been flattened\n",
4052 rbd_dev->disk->disk_name);
4055 goto out; /* No parent? No problem. */
4058 /* The ceph file layout needs to fit pool id in 32 bits */
4061 if (pool_id > (u64)U32_MAX) {
4062 rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
4063 (unsigned long long)pool_id, U32_MAX);
4067 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4068 if (IS_ERR(image_id)) {
4069 ret = PTR_ERR(image_id);
4072 ceph_decode_64_safe(&p, end, snap_id, out_err);
4073 ceph_decode_64_safe(&p, end, overlap, out_err);
4076 * The parent won't change (except when the clone is
4077 * flattened, which we already handled above). So we only need
4078 * to record the parent spec if we have not already done so.
4080 if (!rbd_dev->parent_spec) {
4081 parent_spec->pool_id = pool_id;
4082 parent_spec->image_id = image_id;
4083 parent_spec->snap_id = snap_id;
4084 rbd_dev->parent_spec = parent_spec;
4085 parent_spec = NULL; /* rbd_dev now owns this */
4089 * We always update the parent overlap. If it's zero we
4090 * treat it specially.
4092 rbd_dev->parent_overlap = overlap;
4096 /* A null parent_spec indicates it's the initial probe */
4100 * The overlap has become zero, so the clone
4101 * must have been resized down to 0 at some
4102 * point. Treat this the same as a flatten.
4104 rbd_dev_parent_put(rbd_dev);
4105 pr_info("%s: clone image now standalone\n",
4106 rbd_dev->disk->disk_name);
4109 * For the initial probe, if we find the
4110 * overlap is zero we just pretend there was
4111 * no parent image.
4113 rbd_warn(rbd_dev, "ignoring parent of "
4114 "clone with overlap 0\n");
4121 rbd_spec_put(parent_spec);
4126 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
4130 __le64 stripe_count;
4131 } __attribute__ ((packed)) striping_info_buf = { 0 };
4132 size_t size = sizeof (striping_info_buf);
4139 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4140 "rbd", "get_stripe_unit_count", NULL, 0,
4141 (char *)&striping_info_buf, size);
4142 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4149 * We don't actually support the "fancy striping" feature
4150 * (STRIPINGV2) yet, but if the striping sizes are the
4151 * defaults the behavior is the same as before. So find
4152 * out, and only fail if the image has non-default values.
4155 obj_size = (u64)1 << rbd_dev->header.obj_order;
4156 p = &striping_info_buf;
4157 stripe_unit = ceph_decode_64(&p);
4158 if (stripe_unit != obj_size) {
4159 rbd_warn(rbd_dev, "unsupported stripe unit "
4160 "(got %llu want %llu)",
4161 stripe_unit, obj_size);
4164 stripe_count = ceph_decode_64(&p);
4165 if (stripe_count != 1) {
4166 rbd_warn(rbd_dev, "unsupported stripe count "
4167 "(got %llu want 1)", stripe_count);
4170 rbd_dev->header.stripe_unit = stripe_unit;
4171 rbd_dev->header.stripe_count = stripe_count;
4176 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
4178 size_t image_id_size;
4183 void *reply_buf = NULL;
4185 char *image_name = NULL;
4188 rbd_assert(!rbd_dev->spec->image_name);
4190 len = strlen(rbd_dev->spec->image_id);
4191 image_id_size = sizeof (__le32) + len;
4192 image_id = kmalloc(image_id_size, GFP_KERNEL);
4197 end = image_id + image_id_size;
4198 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
4200 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
4201 reply_buf = kmalloc(size, GFP_KERNEL);
4205 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
4206 "rbd", "dir_get_name",
4207 image_id, image_id_size,
4212 end = reply_buf + ret;
4214 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
4215 if (IS_ERR(image_name))
4218 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
4226 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4228 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4229 const char *snap_name;
4232 /* Skip over names until we find the one we are looking for */
4234 snap_name = rbd_dev->header.snap_names;
4235 while (which < snapc->num_snaps) {
4236 if (!strcmp(name, snap_name))
4237 return snapc->snaps[which];
4238 snap_name += strlen(snap_name) + 1;
4244 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4246 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4251 for (which = 0; !found && which < snapc->num_snaps; which++) {
4252 const char *snap_name;
4254 snap_id = snapc->snaps[which];
4255 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
4256 if (IS_ERR(snap_name)) {
4257 /* ignore no-longer existing snapshots */
4258 if (PTR_ERR(snap_name) == -ENOENT)
4263 found = !strcmp(name, snap_name);
4266 return found ? snap_id : CEPH_NOSNAP;
4270 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
4271 * no snapshot by that name is found, or if an error occurs.
4273 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4275 if (rbd_dev->image_format == 1)
4276 return rbd_v1_snap_id_by_name(rbd_dev, name);
4278 return rbd_v2_snap_id_by_name(rbd_dev, name);
4282 * When an rbd image has a parent image, it is identified by the
4283 * pool, image, and snapshot ids (not names). This function fills
4284 * in the names for those ids. (It's OK if we can't figure out the
4285 * name for an image id, but the pool and snapshot ids should always
4286 * exist and have names.) All names in an rbd spec are dynamically
4287 * allocated.
4289 * When an image being mapped (not a parent) is probed, we have the
4290 * pool name and pool id, image name and image id, and the snapshot
4291 * name. The only thing we're missing is the snapshot id.
4293 static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
4295 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4296 struct rbd_spec *spec = rbd_dev->spec;
4297 const char *pool_name;
4298 const char *image_name;
4299 const char *snap_name;
4303 * An image being mapped will have the pool name (etc.), but
4304 * we need to look up the snapshot id.
4306 if (spec->pool_name) {
4307 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
4310 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
4311 if (snap_id == CEPH_NOSNAP)
4313 spec->snap_id = snap_id;
4315 spec->snap_id = CEPH_NOSNAP;
4321 /* Get the pool name; we have to make our own copy of this */
4323 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
4325 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
4328 pool_name = kstrdup(pool_name, GFP_KERNEL);
4332 /* Fetch the image name; tolerate failure here */
4334 image_name = rbd_dev_image_name(rbd_dev);
4336 rbd_warn(rbd_dev, "unable to get image name");
4338 /* Look up the snapshot name, and make a copy */
4340 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
4341 if (IS_ERR(snap_name)) {
4342 ret = PTR_ERR(snap_name);
4346 spec->pool_name = pool_name;
4347 spec->image_name = image_name;
4348 spec->snap_name = snap_name;
4358 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
4367 struct ceph_snap_context *snapc;
4371 * We'll need room for the seq value (maximum snapshot id),
4372 * snapshot count, and array of that many snapshot ids.
4373 * For now we have a fixed upper limit on the number we're
4374 * prepared to receive.
4376 size = sizeof (__le64) + sizeof (__le32) +
4377 RBD_MAX_SNAP_COUNT * sizeof (__le64);
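/*
 * Illustrative arithmetic: with RBD_MAX_SNAP_COUNT of 510, this
 * works out to 8 (seq) + 4 (count) + 510 * 8 (snapshot ids) =
 * 4092 bytes for the reply buffer.
 */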
4378 reply_buf = kzalloc(size, GFP_KERNEL);
4382 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4383 "rbd", "get_snapcontext", NULL, 0,
4385 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4390 end = reply_buf + ret;
4392 ceph_decode_64_safe(&p, end, seq, out);
4393 ceph_decode_32_safe(&p, end, snap_count, out);
4396 * Make sure the reported number of snapshot ids wouldn't go
4397 * beyond the end of our buffer. But before checking that,
4398 * make sure the computed size of the snapshot context we
4399 * allocate is representable in a size_t.
4401 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
4406 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
4410 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
4416 for (i = 0; i < snap_count; i++)
4417 snapc->snaps[i] = ceph_decode_64(&p);
4419 ceph_put_snap_context(rbd_dev->header.snapc);
4420 rbd_dev->header.snapc = snapc;
4422 dout(" snap context seq = %llu, snap_count = %u\n",
4423 (unsigned long long)seq, (unsigned int)snap_count);
4430 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
4441 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
4442 reply_buf = kmalloc(size, GFP_KERNEL);
4444 return ERR_PTR(-ENOMEM);
4446 snapid = cpu_to_le64(snap_id);
4447 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4448 "rbd", "get_snapshot_name",
4449 &snapid, sizeof (snapid),
4451 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4453 snap_name = ERR_PTR(ret);
4458 end = reply_buf + ret;
4459 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4460 if (IS_ERR(snap_name))
4463 dout(" snap_id 0x%016llx snap_name = %s\n",
4464 (unsigned long long)snap_id, snap_name);
4471 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
4473 bool first_time = rbd_dev->header.object_prefix == NULL;
4476 ret = rbd_dev_v2_image_size(rbd_dev);
4481 ret = rbd_dev_v2_header_onetime(rbd_dev);
4487 * If the image supports layering, get the parent info. We
4488 * need to probe the first time regardless. Thereafter we
4489 * only need to do so if there's a parent, to see if it has
4490 * disappeared due to the mapped image getting flattened.
4492 if (rbd_dev->header.features & RBD_FEATURE_LAYERING &&
4493 (first_time || rbd_dev->parent_spec)) {
4496 ret = rbd_dev_v2_parent_info(rbd_dev);
4501 * Print a warning if this is the initial probe and
4502 * the image has a parent. Don't print it if the
4503 * image now being probed is itself a parent. We
4504 * can tell at this point because we won't know its
4505 * pool name yet (just its pool id).
4507 warn = rbd_dev->parent_spec && rbd_dev->spec->pool_name;
4508 if (first_time && warn)
4509 rbd_warn(rbd_dev, "WARNING: kernel layering "
4510 "is EXPERIMENTAL!");
4513 if (rbd_dev->spec->snap_id == CEPH_NOSNAP)
4514 if (rbd_dev->mapping.size != rbd_dev->header.image_size)
4515 rbd_dev->mapping.size = rbd_dev->header.image_size;
4517 ret = rbd_dev_v2_snap_context(rbd_dev);
4518 dout("rbd_dev_v2_snap_context returned %d\n", ret);
4523 static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
4528 dev = &rbd_dev->dev;
4529 dev->bus = &rbd_bus_type;
4530 dev->type = &rbd_device_type;
4531 dev->parent = &rbd_root_dev;
4532 dev->release = rbd_dev_device_release;
4533 dev_set_name(dev, "%d", rbd_dev->dev_id);
4534 ret = device_register(dev);
4539 static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
4541 device_unregister(&rbd_dev->dev);
4545 * Get a unique rbd identifier for the given new rbd_dev, and add
4546 * the rbd_dev to the global list.
4548 static int rbd_dev_id_get(struct rbd_device *rbd_dev)
4552 new_dev_id = ida_simple_get(&rbd_dev_id_ida,
4553 0, minor_to_rbd_dev_id(1 << MINORBITS),
4558 rbd_dev->dev_id = new_dev_id;
4560 spin_lock(&rbd_dev_list_lock);
4561 list_add_tail(&rbd_dev->node, &rbd_dev_list);
4562 spin_unlock(&rbd_dev_list_lock);
4564 dout("rbd_dev %p given dev id %d\n", rbd_dev, rbd_dev->dev_id);
4570 * Remove an rbd_dev from the global list, and record that its
4571 * identifier is no longer in use.
4573 static void rbd_dev_id_put(struct rbd_device *rbd_dev)
4575 spin_lock(&rbd_dev_list_lock);
4576 list_del_init(&rbd_dev->node);
4577 spin_unlock(&rbd_dev_list_lock);
4579 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
4581 dout("rbd_dev %p released dev id %d\n", rbd_dev, rbd_dev->dev_id);
4585 * Skips over white space at *buf, and updates *buf to point to the
4586 * first found non-space character (if any). Returns the length of
4587 * the token (string of non-white space characters) found. Note
4588 * that *buf must be terminated with '\0'.
4590 static inline size_t next_token(const char **buf)
4593 * These are the characters that produce nonzero for
4594 * isspace() in the "C" and "POSIX" locales.
4596 const char *spaces = " \f\n\r\t\v";
4598 *buf += strspn(*buf, spaces); /* Find start of token */
4600 return strcspn(*buf, spaces); /* Return token length */
4604 * Finds the next token in *buf, and if the provided token buffer is
4605 * big enough, copies the found token into it. The result, if
4606 * copied, is guaranteed to be terminated with '\0'. Note that *buf
4607 * must be terminated with '\0' on entry.
4609 * Returns the length of the token found (not including the '\0').
4610 * Return value will be 0 if no token is found, and it will be >=
4611 * token_size if the token would not fit.
4613 * The *buf pointer will be updated to point beyond the end of the
4614 * found token. Note that this occurs even if the token buffer is
4615 * too small to hold it.
4617 static inline size_t copy_token(const char **buf,
4623 len = next_token(buf);
4624 if (len < token_size) {
4625 memcpy(token, *buf, len);
4626 *(token + len) = '\0';
4634 * Finds the next token in *buf, dynamically allocates a buffer big
4635 * enough to hold a copy of it, and copies the token into the new
4636 * buffer. The copy is guaranteed to be terminated with '\0'. Note
4637 * that a duplicate buffer is created even for a zero-length token.
4639 * Returns a pointer to the newly-allocated duplicate, or a null
4640 * pointer if memory for the duplicate was not available. If
4641 * the lenp argument is a non-null pointer, the length of the token
4642 * (not including the '\0') is returned in *lenp.
4644 * If successful, the *buf pointer will be updated to point beyond
4645 * the end of the found token.
4647 * Note: uses GFP_KERNEL for allocation.
4649 static inline char *dup_token(const char **buf, size_t *lenp)
4654 len = next_token(buf);
4655 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
4656 if (!dup)
4657 return NULL;
4658 *(dup + len) = '\0';
4659 *buf += len;
4661 if (lenp)
4662 *lenp = len;
4664 return dup;
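/*
 * Usage sketch (illustrative; the names are made up). dup_token()
 * consumes one token and returns a kmalloc'd, NUL-terminated copy
 * that the caller must kfree():
 *
 *	const char *p = "mypool myimage";
 *	size_t len;
 *	char *pool = dup_token(&p, &len);  // pool == "mypool", len == 6
 *
 *	if (!pool)
 *		return -ENOMEM;
 *	...
 *	kfree(pool);
 */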
4668 * Parse the options provided for an "rbd add" (i.e., rbd image
4669 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
4670 * and the data written is passed here via a NUL-terminated buffer.
4671 * Returns 0 if successful or an error code otherwise.
4673 * The information extracted from these options is recorded in
4674 * the other parameters which return dynamically-allocated
4675 * structures:
4676 *  ceph_opts
4677 *      The address of a pointer that will refer to a ceph options
4678 *      structure.  Caller must release the returned pointer using
4679 *      ceph_destroy_options() when it is no longer needed.
4680 *  rbd_opts
4681 *      Address of an rbd options pointer.  Fully initialized by
4682 *      this function; caller must release with kfree().
4683 *  spec
4684 *      Address of an rbd image specification pointer.  Fully
4685 *      initialized by this function based on parsed options.
4686 *      Caller must release with rbd_spec_put().
4688 * The options passed take this form:
4689 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
4690 * where:
4691 *  <mon_addrs>
4692 *      A comma-separated list of one or more monitor addresses.
4693 *      A monitor address is an ip address, optionally followed
4694 *      by a port number (separated by a colon).
4695 *        I.e.:  ip1[:port1][,ip2[:port2]...]
4696 *  <options>
4697 *      A comma-separated list of ceph and/or rbd options.
4698 *  <pool_name>
4699 *      The name of the rados pool containing the rbd image.
4700 *  <image_name>
4701 *      The name of the image in that pool to map.
4702 *  <snap_name>
4703 *      An optional snapshot name.  If provided, the mapping will
4704 *      present data from the image at the time that snapshot was
4705 *      created.  The image head is used if no snapshot name is
4706 *      provided.  Snapshot mappings are always read-only.
4708 static int rbd_add_parse_args(const char *buf,
4709 struct ceph_options **ceph_opts,
4710 struct rbd_options **opts,
4711 struct rbd_spec **rbd_spec)
4713 size_t len;
4714 char *options;
4715 const char *mon_addrs;
4716 char *snap_name;
4717 size_t mon_addrs_size;
4718 struct rbd_spec *spec = NULL;
4719 struct rbd_options *rbd_opts = NULL;
4720 struct ceph_options *copts;
4721 int ret;
4723 /* The first four tokens are required */
4725 len = next_token(&buf);
4726 if (!len) {
4727 rbd_warn(NULL, "no monitor address(es) provided");
4728 return -EINVAL;
4729 }
4730 mon_addrs = buf;
4731 mon_addrs_size = len + 1;
4732 buf += len;
4735 options = dup_token(&buf, NULL);
4736 if (!options)
4737 return -ENOMEM;
4738 if (!*options) {
4739 rbd_warn(NULL, "no options provided");
4740 goto out_err;
4741 }
4743 spec = rbd_spec_alloc();
4744 if (!spec)
4745 goto out_mem;
4747 spec->pool_name = dup_token(&buf, NULL);
4748 if (!spec->pool_name)
4749 goto out_mem;
4750 if (!*spec->pool_name) {
4751 rbd_warn(NULL, "no pool name provided");
4752 goto out_err;
4753 }
4755 spec->image_name = dup_token(&buf, NULL);
4756 if (!spec->image_name)
4757 goto out_mem;
4758 if (!*spec->image_name) {
4759 rbd_warn(NULL, "no image name provided");
4760 goto out_err;
4761 }
4764 * Snapshot name is optional; default is to use "-"
4765 * (indicating the head/no snapshot).
4767 len = next_token(&buf);
4768 if (!len) {
4769 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
4770 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
4771 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
4772 ret = -ENAMETOOLONG;
4773 goto out_err;
4774 }
4775 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
4776 if (!snap_name)
4777 goto out_mem;
4778 *(snap_name + len) = '\0';
4779 spec->snap_name = snap_name;
4781 /* Initialize all rbd options to the defaults */
4783 rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
4784 if (!rbd_opts)
4785 goto out_mem;
4787 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
4789 copts = ceph_parse_options(options, mon_addrs,
4790 mon_addrs + mon_addrs_size - 1,
4791 parse_rbd_opts_token, rbd_opts);
4792 if (IS_ERR(copts)) {
4793 ret = PTR_ERR(copts);
4794 goto out_err;
4795 }
4796 kfree(options);
4798 *ceph_opts = copts;
4799 *opts = rbd_opts;
4800 *rbd_spec = spec;
4802 return 0;
4803 out_mem:
4804 ret = -ENOMEM;
4805 out_err:
4806 kfree(rbd_opts);
4807 rbd_spec_put(spec);
4808 kfree(options);
4810 return ret;
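/*
 * Worked example (hedged; every name and the key are made up). Given
 * a buffer written to /sys/bus/rbd/add such as:
 *
 *	"1.2.3.4:6789 name=admin,secret=<key> mypool myimage mysnap"
 *
 * parsing yields mon_addrs "1.2.3.4:6789", ceph/rbd options from the
 * second token, spec->pool_name "mypool", spec->image_name "myimage"
 * and spec->snap_name "mysnap". If the trailing token is omitted,
 * spec->snap_name becomes "-" (RBD_SNAP_HEAD_NAME) and the image
 * head is mapped.
 */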
4814 * Return pool id (>= 0) or a negative error code.
4816 static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
4818 u64 newest_epoch;
4819 unsigned long timeout = rbdc->client->options->mount_timeout * HZ;
4820 int tries = 0;
4821 int ret;
4823 again:
4824 ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
4825 if (ret == -ENOENT && tries++ < 1) {
4826 ret = ceph_monc_do_get_version(&rbdc->client->monc, "osdmap",
4827 &newest_epoch);
4828 if (ret < 0)
4829 return ret;
4831 if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
4832 ceph_monc_request_next_osdmap(&rbdc->client->monc);
4833 (void) ceph_monc_wait_osdmap(&rbdc->client->monc,
4834 newest_epoch, timeout);
4835 goto again;
4836 } else {
4837 /* the osdmap we have is new enough */
4838 ret = -ENOENT;
4839 }
4840 }
4842 return ret;
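/*
 * Caller sketch (hypothetical values):
 *
 *	int pool_id = rbd_add_get_pool_id(rbdc, "mypool");
 *
 *	if (pool_id < 0)	// -ENOENT even in the newest osdmap
 *		return pool_id;
 */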
4846 * An rbd format 2 image has a unique identifier, distinct from the
4847 * name given to it by the user. Internally, that identifier is
4848 * what's used to specify the names of objects related to the image.
4850 * A special "rbd id" object is used to map an rbd image name to its
4851 * id. If that object doesn't exist, then there is no v2 rbd image
4852 * with the supplied name.
4854 * This function will record the given rbd_dev's image_id field if
4855 * it can be determined, and in that case will return 0. If any
4856 * errors occur a negative errno will be returned and the rbd_dev's
4857 * image_id field will be unchanged (and should be NULL).
4859 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
4861 int ret;
4862 size_t size;
4863 char *object_name;
4864 void *response;
4865 char *image_id;
4868 * When probing a parent image, the image id is already
4869 * known (and the image name likely is not). There's no
4870 * need to fetch the image id again in this case. We
4871 * do still need to set the image format though.
4873 if (rbd_dev->spec->image_id) {
4874 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
4880 * First, see if the format 2 image id file exists, and if
4881 * so, get the image's persistent id from it.
4883 size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
4884 object_name = kmalloc(size, GFP_NOIO);
4885 if (!object_name)
4886 return -ENOMEM;
4887 sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
4888 dout("rbd id object name is %s\n", object_name);
4890 /* Response will be an encoded string, which includes a length */
4892 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
4893 response = kzalloc(size, GFP_NOIO);
4894 if (!response) {
4895 ret = -ENOMEM;
4896 goto out;
4897 }
4899 /* If it doesn't exist we'll assume it's a format 1 image */
4901 ret = rbd_obj_method_sync(rbd_dev, object_name,
4902 "rbd", "get_id", NULL, 0,
4903 response, RBD_IMAGE_ID_LEN_MAX);
4904 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4905 if (ret == -ENOENT) {
4906 image_id = kstrdup("", GFP_KERNEL);
4907 ret = image_id ? 0 : -ENOMEM;
4908 if (!ret)
4909 rbd_dev->image_format = 1;
4910 } else if (ret > sizeof (__le32)) {
4911 void *p = response;
4913 image_id = ceph_extract_encoded_string(&p, p + ret,
4914 NULL, GFP_NOIO);
4915 ret = PTR_ERR_OR_ZERO(image_id);
4916 if (!ret)
4917 rbd_dev->image_format = 2;
4918 } else {
4919 ret = -EINVAL;
4920 }
4922 if (!ret) {
4923 rbd_dev->spec->image_id = image_id;
4924 dout("image_id is %s\n", image_id);
4925 }
4926 out:
4927 kfree(response);
4928 kfree(object_name);
4930 return ret;
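/*
 * Example (hedged; the id value is made up). For a format 2 image
 * named "foo" the id object is RBD_ID_PREFIX "foo" (i.e. "rbd_id.foo",
 * assuming the usual prefix from rbd_types.h), and its "get_id" class
 * method returns an encoded string such as "10052ae8944a", which
 * becomes rbd_dev->spec->image_id. A format 1 image has no id object,
 * hence the empty-string image id in the -ENOENT case above.
 */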
4934 * Undo whatever state changes are made by v1 or v2 header info
4935 * call routines.
4937 static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
4939 struct rbd_image_header *header;
4941 /* Drop parent reference unless it's already been done (or none) */
4943 if (rbd_dev->parent_overlap)
4944 rbd_dev_parent_put(rbd_dev);
4946 /* Free dynamic fields from the header, then zero it out */
4948 header = &rbd_dev->header;
4949 ceph_put_snap_context(header->snapc);
4950 kfree(header->snap_sizes);
4951 kfree(header->snap_names);
4952 kfree(header->object_prefix);
4953 memset(header, 0, sizeof (*header));
4956 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
4960 ret = rbd_dev_v2_object_prefix(rbd_dev);
4961 if (ret)
4962 goto out_err;
4965 * Get and check the features for the image.  Currently the
4966 * features are assumed to never change.
4968 ret = rbd_dev_v2_features(rbd_dev);
4969 if (ret)
4970 goto out_err;
4972 /* If the image supports fancy striping, get its parameters */
4974 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
4975 ret = rbd_dev_v2_striping_info(rbd_dev);
4976 if (ret < 0)
4977 goto out_err;
4978 }
4979 /* No support for crypto or compression in format 2 images */
4981 return 0;
4982 out_err:
4983 rbd_dev->header.features = 0;
4984 kfree(rbd_dev->header.object_prefix);
4985 rbd_dev->header.object_prefix = NULL;
4987 return ret;
4990 static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
4992 struct rbd_device *parent = NULL;
4993 struct rbd_spec *parent_spec;
4994 struct rbd_client *rbdc;
4997 if (!rbd_dev->parent_spec)
4998 return 0;
5000 * We need to pass a reference to the client and the parent
5001 * spec when creating the parent rbd_dev. Images related by
5002 * parent/child relationships always share both.
5004 parent_spec = rbd_spec_get(rbd_dev->parent_spec);
5005 rbdc = __rbd_get_client(rbd_dev->rbd_client);
5007 ret = -ENOMEM;
5008 parent = rbd_dev_create(rbdc, parent_spec);
5009 if (!parent)
5010 goto out_err;
5012 ret = rbd_dev_image_probe(parent, false);
5013 if (ret < 0)
5014 goto out_err;
5015 rbd_dev->parent = parent;
5016 atomic_set(&rbd_dev->parent_ref, 1);
5018 return 0;
5019 out_err:
5020 if (parent) {
5021 rbd_dev_unparent(rbd_dev);
5022 kfree(rbd_dev->header_name);
5023 rbd_dev_destroy(parent);
5024 } else {
5025 rbd_put_client(rbdc);
5026 rbd_spec_put(parent_spec);
5027 }
5029 return ret;
5032 static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
5036 /* Get an id and fill in device name. */
5038 ret = rbd_dev_id_get(rbd_dev);
5042 BUILD_BUG_ON(DEV_NAME_LEN
5043 < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
5044 sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);
5046 /* Record our major and minor device numbers. */
5048 if (!single_major) {
5049 ret = register_blkdev(0, rbd_dev->name);
5050 if (ret < 0)
5051 goto err_out_id;
5053 rbd_dev->major = ret;
5054 rbd_dev->minor = 0;
5055 } else {
5056 rbd_dev->major = rbd_major;
5057 rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
5058 }
5060 /* Set up the blkdev mapping. */
5062 ret = rbd_init_disk(rbd_dev);
5063 if (ret)
5064 goto err_out_blkdev;
5066 ret = rbd_dev_mapping_set(rbd_dev);
5067 if (ret)
5068 goto err_out_disk;
5069 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
5070 set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
5072 ret = rbd_bus_add_dev(rbd_dev);
5073 if (ret)
5074 goto err_out_mapping;
5076 /* Everything's ready. Announce the disk to the world. */
5078 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
5079 add_disk(rbd_dev->disk);
5081 pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
5082 (unsigned long long) rbd_dev->mapping.size);
5084 return ret;
5086 err_out_mapping:
5087 rbd_dev_mapping_clear(rbd_dev);
5088 err_out_disk:
5089 rbd_free_disk(rbd_dev);
5090 err_out_blkdev:
5091 if (!single_major)
5092 unregister_blkdev(rbd_dev->major, rbd_dev->name);
5093 err_out_id:
5094 rbd_dev_id_put(rbd_dev);
5095 rbd_dev_mapping_clear(rbd_dev);
5096 return ret;
5100 static int rbd_dev_header_name(struct rbd_device *rbd_dev)
5102 struct rbd_spec *spec = rbd_dev->spec;
5103 size_t size;
5105 /* Record the header object name for this rbd image. */
5107 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
5109 if (rbd_dev->image_format == 1)
5110 size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
5111 else
5112 size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);
5114 rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
5115 if (!rbd_dev->header_name)
5116 return -ENOMEM;
5118 if (rbd_dev->image_format == 1)
5119 sprintf(rbd_dev->header_name, "%s%s",
5120 spec->image_name, RBD_SUFFIX);
5121 else
5122 sprintf(rbd_dev->header_name, "%s%s",
5123 RBD_HEADER_PREFIX, spec->image_id);
5125 return 0;
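/*
 * Example (hedged; assumes RBD_SUFFIX ".rbd" and RBD_HEADER_PREFIX
 * "rbd_header." from rbd_types.h, with a made-up image id):
 *
 *	format 1, image "foo":             header object "foo.rbd"
 *	format 2, image id "10052ae8944a": header object "rbd_header.10052ae8944a"
 */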
5127 static void rbd_dev_image_release(struct rbd_device *rbd_dev)
5129 rbd_dev_unprobe(rbd_dev);
5130 kfree(rbd_dev->header_name);
5131 rbd_dev->header_name = NULL;
5132 rbd_dev->image_format = 0;
5133 kfree(rbd_dev->spec->image_id);
5134 rbd_dev->spec->image_id = NULL;
5136 rbd_dev_destroy(rbd_dev);
5140 * Probe for the existence of the header object for the given rbd
5141 * device. If this image is the one being mapped (i.e., not a
5142 * parent), initiate a watch on its header object before using that
5143 * object to get detailed information about the rbd image.
5145 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
5150 * Get the id from the image id object. Unless there's an
5151 * error, rbd_dev->spec->image_id will be filled in with
5152 * a dynamically-allocated string, and rbd_dev->image_format
5153 * will be set to either 1 or 2.
5155 ret = rbd_dev_image_id(rbd_dev);
5156 if (ret)
5157 return ret;
5158 rbd_assert(rbd_dev->spec->image_id);
5159 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
5161 ret = rbd_dev_header_name(rbd_dev);
5162 if (ret)
5163 goto err_out_format;
5165 if (mapping) {
5166 ret = rbd_dev_header_watch_sync(rbd_dev);
5167 if (ret)
5168 goto out_header_name;
5169 }
5171 if (rbd_dev->image_format == 1)
5172 ret = rbd_dev_v1_header_info(rbd_dev);
5173 else
5174 ret = rbd_dev_v2_header_info(rbd_dev);
5175 if (ret)
5176 goto err_out_watch;
5178 ret = rbd_dev_spec_update(rbd_dev);
5179 if (ret)
5180 goto err_out_probe;
5182 ret = rbd_dev_probe_parent(rbd_dev);
5183 if (ret)
5184 goto err_out_probe;
5186 dout("discovered format %u image, header name is %s\n",
5187 rbd_dev->image_format, rbd_dev->header_name);
5189 return 0;
5190 err_out_probe:
5191 rbd_dev_unprobe(rbd_dev);
5192 err_out_watch:
5193 if (mapping)
5194 rbd_dev_header_unwatch_sync(rbd_dev);
5195 out_header_name:
5196 kfree(rbd_dev->header_name);
5197 rbd_dev->header_name = NULL;
5198 err_out_format:
5199 rbd_dev->image_format = 0;
5200 kfree(rbd_dev->spec->image_id);
5201 rbd_dev->spec->image_id = NULL;
5203 dout("probe failed, returning %d\n", ret);
5208 static ssize_t do_rbd_add(struct bus_type *bus,
5209 const char *buf,
5210 size_t count)
5212 struct rbd_device *rbd_dev = NULL;
5213 struct ceph_options *ceph_opts = NULL;
5214 struct rbd_options *rbd_opts = NULL;
5215 struct rbd_spec *spec = NULL;
5216 struct rbd_client *rbdc;
5217 bool read_only;
5218 int rc;
5220 if (!try_module_get(THIS_MODULE))
5221 return -ENODEV;
5223 /* parse add command */
5224 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
5225 if (rc < 0)
5226 goto err_out_module;
5227 read_only = rbd_opts->read_only;
5228 kfree(rbd_opts);
5229 rbd_opts = NULL; /* done with this */
5231 rbdc = rbd_get_client(ceph_opts);
5232 if (IS_ERR(rbdc)) {
5233 rc = PTR_ERR(rbdc);
5234 goto err_out_args;
5235 }
5238 rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
5239 if (rc < 0)
5240 goto err_out_client;
5241 spec->pool_id = (u64)rc;
5243 /* The ceph file layout needs to fit pool id in 32 bits */
5245 if (spec->pool_id > (u64)U32_MAX) {
5246 rbd_warn(NULL, "pool id too large (%llu > %u)",
5247 (unsigned long long)spec->pool_id, U32_MAX);
5248 rc = -EIO;
5249 goto err_out_client;
5252 rbd_dev = rbd_dev_create(rbdc, spec);
5253 if (!rbd_dev)
5254 goto err_out_client;
5255 rbdc = NULL; /* rbd_dev now owns this */
5256 spec = NULL; /* rbd_dev now owns this */
5258 rc = rbd_dev_image_probe(rbd_dev, true);
5259 if (rc < 0)
5260 goto err_out_rbd_dev;
5262 /* If we are mapping a snapshot it must be marked read-only */
5264 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
5265 read_only = true;
5266 rbd_dev->mapping.read_only = read_only;
5268 rc = rbd_dev_device_setup(rbd_dev);
5269 if (rc) {
5271 * rbd_dev_header_unwatch_sync() can't be moved into
5272 * rbd_dev_image_release() without refactoring, see
5273 * commit 1f3ef78861ac.
5275 rbd_dev_header_unwatch_sync(rbd_dev);
5276 rbd_dev_image_release(rbd_dev);
5277 goto err_out_module;
5278 }
5280 return count;
5282 err_out_rbd_dev:
5283 rbd_dev_destroy(rbd_dev);
5284 err_out_client:
5285 rbd_put_client(rbdc);
5286 err_out_args:
5287 rbd_spec_put(spec);
5288 err_out_module:
5289 module_put(THIS_MODULE);
5291 dout("Error adding device %s\n", buf);
5293 return (ssize_t)rc;
5296 static ssize_t rbd_add(struct bus_type *bus,
5297 const char *buf,
5298 size_t count)
5300 if (single_major)
5301 return -EINVAL;
5303 return do_rbd_add(bus, buf, count);
5306 static ssize_t rbd_add_single_major(struct bus_type *bus,
5307 const char *buf,
5308 size_t count)
5310 return do_rbd_add(bus, buf, count);
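/*
 * Usage sketch (hedged; the address, key and names are made up; see
 * Documentation/ABI/testing/sysfs-bus-rbd for the authoritative ABI):
 *
 *	$ echo "1.2.3.4:6789 name=admin,secret=<key> mypool myimage" \
 *		> /sys/bus/rbd/add
 *
 * maps the image head read-write as /dev/rbd<id>. Appending a snapshot
 * name maps that snapshot instead, always read-only. When the module
 * is loaded with single_major=Y, /sys/bus/rbd/add_single_major is used
 * in place of the add file.
 */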
5313 static void rbd_dev_device_release(struct device *dev)
5315 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5317 rbd_free_disk(rbd_dev);
5318 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
5319 rbd_dev_mapping_clear(rbd_dev);
5320 if (!single_major)
5321 unregister_blkdev(rbd_dev->major, rbd_dev->name);
5322 rbd_dev_id_put(rbd_dev);
5323 rbd_dev_mapping_clear(rbd_dev);
5326 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
5328 while (rbd_dev->parent) {
5329 struct rbd_device *first = rbd_dev;
5330 struct rbd_device *second = first->parent;
5331 struct rbd_device *third;
5334 * Follow to the parent with no grandparent and
5335 * remove it.
5337 while (second && (third = second->parent)) {
5338 first = second;
5339 second = third;
5340 }
5341 rbd_assert(second);
5342 rbd_dev_image_release(second);
5343 first->parent = NULL;
5344 first->parent_overlap = 0;
5346 rbd_assert(first->parent_spec);
5347 rbd_spec_put(first->parent_spec);
5348 first->parent_spec = NULL;
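/*
 * Illustration: for a layered chain base -> p1 -> p2, where p2 has no
 * parent, the inner loop above leaves first == p1 and second == p2;
 * p2 is then released and detached from p1. The outer loop repeats
 * until base itself has no parent, so the most distant ancestor is
 * removed first on every pass.
 */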
5352 static ssize_t do_rbd_remove(struct bus_type *bus,
5353 const char *buf,
5354 size_t count)
5356 struct rbd_device *rbd_dev = NULL;
5357 struct list_head *tmp;
5358 int dev_id;
5359 unsigned long ul;
5360 bool already = false;
5361 int ret;
5363 ret = kstrtoul(buf, 10, &ul);
5364 if (ret)
5365 return ret;
5367 /* convert to int; abort if we lost anything in the conversion */
5368 dev_id = (int)ul;
5369 if (dev_id != ul)
5370 return -EINVAL;
5372 ret = -ENOENT;
5373 spin_lock(&rbd_dev_list_lock);
5374 list_for_each(tmp, &rbd_dev_list) {
5375 rbd_dev = list_entry(tmp, struct rbd_device, node);
5376 if (rbd_dev->dev_id == dev_id) {
5377 ret = 0;
5378 break;
5379 }
5380 }
5381 if (!ret) {
5382 spin_lock_irq(&rbd_dev->lock);
5383 if (rbd_dev->open_count)
5384 ret = -EBUSY;
5385 else
5386 already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
5387 &rbd_dev->flags);
5388 spin_unlock_irq(&rbd_dev->lock);
5389 }
5390 spin_unlock(&rbd_dev_list_lock);
5391 if (ret < 0 || already)
5392 return ret;
5394 rbd_dev_header_unwatch_sync(rbd_dev);
5396 * flush remaining watch callbacks - these must be complete
5397 * before the osd_client is shutdown
5399 dout("%s: flushing notifies\n", __func__);
5400 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
5403 * Don't free anything from rbd_dev->disk until after all
5404 * notifies are completely processed. Otherwise
5405 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
5406 * in a potential use after free of rbd_dev->disk or rbd_dev.
5408 rbd_bus_del_dev(rbd_dev);
5409 rbd_dev_image_release(rbd_dev);
5410 module_put(THIS_MODULE);
5412 return count;
5415 static ssize_t rbd_remove(struct bus_type *bus,
5416 const char *buf,
5417 size_t count)
5419 if (single_major)
5420 return -EINVAL;
5422 return do_rbd_remove(bus, buf, count);
5425 static ssize_t rbd_remove_single_major(struct bus_type *bus,
5426 const char *buf,
5427 size_t count)
5429 return do_rbd_remove(bus, buf, count);
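/*
 * Usage sketch (hedged): to unmap /dev/rbd2, write its dev id to the
 * bus remove file (or remove_single_major when the module runs in
 * single-major mode):
 *
 *	$ echo 2 > /sys/bus/rbd/remove
 *
 * The write fails with -EBUSY while the device is still open and with
 * -ENOENT if no mapped device has that id.
 */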
5433 * create control files in sysfs
5436 static int rbd_sysfs_init(void)
5440 ret = device_register(&rbd_root_dev);
5441 if (ret < 0)
5442 return ret;
5444 ret = bus_register(&rbd_bus_type);
5445 if (ret < 0)
5446 device_unregister(&rbd_root_dev);
5448 return ret;
5451 static void rbd_sysfs_cleanup(void)
5453 bus_unregister(&rbd_bus_type);
5454 device_unregister(&rbd_root_dev);
5457 static int rbd_slab_init(void)
5459 rbd_assert(!rbd_img_request_cache);
5460 rbd_img_request_cache = kmem_cache_create("rbd_img_request",
5461 sizeof (struct rbd_img_request),
5462 __alignof__(struct rbd_img_request),
5463 0, NULL);
5464 if (!rbd_img_request_cache)
5465 return -ENOMEM;
5467 rbd_assert(!rbd_obj_request_cache);
5468 rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
5469 sizeof (struct rbd_obj_request),
5470 __alignof__(struct rbd_obj_request),
5471 0, NULL);
5472 if (!rbd_obj_request_cache)
5473 goto out_err;
5475 rbd_assert(!rbd_segment_name_cache);
5476 rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
5477 CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
5478 if (rbd_segment_name_cache)
5479 return 0;
5480 out_err:
5481 if (rbd_obj_request_cache) {
5482 kmem_cache_destroy(rbd_obj_request_cache);
5483 rbd_obj_request_cache = NULL;
5486 kmem_cache_destroy(rbd_img_request_cache);
5487 rbd_img_request_cache = NULL;
5492 static void rbd_slab_exit(void)
5494 rbd_assert(rbd_segment_name_cache);
5495 kmem_cache_destroy(rbd_segment_name_cache);
5496 rbd_segment_name_cache = NULL;
5498 rbd_assert(rbd_obj_request_cache);
5499 kmem_cache_destroy(rbd_obj_request_cache);
5500 rbd_obj_request_cache = NULL;
5502 rbd_assert(rbd_img_request_cache);
5503 kmem_cache_destroy(rbd_img_request_cache);
5504 rbd_img_request_cache = NULL;
5507 static int __init rbd_init(void)
5511 if (!libceph_compatible(NULL)) {
5512 rbd_warn(NULL, "libceph incompatibility (quitting)");
5513 return -EINVAL;
5514 }
5516 rc = rbd_slab_init();
5517 if (rc)
5518 return rc;
5520 if (single_major) {
5521 rbd_major = register_blkdev(0, RBD_DRV_NAME);
5522 if (rbd_major < 0) {
5523 rc = rbd_major;
5524 goto err_out_slab;
5525 }
5526 }
5528 rc = rbd_sysfs_init();
5529 if (rc)
5530 goto err_out_blkdev;
5532 if (single_major)
5533 pr_info("loaded (major %d)\n", rbd_major);
5534 else
5535 pr_info("loaded\n");
5537 return 0;
5539 err_out_blkdev:
5540 if (single_major)
5541 unregister_blkdev(rbd_major, RBD_DRV_NAME);
5542 err_out_slab:
5543 rbd_slab_exit();
5544 return rc;
5547 static void __exit rbd_exit(void)
5549 ida_destroy(&rbd_dev_id_ida);
5550 rbd_sysfs_cleanup();
5551 if (single_major)
5552 unregister_blkdev(rbd_major, RBD_DRV_NAME);
5553 rbd_slab_exit();
5556 module_init(rbd_init);
5557 module_exit(rbd_exit);
5559 MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
5560 MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
5561 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
5562 /* following authorship retained from original osdblk.c */
5563 MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
5565 MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
5566 MODULE_LICENSE("GPL");