/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

		Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>

#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */
/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}
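/*
 * Illustrative sketch (not part of the driver): the saturating pair
 * above is meant for reference counts where overflow and underflow
 * must be reported rather than silently wrapped, e.g. the parent_ref
 * count used for layered images further below.  The helper name here
 * is hypothetical and only shows the intended calling pattern.
 */
#if 0	/* example only */
static bool example_take_ref(atomic_t *ref)
{
	/* > 0 means the count was live and not saturated */
	return atomic_inc_return_safe(ref) > 0;
}
#endif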
#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN \
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64
#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
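/*
 * The width above is a safe over-estimate: each byte contributes at
 * most log10(256) ~= 2.41 decimal digits, so 2.5 digits per byte plus
 * one extra character always suffices.  For a 4-byte int that gives
 * (5 * 4) / 2 + 1 = 11 characters, enough for "-2147483648".
 */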
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;
	u64 stripe_unit;
	u64 stripe_count;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the ids in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the ids associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};
/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};
struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);
enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};
struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request	*obj_request;	/* STAT op */
		struct {
			struct rbd_img_request	*img_request;
			u64			img_offset;
			/* links for img_request->obj_requests list */
			struct list_head	links;
		};
	};
	u32			which;		/* posn image request list */

	enum obj_request_type	type;
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};
enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
};
struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};
#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
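/*
 * Usage sketch (illustrative): a caller walks an image request's
 * object requests with, e.g.,
 *
 *	struct rbd_obj_request *obj_req;
 *
 *	for_each_obj_request(img_req, obj_req)
 *		process(obj_req);
 *
 * where img_req and process() are hypothetical names.  The _safe
 * variant iterates in reverse and tolerates removal of the current
 * entry, which is why the teardown paths below use it.
 */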
struct rbd_mapping {
	u64                     size;
	u64                     features;
	bool			read_only;
};

struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;

	char			*header_name;

	struct ceph_file_layout	layout;

	struct ceph_osd_event   *watch_event;
	struct rbd_obj_request	*watch_request;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* protects updating the header */
	struct rw_semaphore     header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};
/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};
static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;
static struct kmem_cache	*rbd_segment_name_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);
/*
 * Default to false for now, as single-major requires >= 0.75 version of
 * userspace rbd utility.
 */
static bool single_major = false;
module_param(single_major, bool, S_IRUGO);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");
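/*
 * Usage note (assumption, not taken from this file): as a bool module
 * parameter this is typically enabled at load time with something like
 * "modprobe rbd single_major=Y".
 */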
static int rbd_img_request_submit(struct rbd_img_request *img_request);

static void rbd_dev_device_release(struct device *dev);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
static void rbd_spec_put(struct rbd_spec *spec);
static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
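/*
 * Worked example: with RBD_SINGLE_MAJOR_PART_SHIFT == 4, device id 3
 * maps to minor 48, and minors 48..63 (the whole disk plus up to 15
 * partitions) all map back to device id 3.
 */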
static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}
#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
		u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}
static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}
static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ret = 0;
	int val;
	bool ro;
	bool ro_changed = false;

	/* get_user() may sleep, so call it before taking rbd_dev->lock */
	if (get_user(val, (int __user *)(arg)))
		return -EFAULT;

	ro = val ? true : false;
	/* Snapshots can't be written to */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	/* prevent others from opening this device */
	if (rbd_dev->open_count > 1) {
		ret = -EBUSY;
		goto out;
	}

	if (rbd_dev->mapping.read_only != ro) {
		rbd_dev->mapping.read_only = ro;
		ro_changed = true;
	}

out:
	spin_unlock_irq(&rbd_dev->lock);
	/* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
	if (ret == 0 && ro_changed)
		set_disk_ro(rbd_dev->disk, ro ? 1 : 0);

	return ret;
}
static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret = 0;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};
/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client)) {
		ret = PTR_ERR(rbdc->client);
		goto out_rbdc;
	}
	ceph_opts = NULL;	/* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}
static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}
/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}
enum {
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	/* Boolean args above */
	Opt_last_bool,
};

static match_table_t rbd_opts_tokens = {
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	/* Boolean args above */
	{-1, NULL}
};

struct rbd_options {
	bool	read_only;
};

#define RBD_READ_ONLY_DEFAULT	false
static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < 0)
		return -EINVAL;

	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) "
			       "at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token,
		     argstr[0].from);
	} else if (token > Opt_last_string && token < Opt_last_bool) {
		dout("got Boolean token %d\n", token);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	default:
		rbd_assert(false);
		break;
	}
	return 0;
}
/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);
	mutex_unlock(&client_mutex);

	return rbdc;
}
/*
 * Destroy ceph client.  (Note: this takes rbd_client_list_lock itself,
 * so the caller must not already hold it.)
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}
static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}
static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}
/*
 * Fill an rbd image header with information from the given format 1
 * ondisk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	size_t size;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		size_t len;

		len = strnlen(ondisk->object_prefix,
				sizeof (ondisk->object_prefix));
		object_prefix = kmalloc(len + 1, GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
		memcpy(object_prefix, ondisk->object_prefix, len);
		object_prefix[len] = '\0';
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */

		size = snap_count * sizeof (*header->snap_sizes);
		snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		header->crypt_type = ondisk->options.crypt_type;
		header->comp_type = ondisk->options.comp_type;
		/* The rest aren't used for format 1 images */
		header->stripe_unit = 0;
		header->stripe_count = 0;
		header->features = 0;
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	/* Make sure mapping size is consistent with header info */

	if (rbd_dev->spec->snap_id == CEPH_NOSNAP || first_time)
		if (rbd_dev->mapping.size != header->image_size)
			rbd_dev->mapping.size = header->image_size;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}
/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}
/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
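/*
 * Worked example (illustrative): with snapc->snaps holding { 12, 7, 3 }
 * (descending), a search for id 7 returns &snaps[1], so the function
 * yields index 1; a search for id 5 finds nothing and yields
 * BAD_SNAP_INDEX.  The reversed comparator above is what lets
 * bsearch() work on the descending array.
 */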
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}
static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}
static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}
static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}
static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;
	char *name_format;

	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	name_format = "%s.%012llx";
	if (rbd_dev->image_format == 2)
		name_format = "%s.%016llx";
	ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		kmem_cache_free(rbd_segment_name_cache, name);
		name = NULL;
	}

	return name;
}

static void rbd_segment_name_free(const char *name)
{
	/* The explicit cast here is needed to drop the const qualifier */

	kmem_cache_free(rbd_segment_name_cache, (void *)name);
}
static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}
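/*
 * Worked example (illustrative): with obj_order == 22 the segment size
 * is 4 MiB.  An image-relative request at offset 5 MiB for 4 MiB has
 * an in-segment offset of 1 MiB, so rbd_segment_length() trims the
 * first piece to 3 MiB; the remaining 1 MiB lands in the next object.
 */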
/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}

static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}
/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned long flags;
	void *buf;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, iter) {
			if (pos + bv.bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(&bv, &flags);
				memset(buf + remainder, 0,
				       bv.bv_len - remainder);
				flush_dcache_page(bv.bv_page);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv.bv_len;
		}

		chain = chain->bi_next;
	}
}
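/*
 * Worked example (illustrative): for a chain covering bytes 0..4095 of
 * a read and start_ofs == 1024, the first 1024 bytes are left intact
 * and bytes 1024..4095 are zeroed; with start_ofs == 0 the whole chain
 * is zero-filled, which is how ENOENT "holes" are surfaced to the
 * block layer (see rbd_img_obj_request_read_callback() below).
 */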
/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

		page_offset = offset & ~PAGE_MASK;
		length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
		flush_dcache_page(*page);
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}
/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bio;

	bio = bio_clone(bio_src, gfpmask);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio_advance(bio, offset);
	bio->bi_iter.bi_size = len;

	return bio;
}
/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_iter.bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_iter.bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}
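/*
 * Usage sketch (illustrative, not upstream text): callers split one
 * request's data into per-object pieces by repeatedly cloning, e.g.
 * with hypothetical names bi, off and seg_len:
 *
 *	piece = bio_chain_clone_range(&bi, &off, seg_len, GFP_NOIO);
 *
 * After each call bi and off point at the first un-cloned byte, ready
 * for the next segment; rbd_img_request_fill() below uses this
 * pattern.
 */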
/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
		struct rbd_device *rbd_dev;

		rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked img_data\n",
			obj_request);
	}
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
		struct rbd_device *rbd_dev = NULL;

		if (obj_request_img_data_test(obj_request))
			rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked done\n",
			obj_request);
	}
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}

/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the responses from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
				bool exists)
{
	if (exists)
		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
	smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}
static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
{
	struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;

	return obj_request->img_offset <
	    round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
}
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
	     atomic_read(&img_request->kref.refcount));
	kref_get(&img_request->kref);
}

static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	if (img_request_child_test(img_request))
		kref_put(&img_request->kref, rbd_parent_request_destroy);
	else
		kref_put(&img_request->kref, rbd_img_request_destroy);
}
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}
static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}

static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
				struct rbd_obj_request *obj_request)
{
	dout("%s %p\n", __func__, obj_request);
	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}

static void rbd_obj_request_end(struct rbd_obj_request *obj_request)
{
	dout("%s %p\n", __func__, obj_request);
	ceph_osdc_cancel_request(obj_request->osd_req);
}
/*
 * Wait for an object request to complete.  If interrupted, cancel the
 * underlying osd request.
 */
static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
	int ret;

	dout("%s %p\n", __func__, obj_request);

	ret = wait_for_completion_interruptible(&obj_request->completion);
	if (ret < 0) {
		dout("%s %p interrupted\n", __func__, obj_request);
		rbd_obj_request_end(obj_request);
		return ret;
	}

	dout("%s %p done\n", __func__, obj_request);
	return 0;
}
static void rbd_img_request_complete(struct rbd_img_request *img_request)
{

	dout("%s: img %p\n", __func__, img_request);

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; not clear which way is better offhand.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;
		u64 xferred = 0;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;
	}

	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}
/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_WRITE, &img_request->flags);
	smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static void img_request_child_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}
static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
	u64 xferred = obj_request->xferred;
	u64 length = obj_request->length;

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
		xferred, length);
	/*
	 * ENOENT means a hole in the image.  We zero-fill the entire
	 * length of the request.  A short read also implies zero-fill
	 * to the end of the request.  An error requires the whole
	 * length of the request to be reported finished with an error
	 * to the block layer.  In each case we update the xferred
	 * count to indicate the whole request was satisfied.
	 */
	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
	if (obj_request->result == -ENOENT) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, 0);
		else
			zero_pages(obj_request->pages, 0, length);
		obj_request->result = 0;
	} else if (xferred < length && !obj_request->result) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, xferred);
		else
			zero_pages(obj_request->pages, xferred, length);
	}
	obj_request->xferred = length;
	obj_request_done_set(obj_request);
}
static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}

static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}

static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_device *rbd_dev = NULL;
	bool layered = false;

	if (obj_request_img_data_test(obj_request)) {
		img_request = obj_request->img_request;
		layered = img_request && img_request_layered_test(img_request);
		rbd_dev = img_request->rbd_dev;
	}

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
	if (layered && obj_request->result == -ENOENT &&
			obj_request->img_offset < rbd_dev->parent_overlap)
		rbd_img_parent_read(obj_request);
	else if (img_request)
		rbd_img_obj_request_read_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}

static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short write.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	obj_request_done_set(obj_request);
}

/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
				struct ceph_msg *msg)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	u16 opcode;

	dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
	rbd_assert(osd_req == obj_request->osd_req);
	if (obj_request_img_data_test(obj_request)) {
		rbd_assert(obj_request->img_request);
		rbd_assert(obj_request->which != BAD_WHICH);
	} else {
		rbd_assert(obj_request->which == BAD_WHICH);
	}

	if (osd_req->r_result < 0)
		obj_request->result = osd_req->r_result;

	rbd_assert(osd_req->r_num_ops <= CEPH_OSD_MAX_OP);

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to blk_end_request(), which takes an unsigned int.
	 */
	obj_request->xferred = osd_req->r_reply_op_len[0];
	rbd_assert(obj_request->xferred < (u64)UINT_MAX);

	opcode = osd_req->r_ops[0].op;
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request);
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE);
		/* fall through */
	case CEPH_OSD_OP_WRITE:
		rbd_osd_write_callback(obj_request);
		break;
	case CEPH_OSD_OP_STAT:
		rbd_osd_stat_callback(obj_request);
		break;
	case CEPH_OSD_OP_CALL:
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		rbd_osd_trivial_callback(obj_request);
		break;
	default:
		rbd_warn(NULL, "%s: unsupported op %hu\n",
			obj_request->object_name, (unsigned short) opcode);
		break;
	}

	if (obj_request_done_test(obj_request))
		rbd_obj_request_complete(obj_request);
}
static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	u64 snap_id;

	rbd_assert(osd_req != NULL);

	snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			NULL, snap_id, NULL);
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	struct ceph_snap_context *snapc;
	struct timespec mtime = CURRENT_TIME;

	rbd_assert(osd_req != NULL);

	snapc = img_request ? img_request->snapc : NULL;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			snapc, CEPH_NOSNAP, &mtime);
}
/*
 * Create an osd request.  A read request has one osd op (read).
 * A write request has either one (watch) or two (hint+write) osd ops.
 * (All rbd data writes are prefixed with an allocation hint op, but
 * technically osd watch is a write request, hence this distinction.)
 */
static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					bool write_request,
					unsigned int num_ops,
					struct rbd_obj_request *obj_request)
{
	struct ceph_snap_context *snapc = NULL;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	if (obj_request_img_data_test(obj_request)) {
		struct rbd_img_request *img_request = obj_request->img_request;

		rbd_assert(write_request ==
				img_request_write_test(img_request));
		if (write_request)
			snapc = img_request->snapc;
	}

	rbd_assert(num_ops == 1 || (write_request && num_ops == 2));

	/* Allocate and initialize the request, for the num_ops ops */

	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
					  GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	if (write_request)
		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	else
		osd_req->r_flags = CEPH_OSD_FLAG_READ;

	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
	ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);

	return osd_req;
}
/*
 * Create a copyup osd request based on the information in the
 * object request supplied.  A copyup request has three osd ops,
 * a copyup method call, a hint op, and a write op.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_assert(img_request_write_test(img_request));

	/* Allocate and initialize the request, for the three ops */

	snapc = img_request->snapc;
	rbd_dev = img_request->rbd_dev;
	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, 3, false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
	ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);

	return osd_req;
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}
/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
						u64 offset, u64 length,
						enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;
	size_t size;
	char *name;

	rbd_assert(obj_request_type_valid(type));

	size = strlen(object_name) + 1;
	name = kmalloc(size, GFP_KERNEL);
	if (!name)
		return NULL;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
	if (!obj_request) {
		kfree(name);
		return NULL;
	}

	obj_request->object_name = memcpy(name, object_name, size);
	obj_request->offset = offset;
	obj_request->length = length;
	obj_request->flags = 0;
	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
		offset, length, (int)type, obj_request);

	return obj_request;
}
static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
	case OBJ_REQUEST_PAGES:
		if (obj_request->pages)
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
	}

	kfree(obj_request->object_name);
	obj_request->object_name = NULL;
	kmem_cache_free(rbd_obj_request_cache, obj_request);
}
/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}

/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow\n");
}
/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * We must get the reference before checking for the overlap to
 * coordinate properly with zeroing the parent overlap in
 * rbd_dev_v2_parent_info() when an image gets flattened.  We
 * drop it again if there is no overlap.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return false;

	counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	if (counter > 0 && rbd_dev->parent_overlap)
		return true;

	/* Image was flattened, but parent is not yet torn down */

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow\n");

	return false;
}
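/*
 * Pairing sketch (illustrative): a layered I/O path brackets its use
 * of the parent with these two helpers,
 *
 *	if (rbd_dev_parent_get(rbd_dev)) {
 *		... issue requests against rbd_dev->parent ...
 *		rbd_dev_parent_put(rbd_dev);
 *	}
 *
 * so the parent fields are only torn down once the last in-flight
 * user drops its reference; the saturating atomic helpers near the
 * top of the file make overflow and underflow detectable here.
 */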
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					u64 offset, u64 length,
					bool write_request)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_ATOMIC);
	if (!img_request)
		return NULL;

	if (write_request) {
		down_read(&rbd_dev->header_rwsem);
		ceph_get_snap_context(rbd_dev->header.snapc);
		up_read(&rbd_dev->header_rwsem);
	}

	img_request->rq = NULL;
	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
	img_request->flags = 0;
	if (write_request) {
		img_request_write_set(img_request);
		img_request->snapc = rbd_dev->header.snapc;
	} else {
		img_request->snap_id = rbd_dev->spec->snap_id;
	}
	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);
	spin_lock_init(&img_request->completion_lock);
	img_request->next_completion = 0;
	img_request->callback = NULL;
	img_request->result = 0;
	img_request->obj_request_count = 0;
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
		write_request ? "write" : "read", offset, length,
		img_request);

	return img_request;
}
static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (img_request_write_test(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}
static struct rbd_img_request *rbd_parent_request_create(
					struct rbd_obj_request *obj_request,
					u64 img_offset, u64 length)
{
	struct rbd_img_request *parent_request;
	struct rbd_device *rbd_dev;

	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;

	parent_request = rbd_img_request_create(rbd_dev->parent,
						img_offset, length, false);
	if (!parent_request)
		return NULL;

	img_request_child_set(parent_request);
	rbd_obj_request_get(obj_request);
	parent_request->obj_request = obj_request;

	return parent_request;
}

static void rbd_parent_request_destroy(struct kref *kref)
{
	struct rbd_img_request *parent_request;
	struct rbd_obj_request *orig_request;

	parent_request = container_of(kref, struct rbd_img_request, kref);
	orig_request = parent_request->obj_request;

	parent_request->obj_request = NULL;
	rbd_obj_request_put(orig_request);
	img_request_child_clear(parent_request);

	rbd_img_request_destroy(kref);
}
static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	unsigned int xferred;
	int result;
	bool more;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
	xferred = (unsigned int)obj_request->xferred;
	result = obj_request->result;
	if (result) {
		struct rbd_device *rbd_dev = img_request->rbd_dev;

		rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n",
			img_request_write_test(img_request) ? "write" : "read",
			obj_request->length, obj_request->img_offset,
			obj_request->offset);
		rbd_warn(rbd_dev, "  result %d xferred %x\n",
			result, xferred);
		if (!img_request->result)
			img_request->result = result;
	}

	/* Image object requests don't own their page array */

	if (obj_request->type == OBJ_REQUEST_PAGES) {
		obj_request->pages = NULL;
		obj_request->page_count = 0;
	}

	if (img_request_child_test(img_request)) {
		rbd_assert(img_request->obj_request != NULL);
		more = obj_request->which < img_request->obj_request_count - 1;
	} else {
		rbd_assert(img_request->rq != NULL);
		more = blk_end_request(img_request->rq, result, xferred);
	}

	return more;
}
static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	u32 which = obj_request->which;
	bool more = true;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	rbd_assert(img_request != NULL);
	rbd_assert(img_request->obj_request_count > 0);
	rbd_assert(which != BAD_WHICH);
	rbd_assert(which < img_request->obj_request_count);

	spin_lock_irq(&img_request->completion_lock);
	if (which != img_request->next_completion)
		goto out;

	for_each_obj_request_from(img_request, obj_request) {
		rbd_assert(more);
		rbd_assert(which < img_request->obj_request_count);

		if (!obj_request_done_test(obj_request))
			break;
		more = rbd_img_obj_end_request(obj_request);
		which++;
	}

	rbd_assert(more ^ (which == img_request->obj_request_count));
	img_request->next_completion = which;
out:
	spin_unlock_irq(&img_request->completion_lock);
	rbd_img_request_put(img_request);

	if (!more)
		rbd_img_request_complete(img_request);
}
2249 * Split up an image request into one or more object requests, each
2250 * to a different object. The "type" parameter indicates whether
2251 * "data_desc" is the pointer to the head of a list of bio
2252 * structures, or the base of a page array. In either case this
2253 * function assumes data_desc describes memory sufficient to hold
2254 * all data described by the image request.
2256 static int rbd_img_request_fill(struct rbd_img_request *img_request,
2257 enum obj_request_type type,
2260 struct rbd_device *rbd_dev = img_request->rbd_dev;
2261 struct rbd_obj_request *obj_request = NULL;
2262 struct rbd_obj_request *next_obj_request;
2263 bool write_request = img_request_write_test(img_request);
2264 struct bio *bio_list = NULL;
2265 unsigned int bio_offset = 0;
2266 struct page **pages = NULL;
2271 dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2272 (int)type, data_desc);
2274 opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ;
2275 img_offset = img_request->offset;
2276 resid = img_request->length;
2277 rbd_assert(resid > 0);
2279 if (type == OBJ_REQUEST_BIO) {
2280 bio_list = data_desc;
2281 rbd_assert(img_offset ==
2282 bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
2284 rbd_assert(type == OBJ_REQUEST_PAGES);
2289 struct ceph_osd_request *osd_req;
2290 const char *object_name;
2293 unsigned int which = 0;
2295 object_name = rbd_segment_name(rbd_dev, img_offset);
2298 offset = rbd_segment_offset(rbd_dev, img_offset);
2299 length = rbd_segment_length(rbd_dev, img_offset, resid);
2300 obj_request = rbd_obj_request_create(object_name,
2301 offset, length, type);
2302 /* object request has its own copy of the object name */
2303 rbd_segment_name_free(object_name);
2308 * set obj_request->img_request before creating the
2309 * osd_request so that it gets the right snapc
2311 rbd_img_obj_request_add(img_request, obj_request);
2313 if (type == OBJ_REQUEST_BIO) {
2314 unsigned int clone_size;
2316 rbd_assert(length <= (u64)UINT_MAX);
2317 clone_size = (unsigned int)length;
2318 obj_request->bio_list =
2319 bio_chain_clone_range(&bio_list,
2323 if (!obj_request->bio_list)
2326 unsigned int page_count;
2328 obj_request->pages = pages;
2329 page_count = (u32)calc_pages_for(offset, length);
2330 obj_request->page_count = page_count;
2331 if ((offset + length) & ~PAGE_MASK)
2332 page_count--; /* more on last page */
2333 pages += page_count;
2336 osd_req = rbd_osd_req_create(rbd_dev, write_request,
2337 (write_request ? 2 : 1),
2341 obj_request->osd_req = osd_req;
2342 obj_request->callback = rbd_img_obj_callback;
2343 rbd_img_request_get(img_request);
2345 if (write_request) {
2346 osd_req_op_alloc_hint_init(osd_req, which,
2347 rbd_obj_bytes(&rbd_dev->header),
2348 rbd_obj_bytes(&rbd_dev->header));
2352 osd_req_op_extent_init(osd_req, which, opcode, offset, length,
2354 if (type == OBJ_REQUEST_BIO)
2355 osd_req_op_extent_osd_data_bio(osd_req, which,
2356 obj_request->bio_list, length);
2358 osd_req_op_extent_osd_data_pages(osd_req, which,
2359 obj_request->pages, length,
2360 offset & ~PAGE_MASK, false, false);
2363 rbd_osd_req_format_write(obj_request);
2365 rbd_osd_req_format_read(obj_request);
2367 obj_request->img_offset = img_offset;
2369 img_offset += length;
2376 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2377 rbd_img_obj_request_del(img_request, obj_request);
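/*
 * Editor's sketch (not driver code): with default striping (stripe_unit
 * equal to the object size, stripe_count 1), the rbd_segment_offset()
 * and rbd_segment_length() helpers used above reduce to the shift/mask
 * arithmetic below.  obj_order is log2 of the object size (22 for the
 * default 4 MiB objects); the example_ names are hypothetical.
 */
static u64 example_segment_offset(u64 img_offset, u8 obj_order)
{
	/* byte offset within the object containing img_offset */
	return img_offset & ((1ULL << obj_order) - 1);
}

static u64 example_segment_length(u64 img_offset, u64 resid, u8 obj_order)
{
	u64 obj_size = 1ULL << obj_order;
	u64 seg_off = img_offset & (obj_size - 1);

	/* an object request never crosses an object boundary */
	return min(resid, obj_size - seg_off);
}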
2383 rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
2385 struct rbd_img_request *img_request;
2386 struct rbd_device *rbd_dev;
2387 struct page **pages;
2390 rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2391 rbd_assert(obj_request_img_data_test(obj_request));
2392 img_request = obj_request->img_request;
2393 rbd_assert(img_request);
2395 rbd_dev = img_request->rbd_dev;
2396 rbd_assert(rbd_dev);
2398 pages = obj_request->copyup_pages;
2399 rbd_assert(pages != NULL);
2400 obj_request->copyup_pages = NULL;
2401 page_count = obj_request->copyup_page_count;
2402 rbd_assert(page_count);
2403 obj_request->copyup_page_count = 0;
2404 ceph_release_page_vector(pages, page_count);
2407 * We want the transfer count to reflect the size of the
2408 * original write request. There is no such thing as a
2409 * successful short write, so if the request was successful
2410 * we can just set it to the originally-requested length.
2412 if (!obj_request->result)
2413 obj_request->xferred = obj_request->length;
2415 /* Finish up with the normal image object callback */
2417 rbd_img_obj_callback(obj_request);
2421 rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2423 struct rbd_obj_request *orig_request;
2424 struct ceph_osd_request *osd_req;
2425 struct ceph_osd_client *osdc;
2426 struct rbd_device *rbd_dev;
2427 struct page **pages;
2434 rbd_assert(img_request_child_test(img_request));
2436 /* First get what we need from the image request */
2438 pages = img_request->copyup_pages;
2439 rbd_assert(pages != NULL);
2440 img_request->copyup_pages = NULL;
2441 page_count = img_request->copyup_page_count;
2442 rbd_assert(page_count);
2443 img_request->copyup_page_count = 0;
2445 orig_request = img_request->obj_request;
2446 rbd_assert(orig_request != NULL);
2447 rbd_assert(obj_request_type_valid(orig_request->type));
2448 img_result = img_request->result;
2449 parent_length = img_request->length;
2450 rbd_assert(parent_length == img_request->xferred);
2451 rbd_img_request_put(img_request);
2453 rbd_assert(orig_request->img_request);
2454 rbd_dev = orig_request->img_request->rbd_dev;
2455 rbd_assert(rbd_dev);
2458 * If the overlap has become 0 (most likely because the
2459 * image has been flattened) we need to free the pages
2460 * and re-submit the original write request.
2462 if (!rbd_dev->parent_overlap) {
2463 struct ceph_osd_client *osdc;
2465 ceph_release_page_vector(pages, page_count);
2466 osdc = &rbd_dev->rbd_client->client->osdc;
2467 img_result = rbd_obj_request_submit(osdc, orig_request);
2476 * The original osd request is of no use to us any more.
2477 * We need a new one that can hold the three ops in a copyup
2478 * request. Allocate the new copyup osd request for the
2479 * original request, and release the old one.
2481 img_result = -ENOMEM;
2482 osd_req = rbd_osd_req_create_copyup(orig_request);
2485 rbd_osd_req_destroy(orig_request->osd_req);
2486 orig_request->osd_req = osd_req;
2487 orig_request->copyup_pages = pages;
2488 orig_request->copyup_page_count = page_count;
2490 /* Initialize the copyup op */
2492 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2493 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
2496 /* Then the hint op */
2498 osd_req_op_alloc_hint_init(osd_req, 1, rbd_obj_bytes(&rbd_dev->header),
2499 rbd_obj_bytes(&rbd_dev->header));
2501 /* And the original write request op */
2503 offset = orig_request->offset;
2504 length = orig_request->length;
2505 osd_req_op_extent_init(osd_req, 2, CEPH_OSD_OP_WRITE,
2506 offset, length, 0, 0);
2507 if (orig_request->type == OBJ_REQUEST_BIO)
2508 osd_req_op_extent_osd_data_bio(osd_req, 2,
2509 orig_request->bio_list, length);
2511 osd_req_op_extent_osd_data_pages(osd_req, 2,
2512 orig_request->pages, length,
2513 offset & ~PAGE_MASK, false, false);
2515 rbd_osd_req_format_write(orig_request);
2517 /* All set, send it off. */
2519 orig_request->callback = rbd_img_obj_copyup_callback;
2520 osdc = &rbd_dev->rbd_client->client->osdc;
2521 img_result = rbd_obj_request_submit(osdc, orig_request);
2525 /* Record the error code and complete the request */
2527 orig_request->result = img_result;
2528 orig_request->xferred = 0;
2529 obj_request_done_set(orig_request);
2530 rbd_obj_request_complete(orig_request);
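/*
 * Editor's note: the rebuilt request above thus carries three ops, in
 * order: [0] the "rbd" class "copyup" method carrying the parent data,
 * [1] the allocation hint, and [2] the guest's original write.  The
 * copyup must populate the target object before the write is applied
 * on top of it.
 */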
2534 * Read from the parent image the range of data that covers the
2535 * entire target of the given object request. This is used for
2536 * satisfying a layered image write request when the target of an
2537 * object request from the image request does not exist.
2539 * A page array big enough to hold the returned data is allocated
2540 * and supplied to rbd_img_request_fill() as the "data descriptor."
2541 * When the read completes, this page array will be transferred to
2542 * the original object request for the copyup operation.
2544 * If an error occurs, record it as the result of the original
2545 * object request and mark it done so it gets completed.
2547 static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2549 struct rbd_img_request *img_request = NULL;
2550 struct rbd_img_request *parent_request = NULL;
2551 struct rbd_device *rbd_dev;
2554 struct page **pages = NULL;
2558 rbd_assert(obj_request_img_data_test(obj_request));
2559 rbd_assert(obj_request_type_valid(obj_request->type));
2561 img_request = obj_request->img_request;
2562 rbd_assert(img_request != NULL);
2563 rbd_dev = img_request->rbd_dev;
2564 rbd_assert(rbd_dev->parent != NULL);
2567 * Determine the byte range covered by the object in the
2568 * child image to which the original request was to be sent.
2570 img_offset = obj_request->img_offset - obj_request->offset;
2571 length = (u64)1 << rbd_dev->header.obj_order;
2574 * There is no defined parent data beyond the parent
2575 * overlap, so limit what we read at that boundary if necessary.
2578 if (img_offset + length > rbd_dev->parent_overlap) {
2579 rbd_assert(img_offset < rbd_dev->parent_overlap);
2580 length = rbd_dev->parent_overlap - img_offset;
2584 * Allocate a page array big enough to receive the data read from the parent.
2587 page_count = (u32)calc_pages_for(0, length);
2588 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2589 if (IS_ERR(pages)) {
2590 result = PTR_ERR(pages);
2596 parent_request = rbd_parent_request_create(obj_request,
2597 img_offset, length);
2598 if (!parent_request)
2601 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2604 parent_request->copyup_pages = pages;
2605 parent_request->copyup_page_count = page_count;
2607 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2608 result = rbd_img_request_submit(parent_request);
2612 parent_request->copyup_pages = NULL;
2613 parent_request->copyup_page_count = 0;
2614 parent_request->obj_request = NULL;
2615 rbd_obj_request_put(obj_request);
2618 ceph_release_page_vector(pages, page_count);
2620 rbd_img_request_put(parent_request);
2621 obj_request->result = result;
2622 obj_request->xferred = 0;
2623 obj_request_done_set(obj_request);
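/*
 * Editor's worked example (hypothetical numbers): with 4 MiB objects
 * (obj_order == 22), an object request with img_offset 0x620000 and
 * offset 0x220000 maps to the full-object range [0x400000, 0x800000).
 * If rbd_dev->parent_overlap is 0x700000, the parent read is clamped
 * to length 0x300000, since no parent data is defined past the overlap.
 */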
2628 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2630 struct rbd_obj_request *orig_request;
2631 struct rbd_device *rbd_dev;
2634 rbd_assert(!obj_request_img_data_test(obj_request));
2637 * All we need from the object request is the original
2638 * request and the result of the STAT op. Grab those, then
2639 * we're done with the request.
2641 orig_request = obj_request->obj_request;
2642 obj_request->obj_request = NULL;
2643 rbd_obj_request_put(orig_request);
2644 rbd_assert(orig_request);
2645 rbd_assert(orig_request->img_request);
2647 result = obj_request->result;
2648 obj_request->result = 0;
2650 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2651 obj_request, orig_request, result,
2652 obj_request->xferred, obj_request->length);
2653 rbd_obj_request_put(obj_request);
2656 * If the overlap has become 0 (most likely because the
2657 * image has been flattened) we need to free the pages
2658 * and re-submit the original write request.
2660 rbd_dev = orig_request->img_request->rbd_dev;
2661 if (!rbd_dev->parent_overlap) {
2662 struct ceph_osd_client *osdc;
2664 osdc = &rbd_dev->rbd_client->client->osdc;
2665 result = rbd_obj_request_submit(osdc, orig_request);
2671 * Our only purpose here is to determine whether the object
2672 * exists, and we don't want to treat the non-existence as
2673 * an error. If something else comes back, transfer the
2674 * error to the original request and complete it now.
2677 obj_request_existence_set(orig_request, true);
2678 } else if (result == -ENOENT) {
2679 obj_request_existence_set(orig_request, false);
2680 } else if (result) {
2681 orig_request->result = result;
2686 * Resubmit the original request now that we have recorded
2687 * whether the target object exists.
2689 orig_request->result = rbd_img_obj_request_submit(orig_request);
2691 if (orig_request->result)
2692 rbd_obj_request_complete(orig_request);
2695 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2697 struct rbd_obj_request *stat_request;
2698 struct rbd_device *rbd_dev;
2699 struct ceph_osd_client *osdc;
2700 struct page **pages = NULL;
2706 * The response data for a STAT call consists of: a le64 length plus
 * two le32 mtime fields (see the editor's sketch after this function).
2713 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2714 page_count = (u32)calc_pages_for(0, size);
2715 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2717 return PTR_ERR(pages);
2720 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2725 rbd_obj_request_get(obj_request);
2726 stat_request->obj_request = obj_request;
2727 stat_request->pages = pages;
2728 stat_request->page_count = page_count;
2730 rbd_assert(obj_request->img_request);
2731 rbd_dev = obj_request->img_request->rbd_dev;
2732 stat_request->osd_req = rbd_osd_req_create(rbd_dev, false, 1,
2734 if (!stat_request->osd_req)
2736 stat_request->callback = rbd_img_obj_exists_callback;
2738 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
2739 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2741 rbd_osd_req_format_read(stat_request);
2743 osdc = &rbd_dev->rbd_client->client->osdc;
2744 ret = rbd_obj_request_submit(osdc, stat_request);
2747 rbd_obj_request_put(obj_request);
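/*
 * Editor's sketch (not driver code): the on-wire layout implied by the
 * STAT response size computed above.  The struct name is hypothetical.
 */
struct example_stat_reply {
	__le64 length;			/* object size in bytes */
	__le32 tv_sec;			/* mtime seconds */
	__le32 tv_nsec;			/* mtime nanoseconds */
} __attribute__ ((packed));		/* sizeof == 16, matching "size" */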
2752 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2754 struct rbd_img_request *img_request;
2755 struct rbd_device *rbd_dev;
2758 rbd_assert(obj_request_img_data_test(obj_request));
2760 img_request = obj_request->img_request;
2761 rbd_assert(img_request);
2762 rbd_dev = img_request->rbd_dev;
2765 * Only writes to layered images need special handling.
2766 * Reads and non-layered writes are simple object requests.
2767 * Layered writes that start beyond the end of the overlap
2768 * with the parent have no parent data, so they too are
2769 * simple object requests. Finally, if the target object is
2770 * known to already exist, its parent data has already been
2771 * copied, so a write to the object can also be handled as a
2772 * simple object request.
2774 if (!img_request_write_test(img_request) ||
2775 !img_request_layered_test(img_request) ||
2776 !obj_request_overlaps_parent(obj_request) ||
2777 ((known = obj_request_known_test(obj_request)) &&
2778 obj_request_exists_test(obj_request))) {
2780 struct rbd_device *rbd_dev;
2781 struct ceph_osd_client *osdc;
2783 rbd_dev = obj_request->img_request->rbd_dev;
2784 osdc = &rbd_dev->rbd_client->client->osdc;
2786 return rbd_obj_request_submit(osdc, obj_request);
2790 * It's a layered write. The target object might exist but
2791 * we may not know that yet. If we know it doesn't exist,
2792 * start by reading the data for the full target object from
2793 * the parent so we can use it for a copyup to the target.
2796 return rbd_img_obj_parent_read_full(obj_request);
2798 /* We don't know whether the target exists. Go find out. */
2800 return rbd_img_obj_exists_submit(obj_request);
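/*
 * Editor's summary of the dispatch above:
 *
 *	read, non-layered write, write beyond the parent
 *	overlap, or target known to exist		-> plain object request
 *	layered write, target known not to exist	-> parent read + copyup
 *	layered write, target existence unknown		-> STAT, then resubmit
 */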
2803 static int rbd_img_request_submit(struct rbd_img_request *img_request)
2805 struct rbd_obj_request *obj_request;
2806 struct rbd_obj_request *next_obj_request;
2808 dout("%s: img %p\n", __func__, img_request);
2809 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
2812 ret = rbd_img_obj_request_submit(obj_request);
2820 static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2822 struct rbd_obj_request *obj_request;
2823 struct rbd_device *rbd_dev;
2828 rbd_assert(img_request_child_test(img_request));
2830 /* First get what we need from the image request and release it */
2832 obj_request = img_request->obj_request;
2833 img_xferred = img_request->xferred;
2834 img_result = img_request->result;
2835 rbd_img_request_put(img_request);
2838 * If the overlap has become 0 (most likely because the
2840 * image has been flattened) we need to re-submit the original request.
2842 rbd_assert(obj_request);
2843 rbd_assert(obj_request->img_request);
2844 rbd_dev = obj_request->img_request->rbd_dev;
2845 if (!rbd_dev->parent_overlap) {
2846 struct ceph_osd_client *osdc;
2848 osdc = &rbd_dev->rbd_client->client->osdc;
2849 img_result = rbd_obj_request_submit(osdc, obj_request);
2854 obj_request->result = img_result;
2855 if (obj_request->result)
2859 * We need to zero anything beyond the parent overlap
2860 * boundary. Since rbd_img_obj_request_read_callback()
2861 * will zero anything beyond the end of a short read, an
2862 * easy way to do this is to pretend the data from the
2863 * parent came up short--ending at the overlap boundary.
2865 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
2866 obj_end = obj_request->img_offset + obj_request->length;
2867 if (obj_end > rbd_dev->parent_overlap) {
2870 if (obj_request->img_offset < rbd_dev->parent_overlap)
2871 xferred = rbd_dev->parent_overlap -
2872 obj_request->img_offset;
2874 obj_request->xferred = min(img_xferred, xferred);
2876 obj_request->xferred = img_xferred;
2879 rbd_img_obj_request_read_callback(obj_request);
2880 rbd_obj_request_complete(obj_request);
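/*
 * Editor's worked example (hypothetical numbers): a child read of
 * length 0x100000 at img_offset 0x680000 has obj_end 0x780000.  With
 * parent_overlap == 0x700000, xferred is clamped to 0x80000, so the
 * read callback zeroes the trailing 0x80000 bytes as if the parent
 * read had come up short at the overlap boundary.
 */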
2883 static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
2885 struct rbd_img_request *img_request;
2888 rbd_assert(obj_request_img_data_test(obj_request));
2889 rbd_assert(obj_request->img_request != NULL);
2890 rbd_assert(obj_request->result == (s32) -ENOENT);
2891 rbd_assert(obj_request_type_valid(obj_request->type));
2893 /* rbd_read_finish(obj_request, obj_request->length); */
2894 img_request = rbd_parent_request_create(obj_request,
2895 obj_request->img_offset,
2896 obj_request->length);
2901 if (obj_request->type == OBJ_REQUEST_BIO)
2902 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2903 obj_request->bio_list);
2905 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
2906 obj_request->pages);
2910 img_request->callback = rbd_img_parent_read_callback;
2911 result = rbd_img_request_submit(img_request);
2918 rbd_img_request_put(img_request);
2919 obj_request->result = result;
2920 obj_request->xferred = 0;
2921 obj_request_done_set(obj_request);
2924 static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
2926 struct rbd_obj_request *obj_request;
2927 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2930 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2931 OBJ_REQUEST_NODATA);
2936 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, 1,
2938 if (!obj_request->osd_req)
2941 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
2943 rbd_osd_req_format_read(obj_request);
2945 ret = rbd_obj_request_submit(osdc, obj_request);
2948 ret = rbd_obj_request_wait(obj_request);
2950 rbd_obj_request_put(obj_request);
2955 static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2957 struct rbd_device *rbd_dev = (struct rbd_device *)data;
2963 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
2964 rbd_dev->header_name, (unsigned long long)notify_id,
2965 (unsigned int)opcode);
2966 ret = rbd_dev_refresh(rbd_dev);
2968 rbd_warn(rbd_dev, "header refresh error (%d)\n", ret);
2970 rbd_obj_notify_ack_sync(rbd_dev, notify_id);
2974 * Send a (un)watch request and wait for the ack. Return a request
2975 * with a ref held on success, or an error pointer on failure.
2977 static struct rbd_obj_request *rbd_obj_watch_request_helper(
2978 struct rbd_device *rbd_dev,
2981 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2982 struct rbd_obj_request *obj_request;
2985 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2986 OBJ_REQUEST_NODATA);
2988 return ERR_PTR(-ENOMEM);
2990 obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, 1,
2992 if (!obj_request->osd_req) {
2997 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
2998 rbd_dev->watch_event->cookie, 0, watch);
2999 rbd_osd_req_format_write(obj_request);
3002 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
3004 ret = rbd_obj_request_submit(osdc, obj_request);
3008 ret = rbd_obj_request_wait(obj_request);
3012 ret = obj_request->result;
3015 rbd_obj_request_end(obj_request);
3022 rbd_obj_request_put(obj_request);
3023 return ERR_PTR(ret);
3027 * Initiate a watch request, synchronously.
3029 static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
3031 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3032 struct rbd_obj_request *obj_request;
3035 rbd_assert(!rbd_dev->watch_event);
3036 rbd_assert(!rbd_dev->watch_request);
3038 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
3039 &rbd_dev->watch_event);
3043 rbd_assert(rbd_dev->watch_event);
3045 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
3046 OBJ_REQUEST_NODATA);
3052 obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, 1,
3054 if (!obj_request->osd_req) {
3059 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
3061 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
3062 rbd_dev->watch_event->cookie, 0, 1);
3063 rbd_osd_req_format_write(obj_request);
3065 ret = rbd_obj_request_submit(osdc, obj_request);
3069 ret = rbd_obj_request_wait(obj_request);
3073 ret = obj_request->result;
3078 * A watch request is set to linger, so the underlying osd
3079 * request won't go away until we unregister it. We retain
3080 * a pointer to the object request during that time (in
3081 * rbd_dev->watch_request), so we'll keep a reference to
3082 * it. We'll drop that reference (below) after we've unregistered it.
3085 rbd_dev->watch_request = obj_request;
3090 ceph_osdc_unregister_linger_request(osdc, obj_request->osd_req);
3092 rbd_obj_request_put(obj_request);
3094 ceph_osdc_cancel_event(rbd_dev->watch_event);
3095 rbd_dev->watch_event = NULL;
3101 * Tear down a watch request, synchronously.
3103 static int __rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
3105 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3106 struct rbd_obj_request *obj_request;
3109 rbd_assert(rbd_dev->watch_event);
3110 rbd_assert(rbd_dev->watch_request);
3112 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
3113 OBJ_REQUEST_NODATA);
3119 obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, 1,
3121 if (!obj_request->osd_req) {
3126 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
3127 rbd_dev->watch_event->cookie, 0, 0);
3128 rbd_osd_req_format_write(obj_request);
3130 ret = rbd_obj_request_submit(osdc, obj_request);
3134 ret = rbd_obj_request_wait(obj_request);
3138 ret = obj_request->result;
3142 /* We have successfully torn down the watch request */
3144 ceph_osdc_unregister_linger_request(osdc,
3145 rbd_dev->watch_request->osd_req);
3146 rbd_obj_request_put(rbd_dev->watch_request);
3147 rbd_dev->watch_request = NULL;
3150 rbd_obj_request_put(obj_request);
3152 ceph_osdc_cancel_event(rbd_dev->watch_event);
3153 rbd_dev->watch_event = NULL;
3158 static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
3162 ret = __rbd_dev_header_unwatch_sync(rbd_dev);
3164 rbd_warn(rbd_dev, "unable to tear down watch request: %d\n",
3170 * Synchronous osd object method call. Returns the number of bytes
3171 * returned in the inbound buffer, or a negative error code.
3173 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
3174 const char *object_name,
3175 const char *class_name,
3176 const char *method_name,
3177 const void *outbound,
3178 size_t outbound_size,
3180 size_t inbound_size)
3182 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3183 struct rbd_obj_request *obj_request;
3184 struct page **pages;
3189 * Method calls are ultimately read operations. The result
3190 * should be placed into the inbound buffer provided. They
3191 * also supply outbound data--parameters for the object
3192 * method. Currently if this is present it will be a snapshot id.
3195 page_count = (u32)calc_pages_for(0, inbound_size);
3196 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3198 return PTR_ERR(pages);
3201 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
3206 obj_request->pages = pages;
3207 obj_request->page_count = page_count;
3209 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, 1,
3211 if (!obj_request->osd_req)
3214 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
3215 class_name, method_name);
3216 if (outbound_size) {
3217 struct ceph_pagelist *pagelist;
3219 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
3223 ceph_pagelist_init(pagelist);
3224 ceph_pagelist_append(pagelist, outbound, outbound_size);
3225 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
3228 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
3229 obj_request->pages, inbound_size,
3231 rbd_osd_req_format_read(obj_request);
3233 ret = rbd_obj_request_submit(osdc, obj_request);
3236 ret = rbd_obj_request_wait(obj_request);
3240 ret = obj_request->result;
3244 rbd_assert(obj_request->xferred < (u64)INT_MAX);
3245 ret = (int)obj_request->xferred;
3246 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
3249 rbd_obj_request_put(obj_request);
3251 ceph_release_page_vector(pages, page_count);
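/*
 * Editor's usage sketch (not driver code): a typical caller, modeled
 * on the v2 metadata helpers below.  The function name is hypothetical.
 */
static int example_get_image_size(struct rbd_device *rbd_dev, u64 *size)
{
	__le64 snapid = cpu_to_le64(CEPH_NOSNAP);
	struct {
		u8 order;
		__le64 size;
	} __attribute__ ((packed)) reply = { 0 };
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				  "rbd", "get_size",
				  &snapid, sizeof (snapid),
				  &reply, sizeof (reply));
	if (ret < 0)
		return ret;
	if ((size_t)ret < sizeof (reply))
		return -ERANGE;

	*size = le64_to_cpu(reply.size);
	return 0;
}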
3256 static void rbd_request_fn(struct request_queue *q)
3257 __releases(q->queue_lock) __acquires(q->queue_lock)
3259 struct rbd_device *rbd_dev = q->queuedata;
3263 while ((rq = blk_fetch_request(q))) {
3264 bool write_request = rq_data_dir(rq) == WRITE;
3265 struct rbd_img_request *img_request;
3269 /* Ignore any non-FS requests that filter through. */
3271 if (rq->cmd_type != REQ_TYPE_FS) {
3272 dout("%s: non-fs request type %d\n", __func__,
3273 (int) rq->cmd_type);
3274 __blk_end_request_all(rq, 0);
3278 /* Ignore/skip any zero-length requests */
3280 offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
3281 length = (u64) blk_rq_bytes(rq);
3284 dout("%s: zero-length request\n", __func__);
3285 __blk_end_request_all(rq, 0);
3289 spin_unlock_irq(q->queue_lock);
3291 /* Disallow writes to a read-only device */
3293 if (write_request) {
3295 if (rbd_dev->mapping.read_only)
3297 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
3301 * Quit early if the mapped snapshot no longer
3302 * exists. It's still possible the snapshot will
3303 * have disappeared by the time our request arrives
3304 * at the osd, but there's no sense in sending it if we already know.
3307 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
3308 dout("request for non-existent snapshot");
3309 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
3315 if (offset && length > U64_MAX - offset + 1) {
3316 rbd_warn(rbd_dev, "bad request range (%llu~%llu)\n",
3318 goto end_request; /* Shouldn't happen */
3322 if (offset + length > rbd_dev->mapping.size) {
3323 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)\n",
3324 offset, length, rbd_dev->mapping.size);
3329 img_request = rbd_img_request_create(rbd_dev, offset, length,
3334 img_request->rq = rq;
3336 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3339 result = rbd_img_request_submit(img_request);
3341 rbd_img_request_put(img_request);
3343 spin_lock_irq(q->queue_lock);
3345 rbd_warn(rbd_dev, "%s %llx at %llx result %d\n",
3346 write_request ? "write" : "read",
3347 length, offset, result);
3349 __blk_end_request_all(rq, result);
3355 * a queue callback. Makes sure that we don't create a bio that spans across
3356 * multiple osd objects. One exception would be with single-page bios,
3357 * which we handle later at bio_chain_clone_range()
3359 static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
3360 struct bio_vec *bvec)
3362 struct rbd_device *rbd_dev = q->queuedata;
3363 sector_t sector_offset;
3364 sector_t sectors_per_obj;
3365 sector_t obj_sector_offset;
3369 * Find the bio's start sector relative to the enclosing device (not
3370 * the partition), then how far into its rbd object that sector falls.
3373 sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
3374 sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
3375 obj_sector_offset = sector_offset & (sectors_per_obj - 1);
3378 * Compute the number of bytes from that offset to the end
3379 * of the object. Account for what's already used by the bio.
3381 ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
3382 if (ret > bmd->bi_size)
3383 ret -= bmd->bi_size;
3388 * Don't send back more than was asked for. And if the bio
3389 * was empty, let the whole thing through because: "Note
3390 * that a block device *must* allow a single page to be
3391 * added to an empty bio."
3393 rbd_assert(bvec->bv_len <= PAGE_SIZE);
3394 if (ret > (int) bvec->bv_len || !bmd->bi_size)
3395 ret = (int) bvec->bv_len;
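/*
 * Editor's worked example (hypothetical numbers): with 4 MiB objects,
 * sectors_per_obj = 1 << (22 - 9) = 8192.  A bio starting at device
 * sector 12288 has obj_sector_offset = 12288 & 8191 = 4096, leaving
 * (8192 - 4096) << 9 = 2 MiB to the object boundary; a bvec may only
 * be merged up to that limit, less what the bio already holds.
 */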
3400 static void rbd_free_disk(struct rbd_device *rbd_dev)
3402 struct gendisk *disk = rbd_dev->disk;
3407 rbd_dev->disk = NULL;
3408 if (disk->flags & GENHD_FL_UP) {
3411 blk_cleanup_queue(disk->queue);
3416 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3417 const char *object_name,
3418 u64 offset, u64 length, void *buf)
3421 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3422 struct rbd_obj_request *obj_request;
3423 struct page **pages = NULL;
3428 page_count = (u32) calc_pages_for(offset, length);
3429 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3431 ret = PTR_ERR(pages);
3434 obj_request = rbd_obj_request_create(object_name, offset, length,
3439 obj_request->pages = pages;
3440 obj_request->page_count = page_count;
3442 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, 1,
3444 if (!obj_request->osd_req)
3447 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
3448 offset, length, 0, 0);
3449 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
3451 obj_request->length,
3452 obj_request->offset & ~PAGE_MASK,
3454 rbd_osd_req_format_read(obj_request);
3456 ret = rbd_obj_request_submit(osdc, obj_request);
3459 ret = rbd_obj_request_wait(obj_request);
3463 ret = obj_request->result;
3467 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
3468 size = (size_t) obj_request->xferred;
3469 ceph_copy_from_page_vector(pages, buf, 0, size);
3470 rbd_assert(size <= (size_t)INT_MAX);
3474 rbd_obj_request_put(obj_request);
3476 ceph_release_page_vector(pages, page_count);
3482 * Read the complete header for the given rbd device. On successful
3483 * return, the rbd_dev->header field will contain up-to-date
3484 * information about the image.
3486 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
3488 struct rbd_image_header_ondisk *ondisk = NULL;
3495 * The complete header will include an array of its 64-bit
3496 * snapshot ids, followed by the names of those snapshots as
3497 * a contiguous block of NUL-terminated strings. Note that
3498 * the number of snapshots could change by the time we read
3499 * it in, in which case we re-read it.
3506 size = sizeof (*ondisk);
3507 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3509 ondisk = kmalloc(size, GFP_KERNEL);
3513 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
3517 if ((size_t)ret < size) {
3519 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3523 if (!rbd_dev_ondisk_valid(ondisk)) {
3525 rbd_warn(rbd_dev, "invalid header");
3529 names_size = le64_to_cpu(ondisk->snap_names_len);
3530 want_count = snap_count;
3531 snap_count = le32_to_cpu(ondisk->snap_count);
3532 } while (snap_count != want_count);
3534 ret = rbd_header_from_disk(rbd_dev, ondisk);
3542 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3543 * has disappeared from the (just updated) snapshot context.
3545 static void rbd_exists_validate(struct rbd_device *rbd_dev)
3549 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3552 snap_id = rbd_dev->spec->snap_id;
3553 if (snap_id == CEPH_NOSNAP)
3556 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3557 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3560 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
3566 * Don't hold the lock while doing disk operations,
3567 * or lock ordering will conflict with the bdev mutex via:
3568 * rbd_add() -> blkdev_get() -> rbd_open()
3570 spin_lock_irq(&rbd_dev->lock);
3571 removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
3572 spin_unlock_irq(&rbd_dev->lock);
3574 * If the device is being removed, rbd_dev->disk has
3575 * been destroyed, so don't try to update its size
3578 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3579 dout("setting size to %llu sectors", (unsigned long long)size);
3580 set_capacity(rbd_dev->disk, size);
3581 revalidate_disk(rbd_dev->disk);
3585 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3590 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
3591 down_write(&rbd_dev->header_rwsem);
3592 mapping_size = rbd_dev->mapping.size;
3593 if (rbd_dev->image_format == 1)
3594 ret = rbd_dev_v1_header_info(rbd_dev);
3596 ret = rbd_dev_v2_header_info(rbd_dev);
3598 /* If it's a mapped snapshot, validate its EXISTS flag */
3600 rbd_exists_validate(rbd_dev);
3601 up_write(&rbd_dev->header_rwsem);
3603 if (mapping_size != rbd_dev->mapping.size) {
3604 rbd_dev_update_size(rbd_dev);
3610 static int rbd_init_disk(struct rbd_device *rbd_dev)
3612 struct gendisk *disk;
3613 struct request_queue *q;
3616 /* create gendisk info */
3617 disk = alloc_disk(single_major ?
3618 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
3619 RBD_MINORS_PER_MAJOR);
3623 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
3625 disk->major = rbd_dev->major;
3626 disk->first_minor = rbd_dev->minor;
3628 disk->flags |= GENHD_FL_EXT_DEVT;
3629 disk->fops = &rbd_bd_ops;
3630 disk->private_data = rbd_dev;
3632 q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
3636 /* We use the default size, but let's be explicit about it. */
3637 blk_queue_physical_block_size(q, SECTOR_SIZE);
3639 /* set io sizes to object size */
3640 segment_size = rbd_obj_bytes(&rbd_dev->header);
3641 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
3642 blk_queue_max_segment_size(q, segment_size);
3643 blk_queue_io_min(q, segment_size);
3644 blk_queue_io_opt(q, segment_size);
3646 blk_queue_merge_bvec(q, rbd_merge_bvec);
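/*
 * Editor's note: with the default 4 MiB objects (obj_order 22) the
 * settings above cap a request at 8192 sectors and report minimum
 * and optimal I/O sizes of one full object.
 */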
3649 q->queuedata = rbd_dev;
3651 rbd_dev->disk = disk;
3664 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3666 return container_of(dev, struct rbd_device, dev);
3669 static ssize_t rbd_size_show(struct device *dev,
3670 struct device_attribute *attr, char *buf)
3672 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3674 return sprintf(buf, "%llu\n",
3675 (unsigned long long)rbd_dev->mapping.size);
3679 * Note this shows the features for whatever's mapped, which is not
3680 * necessarily the base image.
3682 static ssize_t rbd_features_show(struct device *dev,
3683 struct device_attribute *attr, char *buf)
3685 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3687 return sprintf(buf, "0x%016llx\n",
3688 (unsigned long long)rbd_dev->mapping.features);
3691 static ssize_t rbd_major_show(struct device *dev,
3692 struct device_attribute *attr, char *buf)
3694 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3697 return sprintf(buf, "%d\n", rbd_dev->major);
3699 return sprintf(buf, "(none)\n");
3702 static ssize_t rbd_minor_show(struct device *dev,
3703 struct device_attribute *attr, char *buf)
3705 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3707 return sprintf(buf, "%d\n", rbd_dev->minor);
3710 static ssize_t rbd_client_id_show(struct device *dev,
3711 struct device_attribute *attr, char *buf)
3713 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3715 return sprintf(buf, "client%lld\n",
3716 ceph_client_id(rbd_dev->rbd_client->client));
3719 static ssize_t rbd_pool_show(struct device *dev,
3720 struct device_attribute *attr, char *buf)
3722 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3724 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
3727 static ssize_t rbd_pool_id_show(struct device *dev,
3728 struct device_attribute *attr, char *buf)
3730 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3732 return sprintf(buf, "%llu\n",
3733 (unsigned long long) rbd_dev->spec->pool_id);
3736 static ssize_t rbd_name_show(struct device *dev,
3737 struct device_attribute *attr, char *buf)
3739 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3741 if (rbd_dev->spec->image_name)
3742 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3744 return sprintf(buf, "(unknown)\n");
3747 static ssize_t rbd_image_id_show(struct device *dev,
3748 struct device_attribute *attr, char *buf)
3750 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3752 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
3756 * Shows the name of the currently-mapped snapshot (or
3757 * RBD_SNAP_HEAD_NAME for the base image).
3759 static ssize_t rbd_snap_show(struct device *dev,
3760 struct device_attribute *attr,
3763 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3765 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
3769 * For an rbd v2 image, shows the pool id, image id, and snapshot id
3770 * for the parent image. If there is no parent, simply shows
3771 * "(no parent image)".
3773 static ssize_t rbd_parent_show(struct device *dev,
3774 struct device_attribute *attr,
3777 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3778 struct rbd_spec *spec = rbd_dev->parent_spec;
3783 return sprintf(buf, "(no parent image)\n");
3785 count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
3786 (unsigned long long) spec->pool_id, spec->pool_name);
3791 count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
3792 spec->image_name ? spec->image_name : "(unknown)");
3797 count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
3798 (unsigned long long) spec->snap_id, spec->snap_name);
3803 count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
3808 return (ssize_t) (bufp - buf);
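/*
 * Editor's example of the resulting sysfs "parent" output for a
 * mapped clone (all values hypothetical):
 *
 *	pool_id 2
 *	pool_name rbd
 *	image_id 10226b8b4567
 *	image_name parent-image
 *	snap_id 4
 *	snap_name base
 *	overlap 4194304
 */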
3811 static ssize_t rbd_image_refresh(struct device *dev,
3812 struct device_attribute *attr,
3816 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3819 ret = rbd_dev_refresh(rbd_dev);
3821 rbd_warn(rbd_dev, "manual header refresh error (%d)\n", ret);
3823 return ret < 0 ? ret : size;
3826 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
3827 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
3828 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
3829 static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
3830 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3831 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
3832 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
3833 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
3834 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
3835 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3836 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
3837 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
3839 static struct attribute *rbd_attrs[] = {
3840 &dev_attr_size.attr,
3841 &dev_attr_features.attr,
3842 &dev_attr_major.attr,
3843 &dev_attr_minor.attr,
3844 &dev_attr_client_id.attr,
3845 &dev_attr_pool.attr,
3846 &dev_attr_pool_id.attr,
3847 &dev_attr_name.attr,
3848 &dev_attr_image_id.attr,
3849 &dev_attr_current_snap.attr,
3850 &dev_attr_parent.attr,
3851 &dev_attr_refresh.attr,
3855 static struct attribute_group rbd_attr_group = {
3859 static const struct attribute_group *rbd_attr_groups[] = {
3864 static void rbd_sysfs_dev_release(struct device *dev)
3868 static struct device_type rbd_device_type = {
3870 .groups = rbd_attr_groups,
3871 .release = rbd_sysfs_dev_release,
3874 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
3876 kref_get(&spec->kref);
3881 static void rbd_spec_free(struct kref *kref);
3882 static void rbd_spec_put(struct rbd_spec *spec)
3885 kref_put(&spec->kref, rbd_spec_free);
3888 static struct rbd_spec *rbd_spec_alloc(void)
3890 struct rbd_spec *spec;
3892 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
3895 kref_init(&spec->kref);
3900 static void rbd_spec_free(struct kref *kref)
3902 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
3904 kfree(spec->pool_name);
3905 kfree(spec->image_id);
3906 kfree(spec->image_name);
3907 kfree(spec->snap_name);
3911 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
3912 struct rbd_spec *spec)
3914 struct rbd_device *rbd_dev;
3916 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
3920 spin_lock_init(&rbd_dev->lock);
3922 atomic_set(&rbd_dev->parent_ref, 0);
3923 INIT_LIST_HEAD(&rbd_dev->node);
3924 init_rwsem(&rbd_dev->header_rwsem);
3926 rbd_dev->spec = spec;
3927 rbd_dev->rbd_client = rbdc;
3929 /* Initialize the layout used for all rbd requests */
3931 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3932 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
3933 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3934 rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
3939 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
3941 rbd_put_client(rbd_dev->rbd_client);
3942 rbd_spec_put(rbd_dev->spec);
3947 * Get the size and object order for an image snapshot, or if
3948 * snap_id is CEPH_NOSNAP, get this information for the base image.
3951 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
3952 u8 *order, u64 *snap_size)
3954 __le64 snapid = cpu_to_le64(snap_id);
3959 } __attribute__ ((packed)) size_buf = { 0 };
3961 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3963 &snapid, sizeof (snapid),
3964 &size_buf, sizeof (size_buf));
3965 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3968 if (ret < sizeof (size_buf))
3972 *order = size_buf.order;
3973 dout(" order %u", (unsigned int)*order);
3975 *snap_size = le64_to_cpu(size_buf.size);
3977 dout(" snap_id 0x%016llx snap_size = %llu\n",
3978 (unsigned long long)snap_id,
3979 (unsigned long long)*snap_size);
3984 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
3986 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
3987 &rbd_dev->header.obj_order,
3988 &rbd_dev->header.image_size);
3991 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
3997 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
4001 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4002 "rbd", "get_object_prefix", NULL, 0,
4003 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
4004 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4009 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
4010 p + ret, NULL, GFP_NOIO);
4013 if (IS_ERR(rbd_dev->header.object_prefix)) {
4014 ret = PTR_ERR(rbd_dev->header.object_prefix);
4015 rbd_dev->header.object_prefix = NULL;
4017 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
4025 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
4028 __le64 snapid = cpu_to_le64(snap_id);
4032 } __attribute__ ((packed)) features_buf = { 0 };
4036 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4037 "rbd", "get_features",
4038 &snapid, sizeof (snapid),
4039 &features_buf, sizeof (features_buf));
4040 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4043 if (ret < sizeof (features_buf))
4046 incompat = le64_to_cpu(features_buf.incompat);
4047 if (incompat & ~RBD_FEATURES_SUPPORTED)
4050 *snap_features = le64_to_cpu(features_buf.features);
4052 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
4053 (unsigned long long)snap_id,
4054 (unsigned long long)*snap_features,
4055 (unsigned long long)le64_to_cpu(features_buf.incompat));
4060 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
4062 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
4063 &rbd_dev->header.features);
4066 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
4068 struct rbd_spec *parent_spec;
4070 void *reply_buf = NULL;
4080 parent_spec = rbd_spec_alloc();
4084 size = sizeof (__le64) + /* pool_id */
4085 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
4086 sizeof (__le64) + /* snap_id */
4087 sizeof (__le64); /* overlap */
4088 reply_buf = kmalloc(size, GFP_KERNEL);
4094 snapid = cpu_to_le64(CEPH_NOSNAP);
4095 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4096 "rbd", "get_parent",
4097 &snapid, sizeof (snapid),
4099 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4104 end = reply_buf + ret;
4106 ceph_decode_64_safe(&p, end, pool_id, out_err);
4107 if (pool_id == CEPH_NOPOOL) {
4109 * Either the parent never existed, or we have
4110 * record of it but the image got flattened so it no
4111 * longer has a parent. When the parent of a
4112 * layered image disappears we immediately set the
4113 * overlap to 0. The effect of this is that all new
4114 * requests will be treated as if the image had no parent.
4117 if (rbd_dev->parent_overlap) {
4118 rbd_dev->parent_overlap = 0;
4120 rbd_dev_parent_put(rbd_dev);
4121 pr_info("%s: clone image has been flattened\n",
4122 rbd_dev->disk->disk_name);
4125 goto out; /* No parent? No problem. */
4128 /* The ceph file layout needs to fit pool id in 32 bits */
4131 if (pool_id > (u64)U32_MAX) {
4132 rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
4133 (unsigned long long)pool_id, U32_MAX);
4137 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4138 if (IS_ERR(image_id)) {
4139 ret = PTR_ERR(image_id);
4142 ceph_decode_64_safe(&p, end, snap_id, out_err);
4143 ceph_decode_64_safe(&p, end, overlap, out_err);
4146 * The parent won't change (except when the clone is
4147 * flattened, already handled that). So we only need to
4148 * record the parent spec if we have not already done so.
4150 if (!rbd_dev->parent_spec) {
4151 parent_spec->pool_id = pool_id;
4152 parent_spec->image_id = image_id;
4153 parent_spec->snap_id = snap_id;
4154 rbd_dev->parent_spec = parent_spec;
4155 parent_spec = NULL; /* rbd_dev now owns this */
4159 * We always update the parent overlap. If it's zero we
4160 * treat it specially.
4162 rbd_dev->parent_overlap = overlap;
4166 /* A null parent_spec indicates it's the initial probe */
4170 * The overlap has become zero, so the clone
4171 * must have been resized down to 0 at some
4172 * point. Treat this the same as a flatten.
4174 rbd_dev_parent_put(rbd_dev);
4175 pr_info("%s: clone image now standalone\n",
4176 rbd_dev->disk->disk_name);
4179 * For the initial probe, if we find the
4180 * overlap is zero we just pretend there was no parent image.
4183 rbd_warn(rbd_dev, "ignoring parent of "
4184 "clone with overlap 0\n");
4191 rbd_spec_put(parent_spec);
4196 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
4200 __le64 stripe_count;
4201 } __attribute__ ((packed)) striping_info_buf = { 0 };
4202 size_t size = sizeof (striping_info_buf);
4209 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4210 "rbd", "get_stripe_unit_count", NULL, 0,
4211 (char *)&striping_info_buf, size);
4212 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4219 * We don't actually support the "fancy striping" feature
4220 * (STRIPINGV2) yet, but if the striping sizes are the
4221 * defaults the behavior is the same as before. So find
4222 * out, and only fail if the image has non-default values.
4225 obj_size = (u64)1 << rbd_dev->header.obj_order;
4226 p = &striping_info_buf;
4227 stripe_unit = ceph_decode_64(&p);
4228 if (stripe_unit != obj_size) {
4229 rbd_warn(rbd_dev, "unsupported stripe unit "
4230 "(got %llu want %llu)",
4231 stripe_unit, obj_size);
4234 stripe_count = ceph_decode_64(&p);
4235 if (stripe_count != 1) {
4236 rbd_warn(rbd_dev, "unsupported stripe count "
4237 "(got %llu want 1)", stripe_count);
4240 rbd_dev->header.stripe_unit = stripe_unit;
4241 rbd_dev->header.stripe_count = stripe_count;
4246 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
4248 size_t image_id_size;
4253 void *reply_buf = NULL;
4255 char *image_name = NULL;
4258 rbd_assert(!rbd_dev->spec->image_name);
4260 len = strlen(rbd_dev->spec->image_id);
4261 image_id_size = sizeof (__le32) + len;
4262 image_id = kmalloc(image_id_size, GFP_KERNEL);
4267 end = image_id + image_id_size;
4268 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
4270 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
4271 reply_buf = kmalloc(size, GFP_KERNEL);
4275 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
4276 "rbd", "dir_get_name",
4277 image_id, image_id_size,
4282 end = reply_buf + ret;
4284 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
4285 if (IS_ERR(image_name))
4288 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
4296 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4298 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4299 const char *snap_name;
4302 /* Skip over names until we find the one we are looking for */
4304 snap_name = rbd_dev->header.snap_names;
4305 while (which < snapc->num_snaps) {
4306 if (!strcmp(name, snap_name))
4307 return snapc->snaps[which];
4308 snap_name += strlen(snap_name) + 1;
4314 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4316 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4321 for (which = 0; !found && which < snapc->num_snaps; which++) {
4322 const char *snap_name;
4324 snap_id = snapc->snaps[which];
4325 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
4326 if (IS_ERR(snap_name)) {
4327 /* ignore no-longer existing snapshots */
4328 if (PTR_ERR(snap_name) == -ENOENT)
4333 found = !strcmp(name, snap_name);
4336 return found ? snap_id : CEPH_NOSNAP;
4340 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
4341 * no snapshot by that name is found, or if an error occurs.
4343 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4345 if (rbd_dev->image_format == 1)
4346 return rbd_v1_snap_id_by_name(rbd_dev, name);
4348 return rbd_v2_snap_id_by_name(rbd_dev, name);
4352 * When an rbd image has a parent image, it is identified by the
4353 * pool, image, and snapshot ids (not names). This function fills
4354 * in the names for those ids. (It's OK if we can't figure out the
4355 * name for an image id, but the pool and snapshot ids should always
4356 * exist and have names.) All names in an rbd spec are dynamically allocated.
4359 * When an image being mapped (not a parent) is probed, we have the
4360 * pool name and pool id, image name and image id, and the snapshot
4361 * name. The only thing we're missing is the snapshot id.
4363 static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
4365 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4366 struct rbd_spec *spec = rbd_dev->spec;
4367 const char *pool_name;
4368 const char *image_name;
4369 const char *snap_name;
4373 * An image being mapped will have the pool name (etc.), but
4374 * we need to look up the snapshot id.
4376 if (spec->pool_name) {
4377 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
4380 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
4381 if (snap_id == CEPH_NOSNAP)
4383 spec->snap_id = snap_id;
4385 spec->snap_id = CEPH_NOSNAP;
4391 /* Get the pool name; we have to make our own copy of this */
4393 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
4395 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
4398 pool_name = kstrdup(pool_name, GFP_KERNEL);
4402 /* Fetch the image name; tolerate failure here */
4404 image_name = rbd_dev_image_name(rbd_dev);
4406 rbd_warn(rbd_dev, "unable to get image name");
4408 /* Look up the snapshot name, and make a copy */
4410 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
4411 if (IS_ERR(snap_name)) {
4412 ret = PTR_ERR(snap_name);
4416 spec->pool_name = pool_name;
4417 spec->image_name = image_name;
4418 spec->snap_name = snap_name;
4428 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
4437 struct ceph_snap_context *snapc;
4441 * We'll need room for the seq value (maximum snapshot id),
4442 * snapshot count, and array of that many snapshot ids.
4443 * For now we have a fixed upper limit on the number we're
4444 * prepared to receive.
4446 size = sizeof (__le64) + sizeof (__le32) +
4447 RBD_MAX_SNAP_COUNT * sizeof (__le64);
4448 reply_buf = kzalloc(size, GFP_KERNEL);
4452 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4453 "rbd", "get_snapcontext", NULL, 0,
4455 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4460 end = reply_buf + ret;
4462 ceph_decode_64_safe(&p, end, seq, out);
4463 ceph_decode_32_safe(&p, end, snap_count, out);
4466 * Make sure the reported number of snapshot ids wouldn't go
4467 * beyond the end of our buffer. But before checking that,
4468 * make sure the computed size of the snapshot context we
4469 * allocate is representable in a size_t.
4471 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
4476 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
4480 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
4486 for (i = 0; i < snap_count; i++)
4487 snapc->snaps[i] = ceph_decode_64(&p);
4489 ceph_put_snap_context(rbd_dev->header.snapc);
4490 rbd_dev->header.snapc = snapc;
4492 dout(" snap context seq = %llu, snap_count = %u\n",
4493 (unsigned long long)seq, (unsigned int)snap_count);
4500 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
4511 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
4512 reply_buf = kmalloc(size, GFP_KERNEL);
4514 return ERR_PTR(-ENOMEM);
4516 snapid = cpu_to_le64(snap_id);
4517 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4518 "rbd", "get_snapshot_name",
4519 &snapid, sizeof (snapid),
4521 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4523 snap_name = ERR_PTR(ret);
4528 end = reply_buf + ret;
4529 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4530 if (IS_ERR(snap_name))
4533 dout(" snap_id 0x%016llx snap_name = %s\n",
4534 (unsigned long long)snap_id, snap_name);
4541 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
4543 bool first_time = rbd_dev->header.object_prefix == NULL;
4546 ret = rbd_dev_v2_image_size(rbd_dev);
4551 ret = rbd_dev_v2_header_onetime(rbd_dev);
4557 * If the image supports layering, get the parent info. We
4558 * need to probe the first time regardless. Thereafter we
4559 * only need to probe if there's a parent, to see if it has
4560 * disappeared due to the mapped image getting flattened.
4562 if (rbd_dev->header.features & RBD_FEATURE_LAYERING &&
4563 (first_time || rbd_dev->parent_spec)) {
4566 ret = rbd_dev_v2_parent_info(rbd_dev);
4571 * Print a warning if this is the initial probe and
4572 * the image has a parent. Don't print it if the
4573 * image now being probed is itself a parent. We
4574 * can tell at this point because we won't know its
4575 * pool name yet (just its pool id).
4577 warn = rbd_dev->parent_spec && rbd_dev->spec->pool_name;
4578 if (first_time && warn)
4579 rbd_warn(rbd_dev, "WARNING: kernel layering "
4580 "is EXPERIMENTAL!");
4583 if (rbd_dev->spec->snap_id == CEPH_NOSNAP)
4584 if (rbd_dev->mapping.size != rbd_dev->header.image_size)
4585 rbd_dev->mapping.size = rbd_dev->header.image_size;
4587 ret = rbd_dev_v2_snap_context(rbd_dev);
4588 dout("rbd_dev_v2_snap_context returned %d\n", ret);
4593 static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
4598 dev = &rbd_dev->dev;
4599 dev->bus = &rbd_bus_type;
4600 dev->type = &rbd_device_type;
4601 dev->parent = &rbd_root_dev;
4602 dev->release = rbd_dev_device_release;
4603 dev_set_name(dev, "%d", rbd_dev->dev_id);
4604 ret = device_register(dev);
4609 static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
4611 device_unregister(&rbd_dev->dev);
4615 * Get a unique rbd identifier for the given new rbd_dev, and add
4616 * the rbd_dev to the global list.
4618 static int rbd_dev_id_get(struct rbd_device *rbd_dev)
4622 new_dev_id = ida_simple_get(&rbd_dev_id_ida,
4623 0, minor_to_rbd_dev_id(1 << MINORBITS),
4628 rbd_dev->dev_id = new_dev_id;
4630 spin_lock(&rbd_dev_list_lock);
4631 list_add_tail(&rbd_dev->node, &rbd_dev_list);
4632 spin_unlock(&rbd_dev_list_lock);
4634 dout("rbd_dev %p given dev id %d\n", rbd_dev, rbd_dev->dev_id);
4640 * Remove an rbd_dev from the global list, and record that its
4641 * identifier is no longer in use.
4643 static void rbd_dev_id_put(struct rbd_device *rbd_dev)
4645 spin_lock(&rbd_dev_list_lock);
4646 list_del_init(&rbd_dev->node);
4647 spin_unlock(&rbd_dev_list_lock);
4649 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
4651 dout("rbd_dev %p released dev id %d\n", rbd_dev, rbd_dev->dev_id);
4655 * Skips over white space at *buf, and updates *buf to point to the
4656 * first found non-space character (if any). Returns the length of
4657 * the token (string of non-white space characters) found. Note
4658 * that *buf must be terminated with '\0'.
4660 static inline size_t next_token(const char **buf)
4663 * These are the characters that produce nonzero for
4664 * isspace() in the "C" and "POSIX" locales.
4666 const char *spaces = " \f\n\r\t\v";
4668 *buf += strspn(*buf, spaces); /* Find start of token */
4670 return strcspn(*buf, spaces); /* Return token length */
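/*
 * Editor's example: given *buf == "  rbd foo", next_token() advances
 * *buf to point at "rbd foo" and returns 3 (the length of "rbd").
 */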
/*
 * Finds the next token in *buf, and if the provided token buffer is
 * big enough, copies the found token into it.  The result, if
 * copied, is guaranteed to be terminated with '\0'.  Note that *buf
 * must be terminated with '\0' on entry.
 *
 * Returns the length of the token found (not including the '\0').
 * Return value will be 0 if no token is found, and it will be >=
 * token_size if the token would not fit.
 *
 * The *buf pointer will be updated to point beyond the end of the
 * found token.  Note that this occurs even if the token buffer is
 * too small to hold it.
 */
static inline size_t copy_token(const char **buf,
				char *token,
				size_t token_size)
{
	size_t len;

	len = next_token(buf);
	if (len < token_size) {
		memcpy(token, *buf, len);
		*(token + len) = '\0';
	}
	*buf += len;

	return len;
}
/*
 * Finds the next token in *buf, dynamically allocates a buffer big
 * enough to hold a copy of it, and copies the token into the new
 * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
 * that a duplicate buffer is created even for a zero-length token.
 *
 * Returns a pointer to the newly-allocated duplicate, or a null
 * pointer if memory for the duplicate was not available.  If
 * the lenp argument is a non-null pointer, the length of the token
 * (not including the '\0') is returned in *lenp.
 *
 * If successful, the *buf pointer will be updated to point beyond
 * the end of the found token.
 *
 * Note: uses GFP_KERNEL for allocation.
 */
static inline char *dup_token(const char **buf, size_t *lenp)
{
	char *dup;
	size_t len;

	len = next_token(buf);
	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
	if (!dup)
		return NULL;
	*(dup + len) = '\0';
	*buf += len;

	if (lenp)
		*lenp = len;

	return dup;
}
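/*
 * Example (illustrative): given buf pointing at "rbd foo", the call
 * dup_token(&buf, &len) returns a kmalloc'd copy "rbd", sets len to
 * 3, and advances buf to point at " foo".  The caller owns the copy
 * and must kfree() it.
 */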
/*
 * Parse the options provided for an "rbd add" (i.e., rbd image
 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
 * and the data written is passed here via a NUL-terminated buffer.
 * Returns 0 if successful or an error code otherwise.
 *
 * The information extracted from these options is recorded in
 * the other parameters which return dynamically-allocated
 * structures:
 *  ceph_opts
 *      The address of a pointer that will refer to a ceph options
 *      structure.  Caller must release the returned pointer using
 *      ceph_destroy_options() when it is no longer needed.
 *  rbd_opts
 *	Address of an rbd options pointer.  Fully initialized by
 *	this function; caller must release with kfree().
 *  spec
 *	Address of an rbd image specification pointer.  Fully
 *	initialized by this function based on parsed options.
 *	Caller must release with rbd_spec_put().
 *
 * The options passed take this form:
 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
 * where:
 *  <mon_addrs>
 *      A comma-separated list of one or more monitor addresses.
 *      A monitor address is an ip address, optionally followed
 *      by a port number (separated by a colon).
 *        I.e.:  ip1[:port1][,ip2[:port2]...]
 *  <options>
 *      A comma-separated list of ceph and/or rbd options.
 *  <pool_name>
 *      The name of the rados pool containing the rbd image.
 *  <image_name>
 *      The name of the image in that pool to map.
 *  <snap_id>
 *      An optional snapshot id.  If provided, the mapping will
 *      present data from the image at the time that snapshot was
 *      created.  The image head is used if no snapshot id is
 *      provided.  Snapshot mappings are always read-only.
 */
static int rbd_add_parse_args(const char *buf,
				struct ceph_options **ceph_opts,
				struct rbd_options **opts,
				struct rbd_spec **rbd_spec)
{
	size_t len;
	char *options;
	const char *mon_addrs;
	char *snap_name;
	size_t mon_addrs_size;
	struct rbd_spec *spec = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct ceph_options *copts;
	int ret;

	/* The first four tokens are required */

	len = next_token(&buf);
	if (!len) {
		rbd_warn(NULL, "no monitor address(es) provided");
		return -EINVAL;
	}
	mon_addrs = buf;
	mon_addrs_size = len + 1;
	buf += len;

	ret = -EINVAL;
	options = dup_token(&buf, NULL);
	if (!options)
		return -ENOMEM;
	if (!*options) {
		rbd_warn(NULL, "no options provided");
		goto out_err;
	}

	spec = rbd_spec_alloc();
	if (!spec)
		goto out_mem;

	spec->pool_name = dup_token(&buf, NULL);
	if (!spec->pool_name)
		goto out_mem;
	if (!*spec->pool_name) {
		rbd_warn(NULL, "no pool name provided");
		goto out_err;
	}

	spec->image_name = dup_token(&buf, NULL);
	if (!spec->image_name)
		goto out_mem;
	if (!*spec->image_name) {
		rbd_warn(NULL, "no image name provided");
		goto out_err;
	}

	/*
	 * Snapshot name is optional; default is to use "-"
	 * (indicating the head/no snapshot).
	 */
	len = next_token(&buf);
	if (!len) {
		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
		ret = -ENAMETOOLONG;
		goto out_err;
	}
	snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!snap_name)
		goto out_mem;
	*(snap_name + len) = '\0';
	spec->snap_name = snap_name;

	/* Initialize all rbd options to the defaults */

	rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
	if (!rbd_opts)
		goto out_mem;
	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;

	copts = ceph_parse_options(options, mon_addrs,
					mon_addrs + mon_addrs_size - 1,
					parse_rbd_opts_token, rbd_opts);
	if (IS_ERR(copts)) {
		ret = PTR_ERR(copts);
		goto out_err;
	}
	kfree(options);

	*ceph_opts = copts;
	*opts = rbd_opts;
	*rbd_spec = spec;

	return 0;
out_mem:
	ret = -ENOMEM;
out_err:
	kfree(rbd_opts);
	rbd_spec_put(spec);
	kfree(options);

	return ret;
}
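/*
 * Example add request (all values illustrative):
 *
 *   $ echo "1.2.3.4:6789 name=admin,secret=<key> rbd foo snap1" \
 *         > /sys/bus/rbd/add
 *
 * parses as mon_addrs "1.2.3.4:6789", options "name=admin,...",
 * pool "rbd", image "foo", and snapshot name "snap1"; omitting the
 * final token maps the image head ("-") instead.
 */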
/*
 * Return pool id (>= 0) or a negative error code.
 */
static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
{
	u64 newest_epoch;
	unsigned long timeout = rbdc->client->options->mount_timeout * HZ;
	int tries = 0;
	int ret;

again:
	ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
	if (ret == -ENOENT && tries++ < 1) {
		ret = ceph_monc_do_get_version(&rbdc->client->monc, "osdmap",
					       &newest_epoch);
		if (ret < 0)
			return ret;

		if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
			ceph_monc_request_next_osdmap(&rbdc->client->monc);
			(void) ceph_monc_wait_osdmap(&rbdc->client->monc,
						     newest_epoch, timeout);
			goto again;
		} else {
			/* the osdmap we have is new enough */
			return -ENOENT;
		}
	}

	return ret;
}
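/*
 * The single retry above covers the common race where a pool was
 * created moments before the map request: the locally cached osdmap
 * may predate the pool, so the newest epoch is fetched from the
 * monitor and the name looked up once more before -ENOENT is
 * reported.
 */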
/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	char *object_name;
	void *response;
	char *image_id;

	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
	 * need to fetch the image id again in this case.  We
	 * do still need to set the image format though.
	 */
	if (rbd_dev->spec->image_id) {
		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;

		return 0;
	}

	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
	size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
	object_name = kmalloc(size, GFP_NOIO);
	if (!object_name)
		return -ENOMEM;
	sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
	dout("rbd id object name is %s\n", object_name);

	/* Response will be an encoded string, which includes a length */

	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

	/* If it doesn't exist we'll assume it's a format 1 image */

	ret = rbd_obj_method_sync(rbd_dev, object_name,
				"rbd", "get_id", NULL, 0,
				response, RBD_IMAGE_ID_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret == -ENOENT) {
		image_id = kstrdup("", GFP_KERNEL);
		ret = image_id ? 0 : -ENOMEM;
		if (!ret)
			rbd_dev->image_format = 1;
	} else if (ret > sizeof (__le32)) {
		void *p = response;

		image_id = ceph_extract_encoded_string(&p, p + ret,
						NULL, GFP_NOIO);
		ret = PTR_ERR_OR_ZERO(image_id);
		if (!ret)
			rbd_dev->image_format = 2;
	} else {
		ret = -EINVAL;
	}

	if (!ret) {
		rbd_dev->spec->image_id = image_id;
		dout("image_id is %s\n", image_id);
	}
out:
	kfree(response);
	kfree(object_name);

	return ret;
}
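/*
 * Illustration (name assumes the RBD_ID_PREFIX of "rbd_id." from
 * rbd_types.h): a format 2 image named "foo" is located through an
 * object called "rbd_id.foo", whose "get_id" class method returns
 * the image's persistent id as a length-prefixed string.
 */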
/*
 * Undo whatever state changes are made by v1 or v2 header info
 * routines.
 */
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
	struct rbd_image_header *header;

	/* Drop parent reference unless it's already been done (or none) */

	if (rbd_dev->parent_overlap)
		rbd_dev_parent_put(rbd_dev);

	/* Free dynamic fields from the header, then zero it out */

	header = &rbd_dev->header;
	ceph_put_snap_context(header->snapc);
	kfree(header->snap_sizes);
	kfree(header->snap_names);
	kfree(header->object_prefix);
	memset(header, 0, sizeof (*header));
}
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret)
		goto out_err;

	/*
	 * Get and check features for the image.  Currently the
	 * features are assumed to never change.
	 */
	ret = rbd_dev_v2_features(rbd_dev);
	if (ret)
		goto out_err;

	/* If the image supports fancy striping, get its parameters */

	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}
	/* No support for crypto and compression type format 2 images */

	return 0;
out_err:
	rbd_dev->header.features = 0;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;

	return ret;
}
static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
{
	struct rbd_device *parent = NULL;
	struct rbd_spec *parent_spec;
	struct rbd_client *rbdc;
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;
	/*
	 * We need to pass a reference to the client and the parent
	 * spec when creating the parent rbd_dev.  Images related by
	 * parent/child relationships always share both.
	 */
	parent_spec = rbd_spec_get(rbd_dev->parent_spec);
	rbdc = __rbd_get_client(rbd_dev->rbd_client);

	ret = -ENOMEM;
	parent = rbd_dev_create(rbdc, parent_spec);
	if (!parent)
		goto out_err;

	ret = rbd_dev_image_probe(parent, false);
	if (ret < 0)
		goto out_err;
	rbd_dev->parent = parent;
	atomic_set(&rbd_dev->parent_ref, 1);

	return 0;
out_err:
	if (parent) {
		rbd_dev_unparent(rbd_dev);
		kfree(rbd_dev->header_name);
		rbd_dev_destroy(parent);
	} else {
		rbd_put_client(rbdc);
		rbd_spec_put(parent_spec);
	}

	return ret;
}
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;

	/* Get an id and fill in device name. */

	ret = rbd_dev_id_get(rbd_dev);
	if (ret)
		return ret;

	BUILD_BUG_ON(DEV_NAME_LEN
			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);

	/* Record our major and minor device numbers. */

	if (!single_major) {
		ret = register_blkdev(0, rbd_dev->name);
		if (ret < 0)
			goto err_out_id;
		rbd_dev->major = ret;
		rbd_dev->minor = 0;
	} else {
		rbd_dev->major = rbd_major;
		rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
	}

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		goto err_out_disk;
	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
	set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);

	ret = rbd_bus_add_dev(rbd_dev);
	if (ret)
		goto err_out_mapping;

	/* Everything's ready.  Announce the disk to the world. */

	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	add_disk(rbd_dev->disk);

	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long) rbd_dev->mapping.size);

	return ret;

err_out_mapping:
	rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);

	return ret;
}
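/*
 * Worked example (assumes rbd_dev_id_to_minor() shifts by the
 * RBD_SINGLE_MAJOR_PART_SHIFT of 4): in single_major mode, dev_id 2
 * maps to minor 32, leaving minors 33-47 for partitions of
 * /dev/rbd2; without single_major each image instead gets its own
 * major from register_blkdev().
 */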
static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	size_t size;

	/* Record the header object name for this rbd image. */

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
	else
		size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);

	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name)
		return -ENOMEM;

	if (rbd_dev->image_format == 1)
		sprintf(rbd_dev->header_name, "%s%s",
			spec->image_name, RBD_SUFFIX);
	else
		sprintf(rbd_dev->header_name, "%s%s",
			RBD_HEADER_PREFIX, spec->image_id);

	return 0;
}
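/*
 * Illustration (names assume the RBD_SUFFIX ".rbd" and
 * RBD_HEADER_PREFIX "rbd_header." from rbd_types.h): a format 1
 * image "foo" keeps its header in object "foo.rbd", while a format
 * 2 image with id "1012ae8944a" uses "rbd_header.1012ae8944a".
 */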
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	rbd_dev_unprobe(rbd_dev);
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	rbd_dev_destroy(rbd_dev);
}
/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
{
	int ret;

	/*
	 * Get the id from the image id object.  Unless there's an
	 * error, rbd_dev->spec->image_id will be filled in with
	 * a dynamically-allocated string, and rbd_dev->image_format
	 * will be set to either 1 or 2.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;
	rbd_assert(rbd_dev->spec->image_id);
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	if (mapping) {
		ret = rbd_dev_header_watch_sync(rbd_dev);
		if (ret)
			goto out_header_name;
	}

	if (rbd_dev->image_format == 1)
		ret = rbd_dev_v1_header_info(rbd_dev);
	else
		ret = rbd_dev_v2_header_info(rbd_dev);
	if (ret)
		goto err_out_watch;

	ret = rbd_dev_spec_update(rbd_dev);
	if (ret)
		goto err_out_probe;

	ret = rbd_dev_probe_parent(rbd_dev);
	if (ret)
		goto err_out_probe;

	dout("discovered format %u image, header name is %s\n",
		rbd_dev->image_format, rbd_dev->header_name);

	return 0;
err_out_probe:
	rbd_dev_unprobe(rbd_dev);
err_out_watch:
	if (mapping)
		rbd_dev_header_unwatch_sync(rbd_dev);
out_header_name:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	dout("probe failed, returning %d\n", ret);

	return ret;
}
static ssize_t do_rbd_add(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	bool read_only;
	int rc = -ENOMEM;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto err_out_module;
	read_only = rbd_opts->read_only;
	kfree(rbd_opts);
	rbd_opts = NULL;	/* done with this */

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}

	/* pick the pool */
	rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
	if (rc < 0)
		goto err_out_client;
	spec->pool_id = (u64)rc;

	/* The ceph file layout needs to fit pool id in 32 bits */

	if (spec->pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "pool id too large (%llu > %u)\n",
				(unsigned long long)spec->pool_id, U32_MAX);
		rc = -EIO;
		goto err_out_client;
	}

	rbd_dev = rbd_dev_create(rbdc, spec);
	if (!rbd_dev)
		goto err_out_client;
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */

	rc = rbd_dev_image_probe(rbd_dev, true);
	if (rc < 0)
		goto err_out_rbd_dev;

	/* If we are mapping a snapshot it must be marked read-only */

	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		read_only = true;
	rbd_dev->mapping.read_only = read_only;

	rc = rbd_dev_device_setup(rbd_dev);
	if (rc) {
		/*
		 * rbd_dev_header_unwatch_sync() can't be moved into
		 * rbd_dev_image_release() without refactoring, see
		 * commit 1f3ef78861ac.
		 */
		rbd_dev_header_unwatch_sync(rbd_dev);
		rbd_dev_image_release(rbd_dev);
		goto err_out_module;
	}

	return count;

err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	rbd_spec_put(spec);
err_out_module:
	module_put(THIS_MODULE);

	dout("Error adding device %s\n", buf);

	return (ssize_t)rc;
}
static ssize_t rbd_add(struct bus_type *bus, const char *buf, size_t count)
{
	if (single_major)
		return -EINVAL;
	return do_rbd_add(bus, buf, count);
}

static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count)
{
	return do_rbd_add(bus, buf, count);
}
static void rbd_dev_device_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	rbd_free_disk(rbd_dev);
	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	rbd_dev_mapping_clear(rbd_dev);
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);
}
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		first->parent = NULL;
		first->parent_overlap = 0;
		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}
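/*
 * Illustration: for a chain mapped -> parent -> grandparent, the
 * first pass walks down to the grandparent (which has no parent of
 * its own) and releases it; the second pass releases the parent;
 * the loop then exits with the mapped image's parent pointer
 * cleared.
 */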
static ssize_t do_rbd_remove(struct bus_type *bus,
			     const char *buf,
			     size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct list_head *tmp;
	int dev_id;
	unsigned long ul;
	bool already = false;
	int ret;

	ret = kstrtoul(buf, 10, &ul);
	if (ret)
		return ret;

	/* convert to int; abort if we lost anything in the conversion */
	dev_id = (int)ul;
	if (dev_id != ul)
		return -EINVAL;

	ret = -ENOENT;
	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			ret = 0;
			break;
		}
	}
	if (!ret) {
		spin_lock_irq(&rbd_dev->lock);
		if (rbd_dev->open_count)
			ret = -EBUSY;
		else
			already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
							&rbd_dev->flags);
		spin_unlock_irq(&rbd_dev->lock);
	}
	spin_unlock(&rbd_dev_list_lock);
	if (ret < 0 || already)
		return ret;

	rbd_dev_header_unwatch_sync(rbd_dev);
	/*
	 * flush remaining watch callbacks - these must be complete
	 * before the osd_client is shutdown
	 */
	dout("%s: flushing notifies", __func__);
	ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);

	/*
	 * Don't free anything from rbd_dev->disk until after all
	 * notifies are completely processed. Otherwise
	 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
	 * in a potential use after free of rbd_dev->disk or rbd_dev.
	 */
	rbd_bus_del_dev(rbd_dev);
	rbd_dev_image_release(rbd_dev);
	module_put(THIS_MODULE);

	return count;
}
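/*
 * Example remove request (device id illustrative):
 *
 *   $ echo 2 > /sys/bus/rbd/remove
 *
 * unmaps /dev/rbd2, provided the device is not open and is not
 * already being removed.
 */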
static ssize_t rbd_remove(struct bus_type *bus, const char *buf, size_t count)
{
	if (single_major)
		return -EINVAL;
	return do_rbd_remove(bus, buf, count);
}

static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count)
{
	return do_rbd_remove(bus, buf, count);
}
/*
 * create control files in sysfs
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}
static int rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = kmem_cache_create("rbd_img_request",
					sizeof (struct rbd_img_request),
					__alignof__(struct rbd_img_request),
					0, NULL);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
					sizeof (struct rbd_obj_request),
					__alignof__(struct rbd_obj_request),
					0, NULL);
	if (!rbd_obj_request_cache)
		goto out_err;

	rbd_assert(!rbd_segment_name_cache);
	rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
					CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
	if (rbd_segment_name_cache)
		return 0;
out_err:
	if (rbd_obj_request_cache) {
		kmem_cache_destroy(rbd_obj_request_cache);
		rbd_obj_request_cache = NULL;
	}

	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;

	return -ENOMEM;
}
static void rbd_slab_exit(void)
{
	rbd_assert(rbd_segment_name_cache);
	kmem_cache_destroy(rbd_segment_name_cache);
	rbd_segment_name_cache = NULL;

	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}
static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}

	rc = rbd_slab_init();
	if (rc)
		return rc;

	if (single_major) {
		rbd_major = register_blkdev(0, RBD_DRV_NAME);
		if (rbd_major < 0) {
			rc = rbd_major;
			goto err_out_slab;
		}
	}

	rc = rbd_sysfs_init();
	if (rc)
		goto err_out_blkdev;

	if (single_major)
		pr_info("loaded (major %d)\n", rbd_major);
	else
		pr_info("loaded\n");

	return 0;

err_out_blkdev:
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
err_out_slab:
	rbd_slab_exit();
	return rc;
}
static void __exit rbd_exit(void)
{
	ida_destroy(&rbd_dev_id_ida);
	rbd_sysfs_cleanup();
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
	rbd_slab_exit();
}
module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
MODULE_LICENSE("GPL");