3 rbd.c -- Export ceph rados objects as a Linux block device
6 based on drivers/block/osdblk.c:
8 Copyright 2009 Red Hat, Inc.
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program; see the file COPYING. If not, write to
21 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 For usage instructions, please refer to:
27 Documentation/ABI/testing/sysfs-bus-rbd
31 #include <linux/ceph/libceph.h>
32 #include <linux/ceph/osd_client.h>
33 #include <linux/ceph/mon_client.h>
34 #include <linux/ceph/decode.h>
35 #include <linux/parser.h>
36 #include <linux/bsearch.h>
38 #include <linux/kernel.h>
39 #include <linux/device.h>
40 #include <linux/module.h>
42 #include <linux/blkdev.h>
43 #include <linux/slab.h>
44 #include <linux/idr.h>
46 #include "rbd_types.h"
48 #define RBD_DEBUG /* Activate rbd_assert() calls */
51 * The basic unit of block I/O is a sector. It is interpreted in a
52 * number of contexts in Linux (blk, bio, genhd), but the default is
53 * universally 512 bytes. These symbols are just slightly more
54 * meaningful than the bare numbers they represent.
56 #define SECTOR_SHIFT 9
57 #define SECTOR_SIZE (1ULL << SECTOR_SHIFT)
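/*
 * Illustrative sketch, not part of the original source: converting
 * between byte counts and 512-byte sectors with the symbols above.
 * The helper names are hypothetical, for exposition only.
 */
static inline u64 example_bytes_to_sectors(u64 bytes)
{
	return bytes >> SECTOR_SHIFT;	/* 4096 bytes -> 8 sectors */
}

static inline u64 example_sectors_to_bytes(u64 sectors)
{
	return sectors << SECTOR_SHIFT;	/* 8 sectors -> 4096 bytes */
}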
60 * Increment the given counter and return its updated value.
61 * If the counter is already 0, it will not be incremented.
62 * If the counter is already at its maximum value, -EINVAL is
63 * returned without updating it.
65 static int atomic_inc_return_safe(atomic_t *v)
69 counter = (unsigned int)__atomic_add_unless(v, 1, 0);
70 if (counter <= (unsigned int)INT_MAX)
71 return (int)counter;
73 atomic_dec(v);
75 return -EINVAL;
78 /* Decrement the counter. Return the resulting value, or -EINVAL if it would go below zero */
79 static int atomic_dec_return_safe(atomic_t *v)
83 counter = atomic_dec_return(v);
84 if (counter >= 0)
85 return counter;
87 atomic_inc(v);
89 return -EINVAL;
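/*
 * Illustrative sketch, hypothetical and for exposition only: the two
 * helpers above form a saturating reference count. A caller only
 * holds a reference when the increment returns a positive value, and
 * a negative return from either helper signals saturation rather
 * than a wrapped counter.
 */
static void example_safe_counter_usage(atomic_t *refs)
{
	if (atomic_inc_return_safe(refs) > 0) {
		/* ... reference held, safe to use the guarded object ... */
		if (atomic_dec_return_safe(refs) < 0)
			pr_warn("example: reference count underflow\n");
	}
}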
92 #define RBD_DRV_NAME "rbd"
94 #define RBD_MINORS_PER_MAJOR 256
95 #define RBD_SINGLE_MAJOR_PART_SHIFT 4
97 #define RBD_SNAP_DEV_NAME_PREFIX "snap_"
98 #define RBD_MAX_SNAP_NAME_LEN \
99 (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
101 #define RBD_MAX_SNAP_COUNT 510 /* allows max snapc to fit in 4KB */
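/*
 * Worked check, not in the original source: a snapshot context stores
 * one 64-bit id per snapshot, so 510 ids occupy 510 * 8 = 4080 bytes,
 * leaving 16 bytes for the ceph_snap_context header within a single
 * 4 KB page (the exact header size is an assumption here).
 */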
103 #define RBD_SNAP_HEAD_NAME "-"
105 #define BAD_SNAP_INDEX U32_MAX /* invalid index into snap array */
107 /* This allows a single page to hold an image name sent by the OSD */
108 #define RBD_IMAGE_NAME_LEN_MAX (PAGE_SIZE - sizeof (__le32) - 1)
109 #define RBD_IMAGE_ID_LEN_MAX 64
111 #define RBD_OBJ_PREFIX_LEN_MAX 64
115 #define RBD_FEATURE_LAYERING (1<<0)
116 #define RBD_FEATURE_STRIPINGV2 (1<<1)
117 #define RBD_FEATURES_ALL \
118 (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)
120 /* Features supported by this (client software) implementation. */
122 #define RBD_FEATURES_SUPPORTED (RBD_FEATURES_ALL)
125 * An RBD device name will be "rbd#", where the "rbd" comes from
126 * RBD_DRV_NAME above, and # is a unique integer identifier.
127 * MAX_INT_FORMAT_WIDTH is used to ensure DEV_NAME_LEN is big
128 * enough to hold all possible device names.
130 #define DEV_NAME_LEN 32
131 #define MAX_INT_FORMAT_WIDTH ((5 * sizeof (int)) / 2 + 1)
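/*
 * Worked check, not in the original source: (5 * sizeof (int)) / 2 + 1
 * over-approximates the decimal width of an int at 2.5 digits per byte
 * (log10(256) is about 2.41) plus one character for a sign. For a
 * 4-byte int this gives 11, exactly enough for "-2147483648".
 */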
134 * block device image metadata (in-memory version)
136 struct rbd_image_header {
137 /* These six fields never change for a given rbd image */
144 u64 features; /* Might be changeable someday? */
146 /* The remaining fields need to be updated occasionally */
148 struct ceph_snap_context *snapc;
149 char *snap_names; /* format 1 only */
150 u64 *snap_sizes; /* format 1 only */
154 * An rbd image specification.
156 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
157 * identify an image. Each rbd_dev structure includes a pointer to
158 * an rbd_spec structure that encapsulates this identity.
160 * Each of the id's in an rbd_spec has an associated name. For a
161 * user-mapped image, the names are supplied and the id's associated
162 * with them are looked up. For a layered image, a parent image is
163 * defined by the tuple, and the names are looked up.
165 * An rbd_dev structure contains a parent_spec pointer which is
166 * non-null if the image it represents is a child in a layered
167 * image. This pointer will refer to the rbd_spec structure used
168 * by the parent rbd_dev for its own identity (i.e., the structure
169 * is shared between the parent and child).
171 * Since these structures are populated once, during the discovery
172 * phase of image construction, they are effectively immutable so
173 * we make no effort to synchronize access to them.
175 * Note that code herein does not assume the image name is known (it
176 * could be a null pointer).
180 const char *pool_name;
182 const char *image_id;
183 const char *image_name;
186 const char *snap_name;
192 * An instance of a ceph client. Multiple devices may share an rbd client.
195 struct ceph_client *client;
197 struct list_head node;
200 struct rbd_img_request;
201 typedef void (*rbd_img_callback_t)(struct rbd_img_request *);
203 #define BAD_WHICH U32_MAX /* Good which or bad which, which? */
205 struct rbd_obj_request;
206 typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);
208 enum obj_request_type {
209 OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
213 OBJ_REQ_DONE, /* completion flag: not done = 0, done = 1 */
214 OBJ_REQ_IMG_DATA, /* object usage: standalone = 0, image = 1 */
215 OBJ_REQ_KNOWN, /* EXISTS flag valid: no = 0, yes = 1 */
216 OBJ_REQ_EXISTS, /* target exists: no = 0, yes = 1 */
219 struct rbd_obj_request {
220 const char *object_name;
221 u64 offset; /* object start byte */
222 u64 length; /* bytes from offset */
226 * An object request associated with an image will have its
227 * img_data flag set; a standalone object request will not.
229 * A standalone object request will have which == BAD_WHICH
230 * and a null obj_request pointer.
232 * An object request initiated in support of a layered image
233 * object (to check for its existence before a write) will
234 * have which == BAD_WHICH and a non-null obj_request pointer.
236 * Finally, an object request for rbd image data will have
237 * which != BAD_WHICH, and will have a non-null img_request
238 * pointer. The value of which will be in the range
239 * 0..(img_request->obj_request_count-1).
242 struct rbd_obj_request *obj_request; /* STAT op */
244 struct rbd_img_request *img_request;
246 /* links for img_request->obj_requests list */
247 struct list_head links;
250 u32 which; /* position in image request list */
252 enum obj_request_type type;
254 struct bio *bio_list;
260 struct page **copyup_pages;
261 u32 copyup_page_count;
263 struct ceph_osd_request *osd_req;
265 u64 xferred; /* bytes transferred */
268 rbd_obj_callback_t callback;
269 struct completion completion;
275 IMG_REQ_WRITE, /* I/O direction: read = 0, write = 1 */
276 IMG_REQ_CHILD, /* initiator: block = 0, child image = 1 */
277 IMG_REQ_LAYERED, /* ENOENT handling: normal = 0, layered = 1 */
280 struct rbd_img_request {
281 struct rbd_device *rbd_dev;
282 u64 offset; /* starting image byte offset */
283 u64 length; /* byte count from offset */
286 u64 snap_id; /* for reads */
287 struct ceph_snap_context *snapc; /* for writes */
290 struct request *rq; /* block request */
291 struct rbd_obj_request *obj_request; /* obj req initiator */
293 struct page **copyup_pages;
294 u32 copyup_page_count;
295 spinlock_t completion_lock;/* protects next_completion */
297 rbd_img_callback_t callback;
298 u64 xferred;/* aggregate bytes transferred */
299 int result; /* first nonzero obj_request result */
301 u32 obj_request_count;
302 struct list_head obj_requests; /* rbd_obj_request structs */
307 #define for_each_obj_request(ireq, oreq) \
308 list_for_each_entry(oreq, &(ireq)->obj_requests, links)
309 #define for_each_obj_request_from(ireq, oreq) \
310 list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
311 #define for_each_obj_request_safe(ireq, oreq, n) \
312 list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
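/*
 * Illustrative sketch, hypothetical helper for exposition: walking the
 * object requests of an image request with the macro above. Entries
 * are visited in the order they were added.
 */
static u32 example_count_obj_requests(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;
	u32 count = 0;

	for_each_obj_request(img_request, obj_request)
		count++;

	return count;	/* should equal img_request->obj_request_count */
}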
324 int dev_id; /* blkdev unique id */
326 int major; /* blkdev assigned major */
328 struct gendisk *disk; /* blkdev's gendisk and rq */
330 u32 image_format; /* Either 1 or 2 */
331 struct rbd_client *rbd_client;
333 char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
335 spinlock_t lock; /* queue, flags, open_count */
337 struct rbd_image_header header;
338 unsigned long flags; /* possibly lock protected */
339 struct rbd_spec *spec;
343 struct ceph_file_layout layout;
345 struct ceph_osd_event *watch_event;
346 struct rbd_obj_request *watch_request;
348 struct rbd_spec *parent_spec;
351 struct rbd_device *parent;
353 /* protects updating the header */
354 struct rw_semaphore header_rwsem;
356 struct rbd_mapping mapping;
358 struct list_head node;
362 unsigned long open_count; /* protected by lock */
366 * Flag bits for rbd_dev->flags. If atomicity is required,
367 * rbd_dev->lock is used to protect access.
369 * Currently, only the "removing" flag (which is coupled with the
370 * "open_count" field) requires atomic access.
373 RBD_DEV_FLAG_EXISTS, /* mapped snapshot has not been deleted */
374 RBD_DEV_FLAG_REMOVING, /* this mapping is being removed */
377 static DEFINE_MUTEX(client_mutex); /* Serialize client creation */
379 static LIST_HEAD(rbd_dev_list); /* devices */
380 static DEFINE_SPINLOCK(rbd_dev_list_lock);
382 static LIST_HEAD(rbd_client_list); /* clients */
383 static DEFINE_SPINLOCK(rbd_client_list_lock);
385 /* Slab caches for frequently-allocated structures */
387 static struct kmem_cache *rbd_img_request_cache;
388 static struct kmem_cache *rbd_obj_request_cache;
389 static struct kmem_cache *rbd_segment_name_cache;
391 static int rbd_major;
392 static DEFINE_IDA(rbd_dev_id_ida);
395 * Default to false for now, as single-major requires version >= 0.75
396 * of the userspace rbd utility.
398 static bool single_major = false;
399 module_param(single_major, bool, S_IRUGO);
400 MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");
402 static int rbd_img_request_submit(struct rbd_img_request *img_request);
404 static void rbd_dev_device_release(struct device *dev);
406 static ssize_t rbd_add(struct bus_type *bus, const char *buf,
408 static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
410 static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
412 static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
414 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
415 static void rbd_spec_put(struct rbd_spec *spec);
417 static int rbd_dev_id_to_minor(int dev_id)
419 return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
422 static int minor_to_rbd_dev_id(int minor)
424 return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
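/*
 * Worked example, not in the original source: with
 * RBD_SINGLE_MAJOR_PART_SHIFT == 4 each device owns 16 minors, so
 * rbd_dev_id_to_minor(1) == 16 and minor_to_rbd_dev_id() maps any of
 * minors 16..31 (the whole device plus up to 15 partitions) back to
 * dev_id 1.
 */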
427 static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
428 static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
429 static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
430 static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);
432 static struct attribute *rbd_bus_attrs[] = {
434 &bus_attr_remove.attr,
435 &bus_attr_add_single_major.attr,
436 &bus_attr_remove_single_major.attr,
440 static umode_t rbd_bus_is_visible(struct kobject *kobj,
441 struct attribute *attr, int index)
444 (attr == &bus_attr_add_single_major.attr ||
445 attr == &bus_attr_remove_single_major.attr))
451 static const struct attribute_group rbd_bus_group = {
452 .attrs = rbd_bus_attrs,
453 .is_visible = rbd_bus_is_visible,
455 __ATTRIBUTE_GROUPS(rbd_bus);
457 static struct bus_type rbd_bus_type = {
459 .bus_groups = rbd_bus_groups,
462 static void rbd_root_dev_release(struct device *dev)
466 static struct device rbd_root_dev = {
468 .release = rbd_root_dev_release,
471 static __printf(2, 3)
472 void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
474 struct va_format vaf;
482 printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
483 else if (rbd_dev->disk)
484 printk(KERN_WARNING "%s: %s: %pV\n",
485 RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
486 else if (rbd_dev->spec && rbd_dev->spec->image_name)
487 printk(KERN_WARNING "%s: image %s: %pV\n",
488 RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
489 else if (rbd_dev->spec && rbd_dev->spec->image_id)
490 printk(KERN_WARNING "%s: id %s: %pV\n",
491 RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
493 printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
494 RBD_DRV_NAME, rbd_dev, &vaf);
499 #define rbd_assert(expr) \
500 if (unlikely(!(expr))) { \
501 printk(KERN_ERR "\nAssertion failure in %s() " \
502 "at line %d:\n" \
503 "\trbd_assert(%s);\n\n", \
504 __func__, __LINE__, #expr); \
507 #else /* !RBD_DEBUG */
508 # define rbd_assert(expr) ((void) 0)
509 #endif /* !RBD_DEBUG */
511 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
512 static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
513 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
515 static int rbd_dev_refresh(struct rbd_device *rbd_dev);
516 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
517 static int rbd_dev_header_info(struct rbd_device *rbd_dev);
518 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
520 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
521 u8 *order, u64 *snap_size);
522 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
524 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
526 static int rbd_open(struct block_device *bdev, fmode_t mode)
528 struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
529 bool removing = false;
531 if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
534 spin_lock_irq(&rbd_dev->lock);
535 if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
538 rbd_dev->open_count++;
539 spin_unlock_irq(&rbd_dev->lock);
543 (void) get_device(&rbd_dev->dev);
548 static void rbd_release(struct gendisk *disk, fmode_t mode)
550 struct rbd_device *rbd_dev = disk->private_data;
551 unsigned long open_count_before;
553 spin_lock_irq(&rbd_dev->lock);
554 open_count_before = rbd_dev->open_count--;
555 spin_unlock_irq(&rbd_dev->lock);
556 rbd_assert(open_count_before > 0);
558 put_device(&rbd_dev->dev);
561 static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
566 bool ro_changed = false;
568 /* get_user() may sleep, so call it before taking rbd_dev->lock */
569 if (get_user(val, (int __user *)(arg)))
572 ro = val ? true : false;
573 /* A mapped snapshot cannot be made writable */
574 if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
577 spin_lock_irq(&rbd_dev->lock);
578 /* prevent other users from opening this device */
579 if (rbd_dev->open_count > 1) {
584 if (rbd_dev->mapping.read_only != ro) {
585 rbd_dev->mapping.read_only = ro;
590 spin_unlock_irq(&rbd_dev->lock);
591 /* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
592 if (ret == 0 && ro_changed)
593 set_disk_ro(rbd_dev->disk, ro ? 1 : 0);
598 static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
599 unsigned int cmd, unsigned long arg)
601 struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
606 ret = rbd_ioctl_set_ro(rbd_dev, arg);
616 static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
617 unsigned int cmd, unsigned long arg)
619 return rbd_ioctl(bdev, mode, cmd, arg);
621 #endif /* CONFIG_COMPAT */
623 static const struct block_device_operations rbd_bd_ops = {
624 .owner = THIS_MODULE,
626 .release = rbd_release,
629 .compat_ioctl = rbd_compat_ioctl,
634 * Initialize an rbd client instance. Success or not, this function
635 * consumes ceph_opts. Caller holds client_mutex.
637 static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
639 struct rbd_client *rbdc;
642 dout("%s:\n", __func__);
643 rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
647 kref_init(&rbdc->kref);
648 INIT_LIST_HEAD(&rbdc->node);
650 rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
651 if (IS_ERR(rbdc->client))
653 ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */
655 ret = ceph_open_session(rbdc->client);
659 spin_lock(&rbd_client_list_lock);
660 list_add_tail(&rbdc->node, &rbd_client_list);
661 spin_unlock(&rbd_client_list_lock);
663 dout("%s: rbdc %p\n", __func__, rbdc);
667 ceph_destroy_client(rbdc->client);
672 ceph_destroy_options(ceph_opts);
673 dout("%s: error %d\n", __func__, ret);
678 static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
680 kref_get(&rbdc->kref);
682 return rbdc;
686 * Find a ceph client with a specific address and configuration. If
687 * found, bump its reference count.
689 static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
691 struct rbd_client *client_node;
694 if (ceph_opts->flags & CEPH_OPT_NOSHARE)
697 spin_lock(&rbd_client_list_lock);
698 list_for_each_entry(client_node, &rbd_client_list, node) {
699 if (!ceph_compare_options(ceph_opts, client_node->client)) {
700 __rbd_get_client(client_node);
706 spin_unlock(&rbd_client_list_lock);
708 return found ? client_node : NULL;
718 /* string args above */
721 /* Boolean args above */
725 static match_table_t rbd_opts_tokens = {
727 /* string args above */
728 {Opt_read_only, "read_only"},
729 {Opt_read_only, "ro"}, /* Alternate spelling */
730 {Opt_read_write, "read_write"},
731 {Opt_read_write, "rw"}, /* Alternate spelling */
732 /* Boolean args above */
740 #define RBD_READ_ONLY_DEFAULT false
742 static int parse_rbd_opts_token(char *c, void *private)
744 struct rbd_options *rbd_opts = private;
745 substring_t argstr[MAX_OPT_ARGS];
746 int token, intval, ret;
748 token = match_token(c, rbd_opts_tokens, argstr);
752 if (token < Opt_last_int) {
753 ret = match_int(&argstr[0], &intval);
755 pr_err("bad mount option arg (not int) "
759 dout("got int token %d val %d\n", token, intval);
760 } else if (token > Opt_last_int && token < Opt_last_string) {
761 dout("got string token %d val %s\n", token,
763 } else if (token > Opt_last_string && token < Opt_last_bool) {
764 dout("got Boolean token %d\n", token);
766 dout("got token %d\n", token);
771 rbd_opts->read_only = true;
774 rbd_opts->read_only = false;
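/*
 * Illustrative example, assumed input for exposition: each
 * comma-separated option from a map request is fed to
 * parse_rbd_opts_token() in turn, so an option string containing
 * "ro" (or its long form "read_only") leaves rbd_opts->read_only
 * set to true.
 */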
784 * Get a ceph client with a specific address and configuration; if one
785 * does not exist, create it. Either way, ceph_opts is consumed by this function.
788 static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
790 struct rbd_client *rbdc;
792 mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
793 rbdc = rbd_client_find(ceph_opts);
794 if (rbdc) /* using an existing client */
795 ceph_destroy_options(ceph_opts);
797 rbdc = rbd_client_create(ceph_opts);
798 mutex_unlock(&client_mutex);
804 * Destroy ceph client
806 * rbd_client_list_lock is acquired here to unlink the client, so the caller must not hold it.
808 static void rbd_client_release(struct kref *kref)
810 struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
812 dout("%s: rbdc %p\n", __func__, rbdc);
813 spin_lock(&rbd_client_list_lock);
814 list_del(&rbdc->node);
815 spin_unlock(&rbd_client_list_lock);
817 ceph_destroy_client(rbdc->client);
822 * Drop reference to ceph client node. If it's not referenced anymore, release it.
825 static void rbd_put_client(struct rbd_client *rbdc)
828 kref_put(&rbdc->kref, rbd_client_release);
831 static bool rbd_image_format_valid(u32 image_format)
833 return image_format == 1 || image_format == 2;
836 static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
841 /* The header has to start with the magic rbd header text */
842 if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
845 /* The bio layer requires at least sector-sized I/O */
847 if (ondisk->options.order < SECTOR_SHIFT)
850 /* If we use u64 in a few spots we may be able to loosen this */
852 if (ondisk->options.order > 8 * sizeof (int) - 1)
856 * The size of a snapshot header has to fit in a size_t, and
857 * that limits the number of snapshots.
859 snap_count = le32_to_cpu(ondisk->snap_count);
860 size = SIZE_MAX - sizeof (struct ceph_snap_context);
861 if (snap_count > size / sizeof (__le64))
865 * Not only that, but the size of the entire snapshot
866 * header must also be representable in a size_t.
868 size -= snap_count * sizeof (__le64);
869 if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
876 * Fill an rbd image header with information from the given format 1 on-disk header.
879 static int rbd_header_from_disk(struct rbd_device *rbd_dev,
880 struct rbd_image_header_ondisk *ondisk)
882 struct rbd_image_header *header = &rbd_dev->header;
883 bool first_time = header->object_prefix == NULL;
884 struct ceph_snap_context *snapc;
885 char *object_prefix = NULL;
886 char *snap_names = NULL;
887 u64 *snap_sizes = NULL;
893 /* Allocate this now to avoid having to handle failure below */
898 len = strnlen(ondisk->object_prefix,
899 sizeof (ondisk->object_prefix));
900 object_prefix = kmalloc(len + 1, GFP_KERNEL);
903 memcpy(object_prefix, ondisk->object_prefix, len);
904 object_prefix[len] = '\0';
907 /* Allocate the snapshot context and fill it in */
909 snap_count = le32_to_cpu(ondisk->snap_count);
910 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
913 snapc->seq = le64_to_cpu(ondisk->snap_seq);
915 struct rbd_image_snap_ondisk *snaps;
916 u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);
918 /* We'll keep a copy of the snapshot names... */
920 if (snap_names_len > (u64)SIZE_MAX)
922 snap_names = kmalloc(snap_names_len, GFP_KERNEL);
926 /* ...as well as the array of their sizes. */
928 size = snap_count * sizeof (*header->snap_sizes);
929 snap_sizes = kmalloc(size, GFP_KERNEL);
934 * Copy the names, and fill in each snapshot's id
937 * Note that rbd_dev_v1_header_info() guarantees the
938 * ondisk buffer we're working with has
939 * snap_names_len bytes beyond the end of the
940 * snapshot id array, so this memcpy() is safe.
942 memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
943 snaps = ondisk->snaps;
944 for (i = 0; i < snap_count; i++) {
945 snapc->snaps[i] = le64_to_cpu(snaps[i].id);
946 snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
950 /* We won't fail any more, fill in the header */
953 header->object_prefix = object_prefix;
954 header->obj_order = ondisk->options.order;
955 header->crypt_type = ondisk->options.crypt_type;
956 header->comp_type = ondisk->options.comp_type;
957 /* The rest aren't used for format 1 images */
958 header->stripe_unit = 0;
959 header->stripe_count = 0;
960 header->features = 0;
962 ceph_put_snap_context(header->snapc);
963 kfree(header->snap_names);
964 kfree(header->snap_sizes);
967 /* The remaining fields always get updated (when we refresh) */
969 header->image_size = le64_to_cpu(ondisk->image_size);
970 header->snapc = snapc;
971 header->snap_names = snap_names;
972 header->snap_sizes = snap_sizes;
980 ceph_put_snap_context(snapc);
981 kfree(object_prefix);
986 static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
988 const char *snap_name;
990 rbd_assert(which < rbd_dev->header.snapc->num_snaps);
992 /* Skip over names until we find the one we are looking for */
994 snap_name = rbd_dev->header.snap_names;
996 snap_name += strlen(snap_name) + 1;
998 return kstrdup(snap_name, GFP_KERNEL);
1002 * Snapshot id comparison function for use with qsort()/bsearch().
1003 * Note that result is for snapshots in *descending* order.
1005 static int snapid_compare_reverse(const void *s1, const void *s2)
1007 u64 snap_id1 = *(u64 *)s1;
1008 u64 snap_id2 = *(u64 *)s2;
1010 if (snap_id1 < snap_id2)
1011 return 1;
1012 return snap_id1 == snap_id2 ? 0 : -1;
1016 * Search a snapshot context to see if the given snapshot id is
1019 * Returns the position of the snapshot id in the array if it's found,
1020 * or BAD_SNAP_INDEX otherwise.
1022 * Note: The snapshot array is kept sorted (by the osd) in
1023 * reverse order, highest snapshot id first.
1025 static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
1027 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
1030 found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
1031 sizeof (snap_id), snapid_compare_reverse);
1033 return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
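/*
 * Worked example, not in the original source: with snapc->snaps
 * holding { 9, 7, 4 } (descending, as the osd keeps it), a lookup of
 * snap_id 7 leads bsearch() to index 1, while a missing id such as 5
 * yields BAD_SNAP_INDEX.
 */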
1036 static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
1040 const char *snap_name;
1042 which = rbd_dev_snap_index(rbd_dev, snap_id);
1043 if (which == BAD_SNAP_INDEX)
1044 return ERR_PTR(-ENOENT);
1046 snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
1047 return snap_name ? snap_name : ERR_PTR(-ENOMEM);
1050 static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
1052 if (snap_id == CEPH_NOSNAP)
1053 return RBD_SNAP_HEAD_NAME;
1055 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1056 if (rbd_dev->image_format == 1)
1057 return rbd_dev_v1_snap_name(rbd_dev, snap_id);
1059 return rbd_dev_v2_snap_name(rbd_dev, snap_id);
1062 static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
1065 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1066 if (snap_id == CEPH_NOSNAP) {
1067 *snap_size = rbd_dev->header.image_size;
1068 } else if (rbd_dev->image_format == 1) {
1071 which = rbd_dev_snap_index(rbd_dev, snap_id);
1072 if (which == BAD_SNAP_INDEX)
1075 *snap_size = rbd_dev->header.snap_sizes[which];
1080 ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
1089 static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
1092 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1093 if (snap_id == CEPH_NOSNAP) {
1094 *snap_features = rbd_dev->header.features;
1095 } else if (rbd_dev->image_format == 1) {
1096 *snap_features = 0; /* No features for format 1 */
1101 ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
1105 *snap_features = features;
1110 static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
1112 u64 snap_id = rbd_dev->spec->snap_id;
1117 ret = rbd_snap_size(rbd_dev, snap_id, &size);
1120 ret = rbd_snap_features(rbd_dev, snap_id, &features);
1124 rbd_dev->mapping.size = size;
1125 rbd_dev->mapping.features = features;
1130 static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
1132 rbd_dev->mapping.size = 0;
1133 rbd_dev->mapping.features = 0;
1136 static void rbd_segment_name_free(const char *name)
1138 /* The explicit cast here is needed to drop the const qualifier */
1140 kmem_cache_free(rbd_segment_name_cache, (void *)name);
1143 static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
1150 name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
1153 segment = offset >> rbd_dev->header.obj_order;
1154 name_format = "%s.%012llx";
1155 if (rbd_dev->image_format == 2)
1156 name_format = "%s.%016llx";
1157 ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
1158 rbd_dev->header.object_prefix, segment);
1159 if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
1160 pr_err("error formatting segment name for #%llu (%d)\n",
1162 rbd_segment_name_free(name);
1169 static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
1171 u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
1173 return offset & (segment_size - 1);
1176 static u64 rbd_segment_length(struct rbd_device *rbd_dev,
1177 u64 offset, u64 length)
1179 u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
1181 offset &= segment_size - 1;
1183 rbd_assert(length <= U64_MAX - offset);
1184 if (offset + length > segment_size)
1185 length = segment_size - offset;
1187 return length;
1191 * returns the size of an object in the image
1193 static u64 rbd_obj_bytes(struct rbd_image_header *header)
1195 return 1 << header->obj_order;
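/*
 * Worked example, not in the original source: with obj_order 22
 * (4 MiB objects) an image offset of 0x40a000 lands in segment 1, so
 * rbd_segment_name() produces "<prefix>.000000000001" for a format 1
 * image, rbd_segment_offset() returns 0xa000, and a request longer
 * than the segment is clipped by rbd_segment_length() to the
 * 0x3f6000 bytes remaining before the segment boundary.
 */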
1202 static void bio_chain_put(struct bio *chain)
1208 chain = chain->bi_next;
1214 * zeros a bio chain, starting at a specific offset
1216 static void zero_bio_chain(struct bio *chain, int start_ofs)
1219 struct bvec_iter iter;
1220 unsigned long flags;
1225 bio_for_each_segment(bv, chain, iter) {
1226 if (pos + bv.bv_len > start_ofs) {
1227 int remainder = max(start_ofs - pos, 0);
1228 buf = bvec_kmap_irq(&bv, &flags);
1229 memset(buf + remainder, 0,
1230 bv.bv_len - remainder);
1231 flush_dcache_page(bv.bv_page);
1232 bvec_kunmap_irq(buf, &flags);
1237 chain = chain->bi_next;
1242 * similar to zero_bio_chain(), zeros data defined by a page array,
1243 * starting at the given byte offset from the start of the array and
1244 * continuing up to the given end offset. The pages array is
1245 * assumed to be big enough to hold all bytes up to the end.
1247 static void zero_pages(struct page **pages, u64 offset, u64 end)
1249 struct page **page = &pages[offset >> PAGE_SHIFT];
1251 rbd_assert(end > offset);
1252 rbd_assert(end - offset <= (u64)SIZE_MAX);
1253 while (offset < end) {
1256 unsigned long flags;
1259 page_offset = offset & ~PAGE_MASK;
1260 length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
1261 local_irq_save(flags);
1262 kaddr = kmap_atomic(*page);
1263 memset(kaddr + page_offset, 0, length);
1264 flush_dcache_page(*page);
1265 kunmap_atomic(kaddr);
1266 local_irq_restore(flags);
1274 * Clone a portion of a bio, starting at the given byte offset
1275 * and continuing for the number of bytes indicated.
1277 static struct bio *bio_clone_range(struct bio *bio_src,
1278 unsigned int offset,
1284 bio = bio_clone(bio_src, gfpmask);
1286 return NULL; /* ENOMEM */
1288 bio_advance(bio, offset);
1289 bio->bi_iter.bi_size = len;
1295 * Clone a portion of a bio chain, starting at the given byte offset
1296 * into the first bio in the source chain and continuing for the
1297 * number of bytes indicated. The result is another bio chain of
1298 * exactly the given length, or a null pointer on error.
1300 * The bio_src and offset parameters are both in-out. On entry they
1301 * refer to the first source bio and the offset into that bio where
1302 * the start of data to be cloned is located.
1304 * On return, bio_src is updated to refer to the bio in the source
1305 * chain that contains the first un-cloned byte, and *offset will
1306 * contain the offset of that byte within that bio.
1308 static struct bio *bio_chain_clone_range(struct bio **bio_src,
1309 unsigned int *offset,
1313 struct bio *bi = *bio_src;
1314 unsigned int off = *offset;
1315 struct bio *chain = NULL;
1318 /* Build up a chain of clone bios up to the limit */
1320 if (!bi || off >= bi->bi_iter.bi_size || !len)
1321 return NULL; /* Nothing to clone */
1325 unsigned int bi_size;
1329 rbd_warn(NULL, "bio_chain exhausted with %u left", len);
1330 goto out_err; /* EINVAL; ran out of bios */
1332 bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
1333 bio = bio_clone_range(bi, off, bi_size, gfpmask);
1335 goto out_err; /* ENOMEM */
1338 end = &bio->bi_next;
1341 if (off == bi->bi_iter.bi_size) {
1352 bio_chain_put(chain);
1358 * The default/initial value for all object request flags is 0. For
1359 * each flag, once its value is set to 1, it is never reset to 0 again.
1362 static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
1364 if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
1365 struct rbd_device *rbd_dev;
1367 rbd_dev = obj_request->img_request->rbd_dev;
1368 rbd_warn(rbd_dev, "obj_request %p already marked img_data",
1373 static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
1376 return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
1379 static void obj_request_done_set(struct rbd_obj_request *obj_request)
1381 if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
1382 struct rbd_device *rbd_dev = NULL;
1384 if (obj_request_img_data_test(obj_request))
1385 rbd_dev = obj_request->img_request->rbd_dev;
1386 rbd_warn(rbd_dev, "obj_request %p already marked done",
1391 static bool obj_request_done_test(struct rbd_obj_request *obj_request)
1394 return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
1398 * This sets the KNOWN flag after (possibly) setting the EXISTS
1399 * flag. The latter is set based on the "exists" value provided.
1401 * Note that for our purposes once an object exists it never goes
1402 * away again. It's possible that the responses from two existence
1403 * checks are separated by the creation of the target object, and
1404 * the first ("doesn't exist") response arrives *after* the second
1405 * ("does exist"). In that case we ignore the second one.
1407 static void obj_request_existence_set(struct rbd_obj_request *obj_request,
1411 set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
1412 set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
1416 static bool obj_request_known_test(struct rbd_obj_request *obj_request)
1419 return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
1422 static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
1425 return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
1428 static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
1430 struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
1432 return obj_request->img_offset <
1433 round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
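/*
 * Worked example, not in the original source: with 4 MiB objects and
 * a parent overlap of 5 MiB, the overlap rounds up to 8 MiB, so only
 * object requests whose image offset is below 8 MiB can involve
 * parent data and need the existence-check/copyup treatment.
 */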
1436 static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
1438 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1439 atomic_read(&obj_request->kref.refcount));
1440 kref_get(&obj_request->kref);
1443 static void rbd_obj_request_destroy(struct kref *kref);
1444 static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
1446 rbd_assert(obj_request != NULL);
1447 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1448 atomic_read(&obj_request->kref.refcount));
1449 kref_put(&obj_request->kref, rbd_obj_request_destroy);
1452 static void rbd_img_request_get(struct rbd_img_request *img_request)
1454 dout("%s: img %p (was %d)\n", __func__, img_request,
1455 atomic_read(&img_request->kref.refcount));
1456 kref_get(&img_request->kref);
1459 static bool img_request_child_test(struct rbd_img_request *img_request);
1460 static void rbd_parent_request_destroy(struct kref *kref);
1461 static void rbd_img_request_destroy(struct kref *kref);
1462 static void rbd_img_request_put(struct rbd_img_request *img_request)
1464 rbd_assert(img_request != NULL);
1465 dout("%s: img %p (was %d)\n", __func__, img_request,
1466 atomic_read(&img_request->kref.refcount));
1467 if (img_request_child_test(img_request))
1468 kref_put(&img_request->kref, rbd_parent_request_destroy);
1470 kref_put(&img_request->kref, rbd_img_request_destroy);
1473 static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
1474 struct rbd_obj_request *obj_request)
1476 rbd_assert(obj_request->img_request == NULL);
1478 /* Image request now owns object's original reference */
1479 obj_request->img_request = img_request;
1480 obj_request->which = img_request->obj_request_count;
1481 rbd_assert(!obj_request_img_data_test(obj_request));
1482 obj_request_img_data_set(obj_request);
1483 rbd_assert(obj_request->which != BAD_WHICH);
1484 img_request->obj_request_count++;
1485 list_add_tail(&obj_request->links, &img_request->obj_requests);
1486 dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
1487 obj_request->which);
1490 static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
1491 struct rbd_obj_request *obj_request)
1493 rbd_assert(obj_request->which != BAD_WHICH);
1495 dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
1496 obj_request->which);
1497 list_del(&obj_request->links);
1498 rbd_assert(img_request->obj_request_count > 0);
1499 img_request->obj_request_count--;
1500 rbd_assert(obj_request->which == img_request->obj_request_count);
1501 obj_request->which = BAD_WHICH;
1502 rbd_assert(obj_request_img_data_test(obj_request));
1503 rbd_assert(obj_request->img_request == img_request);
1504 obj_request->img_request = NULL;
1505 obj_request->callback = NULL;
1506 rbd_obj_request_put(obj_request);
1509 static bool obj_request_type_valid(enum obj_request_type type)
1512 case OBJ_REQUEST_NODATA:
1513 case OBJ_REQUEST_BIO:
1514 case OBJ_REQUEST_PAGES:
1521 static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
1522 struct rbd_obj_request *obj_request)
1524 dout("%s %p\n", __func__, obj_request);
1525 return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
1528 static void rbd_obj_request_end(struct rbd_obj_request *obj_request)
1530 dout("%s %p\n", __func__, obj_request);
1531 ceph_osdc_cancel_request(obj_request->osd_req);
1535 * Wait for an object request to complete. If interrupted, cancel the
1536 * underlying osd request.
1538 static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
1542 dout("%s %p\n", __func__, obj_request);
1544 ret = wait_for_completion_interruptible(&obj_request->completion);
1546 dout("%s %p interrupted\n", __func__, obj_request);
1547 rbd_obj_request_end(obj_request);
1551 dout("%s %p done\n", __func__, obj_request);
1555 static void rbd_img_request_complete(struct rbd_img_request *img_request)
1558 dout("%s: img %p\n", __func__, img_request);
1561 * If no error occurred, compute the aggregate transfer
1562 * count for the image request. We could instead use
1563 * atomic64_cmpxchg() to update it as each object request
1564 * completes; not clear which way is better offhand.
1566 if (!img_request->result) {
1567 struct rbd_obj_request *obj_request;
1570 for_each_obj_request(img_request, obj_request)
1571 xferred += obj_request->xferred;
1572 img_request->xferred = xferred;
1575 if (img_request->callback)
1576 img_request->callback(img_request);
1578 rbd_img_request_put(img_request);
1582 * The default/initial value for all image request flags is 0. Each
1583 * is conditionally set to 1 at image request initialization time
1584 * and currently never changes thereafter.
1586 static void img_request_write_set(struct rbd_img_request *img_request)
1588 set_bit(IMG_REQ_WRITE, &img_request->flags);
1592 static bool img_request_write_test(struct rbd_img_request *img_request)
1595 return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
1598 static void img_request_child_set(struct rbd_img_request *img_request)
1600 set_bit(IMG_REQ_CHILD, &img_request->flags);
1604 static void img_request_child_clear(struct rbd_img_request *img_request)
1606 clear_bit(IMG_REQ_CHILD, &img_request->flags);
1610 static bool img_request_child_test(struct rbd_img_request *img_request)
1613 return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
1616 static void img_request_layered_set(struct rbd_img_request *img_request)
1618 set_bit(IMG_REQ_LAYERED, &img_request->flags);
1622 static void img_request_layered_clear(struct rbd_img_request *img_request)
1624 clear_bit(IMG_REQ_LAYERED, &img_request->flags);
1628 static bool img_request_layered_test(struct rbd_img_request *img_request)
1631 return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
1635 rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
1637 u64 xferred = obj_request->xferred;
1638 u64 length = obj_request->length;
1640 dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1641 obj_request, obj_request->img_request, obj_request->result,
1644 * ENOENT means a hole in the image. We zero-fill the entire
1645 * length of the request. A short read also implies zero-fill
1646 * to the end of the request. An error requires the whole
1647 * length of the request to be reported finished with an error
1648 * to the block layer. In each case we update the xferred
1649 * count to indicate the whole request was satisfied.
1651 rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
1652 if (obj_request->result == -ENOENT) {
1653 if (obj_request->type == OBJ_REQUEST_BIO)
1654 zero_bio_chain(obj_request->bio_list, 0);
1656 zero_pages(obj_request->pages, 0, length);
1657 obj_request->result = 0;
1658 } else if (xferred < length && !obj_request->result) {
1659 if (obj_request->type == OBJ_REQUEST_BIO)
1660 zero_bio_chain(obj_request->bio_list, xferred);
1662 zero_pages(obj_request->pages, xferred, length);
1664 obj_request->xferred = length;
1665 obj_request_done_set(obj_request);
1668 static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
1670 dout("%s: obj %p cb %p\n", __func__, obj_request,
1671 obj_request->callback);
1672 if (obj_request->callback)
1673 obj_request->callback(obj_request);
1675 complete_all(&obj_request->completion);
1678 static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
1680 dout("%s: obj %p\n", __func__, obj_request);
1681 obj_request_done_set(obj_request);
1684 static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
1686 struct rbd_img_request *img_request = NULL;
1687 struct rbd_device *rbd_dev = NULL;
1688 bool layered = false;
1690 if (obj_request_img_data_test(obj_request)) {
1691 img_request = obj_request->img_request;
1692 layered = img_request && img_request_layered_test(img_request);
1693 rbd_dev = img_request->rbd_dev;
1696 dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1697 obj_request, img_request, obj_request->result,
1698 obj_request->xferred, obj_request->length);
1699 if (layered && obj_request->result == -ENOENT &&
1700 obj_request->img_offset < rbd_dev->parent_overlap)
1701 rbd_img_parent_read(obj_request);
1702 else if (img_request)
1703 rbd_img_obj_request_read_callback(obj_request);
1705 obj_request_done_set(obj_request);
1708 static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
1710 dout("%s: obj %p result %d %llu\n", __func__, obj_request,
1711 obj_request->result, obj_request->length);
1713 * There is no such thing as a successful short write. Set
1714 * it to our originally-requested length.
1716 obj_request->xferred = obj_request->length;
1717 obj_request_done_set(obj_request);
1721 * For a simple stat call there's nothing to do. We'll do more if
1722 * this is part of a write sequence for a layered image.
1724 static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
1726 dout("%s: obj %p\n", __func__, obj_request);
1727 obj_request_done_set(obj_request);
1730 static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
1731 struct ceph_msg *msg)
1733 struct rbd_obj_request *obj_request = osd_req->r_priv;
1736 dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
1737 rbd_assert(osd_req == obj_request->osd_req);
1738 if (obj_request_img_data_test(obj_request)) {
1739 rbd_assert(obj_request->img_request);
1740 rbd_assert(obj_request->which != BAD_WHICH);
1742 rbd_assert(obj_request->which == BAD_WHICH);
1745 if (osd_req->r_result < 0)
1746 obj_request->result = osd_req->r_result;
1748 rbd_assert(osd_req->r_num_ops <= CEPH_OSD_MAX_OP);
1751 * We support a 64-bit length, but ultimately it has to be
1752 * passed to blk_end_request(), which takes an unsigned int.
1754 obj_request->xferred = osd_req->r_reply_op_len[0];
1755 rbd_assert(obj_request->xferred < (u64)UINT_MAX);
1757 opcode = osd_req->r_ops[0].op;
1759 case CEPH_OSD_OP_READ:
1760 rbd_osd_read_callback(obj_request);
1762 case CEPH_OSD_OP_SETALLOCHINT:
1763 rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE);
1765 case CEPH_OSD_OP_WRITE:
1766 rbd_osd_write_callback(obj_request);
1768 case CEPH_OSD_OP_STAT:
1769 rbd_osd_stat_callback(obj_request);
1771 case CEPH_OSD_OP_CALL:
1772 case CEPH_OSD_OP_NOTIFY_ACK:
1773 case CEPH_OSD_OP_WATCH:
1774 rbd_osd_trivial_callback(obj_request);
1777 rbd_warn(NULL, "%s: unsupported op %hu",
1778 obj_request->object_name, (unsigned short) opcode);
1782 if (obj_request_done_test(obj_request))
1783 rbd_obj_request_complete(obj_request);
1786 static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
1788 struct rbd_img_request *img_request = obj_request->img_request;
1789 struct ceph_osd_request *osd_req = obj_request->osd_req;
1792 rbd_assert(osd_req != NULL);
1794 snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
1795 ceph_osdc_build_request(osd_req, obj_request->offset,
1796 NULL, snap_id, NULL);
1799 static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
1801 struct rbd_img_request *img_request = obj_request->img_request;
1802 struct ceph_osd_request *osd_req = obj_request->osd_req;
1803 struct ceph_snap_context *snapc;
1804 struct timespec mtime = CURRENT_TIME;
1806 rbd_assert(osd_req != NULL);
1808 snapc = img_request ? img_request->snapc : NULL;
1809 ceph_osdc_build_request(osd_req, obj_request->offset,
1810 snapc, CEPH_NOSNAP, &mtime);
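/*
 * Note added for clarity: the two format helpers above encode the
 * read/write split used throughout this driver. Reads carry a
 * snapshot id (CEPH_NOSNAP when the head is mapped) selecting the
 * version of the object to read; writes instead carry the snapshot
 * context so the osd can preserve pre-snapshot data before
 * overwriting.
 */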
1814 * Create an osd request. A read request has one osd op (read).
1815 * A write request has either one (watch) or two (hint+write) osd ops.
1816 * (All rbd data writes are prefixed with an allocation hint op, but
1817 * technically osd watch is a write request, hence this distinction.)
1819 static struct ceph_osd_request *rbd_osd_req_create(
1820 struct rbd_device *rbd_dev,
1822 unsigned int num_ops,
1823 struct rbd_obj_request *obj_request)
1825 struct ceph_snap_context *snapc = NULL;
1826 struct ceph_osd_client *osdc;
1827 struct ceph_osd_request *osd_req;
1829 if (obj_request_img_data_test(obj_request)) {
1830 struct rbd_img_request *img_request = obj_request->img_request;
1832 rbd_assert(write_request ==
1833 img_request_write_test(img_request));
1835 snapc = img_request->snapc;
1838 rbd_assert(num_ops == 1 || (write_request && num_ops == 2));
1840 /* Allocate and initialize the request, for the num_ops ops */
1842 osdc = &rbd_dev->rbd_client->client->osdc;
1843 osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
1846 return NULL; /* ENOMEM */
1849 osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
1851 osd_req->r_flags = CEPH_OSD_FLAG_READ;
1853 osd_req->r_callback = rbd_osd_req_callback;
1854 osd_req->r_priv = obj_request;
1856 osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
1857 ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);
1863 * Create a copyup osd request based on the information in the
1864 * object request supplied. A copyup request has three osd ops,
1865 * a copyup method call, a hint op, and a write op.
1867 static struct ceph_osd_request *
1868 rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
1870 struct rbd_img_request *img_request;
1871 struct ceph_snap_context *snapc;
1872 struct rbd_device *rbd_dev;
1873 struct ceph_osd_client *osdc;
1874 struct ceph_osd_request *osd_req;
1876 rbd_assert(obj_request_img_data_test(obj_request));
1877 img_request = obj_request->img_request;
1878 rbd_assert(img_request);
1879 rbd_assert(img_request_write_test(img_request));
1881 /* Allocate and initialize the request, for the three ops */
1883 snapc = img_request->snapc;
1884 rbd_dev = img_request->rbd_dev;
1885 osdc = &rbd_dev->rbd_client->client->osdc;
1886 osd_req = ceph_osdc_alloc_request(osdc, snapc, 3, false, GFP_ATOMIC);
1888 return NULL; /* ENOMEM */
1890 osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
1891 osd_req->r_callback = rbd_osd_req_callback;
1892 osd_req->r_priv = obj_request;
1894 osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
1895 ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);
1901 static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
1903 ceph_osdc_put_request(osd_req);
1906 /* object_name is assumed to be a non-null pointer and NUL-terminated */
1908 static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
1909 u64 offset, u64 length,
1910 enum obj_request_type type)
1912 struct rbd_obj_request *obj_request;
1916 rbd_assert(obj_request_type_valid(type));
1918 size = strlen(object_name) + 1;
1919 name = kmalloc(size, GFP_KERNEL);
1923 obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
1929 obj_request->object_name = memcpy(name, object_name, size);
1930 obj_request->offset = offset;
1931 obj_request->length = length;
1932 obj_request->flags = 0;
1933 obj_request->which = BAD_WHICH;
1934 obj_request->type = type;
1935 INIT_LIST_HEAD(&obj_request->links);
1936 init_completion(&obj_request->completion);
1937 kref_init(&obj_request->kref);
1939 dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
1940 offset, length, (int)type, obj_request);
1945 static void rbd_obj_request_destroy(struct kref *kref)
1947 struct rbd_obj_request *obj_request;
1949 obj_request = container_of(kref, struct rbd_obj_request, kref);
1951 dout("%s: obj %p\n", __func__, obj_request);
1953 rbd_assert(obj_request->img_request == NULL);
1954 rbd_assert(obj_request->which == BAD_WHICH);
1956 if (obj_request->osd_req)
1957 rbd_osd_req_destroy(obj_request->osd_req);
1959 rbd_assert(obj_request_type_valid(obj_request->type));
1960 switch (obj_request->type) {
1961 case OBJ_REQUEST_NODATA:
1962 break; /* Nothing to do */
1963 case OBJ_REQUEST_BIO:
1964 if (obj_request->bio_list)
1965 bio_chain_put(obj_request->bio_list);
1967 case OBJ_REQUEST_PAGES:
1968 if (obj_request->pages)
1969 ceph_release_page_vector(obj_request->pages,
1970 obj_request->page_count);
1974 kfree(obj_request->object_name);
1975 obj_request->object_name = NULL;
1976 kmem_cache_free(rbd_obj_request_cache, obj_request);
1979 /* It's OK to call this for a device with no parent */
1981 static void rbd_spec_put(struct rbd_spec *spec);
1982 static void rbd_dev_unparent(struct rbd_device *rbd_dev)
1984 rbd_dev_remove_parent(rbd_dev);
1985 rbd_spec_put(rbd_dev->parent_spec);
1986 rbd_dev->parent_spec = NULL;
1987 rbd_dev->parent_overlap = 0;
1991 * Parent image reference counting is used to determine when an
1992 * image's parent fields can be safely torn down--after there are no
1993 * more in-flight requests to the parent image. When the last
1994 * reference is dropped, cleaning them up is safe.
1996 static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
2000 if (!rbd_dev->parent_spec)
2003 counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
2007 /* Last reference; clean up parent data structures */
2010 rbd_dev_unparent(rbd_dev);
2012 rbd_warn(rbd_dev, "parent reference underflow");
2016 * If an image has a non-zero parent overlap, get a reference to its
2019 * We must get the reference before checking for the overlap to
2020 * coordinate properly with zeroing the parent overlap in
2021 * rbd_dev_v2_parent_info() when an image gets flattened. We
2022 * drop it again if there is no overlap.
2024 * Returns true if the rbd device has a parent with a non-zero
2025 * overlap and a reference for it was successfully taken, or
2028 static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
2032 if (!rbd_dev->parent_spec)
2035 counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
2036 if (counter > 0 && rbd_dev->parent_overlap)
2039 /* Image was flattened, but parent is not yet torn down */
2042 rbd_warn(rbd_dev, "parent reference overflow");
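/*
 * Illustrative sketch, hypothetical and for exposition only: how the
 * get/put helpers above are meant to bracket I/O to a parent image.
 */
static void example_parent_io(struct rbd_device *rbd_dev)
{
	if (!rbd_dev_parent_get(rbd_dev))
		return;	/* no parent, or the image was flattened */

	/* ... issue requests against rbd_dev->parent here ... */

	rbd_dev_parent_put(rbd_dev);
}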
2048 * Caller is responsible for filling in the list of object requests
2049 * that comprises the image request, and the Linux request pointer
2050 * (if there is one).
2052 static struct rbd_img_request *rbd_img_request_create(
2053 struct rbd_device *rbd_dev,
2054 u64 offset, u64 length,
2057 struct rbd_img_request *img_request;
2059 img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_ATOMIC);
2063 if (write_request) {
2064 down_read(&rbd_dev->header_rwsem);
2065 ceph_get_snap_context(rbd_dev->header.snapc);
2066 up_read(&rbd_dev->header_rwsem);
2069 img_request->rq = NULL;
2070 img_request->rbd_dev = rbd_dev;
2071 img_request->offset = offset;
2072 img_request->length = length;
2073 img_request->flags = 0;
2074 if (write_request) {
2075 img_request_write_set(img_request);
2076 img_request->snapc = rbd_dev->header.snapc;
2078 img_request->snap_id = rbd_dev->spec->snap_id;
2080 if (rbd_dev_parent_get(rbd_dev))
2081 img_request_layered_set(img_request);
2082 spin_lock_init(&img_request->completion_lock);
2083 img_request->next_completion = 0;
2084 img_request->callback = NULL;
2085 img_request->result = 0;
2086 img_request->obj_request_count = 0;
2087 INIT_LIST_HEAD(&img_request->obj_requests);
2088 kref_init(&img_request->kref);
2090 dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
2091 write_request ? "write" : "read", offset, length,
2097 static void rbd_img_request_destroy(struct kref *kref)
2099 struct rbd_img_request *img_request;
2100 struct rbd_obj_request *obj_request;
2101 struct rbd_obj_request *next_obj_request;
2103 img_request = container_of(kref, struct rbd_img_request, kref);
2105 dout("%s: img %p\n", __func__, img_request);
2107 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2108 rbd_img_obj_request_del(img_request, obj_request);
2109 rbd_assert(img_request->obj_request_count == 0);
2111 if (img_request_layered_test(img_request)) {
2112 img_request_layered_clear(img_request);
2113 rbd_dev_parent_put(img_request->rbd_dev);
2116 if (img_request_write_test(img_request))
2117 ceph_put_snap_context(img_request->snapc);
2119 kmem_cache_free(rbd_img_request_cache, img_request);
2122 static struct rbd_img_request *rbd_parent_request_create(
2123 struct rbd_obj_request *obj_request,
2124 u64 img_offset, u64 length)
2126 struct rbd_img_request *parent_request;
2127 struct rbd_device *rbd_dev;
2129 rbd_assert(obj_request->img_request);
2130 rbd_dev = obj_request->img_request->rbd_dev;
2132 parent_request = rbd_img_request_create(rbd_dev->parent,
2133 img_offset, length, false);
2134 if (!parent_request)
2137 img_request_child_set(parent_request);
2138 rbd_obj_request_get(obj_request);
2139 parent_request->obj_request = obj_request;
2141 return parent_request;
2144 static void rbd_parent_request_destroy(struct kref *kref)
2146 struct rbd_img_request *parent_request;
2147 struct rbd_obj_request *orig_request;
2149 parent_request = container_of(kref, struct rbd_img_request, kref);
2150 orig_request = parent_request->obj_request;
2152 parent_request->obj_request = NULL;
2153 rbd_obj_request_put(orig_request);
2154 img_request_child_clear(parent_request);
2156 rbd_img_request_destroy(kref);
2159 static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
2161 struct rbd_img_request *img_request;
2162 unsigned int xferred;
2166 rbd_assert(obj_request_img_data_test(obj_request));
2167 img_request = obj_request->img_request;
2169 rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
2170 xferred = (unsigned int)obj_request->xferred;
2171 result = obj_request->result;
2173 struct rbd_device *rbd_dev = img_request->rbd_dev;
2175 rbd_warn(rbd_dev, "%s %llx at %llx (%llx)",
2176 img_request_write_test(img_request) ? "write" : "read",
2177 obj_request->length, obj_request->img_offset,
2178 obj_request->offset);
2179 rbd_warn(rbd_dev, " result %d xferred %x",
2181 if (!img_request->result)
2182 img_request->result = result;
2185 /* Image object requests don't own their page array */
2187 if (obj_request->type == OBJ_REQUEST_PAGES) {
2188 obj_request->pages = NULL;
2189 obj_request->page_count = 0;
2192 if (img_request_child_test(img_request)) {
2193 rbd_assert(img_request->obj_request != NULL);
2194 more = obj_request->which < img_request->obj_request_count - 1;
2196 rbd_assert(img_request->rq != NULL);
2197 more = blk_end_request(img_request->rq, result, xferred);
2203 static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
2205 struct rbd_img_request *img_request;
2206 u32 which = obj_request->which;
2209 rbd_assert(obj_request_img_data_test(obj_request));
2210 img_request = obj_request->img_request;
2212 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
2213 rbd_assert(img_request != NULL);
2214 rbd_assert(img_request->obj_request_count > 0);
2215 rbd_assert(which != BAD_WHICH);
2216 rbd_assert(which < img_request->obj_request_count);
2218 spin_lock_irq(&img_request->completion_lock);
2219 if (which != img_request->next_completion)
2222 for_each_obj_request_from(img_request, obj_request) {
2224 rbd_assert(which < img_request->obj_request_count);
2226 if (!obj_request_done_test(obj_request))
2228 more = rbd_img_obj_end_request(obj_request);
2232 rbd_assert(more ^ (which == img_request->obj_request_count));
2233 img_request->next_completion = which;
2235 spin_unlock_irq(&img_request->completion_lock);
2236 rbd_img_request_put(img_request);
2239 rbd_img_request_complete(img_request);
2243 * Split up an image request into one or more object requests, each
2244 * to a different object. The "type" parameter indicates whether
2245 * "data_desc" is the pointer to the head of a list of bio
2246 * structures, or the base of a page array. In either case this
2247 * function assumes data_desc describes memory sufficient to hold
2248 * all data described by the image request.
static int rbd_img_request_fill(struct rbd_img_request *img_request,
                    enum obj_request_type type,
                    void *data_desc)
{
    struct rbd_device *rbd_dev = img_request->rbd_dev;
    struct rbd_obj_request *obj_request = NULL;
    struct rbd_obj_request *next_obj_request;
    bool write_request = img_request_write_test(img_request);
    struct bio *bio_list = NULL;
    unsigned int bio_offset = 0;
    struct page **pages = NULL;
    u64 img_offset;
    u64 resid;
    u16 opcode;

    dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
        (int)type, data_desc);

    opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ;
    img_offset = img_request->offset;
    resid = img_request->length;
    rbd_assert(resid > 0);

    if (type == OBJ_REQUEST_BIO) {
        bio_list = data_desc;
        rbd_assert(img_offset ==
               bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
    } else {
        rbd_assert(type == OBJ_REQUEST_PAGES);
        pages = data_desc;
    }

    while (resid) {
        struct ceph_osd_request *osd_req;
        const char *object_name;
        u64 offset;
        u64 length;
        unsigned int which = 0;

        object_name = rbd_segment_name(rbd_dev, img_offset);
        if (!object_name)
            goto out_unwind;
        offset = rbd_segment_offset(rbd_dev, img_offset);
        length = rbd_segment_length(rbd_dev, img_offset, resid);
        obj_request = rbd_obj_request_create(object_name,
                        offset, length, type);
        /* object request has its own copy of the object name */
        rbd_segment_name_free(object_name);
        if (!obj_request)
            goto out_unwind;

        /*
         * set obj_request->img_request before creating the
         * osd_request so that it gets the right snapc
         */
        rbd_img_obj_request_add(img_request, obj_request);

        if (type == OBJ_REQUEST_BIO) {
            unsigned int clone_size;

            rbd_assert(length <= (u64)UINT_MAX);
            clone_size = (unsigned int)length;
            obj_request->bio_list =
                    bio_chain_clone_range(&bio_list,
                                &bio_offset,
                                clone_size,
                                GFP_ATOMIC);
            if (!obj_request->bio_list)
                goto out_unwind;
        } else {
            unsigned int page_count;

            obj_request->pages = pages;
            page_count = (u32)calc_pages_for(offset, length);
            obj_request->page_count = page_count;
            if ((offset + length) & ~PAGE_MASK)
                page_count--;    /* more on last page */
            pages += page_count;
        }

        osd_req = rbd_osd_req_create(rbd_dev, write_request,
                        (write_request ? 2 : 1),
                        obj_request);
        if (!osd_req)
            goto out_unwind;
        obj_request->osd_req = osd_req;
        obj_request->callback = rbd_img_obj_callback;
        rbd_img_request_get(img_request);

        if (write_request) {
            osd_req_op_alloc_hint_init(osd_req, which,
                    rbd_obj_bytes(&rbd_dev->header),
                    rbd_obj_bytes(&rbd_dev->header));
            which++;
        }

        osd_req_op_extent_init(osd_req, which, opcode, offset, length,
                    0, 0);
        if (type == OBJ_REQUEST_BIO)
            osd_req_op_extent_osd_data_bio(osd_req, which,
                    obj_request->bio_list, length);
        else
            osd_req_op_extent_osd_data_pages(osd_req, which,
                    obj_request->pages, length,
                    offset & ~PAGE_MASK, false, false);

        if (write_request)
            rbd_osd_req_format_write(obj_request);
        else
            rbd_osd_req_format_read(obj_request);

        obj_request->img_offset = img_offset;

        img_offset += length;
        resid -= length;
    }

    return 0;

out_unwind:
    for_each_obj_request_safe(img_request, obj_request, next_obj_request)
        rbd_img_obj_request_del(img_request, obj_request);

    return -ENOMEM;
}
static void
rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
{
    struct rbd_img_request *img_request;
    struct rbd_device *rbd_dev;
    struct page **pages;
    u32 page_count;

    rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
    rbd_assert(obj_request_img_data_test(obj_request));
    img_request = obj_request->img_request;
    rbd_assert(img_request);

    rbd_dev = img_request->rbd_dev;
    rbd_assert(rbd_dev);

    pages = obj_request->copyup_pages;
    rbd_assert(pages != NULL);
    obj_request->copyup_pages = NULL;
    page_count = obj_request->copyup_page_count;
    rbd_assert(page_count);
    obj_request->copyup_page_count = 0;
    ceph_release_page_vector(pages, page_count);

    /*
     * We want the transfer count to reflect the size of the
     * original write request.  There is no such thing as a
     * successful short write, so if the request was successful
     * we can just set it to the originally-requested length.
     */
    if (!obj_request->result)
        obj_request->xferred = obj_request->length;

    /* Finish up with the normal image object callback */

    rbd_img_obj_callback(obj_request);
}
static void
rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
{
    struct rbd_obj_request *orig_request;
    struct ceph_osd_request *osd_req;
    struct ceph_osd_client *osdc;
    struct rbd_device *rbd_dev;
    struct page **pages;
    u32 page_count;
    int img_result;
    u64 parent_length;
    u64 offset;
    u64 length;

    rbd_assert(img_request_child_test(img_request));

    /* First get what we need from the image request */

    pages = img_request->copyup_pages;
    rbd_assert(pages != NULL);
    img_request->copyup_pages = NULL;
    page_count = img_request->copyup_page_count;
    rbd_assert(page_count);
    img_request->copyup_page_count = 0;

    orig_request = img_request->obj_request;
    rbd_assert(orig_request != NULL);
    rbd_assert(obj_request_type_valid(orig_request->type));
    img_result = img_request->result;
    parent_length = img_request->length;
    rbd_assert(parent_length == img_request->xferred);
    rbd_img_request_put(img_request);

    rbd_assert(orig_request->img_request);
    rbd_dev = orig_request->img_request->rbd_dev;
    rbd_assert(rbd_dev);

    /*
     * If the overlap has become 0 (most likely because the
     * image has been flattened) we need to free the pages
     * and re-submit the original write request.
     */
    if (!rbd_dev->parent_overlap) {
        struct ceph_osd_client *osdc;

        ceph_release_page_vector(pages, page_count);
        osdc = &rbd_dev->rbd_client->client->osdc;
        img_result = rbd_obj_request_submit(osdc, orig_request);
        if (!img_result)
            return;
    }

    if (img_result)
        goto out_err;

    /*
     * The original osd request is of no use to us any more.
     * We need a new one that can hold the three ops in a copyup
     * request.  Allocate the new copyup osd request for the
     * original request, and release the old one.
     */
    img_result = -ENOMEM;
    osd_req = rbd_osd_req_create_copyup(orig_request);
    if (!osd_req)
        goto out_err;
    rbd_osd_req_destroy(orig_request->osd_req);
    orig_request->osd_req = osd_req;
    orig_request->copyup_pages = pages;
    orig_request->copyup_page_count = page_count;

    /* Initialize the copyup op */

    osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
    osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
                        false, false);

    /* Then the hint op */

    osd_req_op_alloc_hint_init(osd_req, 1, rbd_obj_bytes(&rbd_dev->header),
                rbd_obj_bytes(&rbd_dev->header));

    /* And the original write request op */

    offset = orig_request->offset;
    length = orig_request->length;
    osd_req_op_extent_init(osd_req, 2, CEPH_OSD_OP_WRITE,
                offset, length, 0, 0);
    if (orig_request->type == OBJ_REQUEST_BIO)
        osd_req_op_extent_osd_data_bio(osd_req, 2,
                    orig_request->bio_list, length);
    else
        osd_req_op_extent_osd_data_pages(osd_req, 2,
                    orig_request->pages, length,
                    offset & ~PAGE_MASK, false, false);

    rbd_osd_req_format_write(orig_request);

    /* All set, send it off. */

    orig_request->callback = rbd_img_obj_copyup_callback;
    osdc = &rbd_dev->rbd_client->client->osdc;
    img_result = rbd_obj_request_submit(osdc, orig_request);
    if (!img_result)
        return;
out_err:
    /* Record the error code and complete the request */

    orig_request->result = img_result;
    orig_request->xferred = 0;
    obj_request_done_set(orig_request);
    rbd_obj_request_complete(orig_request);
}
/*
 * Read from the parent image the range of data that covers the
 * entire target of the given object request.  This is used for
 * satisfying a layered image write request when the target of an
 * object request from the image request does not exist.
 *
 * A page array big enough to hold the returned data is allocated
 * and supplied to rbd_img_request_fill() as the "data descriptor."
 * When the read completes, this page array will be transferred to
 * the original object request for the copyup operation.
 *
 * If an error occurs, record it as the result of the original
 * object request and mark it done so it gets completed.
 */
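/*
 * Summary of the resulting flow (a descriptive note, not original
 * source text): the parent data lands in the page array via the
 * callback above, which then rebuilds the original request as a
 * single osd request carrying three ops -- the "copyup" class
 * method call with the parent data, an allocation hint, and the
 * original write.
 */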
static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
{
    struct rbd_img_request *img_request = NULL;
    struct rbd_img_request *parent_request = NULL;
    struct rbd_device *rbd_dev;
    u64 img_offset;
    u64 length;
    struct page **pages = NULL;
    u32 page_count;
    int result;

    rbd_assert(obj_request_img_data_test(obj_request));
    rbd_assert(obj_request_type_valid(obj_request->type));

    img_request = obj_request->img_request;
    rbd_assert(img_request != NULL);
    rbd_dev = img_request->rbd_dev;
    rbd_assert(rbd_dev->parent != NULL);

    /*
     * Determine the byte range covered by the object in the
     * child image to which the original request was to be sent.
     */
    img_offset = obj_request->img_offset - obj_request->offset;
    length = (u64)1 << rbd_dev->header.obj_order;

    /*
     * There is no defined parent data beyond the parent
     * overlap, so limit what we read at that boundary if
     * necessary.
     */
    if (img_offset + length > rbd_dev->parent_overlap) {
        rbd_assert(img_offset < rbd_dev->parent_overlap);
        length = rbd_dev->parent_overlap - img_offset;
    }

    /*
     * Allocate a page array big enough to receive the data read
     * from the parent.
     */
    page_count = (u32)calc_pages_for(0, length);
    pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
    if (IS_ERR(pages)) {
        result = PTR_ERR(pages);
        pages = NULL;
        goto out_err;
    }

    result = -ENOMEM;
    parent_request = rbd_parent_request_create(obj_request,
                        img_offset, length);
    if (!parent_request)
        goto out_err;

    result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
    if (result)
        goto out_err;
    parent_request->copyup_pages = pages;
    parent_request->copyup_page_count = page_count;

    parent_request->callback = rbd_img_obj_parent_read_full_callback;
    result = rbd_img_request_submit(parent_request);
    if (!result)
        return 0;

    parent_request->copyup_pages = NULL;
    parent_request->copyup_page_count = 0;
    parent_request->obj_request = NULL;
    rbd_obj_request_put(obj_request);
out_err:
    if (pages)
        ceph_release_page_vector(pages, page_count);
    if (parent_request)
        rbd_img_request_put(parent_request);
    obj_request->result = result;
    obj_request->xferred = 0;
    obj_request_done_set(obj_request);

    return result;
}
static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
{
    struct rbd_obj_request *orig_request;
    struct rbd_device *rbd_dev;
    int result;

    rbd_assert(!obj_request_img_data_test(obj_request));

    /*
     * All we need from the object request is the original
     * request and the result of the STAT op.  Grab those, then
     * we're done with the request.
     */
    orig_request = obj_request->obj_request;
    obj_request->obj_request = NULL;
    rbd_obj_request_put(orig_request);
    rbd_assert(orig_request);
    rbd_assert(orig_request->img_request);

    result = obj_request->result;
    obj_request->result = 0;

    dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
        obj_request, orig_request, result,
        obj_request->xferred, obj_request->length);
    rbd_obj_request_put(obj_request);

    /*
     * If the overlap has become 0 (most likely because the
     * image has been flattened) we need to re-submit the
     * original write request.
     */
    rbd_dev = orig_request->img_request->rbd_dev;
    if (!rbd_dev->parent_overlap) {
        struct ceph_osd_client *osdc;

        osdc = &rbd_dev->rbd_client->client->osdc;
        result = rbd_obj_request_submit(osdc, orig_request);
        if (!result)
            return;
    }

    /*
     * Our only purpose here is to determine whether the object
     * exists, and we don't want to treat the non-existence as
     * an error.  If something else comes back, transfer the
     * error to the original request and complete it now.
     */
    if (!result) {
        obj_request_existence_set(orig_request, true);
    } else if (result == -ENOENT) {
        obj_request_existence_set(orig_request, false);
    } else if (result) {
        orig_request->result = result;
        goto out;
    }

    /*
     * Resubmit the original request now that we have recorded
     * whether the target object exists.
     */
    orig_request->result = rbd_img_obj_request_submit(orig_request);
out:
    if (orig_request->result)
        rbd_obj_request_complete(orig_request);
}
static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
{
    struct rbd_obj_request *stat_request;
    struct rbd_device *rbd_dev;
    struct ceph_osd_client *osdc;
    struct page **pages = NULL;
    u32 page_count;
    size_t size;
    int ret;

    /*
     * The response data for a STAT call consists of:
     *     le64 length;
     *     struct {
     *         le32 tv_sec;
     *         le32 tv_nsec;
     *     } mtime;
     */
    size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
    page_count = (u32)calc_pages_for(0, size);
    pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
    if (IS_ERR(pages))
        return PTR_ERR(pages);

    ret = -ENOMEM;
    stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
                        OBJ_REQUEST_PAGES);
    if (!stat_request)
        goto out;

    rbd_obj_request_get(obj_request);
    stat_request->obj_request = obj_request;
    stat_request->pages = pages;
    stat_request->page_count = page_count;

    rbd_assert(obj_request->img_request);
    rbd_dev = obj_request->img_request->rbd_dev;
    stat_request->osd_req = rbd_osd_req_create(rbd_dev, false, 1,
                        stat_request);
    if (!stat_request->osd_req)
        goto out;
    stat_request->callback = rbd_img_obj_exists_callback;

    osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
    osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
                    false, false);
    rbd_osd_req_format_read(stat_request);

    osdc = &rbd_dev->rbd_client->client->osdc;
    ret = rbd_obj_request_submit(osdc, stat_request);
out:
    if (ret)
        rbd_obj_request_put(obj_request);

    return ret;
}
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
{
    struct rbd_img_request *img_request;
    struct rbd_device *rbd_dev;
    bool known;

    rbd_assert(obj_request_img_data_test(obj_request));

    img_request = obj_request->img_request;
    rbd_assert(img_request);
    rbd_dev = img_request->rbd_dev;

    /*
     * Only writes to layered images need special handling.
     * Reads and non-layered writes are simple object requests.
     * Layered writes that start beyond the end of the overlap
     * with the parent have no parent data, so they too are
     * simple object requests.  Finally, if the target object is
     * known to already exist, its parent data has already been
     * copied, so a write to the object can also be handled as a
     * simple object request.
     */
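    /*
     * In short (a descriptive note derived from the test below): a
     * request is sent straight to the osd unless it is a write, to a
     * layered image, within the parent overlap, and the target object
     * is not yet known to exist.
     */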
    if (!img_request_write_test(img_request) ||
        !img_request_layered_test(img_request) ||
        !obj_request_overlaps_parent(obj_request) ||
        ((known = obj_request_known_test(obj_request)) &&
            obj_request_exists_test(obj_request))) {

        struct rbd_device *rbd_dev;
        struct ceph_osd_client *osdc;

        rbd_dev = obj_request->img_request->rbd_dev;
        osdc = &rbd_dev->rbd_client->client->osdc;

        return rbd_obj_request_submit(osdc, obj_request);
    }

    /*
     * It's a layered write.  The target object might exist but
     * we may not know that yet.  If we know it doesn't exist,
     * start by reading the data for the full target object from
     * the parent so we can use it for a copyup to the target.
     */
    if (known)
        return rbd_img_obj_parent_read_full(obj_request);

    /* We don't know whether the target exists.  Go find out. */

    return rbd_img_obj_exists_submit(obj_request);
}
static int rbd_img_request_submit(struct rbd_img_request *img_request)
{
    struct rbd_obj_request *obj_request;
    struct rbd_obj_request *next_obj_request;

    dout("%s: img %p\n", __func__, img_request);
    for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
        int ret;

        ret = rbd_img_obj_request_submit(obj_request);
        if (ret)
            return ret;
    }

    return 0;
}
static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
{
    struct rbd_obj_request *obj_request;
    struct rbd_device *rbd_dev;
    u64 obj_end;
    u64 img_xferred;
    int img_result;

    rbd_assert(img_request_child_test(img_request));

    /* First get what we need from the image request and release it */

    obj_request = img_request->obj_request;
    img_xferred = img_request->xferred;
    img_result = img_request->result;
    rbd_img_request_put(img_request);

    /*
     * If the overlap has become 0 (most likely because the
     * image has been flattened) we need to re-submit the
     * original request.
     */
    rbd_assert(obj_request);
    rbd_assert(obj_request->img_request);
    rbd_dev = obj_request->img_request->rbd_dev;
    if (!rbd_dev->parent_overlap) {
        struct ceph_osd_client *osdc;

        osdc = &rbd_dev->rbd_client->client->osdc;
        img_result = rbd_obj_request_submit(osdc, obj_request);
        if (!img_result)
            return;
    }

    obj_request->result = img_result;
    if (obj_request->result)
        goto out;

    /*
     * We need to zero anything beyond the parent overlap
     * boundary.  Since rbd_img_obj_request_read_callback()
     * will zero anything beyond the end of a short read, an
     * easy way to do this is to pretend the data from the
     * parent came up short--ending at the overlap boundary.
     */
    rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
    obj_end = obj_request->img_offset + obj_request->length;
    if (obj_end > rbd_dev->parent_overlap) {
        u64 xferred = 0;

        if (obj_request->img_offset < rbd_dev->parent_overlap)
            xferred = rbd_dev->parent_overlap -
                    obj_request->img_offset;

        obj_request->xferred = min(img_xferred, xferred);
    } else {
        obj_request->xferred = img_xferred;
    }
out:
    rbd_img_obj_request_read_callback(obj_request);
    rbd_obj_request_complete(obj_request);
}
static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
{
    struct rbd_img_request *img_request;
    int result;

    rbd_assert(obj_request_img_data_test(obj_request));
    rbd_assert(obj_request->img_request != NULL);
    rbd_assert(obj_request->result == (s32) -ENOENT);
    rbd_assert(obj_request_type_valid(obj_request->type));

    /* rbd_read_finish(obj_request, obj_request->length); */
    img_request = rbd_parent_request_create(obj_request,
                        obj_request->img_offset,
                        obj_request->length);
    result = -ENOMEM;
    if (!img_request)
        goto out_err;

    if (obj_request->type == OBJ_REQUEST_BIO)
        result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
                        obj_request->bio_list);
    else
        result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
                        obj_request->pages);
    if (result)
        goto out_err;

    img_request->callback = rbd_img_parent_read_callback;
    result = rbd_img_request_submit(img_request);
    if (result)
        goto out_err;

    return;
out_err:
    if (img_request)
        rbd_img_request_put(img_request);
    obj_request->result = result;
    obj_request->xferred = 0;
    obj_request_done_set(obj_request);
}
static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
{
    struct rbd_obj_request *obj_request;
    struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
    int ret;

    obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
                        OBJ_REQUEST_NODATA);
    if (!obj_request)
        return -ENOMEM;

    ret = -ENOMEM;
    obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, 1,
                        obj_request);
    if (!obj_request->osd_req)
        goto out;

    osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
                notify_id, 0, 0);
    rbd_osd_req_format_read(obj_request);

    ret = rbd_obj_request_submit(osdc, obj_request);
    if (ret)
        goto out;
    ret = rbd_obj_request_wait(obj_request);
out:
    rbd_obj_request_put(obj_request);

    return ret;
}
static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
{
    struct rbd_device *rbd_dev = (struct rbd_device *)data;
    int ret;

    if (!rbd_dev)
        return;

    dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
        rbd_dev->header_name, (unsigned long long)notify_id,
        (unsigned int)opcode);

    /*
     * Until adequate refresh error handling is in place, there is
     * not much we can do here, except warn.
     *
     * See http://tracker.ceph.com/issues/5040
     */
    ret = rbd_dev_refresh(rbd_dev);
    if (ret)
        rbd_warn(rbd_dev, "refresh failed: %d\n", ret);

    ret = rbd_obj_notify_ack_sync(rbd_dev, notify_id);
    if (ret)
        rbd_warn(rbd_dev, "notify_ack ret %d\n", ret);
}
/*
 * Send a watch (or unwatch) request and wait for the ack.  Return a
 * request with a ref held on success or error.
 */
static struct rbd_obj_request *rbd_obj_watch_request_helper(
                        struct rbd_device *rbd_dev,
                        bool watch)
{
    struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
    struct rbd_obj_request *obj_request;
    int ret;

    obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
                        OBJ_REQUEST_NODATA);
    if (!obj_request)
        return ERR_PTR(-ENOMEM);

    obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, 1,
                        obj_request);
    if (!obj_request->osd_req) {
        ret = -ENOMEM;
        goto out;
    }

    osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
                rbd_dev->watch_event->cookie, 0, watch);
    rbd_osd_req_format_write(obj_request);

    if (watch)
        ceph_osdc_set_request_linger(osdc, obj_request->osd_req);

    ret = rbd_obj_request_submit(osdc, obj_request);
    if (ret)
        goto out;

    ret = rbd_obj_request_wait(obj_request);
    if (ret)
        goto out;

    ret = obj_request->result;
    if (ret) {
        if (watch)
            rbd_obj_request_end(obj_request);
        goto out;
    }

    return obj_request;

out:
    rbd_obj_request_put(obj_request);
    return ERR_PTR(ret);
}
/*
 * Initiate a watch request, synchronously.
 */
static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
{
    struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
    struct rbd_obj_request *obj_request;
    int ret;

    rbd_assert(!rbd_dev->watch_event);
    rbd_assert(!rbd_dev->watch_request);

    ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
                    &rbd_dev->watch_event);
    if (ret < 0)
        return ret;

    obj_request = rbd_obj_watch_request_helper(rbd_dev, true);
    if (IS_ERR(obj_request)) {
        ceph_osdc_cancel_event(rbd_dev->watch_event);
        rbd_dev->watch_event = NULL;
        return PTR_ERR(obj_request);
    }

    /*
     * A watch request is set to linger, so the underlying osd
     * request won't go away until we unregister it.  We retain
     * a pointer to the object request during that time (in
     * rbd_dev->watch_request), so we'll keep a reference to it.
     * We'll drop that reference after we've unregistered it in
     * rbd_dev_header_unwatch_sync().
     */
    rbd_dev->watch_request = obj_request;

    return 0;
}
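/*
 * Usage note (descriptive, not original source text): this function
 * pairs with rbd_dev_header_unwatch_sync() below; between the two
 * calls rbd_dev->watch_request pins the lingering osd request that
 * keeps header-change notifications flowing to rbd_watch_cb().
 */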
/*
 * Tear down a watch request, synchronously.
 */
static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
{
    struct rbd_obj_request *obj_request;

    rbd_assert(rbd_dev->watch_event);
    rbd_assert(rbd_dev->watch_request);

    rbd_obj_request_end(rbd_dev->watch_request);
    rbd_obj_request_put(rbd_dev->watch_request);
    rbd_dev->watch_request = NULL;

    obj_request = rbd_obj_watch_request_helper(rbd_dev, false);
    if (!IS_ERR(obj_request))
        rbd_obj_request_put(obj_request);
    else
        rbd_warn(rbd_dev, "unable to tear down watch request (%ld)",
            PTR_ERR(obj_request));

    ceph_osdc_cancel_event(rbd_dev->watch_event);
    rbd_dev->watch_event = NULL;
}
/*
 * Synchronous osd object method call.  Returns the number of bytes
 * returned in the outbound buffer, or a negative error code.
 */
static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
                 const char *object_name,
                 const char *class_name,
                 const char *method_name,
                 const void *outbound,
                 size_t outbound_size,
                 void *inbound,
                 size_t inbound_size)
{
    struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
    struct rbd_obj_request *obj_request;
    struct page **pages;
    u32 page_count;
    int ret;

    /*
     * Method calls are ultimately read operations.  The result
     * should be placed into the inbound buffer provided.  They
     * also supply outbound data--parameters for the object
     * method.  Currently if this is present it will be a
     * snapshot id.
     */
    page_count = (u32)calc_pages_for(0, inbound_size);
    pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
    if (IS_ERR(pages))
        return PTR_ERR(pages);

    ret = -ENOMEM;
    obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
                        OBJ_REQUEST_PAGES);
    if (!obj_request)
        goto out;

    obj_request->pages = pages;
    obj_request->page_count = page_count;

    obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, 1,
                        obj_request);
    if (!obj_request->osd_req)
        goto out;

    osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
                class_name, method_name);
    if (outbound_size) {
        struct ceph_pagelist *pagelist;

        pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
        if (!pagelist)
            goto out;

        ceph_pagelist_init(pagelist);
        ceph_pagelist_append(pagelist, outbound, outbound_size);
        osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
                        pagelist);
    }
    osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
                    obj_request->pages, inbound_size,
                    0, false, false);
    rbd_osd_req_format_read(obj_request);

    ret = rbd_obj_request_submit(osdc, obj_request);
    if (ret)
        goto out;
    ret = rbd_obj_request_wait(obj_request);
    if (ret)
        goto out;

    ret = obj_request->result;
    if (ret < 0)
        goto out;

    rbd_assert(obj_request->xferred < (u64)INT_MAX);
    ret = (int)obj_request->xferred;
    ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
out:
    if (obj_request)
        rbd_obj_request_put(obj_request);
    else
        ceph_release_page_vector(pages, page_count);

    return ret;
}
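/*
 * Typical usage, an illustrative sketch of this helper (the
 * "get_size" call in _rbd_dev_v2_snap_size() below is a real
 * instance of the pattern):
 *
 *     __le64 snapid = cpu_to_le64(CEPH_NOSNAP);
 *     struct {
 *         u8 order;
 *         __le64 size;
 *     } __attribute__ ((packed)) size_buf = { 0 };
 *
 *     ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
 *                 "rbd", "get_size",
 *                 &snapid, sizeof (snapid),
 *                 &size_buf, sizeof (size_buf));
 *
 * On success ret holds the number of bytes placed in the inbound
 * buffer; a short reply must be checked by the caller.
 */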
static void rbd_request_fn(struct request_queue *q)
        __releases(q->queue_lock) __acquires(q->queue_lock)
{
    struct rbd_device *rbd_dev = q->queuedata;
    struct request *rq;
    int result;

    while ((rq = blk_fetch_request(q))) {
        bool write_request = rq_data_dir(rq) == WRITE;
        struct rbd_img_request *img_request;
        u64 offset;
        u64 length;

        /* Ignore any non-FS requests that filter through. */

        if (rq->cmd_type != REQ_TYPE_FS) {
            dout("%s: non-fs request type %d\n", __func__,
                (int) rq->cmd_type);
            __blk_end_request_all(rq, 0);
            continue;
        }

        /* Ignore/skip any zero-length requests */

        offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
        length = (u64) blk_rq_bytes(rq);

        if (!length) {
            dout("%s: zero-length request\n", __func__);
            __blk_end_request_all(rq, 0);
            continue;
        }

        spin_unlock_irq(q->queue_lock);

        /* Disallow writes to a read-only device */

        if (write_request) {
            result = -EROFS;
            if (rbd_dev->mapping.read_only)
                goto end_request;
            rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
        }

        /*
         * Quit early if the mapped snapshot no longer
         * exists.  It's still possible the snapshot will
         * have disappeared by the time our request arrives
         * at the osd, but there's no sense in sending it if
         * we already know.
         */
        if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
            dout("request for non-existent snapshot");
            rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
            result = -ENXIO;
            goto end_request;
        }

        result = -EINVAL;
        if (offset && length > U64_MAX - offset + 1) {
            rbd_warn(rbd_dev, "bad request range (%llu~%llu)\n",
                offset, length);
            goto end_request;    /* Shouldn't happen */
        }

        result = -EIO;
        if (offset + length > rbd_dev->mapping.size) {
            rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)\n",
                offset, length, rbd_dev->mapping.size);
            goto end_request;
        }

        result = -ENOMEM;
        img_request = rbd_img_request_create(rbd_dev, offset, length,
                            write_request);
        if (!img_request)
            goto end_request;

        img_request->rq = rq;

        result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
                        rq->bio);
        if (!result)
            result = rbd_img_request_submit(img_request);
        if (result)
            rbd_img_request_put(img_request);
end_request:
        spin_lock_irq(q->queue_lock);
        if (result < 0) {
            rbd_warn(rbd_dev, "%s %llx at %llx result %d\n",
                write_request ? "write" : "read",
                length, offset, result);

            __blk_end_request_all(rq, result);
        }
    }
}
/*
 * a queue callback.  Makes sure that we don't create a bio that spans across
 * multiple osd objects.  One exception would be with single-page bios,
 * which we handle later at bio_chain_clone_range()
 */
static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
              struct bio_vec *bvec)
{
    struct rbd_device *rbd_dev = q->queuedata;
    sector_t sector_offset;
    sector_t sectors_per_obj;
    sector_t obj_sector_offset;
    int ret;

    /*
     * Find how far into its rbd object the partition-relative
     * bio start sector is to offset relative to the enclosing
     * device.
     */
    sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
    sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
    obj_sector_offset = sector_offset & (sectors_per_obj - 1);

    /*
     * Compute the number of bytes from that offset to the end
     * of the object.  Account for what's already used by the bio.
     */
    ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
    if (ret > bmd->bi_size)
        ret -= bmd->bi_size;
    else
        ret = 0;

    /*
     * Don't send back more than was asked for.  And if the bio
     * was empty, let the whole thing through because:  "Note
     * that a block device *must* allow a single page to be
     * added to an empty bio."
     */
    rbd_assert(bvec->bv_len <= PAGE_SIZE);
    if (ret > (int) bvec->bv_len || !bmd->bi_size)
        ret = (int) bvec->bv_len;

    return ret;
}
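/*
 * Worked example (illustrative, not from the original source): with
 * obj_order 22 (4 MiB objects), sectors_per_obj = 1 << (22 - 9) =
 * 8192.  A bio starting at device sector 8000 has obj_sector_offset
 * 8000 & 8191 = 8000, leaving (8192 - 8000) << 9 = 98304 bytes to
 * the object boundary; the bvec is accepted only up to that limit.
 */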
static void rbd_free_disk(struct rbd_device *rbd_dev)
{
    struct gendisk *disk = rbd_dev->disk;

    if (!disk)
        return;

    rbd_dev->disk = NULL;
    if (disk->flags & GENHD_FL_UP) {
        del_gendisk(disk);
        if (disk->queue)
            blk_cleanup_queue(disk->queue);
    }
    put_disk(disk);
}
static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
                const char *object_name,
                u64 offset, u64 length, void *buf)
{
    struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
    struct rbd_obj_request *obj_request;
    struct page **pages = NULL;
    u32 page_count;
    size_t size;
    int ret;

    page_count = (u32) calc_pages_for(offset, length);
    pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
    if (IS_ERR(pages))
        return PTR_ERR(pages);

    ret = -ENOMEM;
    obj_request = rbd_obj_request_create(object_name, offset, length,
                        OBJ_REQUEST_PAGES);
    if (!obj_request)
        goto out;

    obj_request->pages = pages;
    obj_request->page_count = page_count;

    obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, 1,
                        obj_request);
    if (!obj_request->osd_req)
        goto out;

    osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
                offset, length, 0, 0);
    osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
                    obj_request->pages,
                    obj_request->length,
                    obj_request->offset & ~PAGE_MASK,
                    false, false);
    rbd_osd_req_format_read(obj_request);

    ret = rbd_obj_request_submit(osdc, obj_request);
    if (ret)
        goto out;
    ret = rbd_obj_request_wait(obj_request);
    if (ret)
        goto out;

    ret = obj_request->result;
    if (ret < 0)
        goto out;

    rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
    size = (size_t) obj_request->xferred;
    ceph_copy_from_page_vector(pages, buf, 0, size);
    rbd_assert(size <= (size_t)INT_MAX);
    ret = (int)size;
out:
    if (obj_request)
        rbd_obj_request_put(obj_request);
    else
        ceph_release_page_vector(pages, page_count);

    return ret;
}
/*
 * Read the complete header for the given rbd device.  On successful
 * return, the rbd_dev->header field will contain up-to-date
 * information about the image.
 */
static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
{
    struct rbd_image_header_ondisk *ondisk = NULL;
    u32 snap_count = 0;
    u64 names_size = 0;
    u32 want_count;
    int ret;

    /*
     * The complete header will include an array of its 64-bit
     * snapshot ids, followed by the names of those snapshots as
     * a contiguous block of NUL-terminated strings.  Note that
     * the number of snapshots could change by the time we read
     * it in, in which case we re-read it.
     */
    do {
        size_t size;

        kfree(ondisk);

        size = sizeof (*ondisk);
        size += snap_count * sizeof (struct rbd_image_snap_ondisk);
        size += names_size;
        ondisk = kmalloc(size, GFP_KERNEL);
        if (!ondisk)
            return -ENOMEM;

        ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
                    0, size, ondisk);
        if (ret < 0)
            goto out;
        if ((size_t)ret < size) {
            ret = -ENXIO;
            rbd_warn(rbd_dev, "short header read (want %zd got %d)",
                size, ret);
            goto out;
        }
        if (!rbd_dev_ondisk_valid(ondisk)) {
            ret = -ENXIO;
            rbd_warn(rbd_dev, "invalid header");
            goto out;
        }

        names_size = le64_to_cpu(ondisk->snap_names_len);
        want_count = snap_count;
        snap_count = le32_to_cpu(ondisk->snap_count);
    } while (snap_count != want_count);

    ret = rbd_header_from_disk(rbd_dev, ondisk);
out:
    kfree(ondisk);

    return ret;
}
/*
 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
 * has disappeared from the (just updated) snapshot context.
 */
static void rbd_exists_validate(struct rbd_device *rbd_dev)
{
    u64 snap_id;

    if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
        return;

    snap_id = rbd_dev->spec->snap_id;
    if (snap_id == CEPH_NOSNAP)
        return;

    if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
        clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
}
static void rbd_dev_update_size(struct rbd_device *rbd_dev)
{
    sector_t size;
    bool removing;

    /*
     * Don't hold the lock while doing disk operations,
     * or lock ordering will conflict with the bdev mutex via:
     * rbd_add() -> blkdev_get() -> rbd_open()
     */
    spin_lock_irq(&rbd_dev->lock);
    removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
    spin_unlock_irq(&rbd_dev->lock);
    /*
     * If the device is being removed, rbd_dev->disk has
     * been destroyed, so don't try to update its size.
     */
    if (!removing) {
        size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
        dout("setting size to %llu sectors", (unsigned long long)size);
        set_capacity(rbd_dev->disk, size);
        revalidate_disk(rbd_dev->disk);
    }
}
static int rbd_dev_refresh(struct rbd_device *rbd_dev)
{
    u64 mapping_size;
    int ret;

    down_write(&rbd_dev->header_rwsem);
    mapping_size = rbd_dev->mapping.size;

    ret = rbd_dev_header_info(rbd_dev);
    if (ret)
        goto out;

    if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
        if (rbd_dev->mapping.size != rbd_dev->header.image_size)
            rbd_dev->mapping.size = rbd_dev->header.image_size;
    } else {
        /* validate mapped snapshot's EXISTS flag */
        rbd_exists_validate(rbd_dev);
    }
out:
    up_write(&rbd_dev->header_rwsem);

    if (mapping_size != rbd_dev->mapping.size) {
        rbd_dev_update_size(rbd_dev);
    }

    return ret;
}
static int rbd_init_disk(struct rbd_device *rbd_dev)
{
    struct gendisk *disk;
    struct request_queue *q;
    u64 segment_size;

    /* create gendisk info */
    disk = alloc_disk(single_major ?
              (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
              RBD_MINORS_PER_MAJOR);
    if (!disk)
        return -ENOMEM;

    snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
         rbd_dev->dev_id);
    disk->major = rbd_dev->major;
    disk->first_minor = rbd_dev->minor;
    if (single_major)
        disk->flags |= GENHD_FL_EXT_DEVT;
    disk->fops = &rbd_bd_ops;
    disk->private_data = rbd_dev;

    q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
    if (!q)
        goto out_disk;

    /* We use the default size, but let's be explicit about it. */
    blk_queue_physical_block_size(q, SECTOR_SIZE);

    /* set io sizes to object size */
    segment_size = rbd_obj_bytes(&rbd_dev->header);
    blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
    blk_queue_max_segment_size(q, segment_size);
    blk_queue_io_min(q, segment_size);
    blk_queue_io_opt(q, segment_size);

    blk_queue_merge_bvec(q, rbd_merge_bvec);
    disk->queue = q;

    q->queuedata = rbd_dev;

    rbd_dev->disk = disk;

    return 0;
out_disk:
    put_disk(disk);

    return -ENOMEM;
}
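/*
 * For example (illustrative figures): with the default 4 MiB object
 * size, segment_size = 4194304, so the queue advertises
 * max_hw_sectors = 4194304 / 512 = 8192 and a 4 MiB maximum segment
 * size; together with rbd_merge_bvec() above this keeps any single
 * request within one rbd object.
 */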
static struct rbd_device *dev_to_rbd_dev(struct device *dev)
{
    return container_of(dev, struct rbd_device, dev);
}

static ssize_t rbd_size_show(struct device *dev,
                 struct device_attribute *attr, char *buf)
{
    struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

    return sprintf(buf, "%llu\n",
        (unsigned long long)rbd_dev->mapping.size);
}

/*
 * Note this shows the features for whatever's mapped, which is not
 * necessarily the base image.
 */
static ssize_t rbd_features_show(struct device *dev,
                 struct device_attribute *attr, char *buf)
{
    struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

    return sprintf(buf, "0x%016llx\n",
        (unsigned long long)rbd_dev->mapping.features);
}

static ssize_t rbd_major_show(struct device *dev,
                 struct device_attribute *attr, char *buf)
{
    struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

    if (rbd_dev->major)
        return sprintf(buf, "%d\n", rbd_dev->major);

    return sprintf(buf, "(none)\n");
}

static ssize_t rbd_minor_show(struct device *dev,
                 struct device_attribute *attr, char *buf)
{
    struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

    return sprintf(buf, "%d\n", rbd_dev->minor);
}

static ssize_t rbd_client_id_show(struct device *dev,
                 struct device_attribute *attr, char *buf)
{
    struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

    return sprintf(buf, "client%lld\n",
        ceph_client_id(rbd_dev->rbd_client->client));
}

static ssize_t rbd_pool_show(struct device *dev,
                 struct device_attribute *attr, char *buf)
{
    struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

    return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
}

static ssize_t rbd_pool_id_show(struct device *dev,
                 struct device_attribute *attr, char *buf)
{
    struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

    return sprintf(buf, "%llu\n",
        (unsigned long long) rbd_dev->spec->pool_id);
}

static ssize_t rbd_name_show(struct device *dev,
                 struct device_attribute *attr, char *buf)
{
    struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

    if (rbd_dev->spec->image_name)
        return sprintf(buf, "%s\n", rbd_dev->spec->image_name);

    return sprintf(buf, "(unknown)\n");
}

static ssize_t rbd_image_id_show(struct device *dev,
                 struct device_attribute *attr, char *buf)
{
    struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

    return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
}
/*
 * Shows the name of the currently-mapped snapshot (or
 * RBD_SNAP_HEAD_NAME for the base image).
 */
static ssize_t rbd_snap_show(struct device *dev,
                 struct device_attribute *attr,
                 char *buf)
{
    struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

    return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
}
/*
 * For a v2 image, shows the chain of parent images, separated by empty
 * lines.  For v1 images or if there is no parent, shows "(no parent
 * image)".
 */
static ssize_t rbd_parent_show(struct device *dev,
                 struct device_attribute *attr,
                 char *buf)
{
    struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
    ssize_t count = 0;

    if (!rbd_dev->parent)
        return sprintf(buf, "(no parent image)\n");

    for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
        struct rbd_spec *spec = rbd_dev->parent_spec;

        count += sprintf(&buf[count], "%s"
                "pool_id %llu\npool_name %s\n"
                "image_id %s\nimage_name %s\n"
                "snap_id %llu\nsnap_name %s\n"
                "overlap %llu\n",
                !count ? "" : "\n", /* first? */
                spec->pool_id, spec->pool_name,
                spec->image_id, spec->image_name ?: "(unknown)",
                spec->snap_id, spec->snap_name,
                rbd_dev->parent_overlap);
    }

    return count;
}
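/*
 * Example output for one parent (illustrative values only):
 *
 *     pool_id 2
 *     pool_name rbd
 *     image_id 1009af86896a
 *     image_name parent-image
 *     snap_id 4
 *     snap_name base
 *     overlap 10737418240
 */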
static ssize_t rbd_image_refresh(struct device *dev,
                 struct device_attribute *attr,
                 const char *buf,
                 size_t size)
{
    struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
    int ret;

    ret = rbd_dev_refresh(rbd_dev);
    if (ret)
        rbd_warn(rbd_dev, ": manual header refresh error (%d)\n", ret);

    return ret < 0 ? ret : size;
}

static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
static struct attribute *rbd_attrs[] = {
    &dev_attr_size.attr,
    &dev_attr_features.attr,
    &dev_attr_major.attr,
    &dev_attr_minor.attr,
    &dev_attr_client_id.attr,
    &dev_attr_pool.attr,
    &dev_attr_pool_id.attr,
    &dev_attr_name.attr,
    &dev_attr_image_id.attr,
    &dev_attr_current_snap.attr,
    &dev_attr_parent.attr,
    &dev_attr_refresh.attr,
    NULL
};

static struct attribute_group rbd_attr_group = {
    .attrs = rbd_attrs,
};

static const struct attribute_group *rbd_attr_groups[] = {
    &rbd_attr_group,
    NULL
};

static void rbd_sysfs_dev_release(struct device *dev)
{
}

static struct device_type rbd_device_type = {
    .name       = "rbd",
    .groups     = rbd_attr_groups,
    .release    = rbd_sysfs_dev_release,
};
static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
{
    kref_get(&spec->kref);

    return spec;
}

static void rbd_spec_free(struct kref *kref);
static void rbd_spec_put(struct rbd_spec *spec)
{
    if (spec)
        kref_put(&spec->kref, rbd_spec_free);
}

static struct rbd_spec *rbd_spec_alloc(void)
{
    struct rbd_spec *spec;

    spec = kzalloc(sizeof (*spec), GFP_KERNEL);
    if (!spec)
        return NULL;

    spec->pool_id = CEPH_NOPOOL;
    spec->snap_id = CEPH_NOSNAP;
    kref_init(&spec->kref);

    return spec;
}

static void rbd_spec_free(struct kref *kref)
{
    struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);

    kfree(spec->pool_name);
    kfree(spec->image_id);
    kfree(spec->image_name);
    kfree(spec->snap_name);
    kfree(spec);
}
static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
                struct rbd_spec *spec)
{
    struct rbd_device *rbd_dev;

    rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
    if (!rbd_dev)
        return NULL;

    spin_lock_init(&rbd_dev->lock);
    rbd_dev->flags = 0;
    atomic_set(&rbd_dev->parent_ref, 0);
    INIT_LIST_HEAD(&rbd_dev->node);
    init_rwsem(&rbd_dev->header_rwsem);

    rbd_dev->spec = spec;
    rbd_dev->rbd_client = rbdc;

    /* Initialize the layout used for all rbd requests */

    rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
    rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
    rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
    rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);

    return rbd_dev;
}

static void rbd_dev_destroy(struct rbd_device *rbd_dev)
{
    rbd_put_client(rbd_dev->rbd_client);
    rbd_spec_put(rbd_dev->spec);
    kfree(rbd_dev);
}
/*
 * Get the size and object order for an image snapshot, or if
 * snap_id is CEPH_NOSNAP, gets this information for the base
 * image.
 */
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
                u8 *order, u64 *snap_size)
{
    __le64 snapid = cpu_to_le64(snap_id);
    int ret;
    struct {
        u8 order;
        __le64 size;
    } __attribute__ ((packed)) size_buf = { 0 };

    ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
                "rbd", "get_size",
                &snapid, sizeof (snapid),
                &size_buf, sizeof (size_buf));
    dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
    if (ret < 0)
        return ret;
    if (ret < sizeof (size_buf))
        return -ERANGE;

    if (order) {
        *order = size_buf.order;
        dout(" order %u", (unsigned int)*order);
    }
    *snap_size = le64_to_cpu(size_buf.size);

    dout(" snap_id 0x%016llx snap_size = %llu\n",
        (unsigned long long)snap_id,
        (unsigned long long)*snap_size);

    return 0;
}

static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
{
    return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
                    &rbd_dev->header.obj_order,
                    &rbd_dev->header.image_size);
}
static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
{
    void *reply_buf;
    int ret;
    void *p;

    reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
    if (!reply_buf)
        return -ENOMEM;

    ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
                "rbd", "get_object_prefix", NULL, 0,
                reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
    dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
    if (ret < 0)
        goto out;

    p = reply_buf;
    rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
                        p + ret, NULL, GFP_NOIO);
    ret = 0;

    if (IS_ERR(rbd_dev->header.object_prefix)) {
        ret = PTR_ERR(rbd_dev->header.object_prefix);
        rbd_dev->header.object_prefix = NULL;
    } else {
        dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
    }
out:
    kfree(reply_buf);

    return ret;
}
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
                u64 *snap_features)
{
    __le64 snapid = cpu_to_le64(snap_id);
    struct {
        __le64 features;
        __le64 incompat;
    } __attribute__ ((packed)) features_buf = { 0 };
    u64 incompat;
    int ret;

    ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
                "rbd", "get_features",
                &snapid, sizeof (snapid),
                &features_buf, sizeof (features_buf));
    dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
    if (ret < 0)
        return ret;
    if (ret < sizeof (features_buf))
        return -ERANGE;

    incompat = le64_to_cpu(features_buf.incompat);
    if (incompat & ~RBD_FEATURES_SUPPORTED)
        return -ENXIO;

    *snap_features = le64_to_cpu(features_buf.features);

    dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
        (unsigned long long)snap_id,
        (unsigned long long)*snap_features,
        (unsigned long long)le64_to_cpu(features_buf.incompat));

    return 0;
}

static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
{
    return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
                    &rbd_dev->header.features);
}
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
{
    struct rbd_spec *parent_spec;
    size_t size;
    void *reply_buf = NULL;
    __le64 snapid;
    void *p;
    void *end;
    u64 pool_id;
    char *image_id;
    u64 snap_id;
    u64 overlap;
    int ret;

    parent_spec = rbd_spec_alloc();
    if (!parent_spec)
        return -ENOMEM;

    size = sizeof (__le64) +                /* pool_id */
        sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
        sizeof (__le64) +                /* snap_id */
        sizeof (__le64);                /* overlap */
    reply_buf = kmalloc(size, GFP_KERNEL);
    if (!reply_buf) {
        ret = -ENOMEM;
        goto out_err;
    }

    snapid = cpu_to_le64(CEPH_NOSNAP);
    ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
                "rbd", "get_parent",
                &snapid, sizeof (snapid),
                reply_buf, size);
    dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
    if (ret < 0)
        goto out_err;

    p = reply_buf;
    end = reply_buf + ret;
    ret = -ERANGE;
    ceph_decode_64_safe(&p, end, pool_id, out_err);
    if (pool_id == CEPH_NOPOOL) {
        /*
         * Either the parent never existed, or we have a
         * record of it but the image got flattened so it no
         * longer has a parent.  When the parent of a
         * layered image disappears we immediately set the
         * overlap to 0.  The effect of this is that all new
         * requests will be treated as if the image had no
         * parent.
         */
        if (rbd_dev->parent_overlap) {
            rbd_dev->parent_overlap = 0;
            smp_mb();
            rbd_dev_parent_put(rbd_dev);
            pr_info("%s: clone image has been flattened\n",
                rbd_dev->disk->disk_name);
        }

        goto out;    /* No parent?  No problem. */
    }

    /* The ceph file layout needs to fit pool id in 32 bits */

    ret = -EIO;
    if (pool_id > (u64)U32_MAX) {
        rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
            (unsigned long long)pool_id, U32_MAX);
        goto out_err;
    }

    image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
    if (IS_ERR(image_id)) {
        ret = PTR_ERR(image_id);
        goto out_err;
    }
    ceph_decode_64_safe(&p, end, snap_id, out_err);
    ceph_decode_64_safe(&p, end, overlap, out_err);

    /*
     * The parent won't change (except when the clone is
     * flattened, which is already handled above).  So we only
     * need to record the parent spec if we have not already
     * done so.
     */
    if (!rbd_dev->parent_spec) {
        parent_spec->pool_id = pool_id;
        parent_spec->image_id = image_id;
        parent_spec->snap_id = snap_id;
        rbd_dev->parent_spec = parent_spec;
        parent_spec = NULL;    /* rbd_dev now owns this */
    } else {
        kfree(image_id);
    }

    /*
     * We always update the parent overlap.  If it's zero we
     * treat it specially.
     */
    rbd_dev->parent_overlap = overlap;
    smp_mb();
    if (!overlap) {

        /* A null parent_spec indicates it's the initial probe */

        if (parent_spec) {
            /*
             * The overlap has become zero, so the clone
             * must have been resized down to 0 at some
             * point.  Treat this the same as a flatten.
             */
            rbd_dev_parent_put(rbd_dev);
            pr_info("%s: clone image now standalone\n",
                rbd_dev->disk->disk_name);
        } else {
            /*
             * For the initial probe, if we find the
             * overlap is zero we just pretend there was
             * no parent image.
             */
            rbd_warn(rbd_dev, "ignoring parent of "
                        "clone with overlap 0\n");
        }
    }
out:
    ret = 0;
out_err:
    kfree(reply_buf);
    rbd_spec_put(parent_spec);

    return ret;
}
static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
{
    struct {
        __le64 stripe_unit;
        __le64 stripe_count;
    } __attribute__ ((packed)) striping_info_buf = { 0 };
    size_t size = sizeof (striping_info_buf);
    void *p;
    u64 obj_size;
    u64 stripe_unit;
    u64 stripe_count;
    int ret;

    ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
                "rbd", "get_stripe_unit_count", NULL, 0,
                (char *)&striping_info_buf, size);
    dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
    if (ret < 0)
        return ret;
    if (ret < size)
        return -ERANGE;

    /*
     * We don't actually support the "fancy striping" feature
     * (STRIPINGV2) yet, but if the striping sizes are the
     * defaults the behavior is the same as before.  So find
     * out, and only fail if the image has non-default values.
     */
    ret = -EINVAL;
    obj_size = (u64)1 << rbd_dev->header.obj_order;
    p = &striping_info_buf;
    stripe_unit = ceph_decode_64(&p);
    if (stripe_unit != obj_size) {
        rbd_warn(rbd_dev, "unsupported stripe unit "
                "(got %llu want %llu)",
                stripe_unit, obj_size);
        return -EINVAL;
    }
    stripe_count = ceph_decode_64(&p);
    if (stripe_count != 1) {
        rbd_warn(rbd_dev, "unsupported stripe count "
                "(got %llu want 1)", stripe_count);
        return -EINVAL;
    }
    rbd_dev->header.stripe_unit = stripe_unit;
    rbd_dev->header.stripe_count = stripe_count;

    return 0;
}
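/*
 * In other words (a descriptive note): the only striping layout this
 * driver accepts is the default one -- stripe_unit equal to the
 * object size and stripe_count of 1 -- which behaves identically to
 * no striping at all.
 */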
static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
{
    size_t image_id_size;
    char *image_id;
    void *p;
    void *end;
    size_t size;
    void *reply_buf = NULL;
    size_t len = 0;
    char *image_name = NULL;
    int ret;

    rbd_assert(!rbd_dev->spec->image_name);

    len = strlen(rbd_dev->spec->image_id);
    image_id_size = sizeof (__le32) + len;
    image_id = kmalloc(image_id_size, GFP_KERNEL);
    if (!image_id)
        return NULL;

    p = image_id;
    end = image_id + image_id_size;
    ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);

    size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
    reply_buf = kmalloc(size, GFP_KERNEL);
    if (!reply_buf)
        goto out;

    ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
                "rbd", "dir_get_name",
                image_id, image_id_size,
                reply_buf, size);
    if (ret < 0)
        goto out;
    p = reply_buf;
    end = reply_buf + ret;

    image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
    if (IS_ERR(image_name))
        image_name = NULL;
    else
        dout("%s: name is %s len is %zd\n", __func__, image_name, len);
out:
    kfree(reply_buf);
    kfree(image_id);

    return image_name;
}
static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
    struct ceph_snap_context *snapc = rbd_dev->header.snapc;
    const char *snap_name;
    u32 which = 0;

    /* Skip over names until we find the one we are looking for */

    snap_name = rbd_dev->header.snap_names;
    while (which < snapc->num_snaps) {
        if (!strcmp(name, snap_name))
            return snapc->snaps[which];
        snap_name += strlen(snap_name) + 1;
        which++;
    }

    return CEPH_NOSNAP;
}

static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
    struct ceph_snap_context *snapc = rbd_dev->header.snapc;
    u64 snap_id;
    bool found = false;
    u32 which;

    for (which = 0; !found && which < snapc->num_snaps; which++) {
        const char *snap_name;

        snap_id = snapc->snaps[which];
        snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
        if (IS_ERR(snap_name)) {
            /* ignore no-longer existing snapshots */
            if (PTR_ERR(snap_name) == -ENOENT)
                continue;
            else
                break;
        }
        found = !strcmp(name, snap_name);
        kfree(snap_name);
    }
    return found ? snap_id : CEPH_NOSNAP;
}

/*
 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
 * no snapshot by that name is found, or if an error occurs.
 */
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
    if (rbd_dev->image_format == 1)
        return rbd_v1_snap_id_by_name(rbd_dev, name);

    return rbd_v2_snap_id_by_name(rbd_dev, name);
}
/*
 * An image being mapped will have everything but the snap id.
 */
static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
{
    struct rbd_spec *spec = rbd_dev->spec;

    rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
    rbd_assert(spec->image_id && spec->image_name);
    rbd_assert(spec->snap_name);

    if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
        u64 snap_id;

        snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
        if (snap_id == CEPH_NOSNAP)
            return -ENOENT;

        spec->snap_id = snap_id;
    } else {
        spec->snap_id = CEPH_NOSNAP;
    }

    return 0;
}
/*
 * A parent image will have all ids but none of the names.
 *
 * All names in an rbd spec are dynamically allocated.  It's OK if we
 * can't figure out the name for an image id.
 */
static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
{
    struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
    struct rbd_spec *spec = rbd_dev->spec;
    const char *pool_name;
    const char *image_name;
    const char *snap_name;
    int ret;

    rbd_assert(spec->pool_id != CEPH_NOPOOL);
    rbd_assert(spec->image_id);
    rbd_assert(spec->snap_id != CEPH_NOSNAP);

    /* Get the pool name; we have to make our own copy of this */

    pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
    if (!pool_name) {
        rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
        return -EIO;
    }
    pool_name = kstrdup(pool_name, GFP_KERNEL);
    if (!pool_name)
        return -ENOMEM;

    /* Fetch the image name; tolerate failure here */

    image_name = rbd_dev_image_name(rbd_dev);
    if (!image_name)
        rbd_warn(rbd_dev, "unable to get image name");

    /* Fetch the snapshot name */

    snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
    if (IS_ERR(snap_name)) {
        ret = PTR_ERR(snap_name);
        goto out_err;
    }

    spec->pool_name = pool_name;
    spec->image_name = image_name;
    spec->snap_name = snap_name;

    return 0;
out_err:
    kfree(image_name);
    kfree(pool_name);

    return ret;
}
static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
{
    size_t size;
    int ret;
    void *reply_buf;
    void *p;
    void *end;
    u64 seq;
    u32 snap_count;
    struct ceph_snap_context *snapc;
    u32 i;

    /*
     * We'll need room for the seq value (maximum snapshot id),
     * snapshot count, and array of that many snapshot ids.
     * For now we have a fixed upper limit on the number we're
     * prepared to receive.
     */
    size = sizeof (__le64) + sizeof (__le32) +
            RBD_MAX_SNAP_COUNT * sizeof (__le64);
    reply_buf = kzalloc(size, GFP_KERNEL);
    if (!reply_buf)
        return -ENOMEM;

    ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
                "rbd", "get_snapcontext", NULL, 0,
                reply_buf, size);
    dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
    if (ret < 0)
        goto out;

    p = reply_buf;
    end = reply_buf + ret;
    ret = -ERANGE;
    ceph_decode_64_safe(&p, end, seq, out);
    ceph_decode_32_safe(&p, end, snap_count, out);

    /*
     * Make sure the reported number of snapshot ids wouldn't go
     * beyond the end of our buffer.  But before checking that,
     * make sure the computed size of the snapshot context we
     * allocate is representable in a size_t.
     */
    if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
                / sizeof (u64)) {
        ret = -EINVAL;
        goto out;
    }
    if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
        goto out;
    ret = 0;

    snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
    if (!snapc) {
        ret = -ENOMEM;
        goto out;
    }
    snapc->seq = seq;
    for (i = 0; i < snap_count; i++)
        snapc->snaps[i] = ceph_decode_64(&p);

    ceph_put_snap_context(rbd_dev->header.snapc);
    rbd_dev->header.snapc = snapc;

    dout(" snap context seq = %llu, snap_count = %u\n",
        (unsigned long long)seq, (unsigned int)snap_count);
out:
    kfree(reply_buf);

    return ret;
}
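/*
 * Size check (illustrative arithmetic): the reply buffer above is
 * sizeof (__le64) + sizeof (__le32) + RBD_MAX_SNAP_COUNT * sizeof (__le64),
 * i.e. 8 + 4 + 510 * 8 = 4092 bytes, so the largest snapshot context
 * we are prepared to receive fits within a single 4 KiB page.
 */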
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
                    u64 snap_id)
{
    size_t size;
    void *reply_buf;
    __le64 snapid;
    int ret;
    void *p;
    void *end;
    char *snap_name;

    size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
    reply_buf = kmalloc(size, GFP_KERNEL);
    if (!reply_buf)
        return ERR_PTR(-ENOMEM);

    snapid = cpu_to_le64(snap_id);
    ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
                "rbd", "get_snapshot_name",
                &snapid, sizeof (snapid),
                reply_buf, size);
    dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
    if (ret < 0) {
        snap_name = ERR_PTR(ret);
        goto out;
    }

    p = reply_buf;
    end = reply_buf + ret;
    snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
    if (IS_ERR(snap_name))
        goto out;

    dout(" snap_id 0x%016llx snap_name = %s\n",
        (unsigned long long)snap_id, snap_name);
out:
    kfree(reply_buf);

    return snap_name;
}
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
{
    bool first_time = rbd_dev->header.object_prefix == NULL;
    int ret;

    ret = rbd_dev_v2_image_size(rbd_dev);
    if (ret)
        return ret;

    if (first_time) {
        ret = rbd_dev_v2_header_onetime(rbd_dev);
        if (ret)
            return ret;
    }

    /*
     * If the image supports layering, get the parent info.  We
     * need to probe the first time regardless.  Thereafter we
     * only need to if there's a parent, to see if it has
     * disappeared due to the mapped image getting flattened.
     */
    if (rbd_dev->header.features & RBD_FEATURE_LAYERING &&
            (first_time || rbd_dev->parent_spec)) {
        bool warn;

        ret = rbd_dev_v2_parent_info(rbd_dev);
        if (ret)
            return ret;

        /*
         * Print a warning if this is the initial probe and
         * the image has a parent.  Don't print it if the
         * image now being probed is itself a parent.  We
         * can tell at this point because we won't know its
         * pool name yet (just its pool id).
         */
        warn = rbd_dev->parent_spec && rbd_dev->spec->pool_name;
        if (first_time && warn)
            rbd_warn(rbd_dev, "WARNING: kernel layering "
                    "is EXPERIMENTAL!");
    }

    ret = rbd_dev_v2_snap_context(rbd_dev);
    dout("rbd_dev_v2_snap_context returned %d\n", ret);

    return ret;
}

static int rbd_dev_header_info(struct rbd_device *rbd_dev)
{
    rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

    if (rbd_dev->image_format == 1)
        return rbd_dev_v1_header_info(rbd_dev);

    return rbd_dev_v2_header_info(rbd_dev);
}
static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
{
    struct device *dev;
    int ret;

    dev = &rbd_dev->dev;
    dev->bus = &rbd_bus_type;
    dev->type = &rbd_device_type;
    dev->parent = &rbd_root_dev;
    dev->release = rbd_dev_device_release;
    dev_set_name(dev, "%d", rbd_dev->dev_id);
    ret = device_register(dev);

    return ret;
}

static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
{
    device_unregister(&rbd_dev->dev);
}

/*
 * Get a unique rbd identifier for the given new rbd_dev, and add
 * the rbd_dev to the global list.
 */
static int rbd_dev_id_get(struct rbd_device *rbd_dev)
{
    int new_dev_id;

    new_dev_id = ida_simple_get(&rbd_dev_id_ida,
                    0, minor_to_rbd_dev_id(1 << MINORBITS),
                    GFP_KERNEL);
    if (new_dev_id < 0)
        return new_dev_id;

    rbd_dev->dev_id = new_dev_id;

    spin_lock(&rbd_dev_list_lock);
    list_add_tail(&rbd_dev->node, &rbd_dev_list);
    spin_unlock(&rbd_dev_list_lock);

    dout("rbd_dev %p given dev id %d\n", rbd_dev, rbd_dev->dev_id);

    return 0;
}

/*
 * Remove an rbd_dev from the global list, and record that its
 * identifier is no longer in use.
 */
static void rbd_dev_id_put(struct rbd_device *rbd_dev)
{
    spin_lock(&rbd_dev_list_lock);
    list_del_init(&rbd_dev->node);
    spin_unlock(&rbd_dev_list_lock);

    ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);

    dout("rbd_dev %p released dev id %d\n", rbd_dev, rbd_dev->dev_id);
}
4587 * Skips over white space at *buf, and updates *buf to point to the
4588 * first found non-space character (if any). Returns the length of
4589 * the token (string of non-white space characters) found. Note
4590 * that *buf must be terminated with '\0'.
4592 static inline size_t next_token(const char **buf)
4595 * These are the characters that produce nonzero for
4596 * isspace() in the "C" and "POSIX" locales.
4598 const char *spaces = " \f\n\r\t\v";
4600 *buf += strspn(*buf, spaces); /* Find start of token */
4602 return strcspn(*buf, spaces); /* Return token length */
4606 * Finds the next token in *buf, and if the provided token buffer is
4607 * big enough, copies the found token into it. The result, if
4608 * copied, is guaranteed to be terminated with '\0'. Note that *buf
4609 * must be terminated with '\0' on entry.
4611 * Returns the length of the token found (not including the '\0').
4612 * Return value will be 0 if no token is found, and it will be >=
4613 * token_size if the token would not fit.
4615 * The *buf pointer will be updated to point beyond the end of the
4616 * found token. Note that this occurs even if the token buffer is
4617 * too small to hold it.
4619 static inline size_t copy_token(const char **buf,
4625 len = next_token(buf);
4626 if (len < token_size) {
4627 memcpy(token, *buf, len);
4628 *(token + len) = '\0';
/*
 * Finds the next token in *buf, dynamically allocates a buffer big
 * enough to hold a copy of it, and copies the token into the new
 * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
 * that a duplicate buffer is created even for a zero-length token.
 *
 * Returns a pointer to the newly-allocated duplicate, or a null
 * pointer if memory for the duplicate was not available.  If
 * the lenp argument is a non-null pointer, the length of the token
 * (not including the '\0') is returned in *lenp.
 *
 * If successful, the *buf pointer will be updated to point beyond
 * the end of the found token.
 *
 * Note: uses GFP_KERNEL for allocation.
 */
static inline char *dup_token(const char **buf, size_t *lenp)
{
	char *dup;
	size_t len;

	len = next_token(buf);
	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
	if (!dup)
		return NULL;
	*(dup + len) = '\0';
	*buf += len;
	if (lenp)
		*lenp = len;
	return dup;
}
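/*
 * Usage sketch (illustrative only):
 *
 *	const char *p = "rbd foo";
 *	size_t len;
 *	char *pool = dup_token(&p, &len);
 *
 * yields a NUL-terminated copy of "rbd" that the caller must
 * kfree(), sets len to 3, and leaves p pointing at " foo".
 */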
/*
 * Parse the options provided for an "rbd add" (i.e., rbd image
 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
 * and the data written is passed here via a NUL-terminated buffer.
 * Returns 0 if successful or an error code otherwise.
 *
 * The information extracted from these options is recorded in
 * the other parameters which return dynamically-allocated
 * structures:
 *  ceph_opts
 *      The address of a pointer that will refer to a ceph options
 *      structure.  Caller must release the returned pointer using
 *      ceph_destroy_options() when it is no longer needed.
 *  rbd_opts
 *      Address of an rbd options pointer.  Fully initialized by
 *      this function; caller must release with kfree().
 *  spec
 *      Address of an rbd image specification pointer.  Fully
 *      initialized by this function based on parsed options.
 *      Caller must release with rbd_spec_put().
 *
 * The options passed take this form:
 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
 * where:
 *  <mon_addrs>
 *      A comma-separated list of one or more monitor addresses.
 *      A monitor address is an ip address, optionally followed
 *      by a port number (separated by a colon).
 *        I.e.:  ip1[:port1][,ip2[:port2]...]
 *  <options>
 *      A comma-separated list of ceph and/or rbd options.
 *  <pool_name>
 *      The name of the rados pool containing the rbd image.
 *  <image_name>
 *      The name of the image in that pool to map.
 *  <snap_id>
 *      An optional snapshot id.  If provided, the mapping will
 *      present data from the image at the time that snapshot was
 *      created.  The image head is used if no snapshot id is
 *      provided.  Snapshot mappings are always read-only.
 */
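/*
 * A worked example (all values below are illustrative only):
 *
 *	$ echo "1.2.3.4:6789 name=admin rbd foo" > /sys/bus/rbd/add
 *
 * parses as mon_addrs "1.2.3.4:6789", options "name=admin",
 * pool_name "rbd", and image_name "foo", with no snapshot name, so
 * the snap name defaults to "-" and the image head is mapped.
 */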
static int rbd_add_parse_args(const char *buf,
			      struct ceph_options **ceph_opts,
			      struct rbd_options **opts,
			      struct rbd_spec **rbd_spec)
{
	size_t len;
	char *options;
	const char *mon_addrs;
	char *snap_name;
	size_t mon_addrs_size;
	struct rbd_spec *spec = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct ceph_options *copts;
	int ret;

	/* The first four tokens are required */

	len = next_token(&buf);
	if (!len) {
		rbd_warn(NULL, "no monitor address(es) provided");
		return -EINVAL;
	}
	mon_addrs = buf;
	mon_addrs_size = len + 1;
	buf += len;

	ret = -EINVAL;
	options = dup_token(&buf, NULL);
	if (!options)
		return -ENOMEM;
	if (!*options) {
		rbd_warn(NULL, "no options provided");
		goto out_err;
	}

	spec = rbd_spec_alloc();
	if (!spec)
		goto out_mem;

	spec->pool_name = dup_token(&buf, NULL);
	if (!spec->pool_name)
		goto out_mem;
	if (!*spec->pool_name) {
		rbd_warn(NULL, "no pool name provided");
		goto out_err;
	}

	spec->image_name = dup_token(&buf, NULL);
	if (!spec->image_name)
		goto out_mem;
	if (!*spec->image_name) {
		rbd_warn(NULL, "no image name provided");
		goto out_err;
	}

	/*
	 * Snapshot name is optional; default is to use "-"
	 * (indicating the head/no snapshot).
	 */
	len = next_token(&buf);
	if (!len) {
		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
		ret = -ENAMETOOLONG;
		goto out_err;
	}
	snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!snap_name)
		goto out_mem;
	*(snap_name + len) = '\0';
	spec->snap_name = snap_name;

	/* Initialize all rbd options to the defaults */

	rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
	if (!rbd_opts)
		goto out_mem;
	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;

	copts = ceph_parse_options(options, mon_addrs,
				   mon_addrs + mon_addrs_size - 1,
				   parse_rbd_opts_token, rbd_opts);
	if (IS_ERR(copts)) {
		ret = PTR_ERR(copts);
		goto out_err;
	}
	kfree(options);

	*ceph_opts = copts;
	*opts = rbd_opts;
	*rbd_spec = spec;
	return 0;

out_mem:
	ret = -ENOMEM;
out_err:
	kfree(rbd_opts);
	rbd_spec_put(spec);
	kfree(options);
	return ret;
}
/*
 * Return pool id (>= 0) or a negative error code.
 */
static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
{
	u64 newest_epoch;
	unsigned long timeout = rbdc->client->options->mount_timeout * HZ;
	int tries = 0;
	int ret;

again:
	ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
	if (ret == -ENOENT && tries++ < 1) {
		ret = ceph_monc_do_get_version(&rbdc->client->monc, "osdmap",
					       &newest_epoch);
		if (ret < 0)
			return ret;
		if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
			ceph_monc_request_next_osdmap(&rbdc->client->monc);
			(void) ceph_monc_wait_osdmap(&rbdc->client->monc,
						     newest_epoch, timeout);
			goto again;
		} else {
			/* the osdmap we have is new enough */
			return -ENOENT;
		}
	}
	return ret;
}
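/*
 * A note on the retry above (a sketch of the intended flow): if the
 * pool was created after this client's current osdmap was issued,
 * the first lookup fails with -ENOENT.  We then ask the monitors
 * for the newest osdmap epoch and, if ours is older, wait (bounded
 * by mount_timeout) for a map at least that new before retrying
 * the lookup exactly once.
 */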
/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	char *object_name;
	void *response;
	char *image_id;

	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
	 * need to fetch the image id again in this case.  We
	 * do still need to set the image format though.
	 */
	if (rbd_dev->spec->image_id) {
		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
		return 0;
	}

	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
	size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
	object_name = kmalloc(size, GFP_NOIO);
	if (!object_name)
		return -ENOMEM;
	sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
	dout("rbd id object name is %s\n", object_name);

	/* Response will be an encoded string, which includes a length */

	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

	/* If it doesn't exist we'll assume it's a format 1 image */

	ret = rbd_obj_method_sync(rbd_dev, object_name,
				"rbd", "get_id", NULL, 0,
				response, RBD_IMAGE_ID_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret == -ENOENT) {
		image_id = kstrdup("", GFP_KERNEL);
		ret = image_id ? 0 : -ENOMEM;
		if (!ret)
			rbd_dev->image_format = 1;
	} else if (ret > sizeof (__le32)) {
		void *p = response;

		image_id = ceph_extract_encoded_string(&p, p + ret,
						NULL, GFP_NOIO);
		ret = PTR_ERR_OR_ZERO(image_id);
		if (!ret)
			rbd_dev->image_format = 2;
	} else {
		ret = -EINVAL;
	}

	if (!ret) {
		rbd_dev->spec->image_id = image_id;
		dout("image_id is %s\n", image_id);
	}
out:
	kfree(response);
	kfree(object_name);
	return ret;
}
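/*
 * Lookup sketch, assuming the usual rbd_types.h definitions
 * (RBD_ID_PREFIX is "rbd_id."): probing image "foo" invokes the
 * "get_id" class method on object "rbd_id.foo".  A response such
 * as "2ae8944a93cb" marks a format 2 image; -ENOENT instead marks
 * a format 1 image, recorded as an empty image id.
 */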
/*
 * Undo whatever state changes are made by a v1 or v2 header info
 * call.
 */
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
	struct rbd_image_header *header;

	/* Drop parent reference unless it's already been done (or none) */

	if (rbd_dev->parent_overlap)
		rbd_dev_parent_put(rbd_dev);

	/* Free dynamic fields from the header, then zero it out */

	header = &rbd_dev->header;
	ceph_put_snap_context(header->snapc);
	kfree(header->snap_sizes);
	kfree(header->snap_names);
	kfree(header->object_prefix);
	memset(header, 0, sizeof (*header));
}
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret)
		goto out_err;

	/*
	 * Get and check the features for the image.  Currently the
	 * features are assumed to never change.
	 */
	ret = rbd_dev_v2_features(rbd_dev);
	if (ret)
		goto out_err;

	/* If the image supports fancy striping, get its parameters */
	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}
	/* No support for crypto and compression type format 2 images */
	return 0;
out_err:
	rbd_dev->header.features = 0;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;
	return ret;
}
static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
{
	struct rbd_device *parent = NULL;
	struct rbd_spec *parent_spec;
	struct rbd_client *rbdc;
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;
	/*
	 * We need to pass a reference to the client and the parent
	 * spec when creating the parent rbd_dev.  Images related by
	 * parent/child relationships always share both.
	 */
	parent_spec = rbd_spec_get(rbd_dev->parent_spec);
	rbdc = __rbd_get_client(rbd_dev->rbd_client);

	ret = -ENOMEM;
	parent = rbd_dev_create(rbdc, parent_spec);
	if (!parent)
		goto out_err;

	ret = rbd_dev_image_probe(parent, false);
	if (ret < 0)
		goto out_err;
	rbd_dev->parent = parent;
	atomic_set(&rbd_dev->parent_ref, 1);
	return 0;
out_err:
	if (parent) {
		rbd_dev_unparent(rbd_dev);
		kfree(rbd_dev->header_name);
		rbd_dev_destroy(parent);
	} else {
		rbd_put_client(rbdc);
		rbd_spec_put(parent_spec);
	}
	return ret;
}
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;

	/* Get an id and fill in device name. */

	ret = rbd_dev_id_get(rbd_dev);
	if (ret)
		return ret;

	BUILD_BUG_ON(DEV_NAME_LEN
			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);

	/* Record our major and minor device numbers. */

	if (!single_major) {
		ret = register_blkdev(0, rbd_dev->name);
		if (ret < 0)
			goto err_out_id;
		rbd_dev->major = ret;
		rbd_dev->minor = 0;
	} else {
		rbd_dev->major = rbd_major;
		rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
	}

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		goto err_out_disk;
	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
	set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);

	ret = rbd_bus_add_dev(rbd_dev);
	if (ret)
		goto err_out_mapping;

	/* Everything's ready.  Announce the disk to the world. */

	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	add_disk(rbd_dev->disk);

	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long) rbd_dev->mapping.size);

	return ret;

err_out_mapping:
	rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);
	return ret;
}
static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	size_t size;

	/* Record the header object name for this rbd image. */

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
	else
		size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);

	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name)
		return -ENOMEM;

	if (rbd_dev->image_format == 1)
		sprintf(rbd_dev->header_name, "%s%s",
			spec->image_name, RBD_SUFFIX);
	else
		sprintf(rbd_dev->header_name, "%s%s",
			RBD_HEADER_PREFIX, spec->image_id);
	return 0;
}
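/*
 * Naming sketch, assuming the usual rbd_types.h definitions
 * (RBD_SUFFIX is ".rbd", RBD_HEADER_PREFIX is "rbd_header."):
 * a format 1 image named "foo" uses header object "foo.rbd", while
 * a format 2 image with id "2ae8944a93cb" uses
 * "rbd_header.2ae8944a93cb".
 */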
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	rbd_dev_unprobe(rbd_dev);
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	rbd_dev_destroy(rbd_dev);
}
/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
{
	int ret;

	/*
	 * Get the id from the image id object.  Unless there's an
	 * error, rbd_dev->spec->image_id will be filled in with
	 * a dynamically-allocated string, and rbd_dev->image_format
	 * will be set to either 1 or 2.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	if (mapping) {
		ret = rbd_dev_header_watch_sync(rbd_dev);
		if (ret)
			goto out_header_name;
	}

	ret = rbd_dev_header_info(rbd_dev);
	if (ret)
		goto err_out_watch;

	/*
	 * If this image is the one being mapped, we have pool name and
	 * id, image name and id, and snap name - need to fill snap id.
	 * Otherwise this is a parent image, identified by pool, image
	 * and snap ids - need to fill in names for those ids.
	 */
	if (mapping)
		ret = rbd_spec_fill_snap_id(rbd_dev);
	else
		ret = rbd_spec_fill_names(rbd_dev);
	if (ret)
		goto err_out_probe;

	ret = rbd_dev_probe_parent(rbd_dev);
	if (ret)
		goto err_out_probe;

	dout("discovered format %u image, header name is %s\n",
		rbd_dev->image_format, rbd_dev->header_name);
	return 0;

err_out_probe:
	rbd_dev_unprobe(rbd_dev);
err_out_watch:
	if (mapping)
		rbd_dev_header_unwatch_sync(rbd_dev);
out_header_name:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	dout("probe failed, returning %d\n", ret);
	return ret;
}
static ssize_t do_rbd_add(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	bool read_only;
	int rc = -ENOMEM;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto err_out_module;
	read_only = rbd_opts->read_only;
	kfree(rbd_opts);
	rbd_opts = NULL;	/* done with this */

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}

	/* pick the pool */
	rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
	if (rc < 0)
		goto err_out_client;
	spec->pool_id = (u64)rc;

	/* The ceph file layout needs to fit pool id in 32 bits */

	if (spec->pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "pool id too large (%llu > %u)\n",
				(unsigned long long)spec->pool_id, U32_MAX);
		rc = -EIO;
		goto err_out_client;
	}

	rbd_dev = rbd_dev_create(rbdc, spec);
	if (!rbd_dev)
		goto err_out_client;
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */

	rc = rbd_dev_image_probe(rbd_dev, true);
	if (rc < 0)
		goto err_out_rbd_dev;

	/* If we are mapping a snapshot it must be marked read-only */

	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		read_only = true;
	rbd_dev->mapping.read_only = read_only;

	rc = rbd_dev_device_setup(rbd_dev);
	if (rc) {
		/*
		 * rbd_dev_header_unwatch_sync() can't be moved into
		 * rbd_dev_image_release() without refactoring, see
		 * commit 1f3ef78861ac.
		 */
		rbd_dev_header_unwatch_sync(rbd_dev);
		rbd_dev_image_release(rbd_dev);
		goto err_out_module;
	}

	return count;

err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	rbd_spec_put(spec);
err_out_module:
	module_put(THIS_MODULE);

	dout("Error adding device %s\n", buf);
	return (ssize_t)rc;
}
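/*
 * A sketch of a successful add (illustrative values): after
 * do_rbd_add() returns, the image is exposed as /dev/rbd<id>
 * (e.g. /dev/rbd0) with a matching directory under
 * /sys/bus/rbd/devices/<id>, and rbd_dev_device_setup() will have
 * logged an "added with size" line for the new disk.
 */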
static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_add(bus, buf, count);
}

static ssize_t rbd_add_single_major(struct bus_type *bus,
				    const char *buf,
				    size_t count)
{
	return do_rbd_add(bus, buf, count);
}
static void rbd_dev_device_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	rbd_free_disk(rbd_dev);
	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	rbd_dev_mapping_clear(rbd_dev);
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);
}
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		first->parent = NULL;
		first->parent_overlap = 0;

		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}
static ssize_t do_rbd_remove(struct bus_type *bus,
			     const char *buf,
			     size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct list_head *tmp;
	int dev_id;
	unsigned long ul;
	bool already = false;
	int ret;

	ret = kstrtoul(buf, 10, &ul);
	if (ret)
		return ret;

	/* convert to int; abort if we lost anything in the conversion */
	dev_id = (int)ul;
	if (dev_id != ul)
		return -EINVAL;

	ret = -ENOENT;
	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			ret = 0;
			break;
		}
	}
	if (!ret) {
		spin_lock_irq(&rbd_dev->lock);
		if (rbd_dev->open_count)
			ret = -EBUSY;
		else
			already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
							&rbd_dev->flags);
		spin_unlock_irq(&rbd_dev->lock);
	}
	spin_unlock(&rbd_dev_list_lock);
	if (ret < 0 || already)
		return ret;

	rbd_dev_header_unwatch_sync(rbd_dev);
	/*
	 * flush remaining watch callbacks - these must be complete
	 * before the osd_client is shut down
	 */
	dout("%s: flushing notifies", __func__);
	ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);

	/*
	 * Don't free anything from rbd_dev->disk until after all
	 * notifies are completely processed.  Otherwise
	 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
	 * in a potential use after free of rbd_dev->disk or rbd_dev.
	 */
	rbd_bus_del_dev(rbd_dev);
	rbd_dev_image_release(rbd_dev);
	module_put(THIS_MODULE);

	return count;
}
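/*
 * Removal sketch (illustrative): writing the device id back, e.g.
 *
 *	$ echo 0 > /sys/bus/rbd/remove
 *
 * unmaps /dev/rbd0.  The write fails with -EBUSY if the device is
 * still open, and is a no-op if a removal is already in progress.
 */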
static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_remove(bus, buf, count);
}

static ssize_t rbd_remove_single_major(struct bus_type *bus,
				       const char *buf,
				       size_t count)
{
	return do_rbd_remove(bus, buf, count);
}
/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}
static int rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = kmem_cache_create("rbd_img_request",
					sizeof (struct rbd_img_request),
					__alignof__(struct rbd_img_request),
					0, NULL);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
					sizeof (struct rbd_obj_request),
					__alignof__(struct rbd_obj_request),
					0, NULL);
	if (!rbd_obj_request_cache)
		goto out_err;

	rbd_assert(!rbd_segment_name_cache);
	rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
					CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
	if (rbd_segment_name_cache)
		return 0;
out_err:
	if (rbd_obj_request_cache) {
		kmem_cache_destroy(rbd_obj_request_cache);
		rbd_obj_request_cache = NULL;
	}

	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
	return -ENOMEM;
}
static void rbd_slab_exit(void)
{
	rbd_assert(rbd_segment_name_cache);
	kmem_cache_destroy(rbd_segment_name_cache);
	rbd_segment_name_cache = NULL;

	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}
static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}

	rc = rbd_slab_init();
	if (rc)
		return rc;

	if (single_major) {
		rbd_major = register_blkdev(0, RBD_DRV_NAME);
		if (rbd_major < 0) {
			rc = rbd_major;
			goto err_out_slab;
		}
	}

	rc = rbd_sysfs_init();
	if (rc)
		goto err_out_blkdev;

	if (single_major)
		pr_info("loaded (major %d)\n", rbd_major);
	else
		pr_info("loaded\n");

	return 0;

err_out_blkdev:
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
err_out_slab:
	rbd_slab_exit();
	return rc;
}
static void __exit rbd_exit(void)
{
	ida_destroy(&rbd_dev_id_ida);
	rbd_sysfs_cleanup();
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
	rbd_slab_exit();
}
module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
MODULE_LICENSE("GPL");