/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

		Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>

#include <linux/blkdev.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */
/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define SECTOR_SHIFT	9
#define SECTOR_SIZE	(1ULL << SECTOR_SHIFT)

#define RBD_DRV_NAME "rbd"
#define RBD_DRV_NAME_LONG "rbd (rados block device)"

#define RBD_MINORS_PER_MAJOR	256		/* max minors per blkdev */

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN \
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
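/*
 * For example, with NAME_MAX at its usual value of 255 and the
 * "snap_" prefix occupying 5 bytes, a snapshot name can be at most
 * 250 bytes long.
 */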
#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)
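/*
 * Mapping an image that requires a feature bit outside this set must
 * be refused; illustratively (a sketch, not the full probe logic):
 *
 *	if (features & ~RBD_FEATURES_SUPPORTED)
 *		return -ENXIO;
 */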
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
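/*
 * The width bound holds because a binary integer needs about 0.302
 * decimal digits per bit, while (5 * sizeof (int)) / 2 allows 0.3125
 * digits per bit; the "+ 1" leaves room for a minus sign.  For a
 * 32-bit int this gives (5 * 4) / 2 + 1 = 11 characters, exactly
 * enough for "-2147483648".
 */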
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These four fields never change for a given rbd image */

	/* The remaining fields need to be updated occasionally */
	struct ceph_snap_context *snapc;
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the ids in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the ids associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable, so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};
/*
 * An instance of the client.  Multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};
struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};
struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count - 1).
	 */
	struct rbd_obj_request	*obj_request;	/* STAT op */
	struct rbd_img_request	*img_request;
	u64			img_offset;
	/* links for img_request->obj_requests list */
	struct list_head	links;
	u32			which;	/* position in image request list */

	enum obj_request_type	type;
	struct bio		*bio_list;
	struct page		**pages;
	u32			page_count;
	struct page		**copyup_pages;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};
enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
};
struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;
	u64			snap_id;	/* for reads */
	struct ceph_snap_context *snapc;	/* for writes */
	struct request		*rq;		/* block request */
	struct rbd_obj_request	*obj_request;	/* obj req initiator */
	struct page		**copyup_pages;
	spinlock_t		completion_lock; /* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred; /* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};
#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
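/*
 * Typical traversal, as used when summing per-object transfer counts
 * in rbd_img_request_complete() below:
 *
 *	struct rbd_obj_request *obj_request;
 *	u64 xferred = 0;
 *
 *	for_each_obj_request(img_request, obj_request)
 *		xferred += obj_request->xferred;
 */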
	struct list_head	node;

struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;

	struct ceph_file_layout	layout;

	struct ceph_osd_event	*watch_event;
	struct rbd_obj_request	*watch_request;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	struct rbd_device	*parent;

	/* protects updating the header */
	struct rw_semaphore	header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* list of snapshots */
	struct list_head	snaps;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};
/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};
static DEFINE_MUTEX(ctl_mutex);	  /* Serialize open/close/setup/teardown */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);	/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);
static int rbd_img_request_submit(struct rbd_img_request *img_request);

static int rbd_dev_snaps_update(struct rbd_device *rbd_dev);

static void rbd_dev_device_release(struct device *dev);
static void rbd_snap_destroy(struct rbd_snap *snap);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev);
static struct bus_attribute rbd_bus_attrs[] = {
	__ATTR(add, S_IWUSR, NULL, rbd_add),
	__ATTR(remove, S_IWUSR, NULL, rbd_remove),
	__ATTR_NULL	/* terminates the attribute list */
};

static struct bus_type rbd_bus_type = {
	.bus_attrs	= rbd_bus_attrs,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.release	= rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}
#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	(void) get_device(&rbd_dev->dev);
	set_device_ro(bdev, rbd_dev->mapping.read_only);
	mutex_unlock(&ctl_mutex);

	return 0;
}
static int rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	put_device(&rbd_dev->dev);
	mutex_unlock(&ctl_mutex);

	return 0;
}
static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
};
/*
 * Initialize an rbd client instance.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_mutex;
	ceph_opts = NULL;	/* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_err;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	mutex_unlock(&ctl_mutex);
	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;

out_err:
	ceph_destroy_client(rbdc->client);
out_mutex:
	mutex_unlock(&ctl_mutex);
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}
static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}
enum {
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	/* Boolean args above */
	Opt_last_bool,
};

static match_table_t rbd_opts_tokens = {
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	/* Boolean args above */
	{-1, NULL}
};

#define RBD_READ_ONLY_DEFAULT	false
static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < 0)
		return -EINVAL;

	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) "
			       "at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token,
		     argstr[0].from);
	} else if (token > Opt_last_string && token < Opt_last_bool) {
		dout("got Boolean token %d\n", token);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	}

	return 0;
}
/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);

	return rbdc;
}
/*
 * Destroy ceph client.  Takes rbd_client_list_lock itself, so the
 * caller must not hold it.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}
static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}
/*
 * Create a new header structure, translate header format from the on-disk
 * header.
 */
static int rbd_header_from_disk(struct rbd_image_header *header,
				struct rbd_image_header_ondisk *ondisk)
{
	u32 snap_count;
	size_t len;
	size_t size;
	u32 i;

	memset(header, 0, sizeof (*header));

	snap_count = le32_to_cpu(ondisk->snap_count);

	len = strnlen(ondisk->object_prefix, sizeof (ondisk->object_prefix));
	header->object_prefix = kmalloc(len + 1, GFP_KERNEL);
	if (!header->object_prefix)
		return -ENOMEM;
	memcpy(header->object_prefix, ondisk->object_prefix, len);
	header->object_prefix[len] = '\0';

	if (snap_count) {
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* Save a copy of the snapshot names */

		if (snap_names_len > (u64) SIZE_MAX)
			goto out_err;
		header->snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!header->snap_names)
			goto out_err;
		/*
		 * Note that rbd_dev_v1_header_read() guarantees
		 * the ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(header->snap_names, &ondisk->snaps[snap_count],
			snap_names_len);

		/* Record each snapshot's size */

		size = snap_count * sizeof (*header->snap_sizes);
		header->snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!header->snap_sizes)
			goto out_err;
		for (i = 0; i < snap_count; i++)
			header->snap_sizes[i] =
				le64_to_cpu(ondisk->snaps[i].image_size);
	} else {
		header->snap_names = NULL;
		header->snap_sizes = NULL;
	}

	header->features = 0;	/* No features support in v1 images */
	header->obj_order = ondisk->options.order;
	header->crypt_type = ondisk->options.crypt_type;
	header->comp_type = ondisk->options.comp_type;

	/* Allocate and fill in the snapshot context */

	header->image_size = le64_to_cpu(ondisk->image_size);

	header->snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!header->snapc)
		goto out_err;
	header->snapc->seq = le64_to_cpu(ondisk->snap_seq);
	for (i = 0; i < snap_count; i++)
		header->snapc->snaps[i] = le64_to_cpu(ondisk->snaps[i].id);

	return 0;

out_err:
	kfree(header->snap_sizes);
	header->snap_sizes = NULL;
	kfree(header->snap_names);
	header->snap_names = NULL;
	kfree(header->object_prefix);
	header->object_prefix = NULL;

	return -ENOMEM;
}
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u32 which;

	for (which = 0; which < snapc->num_snaps; which++)
		if (snapc->snaps[which] == snap_id)
			return which;

	return BAD_SNAP_INDEX;
}
static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct rbd_snap *snap;

	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	list_for_each_entry(snap, &rbd_dev->snaps, node)
		if (snap_id == snap->id)
			return snap->name;

	return NULL;
}
static struct rbd_snap *snap_by_name(struct rbd_device *rbd_dev,
					const char *snap_name)
{
	struct rbd_snap *snap;

	list_for_each_entry(snap, &rbd_dev->snaps, node)
		if (!strcmp(snap_name, snap->name))
			return snap;

	return NULL;
}
static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	if (!memcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME,
		    sizeof (RBD_SNAP_HEAD_NAME))) {
		rbd_dev->mapping.size = rbd_dev->header.image_size;
		rbd_dev->mapping.features = rbd_dev->header.features;
	} else {
		struct rbd_snap *snap;

		snap = snap_by_name(rbd_dev, rbd_dev->spec->snap_name);
		if (!snap)
			return -ENOENT;
		rbd_dev->mapping.size = snap->size;
		rbd_dev->mapping.features = snap->features;
		rbd_dev->mapping.read_only = true;
	}

	return 0;
}
static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
	rbd_dev->mapping.read_only = true;
}

static void rbd_dev_clear_mapping(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
	rbd_dev->mapping.read_only = true;
}
static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;

	name = kmalloc(MAX_OBJ_NAME_SIZE + 1, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx",
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		kfree(name);
		name = NULL;
	}

	return name;
}
static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;
	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}
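/*
 * Worked example, assuming obj_order is 22 (4 MiB objects) and an
 * object prefix of "rb.0.1" (both hypothetical): an image I/O at
 * offset 10 MiB for 6 MiB maps to segment 10 MiB >> 22 = 2, named
 * "rb.0.1.000000000002"; the offset within it is 10 MiB % 4 MiB =
 * 2 MiB, and the length is clipped to 4 MiB - 2 MiB = 2 MiB.  The
 * remaining 4 MiB of the request falls entirely within segment 3.
 */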
/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}

static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}
/*
 * zeros a bio chain, starting at a specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec *bv;
	unsigned long flags;
	void *buf;
	int i;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, i) {
			if (pos + bv->bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(bv, &flags);
				memset(buf + remainder, 0,
				       bv->bv_len - remainder);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv->bv_len;
		}

		chain = chain->bi_next;
	}
}
/*
 * Similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

		page_offset = (size_t)(offset & ~PAGE_MASK);
		length = min(PAGE_SIZE - page_offset, (size_t)(end - offset));
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}
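/*
 * For example, with 4 KiB pages, zero_pages(pages, 3000, 5000)
 * clears bytes 3000..4095 of pages[0] (page_offset 3000, length
 * 1096) and then bytes 0..903 of pages[1] (page_offset 0, length
 * 904).
 */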
/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio_vec *bv;
	unsigned int resid;
	unsigned short idx;
	unsigned int voff;
	unsigned short end_idx;
	unsigned short vcnt;
	struct bio *bio;

	/* Handle the easy case for the caller */

	if (!offset && len == bio_src->bi_size)
		return bio_clone(bio_src, gfpmask);

	if (WARN_ON_ONCE(!len))
		return NULL;
	if (WARN_ON_ONCE(len > bio_src->bi_size))
		return NULL;
	if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
		return NULL;

	/* Find first affected segment... */

	resid = offset;
	__bio_for_each_segment(bv, bio_src, idx, 0) {
		if (resid < bv->bv_len)
			break;
		resid -= bv->bv_len;
	}
	voff = resid;

	/* ...and the last affected segment */

	resid += len;
	__bio_for_each_segment(bv, bio_src, end_idx, idx) {
		if (resid <= bv->bv_len)
			break;
		resid -= bv->bv_len;
	}
	vcnt = end_idx - idx + 1;

	/* Build the clone */

	bio = bio_alloc(gfpmask, (unsigned int) vcnt);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio->bi_bdev = bio_src->bi_bdev;
	bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
	bio->bi_rw = bio_src->bi_rw;
	bio->bi_flags |= 1 << BIO_CLONED;

	/*
	 * Copy over our part of the bio_vec, then update the first
	 * and last (or only) entries.
	 */
	memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
			vcnt * sizeof (struct bio_vec));
	bio->bi_io_vec[0].bv_offset += voff;
	if (vcnt > 1) {
		bio->bi_io_vec[0].bv_len -= voff;
		bio->bi_io_vec[vcnt - 1].bv_len = resid;
	} else {
		bio->bi_io_vec[0].bv_len = len;
	}

	bio->bi_vcnt = vcnt;
	bio->bi_size = len;

	return bio;
}
/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}
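/*
 * A sketch of the intended use (cf. rbd_img_request_fill() below),
 * carving one object's worth of data off the front of the chain on
 * each iteration:
 *
 *	clone = bio_chain_clone_range(&bio_list, &bio_offset,
 *					clone_size, GFP_ATOMIC);
 *
 * Afterward bio_list and bio_offset identify the first byte not yet
 * cloned, ready to become the start of the next object's clone.
 */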
/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
		struct rbd_device *rbd_dev;

		rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked img_data\n",
			obj_request);
	}
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
		struct rbd_device *rbd_dev = NULL;

		if (obj_request_img_data_test(obj_request))
			rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked done\n",
			obj_request);
	}
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}
/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the responses from two existence
 * checks are separated by the creation of the target object, so
 * that the first ("doesn't exist") response arrives *after* the
 * second ("does exist").  In that case we ignore the later-arriving
 * ("doesn't exist") response.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
				bool exists)
{
	if (exists)
		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	kref_get(&img_request->kref);
}

static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	kref_put(&img_request->kref, rbd_img_request_destroy);
}
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}
static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}

static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
				struct rbd_obj_request *obj_request)
{
	dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);

	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}
static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
	dout("%s: img %p\n", __func__, img_request);

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; not clear which way is better off hand.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;
		u64 xferred = 0;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;
	}

	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}
/* Caller is responsible for rbd_obj_request_destroy(obj_request) */

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);

	return wait_for_completion_interruptible(&obj_request->completion);
}
/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_WRITE, &img_request->flags);
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_CHILD, &img_request->flags);
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}
static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
	u64 xferred = obj_request->xferred;
	u64 length = obj_request->length;

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
		xferred, length);
	/*
	 * ENOENT means a hole in the image.  We zero-fill the
	 * entire length of the request.  A short read also implies
	 * zero-fill to the end of the request.  Either way we
	 * update the xferred count to indicate the whole request
	 * was satisfied.
	 */
	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
	if (obj_request->result == -ENOENT) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, 0);
		else
			zero_pages(obj_request->pages, 0, length);
		obj_request->result = 0;
		obj_request->xferred = length;
	} else if (xferred < length && !obj_request->result) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, xferred);
		else
			zero_pages(obj_request->pages, xferred, length);
		obj_request->xferred = length;
	}
	obj_request_done_set(obj_request);
}
static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}
static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}

static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_device *rbd_dev = NULL;
	bool layered = false;

	if (obj_request_img_data_test(obj_request)) {
		img_request = obj_request->img_request;
		layered = img_request && img_request_layered_test(img_request);
		rbd_dev = img_request->rbd_dev;
	}

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
	if (layered && obj_request->result == -ENOENT &&
			obj_request->img_offset < rbd_dev->parent_overlap)
		rbd_img_parent_read(obj_request);
	else if (img_request)
		rbd_img_obj_request_read_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}
static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short write.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	obj_request_done_set(obj_request);
}

/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
				struct ceph_msg *msg)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	u16 opcode;

	dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
	rbd_assert(osd_req == obj_request->osd_req);
	if (obj_request_img_data_test(obj_request)) {
		rbd_assert(obj_request->img_request);
		rbd_assert(obj_request->which != BAD_WHICH);
	} else {
		rbd_assert(obj_request->which == BAD_WHICH);
	}

	if (osd_req->r_result < 0)
		obj_request->result = osd_req->r_result;

	BUG_ON(osd_req->r_num_ops > 2);

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to blk_end_request(), which takes an unsigned int.
	 */
	obj_request->xferred = osd_req->r_reply_op_len[0];
	rbd_assert(obj_request->xferred < (u64)UINT_MAX);
	opcode = osd_req->r_ops[0].op;
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request);
		break;
	case CEPH_OSD_OP_WRITE:
		rbd_osd_write_callback(obj_request);
		break;
	case CEPH_OSD_OP_STAT:
		rbd_osd_stat_callback(obj_request);
		break;
	case CEPH_OSD_OP_CALL:
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		rbd_osd_trivial_callback(obj_request);
		break;
	default:
		rbd_warn(NULL, "%s: unsupported op %hu\n",
			obj_request->object_name, (unsigned short) opcode);
		break;
	}

	if (obj_request_done_test(obj_request))
		rbd_obj_request_complete(obj_request);
}
static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	u64 snap_id;

	rbd_assert(osd_req != NULL);

	snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			NULL, snap_id, NULL);
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	struct ceph_snap_context *snapc;
	struct timespec mtime = CURRENT_TIME;

	rbd_assert(osd_req != NULL);

	snapc = img_request ? img_request->snapc : NULL;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			snapc, CEPH_NOSNAP, &mtime);
}
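/*
 * Note the asymmetry between the two formatting helpers above: a
 * read is built with a single snapshot id (which snapshot, or
 * CEPH_NOSNAP for the head, to read), while a write carries the
 * whole snapshot context so the OSD can perform whatever
 * copy-on-write bookkeeping the image's snapshots require.
 */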
static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					bool write_request,
					struct rbd_obj_request *obj_request)
{
	struct ceph_snap_context *snapc = NULL;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	if (obj_request_img_data_test(obj_request)) {
		struct rbd_img_request *img_request = obj_request->img_request;

		rbd_assert(write_request ==
				img_request_write_test(img_request));
		if (write_request)
			snapc = img_request->snapc;
	}

	/* Allocate and initialize the request, for the single op */

	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	if (write_request)
		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	else
		osd_req->r_flags = CEPH_OSD_FLAG_READ;

	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_oid_len = strlen(obj_request->object_name);
	rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
	memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

	osd_req->r_file_layout = rbd_dev->layout;	/* struct */

	return osd_req;
}
/*
 * Create a copyup osd request based on the information in the
 * object request supplied.  A copyup request has two osd ops:
 * a copyup method call and a "normal" write request.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_assert(img_request_write_test(img_request));

	/* Allocate and initialize the request, for the two ops */

	snapc = img_request->snapc;
	rbd_dev = img_request->rbd_dev;
	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, 2, false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_oid_len = strlen(obj_request->object_name);
	rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
	memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

	osd_req->r_file_layout = rbd_dev->layout;	/* struct */

	return osd_req;
}
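/*
 * The two ops of a copyup request are filled in later, in
 * rbd_img_obj_parent_read_full_callback() below:
 *
 *	op 0: CEPH_OSD_OP_CALL, the "rbd" class "copyup" method,
 *	      with the data read from the parent as its payload
 *	op 1: CEPH_OSD_OP_WRITE, the originally requested write
 */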
static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}

/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
						u64 offset, u64 length,
						enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;
	size_t size;
	char *name;

	rbd_assert(obj_request_type_valid(type));

	size = strlen(object_name) + 1;
	obj_request = kzalloc(sizeof (*obj_request) + size, GFP_KERNEL);
	if (!obj_request)
		return NULL;

	name = (char *)(obj_request + 1);
	obj_request->object_name = memcpy(name, object_name, size);
	obj_request->offset = offset;
	obj_request->length = length;
	obj_request->flags = 0;
	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
		offset, length, (int)type, obj_request);

	return obj_request;
}
static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
	case OBJ_REQUEST_PAGES:
		if (obj_request->pages)
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
	}

	kfree(obj_request);
}
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					u64 offset, u64 length,
					bool write_request,
					bool child_request)
{
	struct rbd_img_request *img_request;

	img_request = kmalloc(sizeof (*img_request), GFP_ATOMIC);
	if (!img_request)
		return NULL;

	if (write_request) {
		down_read(&rbd_dev->header_rwsem);
		ceph_get_snap_context(rbd_dev->header.snapc);
		up_read(&rbd_dev->header_rwsem);
	}

	img_request->rq = NULL;
	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
	img_request->flags = 0;
	if (write_request) {
		img_request_write_set(img_request);
		img_request->snapc = rbd_dev->header.snapc;
	} else {
		img_request->snap_id = rbd_dev->spec->snap_id;
	}
	if (child_request)
		img_request_child_set(img_request);
	if (rbd_dev->parent_spec)
		img_request_layered_set(img_request);
	spin_lock_init(&img_request->completion_lock);
	img_request->next_completion = 0;
	img_request->callback = NULL;
	img_request->result = 0;
	img_request->obj_request_count = 0;
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

	rbd_img_request_get(img_request);	/* Avoid a warning */
	rbd_img_request_put(img_request);	/* TEMPORARY */

	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
		write_request ? "write" : "read", offset, length,
		img_request);

	return img_request;
}
static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request_write_test(img_request))
		ceph_put_snap_context(img_request->snapc);

	if (img_request_child_test(img_request))
		rbd_obj_request_put(img_request->obj_request);

	kfree(img_request);
}
static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	unsigned int xferred;
	int result;
	bool more;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
	xferred = (unsigned int)obj_request->xferred;
	result = obj_request->result;
	if (result) {
		struct rbd_device *rbd_dev = img_request->rbd_dev;

		rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n",
			img_request_write_test(img_request) ? "write" : "read",
			obj_request->length, obj_request->img_offset,
			obj_request->offset);
		rbd_warn(rbd_dev, "  result %d xferred %x\n",
			result, xferred);
		if (!img_request->result)
			img_request->result = result;
	}

	/* Image object requests don't own their page array */

	if (obj_request->type == OBJ_REQUEST_PAGES) {
		obj_request->pages = NULL;
		obj_request->page_count = 0;
	}

	if (img_request_child_test(img_request)) {
		rbd_assert(img_request->obj_request != NULL);
		more = obj_request->which < img_request->obj_request_count - 1;
	} else {
		rbd_assert(img_request->rq != NULL);
		more = blk_end_request(img_request->rq, result, xferred);
	}

	return more;
}
static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	u32 which = obj_request->which;
	bool more = true;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	rbd_assert(img_request != NULL);
	rbd_assert(img_request->obj_request_count > 0);
	rbd_assert(which != BAD_WHICH);
	rbd_assert(which < img_request->obj_request_count);
	rbd_assert(which >= img_request->next_completion);

	spin_lock_irq(&img_request->completion_lock);
	if (which != img_request->next_completion)
		goto out;

	for_each_obj_request_from(img_request, obj_request) {
		rbd_assert(more);
		rbd_assert(which < img_request->obj_request_count);

		if (!obj_request_done_test(obj_request))
			break;
		more = rbd_img_obj_end_request(obj_request);
		which++;
	}

	rbd_assert(more ^ (which == img_request->obj_request_count));
	img_request->next_completion = which;
out:
	spin_unlock_irq(&img_request->completion_lock);

	if (!more)
		rbd_img_request_complete(img_request);
}
/*
 * Split up an image request into one or more object requests, each
 * to a different object.  The "type" parameter indicates whether
 * "data_desc" is the pointer to the head of a list of bio
 * structures, or the base of a page array.  In either case this
 * function assumes data_desc describes memory sufficient to hold
 * all data described by the image request.
 */
static int rbd_img_request_fill(struct rbd_img_request *img_request,
					enum obj_request_type type,
					void *data_desc)
{
	struct rbd_device *rbd_dev = img_request->rbd_dev;
	struct rbd_obj_request *obj_request = NULL;
	struct rbd_obj_request *next_obj_request;
	bool write_request = img_request_write_test(img_request);
	struct bio *bio_list;
	unsigned int bio_offset = 0;
	struct page **pages;
	u64 img_offset;
	u64 resid;
	u16 opcode;

	dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
		(int)type, data_desc);

	opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ;
	img_offset = img_request->offset;
	resid = img_request->length;
	rbd_assert(resid > 0);

	if (type == OBJ_REQUEST_BIO) {
		bio_list = data_desc;
		rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT);
	} else {
		rbd_assert(type == OBJ_REQUEST_PAGES);
		pages = data_desc;
	}

	while (resid) {
		struct ceph_osd_request *osd_req;
		const char *object_name;
		u64 offset;
		u64 length;

		object_name = rbd_segment_name(rbd_dev, img_offset);
		if (!object_name)
			goto out_unwind;
		offset = rbd_segment_offset(rbd_dev, img_offset);
		length = rbd_segment_length(rbd_dev, img_offset, resid);
		obj_request = rbd_obj_request_create(object_name,
						offset, length, type);
		kfree(object_name);	/* object request has its own copy */
		if (!obj_request)
			goto out_unwind;

		if (type == OBJ_REQUEST_BIO) {
			unsigned int clone_size;

			rbd_assert(length <= (u64)UINT_MAX);
			clone_size = (unsigned int)length;
			obj_request->bio_list =
					bio_chain_clone_range(&bio_list,
								&bio_offset,
								clone_size,
								GFP_ATOMIC);
			if (!obj_request->bio_list)
				goto out_partial;
		} else {
			unsigned int page_count;

			obj_request->pages = pages;
			page_count = (u32)calc_pages_for(offset, length);
			obj_request->page_count = page_count;
			if ((offset + length) & ~PAGE_MASK)
				page_count--;	/* more on last page */
			pages += page_count;
		}

		osd_req = rbd_osd_req_create(rbd_dev, write_request,
						obj_request);
		if (!osd_req)
			goto out_partial;
		obj_request->osd_req = osd_req;
		obj_request->callback = rbd_img_obj_callback;

		osd_req_op_extent_init(osd_req, 0, opcode, offset, length,
						0, 0);
		if (type == OBJ_REQUEST_BIO)
			osd_req_op_extent_osd_data_bio(osd_req, 0,
					obj_request->bio_list, length);
		else
			osd_req_op_extent_osd_data_pages(osd_req, 0,
					obj_request->pages, length,
					offset & ~PAGE_MASK, false, false);

		if (write_request)
			rbd_osd_req_format_write(obj_request);
		else
			rbd_osd_req_format_read(obj_request);

		obj_request->img_offset = img_offset;
		rbd_img_obj_request_add(img_request, obj_request);

		img_offset += length;
		resid -= length;
	}

	return 0;

out_partial:
	rbd_obj_request_put(obj_request);
out_unwind:
	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_obj_request_put(obj_request);

	return -ENOMEM;
}
static void
rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct rbd_device *rbd_dev;
	u64 length;
	u32 page_count;

	rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);

	rbd_dev = img_request->rbd_dev;
	rbd_assert(rbd_dev);
	length = (u64)1 << rbd_dev->header.obj_order;
	page_count = (u32)calc_pages_for(0, length);

	rbd_assert(obj_request->copyup_pages);
	ceph_release_page_vector(obj_request->copyup_pages, page_count);
	obj_request->copyup_pages = NULL;

	/*
	 * We want the transfer count to reflect the size of the
	 * original write request.  There is no such thing as a
	 * successful short write, so if the request was successful
	 * we can just set it to the originally-requested length.
	 */
	if (!obj_request->result)
		obj_request->xferred = obj_request->length;

	/* Finish up with the normal image object callback */

	rbd_img_obj_callback(obj_request);
}
static void
rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *orig_request;
	struct ceph_osd_request *osd_req;
	struct ceph_osd_client *osdc;
	struct rbd_device *rbd_dev;
	struct page **pages;
	int result;
	u64 obj_size;
	u64 xferred;

	rbd_assert(img_request_child_test(img_request));

	/* First get what we need from the image request */

	pages = img_request->copyup_pages;
	rbd_assert(pages != NULL);
	img_request->copyup_pages = NULL;

	orig_request = img_request->obj_request;
	rbd_assert(orig_request != NULL);
	rbd_assert(orig_request->type == OBJ_REQUEST_BIO);
	result = img_request->result;
	obj_size = img_request->length;
	xferred = img_request->xferred;

	rbd_dev = img_request->rbd_dev;
	rbd_assert(rbd_dev);
	rbd_assert(obj_size == (u64)1 << rbd_dev->header.obj_order);

	rbd_img_request_put(img_request);

	if (result)
		goto out_err;

	/* Allocate the new copyup osd request for the original request */

	result = -ENOMEM;
	rbd_assert(!orig_request->osd_req);
	osd_req = rbd_osd_req_create_copyup(orig_request);
	if (!osd_req)
		goto out_err;
	orig_request->osd_req = osd_req;
	orig_request->copyup_pages = pages;

	/* Initialize the copyup op */

	osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
	osd_req_op_cls_request_data_pages(osd_req, 0, pages, obj_size, 0,
						false, false);

	/* Then the original write request op */

	osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE,
					orig_request->offset,
					orig_request->length, 0, 0);
	osd_req_op_extent_osd_data_bio(osd_req, 1, orig_request->bio_list,
					orig_request->length);

	rbd_osd_req_format_write(orig_request);

	/* All set, send it off. */

	orig_request->callback = rbd_img_obj_copyup_callback;
	osdc = &rbd_dev->rbd_client->client->osdc;
	result = rbd_obj_request_submit(osdc, orig_request);
	if (!result)
		return;

out_err:
	/* Record the error code and complete the request */

	orig_request->result = result;
	orig_request->xferred = 0;
	obj_request_done_set(orig_request);
	rbd_obj_request_complete(orig_request);
}
/*
 * Read from the parent image the range of data that covers the
 * entire target of the given object request.  This is used for
 * satisfying a layered image write request when the target of an
 * object request from the image request does not exist.
 *
 * A page array big enough to hold the returned data is allocated
 * and supplied to rbd_img_request_fill() as the "data descriptor."
 * When the read completes, this page array will be transferred to
 * the original object request for the copyup operation.
 *
 * If an error occurs, record it as the result of the original
 * object request and mark it done so it gets completed.
 */
static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_img_request *parent_request = NULL;
	struct rbd_device *rbd_dev;
	u64 img_offset;
	u64 length;
	struct page **pages = NULL;
	u32 page_count;
	int result;

	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->type == OBJ_REQUEST_BIO);

	img_request = obj_request->img_request;
	rbd_assert(img_request != NULL);
	rbd_dev = img_request->rbd_dev;
	rbd_assert(rbd_dev->parent != NULL);

	/*
	 * First things first.  The original osd request is of no
	 * use to us any more, we'll need a new one that can hold
	 * the two ops in a copyup request.  We'll get that later,
	 * but for now we can release the old one.
	 */
	rbd_osd_req_destroy(obj_request->osd_req);
	obj_request->osd_req = NULL;

	/*
	 * Determine the byte range covered by the object in the
	 * child image to which the original request was to be sent.
	 */
	img_offset = obj_request->img_offset - obj_request->offset;
	length = (u64)1 << rbd_dev->header.obj_order;

	/*
	 * There is no defined parent data beyond the parent
	 * overlap, so limit what we read at that boundary if
	 * necessary.
	 */
	if (img_offset + length > rbd_dev->parent_overlap) {
		rbd_assert(img_offset < rbd_dev->parent_overlap);
		length = rbd_dev->parent_overlap - img_offset;
	}

	/*
	 * Allocate a page array big enough to receive the data read
	 * from the parent.
	 */
	page_count = (u32)calc_pages_for(0, length);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages)) {
		result = PTR_ERR(pages);
		pages = NULL;
		goto out_err;
	}

	result = -ENOMEM;
	parent_request = rbd_img_request_create(rbd_dev->parent,
						img_offset, length,
						false, true);
	if (!parent_request)
		goto out_err;
	rbd_obj_request_get(obj_request);
	parent_request->obj_request = obj_request;

	result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
	if (result)
		goto out_err;
	parent_request->copyup_pages = pages;

	parent_request->callback = rbd_img_obj_parent_read_full_callback;
	result = rbd_img_request_submit(parent_request);
	if (!result)
		return 0;

	parent_request->copyup_pages = NULL;
	parent_request->obj_request = NULL;
	rbd_obj_request_put(obj_request);
out_err:
	if (pages)
		ceph_release_page_vector(pages, page_count);
	if (parent_request)
		rbd_img_request_put(parent_request);
	obj_request->result = result;
	obj_request->xferred = 0;
	obj_request_done_set(obj_request);

	return result;
}
2234 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2236 struct rbd_obj_request *orig_request;
2239 rbd_assert(!obj_request_img_data_test(obj_request));
2242 * All we need from the object request is the original
2243 * request and the result of the STAT op. Grab those, then
2244 * we're done with the request.
2246 orig_request = obj_request->obj_request;
2247 obj_request->obj_request = NULL;
2248 rbd_assert(orig_request);
2249 rbd_assert(orig_request->img_request);
2251 result = obj_request->result;
2252 obj_request->result = 0;
2254 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2255 obj_request, orig_request, result,
2256 obj_request->xferred, obj_request->length);
2257 rbd_obj_request_put(obj_request);
2259 rbd_assert(orig_request);
2260 rbd_assert(orig_request->img_request);
2263 * Our only purpose here is to determine whether the object
2264 * exists, and we don't want to treat the non-existence as
2265 * an error. If something else comes back, transfer the
2266 * error to the original request and complete it now.
	if (!result) {
		obj_request_existence_set(orig_request, true);
2270 } else if (result == -ENOENT) {
2271 obj_request_existence_set(orig_request, false);
2272 } else if (result) {
2273 orig_request->result = result;
2278 * Resubmit the original request now that we have recorded
2279 * whether the target object exists.
2281 orig_request->result = rbd_img_obj_request_submit(orig_request);
2283 if (orig_request->result)
2284 rbd_obj_request_complete(orig_request);
2285 rbd_obj_request_put(orig_request);
2288 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2290 struct rbd_obj_request *stat_request;
2291 struct rbd_device *rbd_dev;
2292 struct ceph_osd_client *osdc;
2293 struct page **pages = NULL;
	 * The response data for a STAT call consists of:
	 *     le64 length;
	 *     struct {
	 *         le32 tv_sec;
	 *         le32 tv_nsec;
	 *     } mtime;
2306 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2307 page_count = (u32)calc_pages_for(0, size);
2308 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);
2313 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2318 rbd_obj_request_get(obj_request);
2319 stat_request->obj_request = obj_request;
2320 stat_request->pages = pages;
2321 stat_request->page_count = page_count;
2323 rbd_assert(obj_request->img_request);
2324 rbd_dev = obj_request->img_request->rbd_dev;
2325 stat_request->osd_req = rbd_osd_req_create(rbd_dev, false,
2327 if (!stat_request->osd_req)
2329 stat_request->callback = rbd_img_obj_exists_callback;
2331 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
2332 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2334 rbd_osd_req_format_read(stat_request);
2336 osdc = &rbd_dev->rbd_client->client->osdc;
2337 ret = rbd_obj_request_submit(osdc, stat_request);
2340 rbd_obj_request_put(obj_request);
2345 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2347 struct rbd_img_request *img_request;
2348 struct rbd_device *rbd_dev;
2351 rbd_assert(obj_request_img_data_test(obj_request));
2353 img_request = obj_request->img_request;
2354 rbd_assert(img_request);
2355 rbd_dev = img_request->rbd_dev;
2358 * Only writes to layered images need special handling.
2359 * Reads and non-layered writes are simple object requests.
2360 * Layered writes that start beyond the end of the overlap
2361 * with the parent have no parent data, so they too are
2362 * simple object requests. Finally, if the target object is
2363 * known to already exist, its parent data has already been
2364 * copied, so a write to the object can also be handled as a
2365 * simple object request.
2367 if (!img_request_write_test(img_request) ||
2368 !img_request_layered_test(img_request) ||
2369 rbd_dev->parent_overlap <= obj_request->img_offset ||
2370 ((known = obj_request_known_test(obj_request)) &&
2371 obj_request_exists_test(obj_request))) {
2373 struct rbd_device *rbd_dev;
2374 struct ceph_osd_client *osdc;
2376 rbd_dev = obj_request->img_request->rbd_dev;
2377 osdc = &rbd_dev->rbd_client->client->osdc;
2379 return rbd_obj_request_submit(osdc, obj_request);
2383 * It's a layered write. The target object might exist but
2384 * we may not know that yet. If we know it doesn't exist,
2385 * start by reading the data for the full target object from
2386 * the parent so we can use it for a copyup to the target.
	if (known)
		return rbd_img_obj_parent_read_full(obj_request);
2391 /* We don't know whether the target exists. Go find out. */
2393 return rbd_img_obj_exists_submit(obj_request);
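
/*
 * Illustrative sketch, not part of the driver: the dispatch test
 * above, restated as a predicate.  The helper and its parameters
 * are assumptions of this example; they mirror the image request
 * flags and object request state tested in
 * rbd_img_obj_request_submit().
 */
static inline bool rbd_obj_needs_copyup_sketch(bool write_request,
					       bool layered,
					       u64 img_offset, u64 overlap,
					       bool known, bool exists)
{
	/* Everything else is a simple object request */
	return write_request && layered && img_offset < overlap &&
	       !(known && exists);
}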
2396 static int rbd_img_request_submit(struct rbd_img_request *img_request)
2398 struct rbd_obj_request *obj_request;
2399 struct rbd_obj_request *next_obj_request;
2401 dout("%s: img %p\n", __func__, img_request);
2402 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
2405 ret = rbd_img_obj_request_submit(obj_request);
2413 static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2415 struct rbd_obj_request *obj_request;
2416 struct rbd_device *rbd_dev;
2419 rbd_assert(img_request_child_test(img_request));
2421 obj_request = img_request->obj_request;
2422 rbd_assert(obj_request);
2423 rbd_assert(obj_request->img_request);
2425 obj_request->result = img_request->result;
	if (obj_request->result)
		goto out;
2430 * We need to zero anything beyond the parent overlap
2431 * boundary. Since rbd_img_obj_request_read_callback()
2432 * will zero anything beyond the end of a short read, an
2433 * easy way to do this is to pretend the data from the
2434 * parent came up short--ending at the overlap boundary.
2436 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
2437 obj_end = obj_request->img_offset + obj_request->length;
2438 rbd_dev = obj_request->img_request->rbd_dev;
	if (obj_end > rbd_dev->parent_overlap) {
		u64 xferred = 0;

		if (obj_request->img_offset < rbd_dev->parent_overlap)
2443 xferred = rbd_dev->parent_overlap -
2444 obj_request->img_offset;
		obj_request->xferred = min(img_request->xferred, xferred);
	} else {
		obj_request->xferred = img_request->xferred;
	}
out:
	rbd_img_obj_request_read_callback(obj_request);
	rbd_obj_request_complete(obj_request);
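
/*
 * Illustrative sketch, not part of the driver: the "short read"
 * arithmetic above.  The helper is an assumption of this example;
 * it returns the xferred count reported to the child object
 * request so that the read callback zeroes everything beyond the
 * parent overlap.
 */
static inline u64 rbd_parent_xferred_sketch(u64 img_offset, u64 length,
					    u64 parent_xferred, u64 overlap)
{
	if (img_offset + length <= overlap)
		return parent_xferred;		/* fully within the overlap */
	if (img_offset >= overlap)
		return 0;			/* entirely beyond it */
	/* Straddles the boundary: pretend the read ended at the overlap */
	return min(parent_xferred, overlap - img_offset);
}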
2455 static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
2457 struct rbd_device *rbd_dev;
2458 struct rbd_img_request *img_request;
2461 rbd_assert(obj_request_img_data_test(obj_request));
2462 rbd_assert(obj_request->img_request != NULL);
2463 rbd_assert(obj_request->result == (s32) -ENOENT);
2464 rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2466 rbd_dev = obj_request->img_request->rbd_dev;
2467 rbd_assert(rbd_dev->parent != NULL);
2468 /* rbd_read_finish(obj_request, obj_request->length); */
2469 img_request = rbd_img_request_create(rbd_dev->parent,
2470 obj_request->img_offset,
2471 obj_request->length,
2477 rbd_obj_request_get(obj_request);
2478 img_request->obj_request = obj_request;
2480 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2481 obj_request->bio_list);
2485 img_request->callback = rbd_img_parent_read_callback;
2486 result = rbd_img_request_submit(img_request);
2493 rbd_img_request_put(img_request);
2494 obj_request->result = result;
2495 obj_request->xferred = 0;
2496 obj_request_done_set(obj_request);
2499 static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 notify_id)
2501 struct rbd_obj_request *obj_request;
2502 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2505 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2506 OBJ_REQUEST_NODATA);
2511 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2512 if (!obj_request->osd_req)
2514 obj_request->callback = rbd_obj_request_put;
2516 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
2518 rbd_osd_req_format_read(obj_request);
2520 ret = rbd_obj_request_submit(osdc, obj_request);
2523 rbd_obj_request_put(obj_request);
2528 static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2530 struct rbd_device *rbd_dev = (struct rbd_device *)data;
2535 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
2536 rbd_dev->header_name, (unsigned long long)notify_id,
2537 (unsigned int)opcode);
2538 (void)rbd_dev_refresh(rbd_dev);
2540 rbd_obj_notify_ack(rbd_dev, notify_id);
2544 * Request sync osd watch/unwatch. The value of "start" determines
2545 * whether a watch request is being initiated or torn down.
2547 static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start)
2549 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2550 struct rbd_obj_request *obj_request;
2553 rbd_assert(start ^ !!rbd_dev->watch_event);
2554 rbd_assert(start ^ !!rbd_dev->watch_request);
2557 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
2558 &rbd_dev->watch_event);
2561 rbd_assert(rbd_dev->watch_event != NULL);
2565 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2566 OBJ_REQUEST_NODATA);
2570 obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, obj_request);
2571 if (!obj_request->osd_req)
2575 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
2577 ceph_osdc_unregister_linger_request(osdc,
2578 rbd_dev->watch_request->osd_req);
2580 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
2581 rbd_dev->watch_event->cookie, 0, start);
2582 rbd_osd_req_format_write(obj_request);
2584 ret = rbd_obj_request_submit(osdc, obj_request);
2587 ret = rbd_obj_request_wait(obj_request);
2590 ret = obj_request->result;
	 * A watch request is set to linger, so the underlying osd
	 * request won't go away until we unregister it.  We retain
	 * a pointer to the object request during that time (in
	 * rbd_dev->watch_request), so we'll keep a reference to
	 * it.  We'll drop that reference (below) after we've
	 * unregistered it.
2603 rbd_dev->watch_request = obj_request;
2608 /* We have successfully torn down the watch request */
2610 rbd_obj_request_put(rbd_dev->watch_request);
2611 rbd_dev->watch_request = NULL;
2613 /* Cancel the event if we're tearing down, or on error */
2614 ceph_osdc_cancel_event(rbd_dev->watch_event);
2615 rbd_dev->watch_event = NULL;
2617 rbd_obj_request_put(obj_request);
 * Synchronous osd object method call.  Returns the number of bytes
 * returned in the inbound buffer, or a negative error code.
2626 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
2627 const char *object_name,
2628 const char *class_name,
2629 const char *method_name,
2630 const void *outbound,
2631 size_t outbound_size,
2633 size_t inbound_size)
2635 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2636 struct rbd_obj_request *obj_request;
2637 struct page **pages;
	 * Method calls are ultimately read operations.  The result
	 * should be placed into the inbound buffer provided.  They
	 * also supply outbound data--parameters for the object
	 * method.  Currently if this is present it will be a
	 * snapshot id.
2648 page_count = (u32)calc_pages_for(0, inbound_size);
2649 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);
2654 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
2659 obj_request->pages = pages;
2660 obj_request->page_count = page_count;
2662 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2663 if (!obj_request->osd_req)
2666 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
2667 class_name, method_name);
2668 if (outbound_size) {
2669 struct ceph_pagelist *pagelist;
2671 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
2675 ceph_pagelist_init(pagelist);
2676 ceph_pagelist_append(pagelist, outbound, outbound_size);
2677 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
2680 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
2681 obj_request->pages, inbound_size,
2683 rbd_osd_req_format_read(obj_request);
2685 ret = rbd_obj_request_submit(osdc, obj_request);
2688 ret = rbd_obj_request_wait(obj_request);
2692 ret = obj_request->result;
2696 rbd_assert(obj_request->xferred < (u64)INT_MAX);
2697 ret = (int)obj_request->xferred;
2698 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
2701 rbd_obj_request_put(obj_request);
2703 ceph_release_page_vector(pages, page_count);
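
/*
 * Illustrative sketch, not part of the driver: a typical
 * rbd_obj_method_sync() caller, shaped like the "get_object_prefix"
 * call made elsewhere in this file.  The helper name and the reply
 * buffer size are assumptions of this example.
 */
static int rbd_method_call_sketch(struct rbd_device *rbd_dev)
{
	char reply_buf[64];
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				  "rbd", "get_object_prefix",
				  NULL, 0,		/* no outbound data */
				  reply_buf, sizeof (reply_buf));

	/* On success, ret is the number of bytes placed in reply_buf */
	return ret < 0 ? ret : 0;
}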
2708 static void rbd_request_fn(struct request_queue *q)
2709 __releases(q->queue_lock) __acquires(q->queue_lock)
2711 struct rbd_device *rbd_dev = q->queuedata;
2712 bool read_only = rbd_dev->mapping.read_only;
2716 while ((rq = blk_fetch_request(q))) {
2717 bool write_request = rq_data_dir(rq) == WRITE;
2718 struct rbd_img_request *img_request;
2722 /* Ignore any non-FS requests that filter through. */
2724 if (rq->cmd_type != REQ_TYPE_FS) {
2725 dout("%s: non-fs request type %d\n", __func__,
2726 (int) rq->cmd_type);
2727 __blk_end_request_all(rq, 0);
2731 /* Ignore/skip any zero-length requests */
2733 offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
2734 length = (u64) blk_rq_bytes(rq);
2737 dout("%s: zero-length request\n", __func__);
2738 __blk_end_request_all(rq, 0);
2742 spin_unlock_irq(q->queue_lock);
2744 /* Disallow writes to a read-only device */
2746 if (write_request) {
2750 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
		 * Quit early if the mapped snapshot no longer
		 * exists.  It's still possible the snapshot will
		 * have disappeared by the time our request arrives
		 * at the osd, but there's no sense in sending it if
		 * we know.
2760 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
2761 dout("request for non-existent snapshot");
2762 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
2768 if (offset && length > U64_MAX - offset + 1) {
2769 rbd_warn(rbd_dev, "bad request range (%llu~%llu)\n",
2771 goto end_request; /* Shouldn't happen */
2775 img_request = rbd_img_request_create(rbd_dev, offset, length,
2776 write_request, false);
2780 img_request->rq = rq;
2782 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2785 result = rbd_img_request_submit(img_request);
2787 rbd_img_request_put(img_request);
2789 spin_lock_irq(q->queue_lock);
2791 rbd_warn(rbd_dev, "%s %llx at %llx result %d\n",
2792 write_request ? "write" : "read",
2793 length, offset, result);
2795 __blk_end_request_all(rq, result);
 * A queue callback.  Makes sure that we don't create a bio that
 * spans multiple osd objects.  One exception would be with
 * single-page bios, which we handle later at bio_chain_clone_range().
2805 static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
2806 struct bio_vec *bvec)
2808 struct rbd_device *rbd_dev = q->queuedata;
2809 sector_t sector_offset;
2810 sector_t sectors_per_obj;
2811 sector_t obj_sector_offset;
	 * Convert the bio's partition-relative start sector to an
	 * offset relative to the enclosing device, then find how far
	 * into its rbd object that offset falls.
2819 sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
2820 sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
2821 obj_sector_offset = sector_offset & (sectors_per_obj - 1);
2824 * Compute the number of bytes from that offset to the end
2825 * of the object. Account for what's already used by the bio.
2827 ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
	if (ret > bmd->bi_size)
		ret -= bmd->bi_size;
	else
		ret = 0;
2834 * Don't send back more than was asked for. And if the bio
2835 * was empty, let the whole thing through because: "Note
2836 * that a block device *must* allow a single page to be
2837 * added to an empty bio."
2839 rbd_assert(bvec->bv_len <= PAGE_SIZE);
2840 if (ret > (int) bvec->bv_len || !bmd->bi_size)
2841 ret = (int) bvec->bv_len;
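
/*
 * Illustrative sketch, not part of the driver: the merge_bvec
 * arithmetic above with the bio-vector bookkeeping left out.  The
 * helper is an assumption of this example.  With 4 MB objects
 * (obj_order = 22) there are 8192 sectors per object, so a bio
 * starting at device sector 8190 has 2 sectors (1024 bytes) of
 * room left in its object.
 */
static inline int rbd_merge_room_sketch(sector_t device_sector, u8 obj_order,
					unsigned int bio_bytes_so_far)
{
	sector_t sectors_per_obj = 1 << (obj_order - SECTOR_SHIFT);
	sector_t obj_sector_offset = device_sector & (sectors_per_obj - 1);
	int ret;

	/* Bytes from the bio's start sector to the end of its object */
	ret = (int)(sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
	if (ret > (int)bio_bytes_so_far)
		ret -= bio_bytes_so_far;	/* room not yet consumed */
	else
		ret = 0;

	return ret;
}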
2846 static void rbd_free_disk(struct rbd_device *rbd_dev)
2848 struct gendisk *disk = rbd_dev->disk;
2853 rbd_dev->disk = NULL;
2854 if (disk->flags & GENHD_FL_UP) {
2857 blk_cleanup_queue(disk->queue);
2862 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
2863 const char *object_name,
2864 u64 offset, u64 length, void *buf)
2867 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2868 struct rbd_obj_request *obj_request;
2869 struct page **pages = NULL;
2874 page_count = (u32) calc_pages_for(offset, length);
2875 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
2880 obj_request = rbd_obj_request_create(object_name, offset, length,
2885 obj_request->pages = pages;
2886 obj_request->page_count = page_count;
2888 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2889 if (!obj_request->osd_req)
2892 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
2893 offset, length, 0, 0);
2894 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
2896 obj_request->length,
2897 obj_request->offset & ~PAGE_MASK,
2899 rbd_osd_req_format_read(obj_request);
2901 ret = rbd_obj_request_submit(osdc, obj_request);
2904 ret = rbd_obj_request_wait(obj_request);
2908 ret = obj_request->result;
2912 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
2913 size = (size_t) obj_request->xferred;
2914 ceph_copy_from_page_vector(pages, buf, 0, size);
2915 rbd_assert(size <= (size_t)INT_MAX);
2919 rbd_obj_request_put(obj_request);
2921 ceph_release_page_vector(pages, page_count);
2927 * Read the complete header for the given rbd device.
 * Returns a pointer to a dynamically-allocated buffer containing
 * the complete and validated header.
2934 * Returns a pointer-coded errno if a failure occurs.
2936 static struct rbd_image_header_ondisk *
2937 rbd_dev_v1_header_read(struct rbd_device *rbd_dev)
2939 struct rbd_image_header_ondisk *ondisk = NULL;
2946 * The complete header will include an array of its 64-bit
2947 * snapshot ids, followed by the names of those snapshots as
2948 * a contiguous block of NUL-terminated strings. Note that
2949 * the number of snapshots could change by the time we read
2950 * it in, in which case we re-read it.
2957 size = sizeof (*ondisk);
2958 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
2960 ondisk = kmalloc(size, GFP_KERNEL);
2962 return ERR_PTR(-ENOMEM);
2964 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
2968 if ((size_t)ret < size) {
2970 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
2974 if (!rbd_dev_ondisk_valid(ondisk)) {
2976 rbd_warn(rbd_dev, "invalid header");
2980 names_size = le64_to_cpu(ondisk->snap_names_len);
2981 want_count = snap_count;
2982 snap_count = le32_to_cpu(ondisk->snap_count);
2983 } while (snap_count != want_count);
2990 return ERR_PTR(ret);
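
/*
 * Illustrative sketch, not part of the driver: the buffer size
 * computed per iteration of the read loop above.  The helper is an
 * assumption of this example; names_size is the total length of
 * the NUL-terminated snapshot name block that follows the snapshot
 * id array on disk.
 */
static inline size_t rbd_v1_header_size_sketch(u32 snap_count, u64 names_size)
{
	return sizeof (struct rbd_image_header_ondisk) +
		snap_count * sizeof (struct rbd_image_snap_ondisk) +
		names_size;
}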
 * Reload the on-disk header.
2996 static int rbd_read_header(struct rbd_device *rbd_dev,
2997 struct rbd_image_header *header)
2999 struct rbd_image_header_ondisk *ondisk;
3002 ondisk = rbd_dev_v1_header_read(rbd_dev);
3004 return PTR_ERR(ondisk);
3005 ret = rbd_header_from_disk(header, ondisk);
3011 static void rbd_remove_all_snaps(struct rbd_device *rbd_dev)
3013 struct rbd_snap *snap;
3014 struct rbd_snap *next;
3016 list_for_each_entry_safe(snap, next, &rbd_dev->snaps, node) {
3017 list_del(&snap->node);
3018 rbd_snap_destroy(snap);
3022 static void rbd_update_mapping_size(struct rbd_device *rbd_dev)
3024 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
3027 if (rbd_dev->mapping.size != rbd_dev->header.image_size) {
3030 rbd_dev->mapping.size = rbd_dev->header.image_size;
3031 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3032 dout("setting size to %llu sectors", (unsigned long long)size);
3033 set_capacity(rbd_dev->disk, size);
 * Only read the first part of the on-disk header, without the snapshot info.
3040 static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev)
3043 struct rbd_image_header h;
3045 ret = rbd_read_header(rbd_dev, &h);
3049 down_write(&rbd_dev->header_rwsem);
3051 /* Update image size, and check for resize of mapped image */
3052 rbd_dev->header.image_size = h.image_size;
3053 rbd_update_mapping_size(rbd_dev);
3055 /* rbd_dev->header.object_prefix shouldn't change */
3056 kfree(rbd_dev->header.snap_sizes);
3057 kfree(rbd_dev->header.snap_names);
3058 /* osd requests may still refer to snapc */
3059 ceph_put_snap_context(rbd_dev->header.snapc);
3061 rbd_dev->header.image_size = h.image_size;
3062 rbd_dev->header.snapc = h.snapc;
3063 rbd_dev->header.snap_names = h.snap_names;
3064 rbd_dev->header.snap_sizes = h.snap_sizes;
3065 /* Free the extra copy of the object prefix */
3066 if (strcmp(rbd_dev->header.object_prefix, h.object_prefix))
3067 rbd_warn(rbd_dev, "object prefix changed (ignoring)");
3068 kfree(h.object_prefix);
3070 ret = rbd_dev_snaps_update(rbd_dev);
3072 up_write(&rbd_dev->header_rwsem);
3077 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3082 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
3083 image_size = rbd_dev->header.image_size;
3084 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
3085 if (rbd_dev->image_format == 1)
3086 ret = rbd_dev_v1_refresh(rbd_dev);
3088 ret = rbd_dev_v2_refresh(rbd_dev);
3089 mutex_unlock(&ctl_mutex);
		rbd_warn(rbd_dev, "got notification but failed to "
			   "update snaps: %d\n", ret);
3093 if (image_size != rbd_dev->header.image_size)
3094 revalidate_disk(rbd_dev->disk);
3099 static int rbd_init_disk(struct rbd_device *rbd_dev)
3101 struct gendisk *disk;
3102 struct request_queue *q;
3105 /* create gendisk info */
3106 disk = alloc_disk(RBD_MINORS_PER_MAJOR);
3110 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
3112 disk->major = rbd_dev->major;
3113 disk->first_minor = 0;
3114 disk->fops = &rbd_bd_ops;
3115 disk->private_data = rbd_dev;
3117 q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
3121 /* We use the default size, but let's be explicit about it. */
3122 blk_queue_physical_block_size(q, SECTOR_SIZE);
3124 /* set io sizes to object size */
3125 segment_size = rbd_obj_bytes(&rbd_dev->header);
3126 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
3127 blk_queue_max_segment_size(q, segment_size);
3128 blk_queue_io_min(q, segment_size);
3129 blk_queue_io_opt(q, segment_size);
3131 blk_queue_merge_bvec(q, rbd_merge_bvec);
3134 q->queuedata = rbd_dev;
3136 rbd_dev->disk = disk;
3149 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3151 return container_of(dev, struct rbd_device, dev);
3154 static ssize_t rbd_size_show(struct device *dev,
3155 struct device_attribute *attr, char *buf)
3157 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3159 return sprintf(buf, "%llu\n",
3160 (unsigned long long)rbd_dev->mapping.size);
3164 * Note this shows the features for whatever's mapped, which is not
3165 * necessarily the base image.
3167 static ssize_t rbd_features_show(struct device *dev,
3168 struct device_attribute *attr, char *buf)
3170 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3172 return sprintf(buf, "0x%016llx\n",
3173 (unsigned long long)rbd_dev->mapping.features);
3176 static ssize_t rbd_major_show(struct device *dev,
3177 struct device_attribute *attr, char *buf)
3179 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	if (rbd_dev->major)
		return sprintf(buf, "%d\n", rbd_dev->major);

	return sprintf(buf, "(none)\n");
3188 static ssize_t rbd_client_id_show(struct device *dev,
3189 struct device_attribute *attr, char *buf)
3191 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3193 return sprintf(buf, "client%lld\n",
3194 ceph_client_id(rbd_dev->rbd_client->client));
3197 static ssize_t rbd_pool_show(struct device *dev,
3198 struct device_attribute *attr, char *buf)
3200 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3202 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
3205 static ssize_t rbd_pool_id_show(struct device *dev,
3206 struct device_attribute *attr, char *buf)
3208 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3210 return sprintf(buf, "%llu\n",
3211 (unsigned long long) rbd_dev->spec->pool_id);
3214 static ssize_t rbd_name_show(struct device *dev,
3215 struct device_attribute *attr, char *buf)
3217 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3219 if (rbd_dev->spec->image_name)
3220 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3222 return sprintf(buf, "(unknown)\n");
3225 static ssize_t rbd_image_id_show(struct device *dev,
3226 struct device_attribute *attr, char *buf)
3228 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3230 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
3234 * Shows the name of the currently-mapped snapshot (or
3235 * RBD_SNAP_HEAD_NAME for the base image).
3237 static ssize_t rbd_snap_show(struct device *dev,
3238 struct device_attribute *attr,
3241 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3243 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
3247 * For an rbd v2 image, shows the pool id, image id, and snapshot id
3248 * for the parent image. If there is no parent, simply shows
3249 * "(no parent image)".
3251 static ssize_t rbd_parent_show(struct device *dev,
3252 struct device_attribute *attr,
3255 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3256 struct rbd_spec *spec = rbd_dev->parent_spec;
	if (!spec)
		return sprintf(buf, "(no parent image)\n");
3263 count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
3264 (unsigned long long) spec->pool_id, spec->pool_name);
3269 count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
3270 spec->image_name ? spec->image_name : "(unknown)");
3275 count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
3276 (unsigned long long) spec->snap_id, spec->snap_name);
3281 count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
3286 return (ssize_t) (bufp - buf);
3289 static ssize_t rbd_image_refresh(struct device *dev,
3290 struct device_attribute *attr,
3294 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3297 ret = rbd_dev_refresh(rbd_dev);
3299 return ret < 0 ? ret : size;
3302 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
3303 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
3304 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
3305 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3306 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
3307 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
3308 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
3309 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
3310 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3311 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
3312 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
3314 static struct attribute *rbd_attrs[] = {
3315 &dev_attr_size.attr,
3316 &dev_attr_features.attr,
3317 &dev_attr_major.attr,
3318 &dev_attr_client_id.attr,
3319 &dev_attr_pool.attr,
3320 &dev_attr_pool_id.attr,
3321 &dev_attr_name.attr,
3322 &dev_attr_image_id.attr,
3323 &dev_attr_current_snap.attr,
3324 &dev_attr_parent.attr,
3325 &dev_attr_refresh.attr,
3329 static struct attribute_group rbd_attr_group = {
3333 static const struct attribute_group *rbd_attr_groups[] = {
3338 static void rbd_sysfs_dev_release(struct device *dev)
3342 static struct device_type rbd_device_type = {
3344 .groups = rbd_attr_groups,
3345 .release = rbd_sysfs_dev_release,
3348 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
3350 kref_get(&spec->kref);
3355 static void rbd_spec_free(struct kref *kref);
3356 static void rbd_spec_put(struct rbd_spec *spec)
3359 kref_put(&spec->kref, rbd_spec_free);
3362 static struct rbd_spec *rbd_spec_alloc(void)
3364 struct rbd_spec *spec;
3366 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
3369 kref_init(&spec->kref);
3374 static void rbd_spec_free(struct kref *kref)
3376 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
3378 kfree(spec->pool_name);
3379 kfree(spec->image_id);
3380 kfree(spec->image_name);
3381 kfree(spec->snap_name);
3385 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
3386 struct rbd_spec *spec)
3388 struct rbd_device *rbd_dev;
3390 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
3394 spin_lock_init(&rbd_dev->lock);
3396 INIT_LIST_HEAD(&rbd_dev->node);
3397 INIT_LIST_HEAD(&rbd_dev->snaps);
3398 init_rwsem(&rbd_dev->header_rwsem);
3400 rbd_dev->spec = spec;
3401 rbd_dev->rbd_client = rbdc;
3403 /* Initialize the layout used for all rbd requests */
3405 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3406 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
3407 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3408 rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
3413 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
3415 rbd_put_client(rbd_dev->rbd_client);
3416 rbd_spec_put(rbd_dev->spec);
3420 static void rbd_snap_destroy(struct rbd_snap *snap)
3426 static struct rbd_snap *rbd_snap_create(struct rbd_device *rbd_dev,
3427 const char *snap_name,
3428 u64 snap_id, u64 snap_size,
3431 struct rbd_snap *snap;
3433 snap = kzalloc(sizeof (*snap), GFP_KERNEL);
3435 return ERR_PTR(-ENOMEM);
3437 snap->name = snap_name;
3439 snap->size = snap_size;
3440 snap->features = snap_features;
3446 * Returns a dynamically-allocated snapshot name if successful, or a
3447 * pointer-coded error otherwise.
3449 static const char *rbd_dev_v1_snap_info(struct rbd_device *rbd_dev, u32 which,
3450 u64 *snap_size, u64 *snap_features)
3452 const char *snap_name;
3454 snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
3456 return ERR_PTR(-ENOMEM);
3458 *snap_size = rbd_dev->header.snap_sizes[which];
3459 *snap_features = 0; /* No features for v1 */
 * Get the size and object order for an image snapshot, or, if
 * snap_id is CEPH_NOSNAP, get this information for the base
 * image.
3469 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
3470 u8 *order, u64 *snap_size)
3472 __le64 snapid = cpu_to_le64(snap_id);
3477 } __attribute__ ((packed)) size_buf = { 0 };
3479 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3481 &snapid, sizeof (snapid),
3482 &size_buf, sizeof (size_buf));
3483 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3486 if (ret < sizeof (size_buf))
3490 *order = size_buf.order;
3491 *snap_size = le64_to_cpu(size_buf.size);
3493 dout(" snap_id 0x%016llx order = %u, snap_size = %llu\n",
3494 (unsigned long long)snap_id, (unsigned int)*order,
3495 (unsigned long long)*snap_size);
3500 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
3502 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
3503 &rbd_dev->header.obj_order,
3504 &rbd_dev->header.image_size);
3507 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
3513 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
3517 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3518 "rbd", "get_object_prefix", NULL, 0,
3519 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
3520 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3525 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
3526 p + ret, NULL, GFP_NOIO);
3529 if (IS_ERR(rbd_dev->header.object_prefix)) {
3530 ret = PTR_ERR(rbd_dev->header.object_prefix);
3531 rbd_dev->header.object_prefix = NULL;
3533 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
3541 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
3544 __le64 snapid = cpu_to_le64(snap_id);
3548 } __attribute__ ((packed)) features_buf = { 0 };
3552 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3553 "rbd", "get_features",
3554 &snapid, sizeof (snapid),
3555 &features_buf, sizeof (features_buf));
3556 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3559 if (ret < sizeof (features_buf))
3562 incompat = le64_to_cpu(features_buf.incompat);
3563 if (incompat & ~RBD_FEATURES_SUPPORTED)
3566 *snap_features = le64_to_cpu(features_buf.features);
3568 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
3569 (unsigned long long)snap_id,
3570 (unsigned long long)*snap_features,
3571 (unsigned long long)le64_to_cpu(features_buf.incompat));
3576 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
3578 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
3579 &rbd_dev->header.features);
3582 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
3584 struct rbd_spec *parent_spec;
3586 void *reply_buf = NULL;
3594 parent_spec = rbd_spec_alloc();
3598 size = sizeof (__le64) + /* pool_id */
3599 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
3600 sizeof (__le64) + /* snap_id */
3601 sizeof (__le64); /* overlap */
3602 reply_buf = kmalloc(size, GFP_KERNEL);
3608 snapid = cpu_to_le64(CEPH_NOSNAP);
3609 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3610 "rbd", "get_parent",
3611 &snapid, sizeof (snapid),
3613 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3618 end = reply_buf + ret;
3620 ceph_decode_64_safe(&p, end, parent_spec->pool_id, out_err);
3621 if (parent_spec->pool_id == CEPH_NOPOOL)
3622 goto out; /* No parent? No problem. */
	/* The ceph file layout needs the pool id to fit in 32 bits */
3627 if (parent_spec->pool_id > (u64)U32_MAX) {
3628 rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
3629 (unsigned long long)parent_spec->pool_id, U32_MAX);
3633 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
3634 if (IS_ERR(image_id)) {
3635 ret = PTR_ERR(image_id);
3638 parent_spec->image_id = image_id;
3639 ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
3640 ceph_decode_64_safe(&p, end, overlap, out_err);
3642 rbd_dev->parent_overlap = overlap;
3643 rbd_dev->parent_spec = parent_spec;
3644 parent_spec = NULL; /* rbd_dev now owns this */
3649 rbd_spec_put(parent_spec);
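
/*
 * Illustrative sketch, not part of the driver: the encoded reply
 * expected from the "get_parent" call above, in decode order.  The
 * image id is a length-prefixed string; this layout is inferred
 * from the buffer sizing and decode calls in
 * rbd_dev_v2_parent_info().
 *
 *	__le64	pool_id;	   CEPH_NOPOOL means no parent
 *	__le32	image_id_len;
 *	char	image_id[image_id_len];
 *	__le64	snap_id;
 *	__le64	overlap;	   bytes of parent data visible to the child
 */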
3654 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
3658 __le64 stripe_count;
3659 } __attribute__ ((packed)) striping_info_buf = { 0 };
3660 size_t size = sizeof (striping_info_buf);
3667 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3668 "rbd", "get_stripe_unit_count", NULL, 0,
3669 (char *)&striping_info_buf, size);
3670 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3677 * We don't actually support the "fancy striping" feature
3678 * (STRIPINGV2) yet, but if the striping sizes are the
3679 * defaults the behavior is the same as before. So find
3680 * out, and only fail if the image has non-default values.
3683 obj_size = (u64)1 << rbd_dev->header.obj_order;
3684 p = &striping_info_buf;
3685 stripe_unit = ceph_decode_64(&p);
3686 if (stripe_unit != obj_size) {
3687 rbd_warn(rbd_dev, "unsupported stripe unit "
3688 "(got %llu want %llu)",
3689 stripe_unit, obj_size);
3692 stripe_count = ceph_decode_64(&p);
3693 if (stripe_count != 1) {
3694 rbd_warn(rbd_dev, "unsupported stripe count "
3695 "(got %llu want 1)", stripe_count);
3698 rbd_dev->header.stripe_unit = stripe_unit;
3699 rbd_dev->header.stripe_count = stripe_count;
3704 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
3706 size_t image_id_size;
3711 void *reply_buf = NULL;
3713 char *image_name = NULL;
3716 rbd_assert(!rbd_dev->spec->image_name);
3718 len = strlen(rbd_dev->spec->image_id);
3719 image_id_size = sizeof (__le32) + len;
3720 image_id = kmalloc(image_id_size, GFP_KERNEL);
3725 end = image_id + image_id_size;
3726 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
3728 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
3729 reply_buf = kmalloc(size, GFP_KERNEL);
3733 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
3734 "rbd", "dir_get_name",
3735 image_id, image_id_size,
3740 end = reply_buf + ret;
3742 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
3743 if (IS_ERR(image_name))
3746 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
3755 * When an rbd image has a parent image, it is identified by the
3756 * pool, image, and snapshot ids (not names). This function fills
3757 * in the names for those ids. (It's OK if we can't figure out the
3758 * name for an image id, but the pool and snapshot ids should always
 * exist and have names.)  All names in an rbd spec are dynamically
 * allocated.
3762 * When an image being mapped (not a parent) is probed, we have the
3763 * pool name and pool id, image name and image id, and the snapshot
3764 * name. The only thing we're missing is the snapshot id.
3766 * The set of snapshots for an image is not known until they have
3767 * been read by rbd_dev_snaps_update(), so we can't completely fill
3768 * in this information until after that has been called.
3770 static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
3772 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3773 struct rbd_spec *spec = rbd_dev->spec;
3774 const char *pool_name;
3775 const char *image_name;
3776 const char *snap_name;
3780 * An image being mapped will have the pool name (etc.), but
3781 * we need to look up the snapshot id.
3783 if (spec->pool_name) {
3784 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
3785 struct rbd_snap *snap;
3787 snap = snap_by_name(rbd_dev, spec->snap_name);
3790 spec->snap_id = snap->id;
3792 spec->snap_id = CEPH_NOSNAP;
3798 /* Get the pool name; we have to make our own copy of this */
3800 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
3802 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
3805 pool_name = kstrdup(pool_name, GFP_KERNEL);
3809 /* Fetch the image name; tolerate failure here */
3811 image_name = rbd_dev_image_name(rbd_dev);
3813 rbd_warn(rbd_dev, "unable to get image name");
3815 /* Look up the snapshot name, and make a copy */
3817 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
3819 rbd_warn(rbd_dev, "no snapshot with id %llu", spec->snap_id);
3823 snap_name = kstrdup(snap_name, GFP_KERNEL);
3829 spec->pool_name = pool_name;
3830 spec->image_name = image_name;
3831 spec->snap_name = snap_name;
3841 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
3850 struct ceph_snap_context *snapc;
3854 * We'll need room for the seq value (maximum snapshot id),
3855 * snapshot count, and array of that many snapshot ids.
3856 * For now we have a fixed upper limit on the number we're
3857 * prepared to receive.
3859 size = sizeof (__le64) + sizeof (__le32) +
3860 RBD_MAX_SNAP_COUNT * sizeof (__le64);
3861 reply_buf = kzalloc(size, GFP_KERNEL);
3865 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3866 "rbd", "get_snapcontext", NULL, 0,
3868 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3873 end = reply_buf + ret;
3875 ceph_decode_64_safe(&p, end, seq, out);
3876 ceph_decode_32_safe(&p, end, snap_count, out);
3879 * Make sure the reported number of snapshot ids wouldn't go
3880 * beyond the end of our buffer. But before checking that,
3881 * make sure the computed size of the snapshot context we
3882 * allocate is representable in a size_t.
	if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
						/ sizeof (u64)) {
		ret = -EINVAL;
		goto out;
	}
3889 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
3893 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
3899 for (i = 0; i < snap_count; i++)
3900 snapc->snaps[i] = ceph_decode_64(&p);
3902 rbd_dev->header.snapc = snapc;
3904 dout(" snap context seq = %llu, snap_count = %u\n",
3905 (unsigned long long)seq, (unsigned int)snap_count);
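
/*
 * Illustrative sketch, not part of the driver: the encoded
 * "get_snapcontext" reply decoded above, inferred from the buffer
 * sizing and decode calls in rbd_dev_v2_snap_context().
 *
 *	__le64	seq;			highest snapshot id
 *	__le32	snap_count;
 *	__le64	snaps[snap_count];	ids, highest id first
 */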
3912 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which)
3922 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
3923 reply_buf = kmalloc(size, GFP_KERNEL);
3925 return ERR_PTR(-ENOMEM);
3927 rbd_assert(which < rbd_dev->header.snapc->num_snaps);
3928 snap_id = cpu_to_le64(rbd_dev->header.snapc->snaps[which]);
3929 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3930 "rbd", "get_snapshot_name",
3931 &snap_id, sizeof (snap_id),
3933 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3935 snap_name = ERR_PTR(ret);
3940 end = reply_buf + ret;
3941 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
3942 if (IS_ERR(snap_name))
3945 dout(" snap_id 0x%016llx snap_name = %s\n",
3946 (unsigned long long)le64_to_cpu(snap_id), snap_name);
3953 static const char *rbd_dev_v2_snap_info(struct rbd_device *rbd_dev, u32 which,
3954 u64 *snap_size, u64 *snap_features)
3959 const char *snap_name;
3962 rbd_assert(which < rbd_dev->header.snapc->num_snaps);
3963 snap_id = rbd_dev->header.snapc->snaps[which];
3964 ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
3968 ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
3972 snap_name = rbd_dev_v2_snap_name(rbd_dev, which);
3973 if (!IS_ERR(snap_name)) {
		*snap_size = size;
		*snap_features = features;
3980 return ERR_PTR(ret);
3983 static const char *rbd_dev_snap_info(struct rbd_device *rbd_dev, u32 which,
3984 u64 *snap_size, u64 *snap_features)
3986 if (rbd_dev->image_format == 1)
3987 return rbd_dev_v1_snap_info(rbd_dev, which,
3988 snap_size, snap_features);
3989 if (rbd_dev->image_format == 2)
3990 return rbd_dev_v2_snap_info(rbd_dev, which,
3991 snap_size, snap_features);
3992 return ERR_PTR(-EINVAL);
3995 static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev)
3999 down_write(&rbd_dev->header_rwsem);
4001 ret = rbd_dev_v2_image_size(rbd_dev);
4004 rbd_update_mapping_size(rbd_dev);
4006 ret = rbd_dev_v2_snap_context(rbd_dev);
4007 dout("rbd_dev_v2_snap_context returned %d\n", ret);
4010 ret = rbd_dev_snaps_update(rbd_dev);
4011 dout("rbd_dev_snaps_update returned %d\n", ret);
4015 up_write(&rbd_dev->header_rwsem);
4021 * Scan the rbd device's current snapshot list and compare it to the
4022 * newly-received snapshot context. Remove any existing snapshots
4023 * not present in the new snapshot context. Add a new snapshot for
 * any snapshots in the snapshot context not in the current list.
 * And verify there are no changes to snapshots we already know
 * about.
4028 * Assumes the snapshots in the snapshot context are sorted by
4029 * snapshot id, highest id first. (Snapshots in the rbd_dev's list
4030 * are also maintained in that order.)
 * Note that any error that occurs while updating the snapshot list
 * aborts the update, and the entire list is cleared.  The snapshot
 * list becomes inconsistent at that point anyway, so it might as
 * well be empty.
4037 static int rbd_dev_snaps_update(struct rbd_device *rbd_dev)
4039 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4040 const u32 snap_count = snapc->num_snaps;
4041 struct list_head *head = &rbd_dev->snaps;
4042 struct list_head *links = head->next;
4046 dout("%s: snap count is %u\n", __func__, (unsigned int)snap_count);
4047 while (index < snap_count || links != head) {
4049 struct rbd_snap *snap;
4050 const char *snap_name;
4052 u64 snap_features = 0;
4054 snap_id = index < snap_count ? snapc->snaps[index]
4056 snap = links != head ? list_entry(links, struct rbd_snap, node)
4058 rbd_assert(!snap || snap->id != CEPH_NOSNAP);
4060 if (snap_id == CEPH_NOSNAP || (snap && snap->id > snap_id)) {
4061 struct list_head *next = links->next;
4064 * A previously-existing snapshot is not in
4065 * the new snap context.
4067 * If the now-missing snapshot is the one
4068 * the image represents, clear its existence
			 * flag so we can avoid sending any more
			 * requests to it.
4072 if (rbd_dev->spec->snap_id == snap->id)
4073 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
4074 dout("removing %ssnap id %llu\n",
4075 rbd_dev->spec->snap_id == snap->id ?
4077 (unsigned long long)snap->id);
4079 list_del(&snap->node);
4080 rbd_snap_destroy(snap);
4082 /* Done with this list entry; advance */
4088 snap_name = rbd_dev_snap_info(rbd_dev, index,
4089 &snap_size, &snap_features);
4090 if (IS_ERR(snap_name)) {
4091 ret = PTR_ERR(snap_name);
4092 dout("failed to get snap info, error %d\n", ret);
4096 dout("entry %u: snap_id = %llu\n", (unsigned int)snap_count,
4097 (unsigned long long)snap_id);
4098 if (!snap || (snap_id != CEPH_NOSNAP && snap->id < snap_id)) {
4099 struct rbd_snap *new_snap;
4101 /* We haven't seen this snapshot before */
4103 new_snap = rbd_snap_create(rbd_dev, snap_name,
4104 snap_id, snap_size, snap_features);
4105 if (IS_ERR(new_snap)) {
4106 ret = PTR_ERR(new_snap);
4107 dout(" failed to add dev, error %d\n", ret);
4111 /* New goes before existing, or at end of list */
4113 dout(" added dev%s\n", snap ? "" : " at end\n");
			if (snap)
				list_add_tail(&new_snap->node, &snap->node);
			else
				list_add_tail(&new_snap->node, head);
4119 /* Already have this one */
4121 dout(" already present\n");
4123 rbd_assert(snap->size == snap_size);
4124 rbd_assert(!strcmp(snap->name, snap_name));
4125 rbd_assert(snap->features == snap_features);
4127 /* Done with this list entry; advance */
4129 links = links->next;
4132 /* Advance to the next entry in the snapshot context */
4136 dout("%s: done\n", __func__);
4140 rbd_remove_all_snaps(rbd_dev);
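
/*
 * Illustrative sketch, not part of the driver: the shape of the
 * merge walk in rbd_dev_snaps_update(), reduced to two id arrays.
 * The helper is an assumption of this example.  Both sequences are
 * ordered by snapshot id, highest first, so one pass with two
 * cursors classifies every entry as removed, added, or unchanged.
 */
static void rbd_snap_merge_sketch(const u64 *ctx_ids, u32 ctx_count,
				  const u64 *list_ids, u32 list_count)
{
	u32 i = 0;	/* cursor into the new snapshot context */
	u32 j = 0;	/* cursor into the existing snapshot list */

	while (i < ctx_count || j < list_count) {
		if (j < list_count &&
		    (i >= ctx_count || list_ids[j] > ctx_ids[i])) {
			j++;		/* only in the old list: remove it */
		} else if (i < ctx_count &&
			   (j >= list_count || ctx_ids[i] > list_ids[j])) {
			i++;		/* only in the context: add it */
		} else {
			i++;		/* in both: verify it's unchanged */
			j++;
		}
	}
}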
4145 static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
4150 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
4152 dev = &rbd_dev->dev;
4153 dev->bus = &rbd_bus_type;
4154 dev->type = &rbd_device_type;
4155 dev->parent = &rbd_root_dev;
4156 dev->release = rbd_dev_device_release;
4157 dev_set_name(dev, "%d", rbd_dev->dev_id);
4158 ret = device_register(dev);
4160 mutex_unlock(&ctl_mutex);
4165 static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
4167 device_unregister(&rbd_dev->dev);
4170 static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);
4173 * Get a unique rbd identifier for the given new rbd_dev, and add
4174 * the rbd_dev to the global list. The minimum rbd id is 1.
4176 static void rbd_dev_id_get(struct rbd_device *rbd_dev)
4178 rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);
4180 spin_lock(&rbd_dev_list_lock);
4181 list_add_tail(&rbd_dev->node, &rbd_dev_list);
4182 spin_unlock(&rbd_dev_list_lock);
4183 dout("rbd_dev %p given dev id %llu\n", rbd_dev,
4184 (unsigned long long) rbd_dev->dev_id);
4188 * Remove an rbd_dev from the global list, and record that its
4189 * identifier is no longer in use.
4191 static void rbd_dev_id_put(struct rbd_device *rbd_dev)
4193 struct list_head *tmp;
4194 int rbd_id = rbd_dev->dev_id;
4197 rbd_assert(rbd_id > 0);
4199 dout("rbd_dev %p released dev id %llu\n", rbd_dev,
4200 (unsigned long long) rbd_dev->dev_id);
4201 spin_lock(&rbd_dev_list_lock);
4202 list_del_init(&rbd_dev->node);
4205 * If the id being "put" is not the current maximum, there
4206 * is nothing special we need to do.
4208 if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
4209 spin_unlock(&rbd_dev_list_lock);
4214 * We need to update the current maximum id. Search the
4215 * list to find out what it is. We're more likely to find
4216 * the maximum at the end, so search the list backward.
4219 list_for_each_prev(tmp, &rbd_dev_list) {
4220 struct rbd_device *rbd_dev;
4222 rbd_dev = list_entry(tmp, struct rbd_device, node);
4223 if (rbd_dev->dev_id > max_id)
4224 max_id = rbd_dev->dev_id;
4226 spin_unlock(&rbd_dev_list_lock);
4229 * The max id could have been updated by rbd_dev_id_get(), in
4230 * which case it now accurately reflects the new maximum.
4231 * Be careful not to overwrite the maximum value in that
4234 atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
4235 dout(" max dev id has been reset\n");
4239 * Skips over white space at *buf, and updates *buf to point to the
4240 * first found non-space character (if any). Returns the length of
4241 * the token (string of non-white space characters) found. Note
4242 * that *buf must be terminated with '\0'.
4244 static inline size_t next_token(const char **buf)
4247 * These are the characters that produce nonzero for
4248 * isspace() in the "C" and "POSIX" locales.
4250 const char *spaces = " \f\n\r\t\v";
4252 *buf += strspn(*buf, spaces); /* Find start of token */
4254 return strcspn(*buf, spaces); /* Return token length */
4258 * Finds the next token in *buf, and if the provided token buffer is
4259 * big enough, copies the found token into it. The result, if
4260 * copied, is guaranteed to be terminated with '\0'. Note that *buf
4261 * must be terminated with '\0' on entry.
4263 * Returns the length of the token found (not including the '\0').
4264 * Return value will be 0 if no token is found, and it will be >=
4265 * token_size if the token would not fit.
4267 * The *buf pointer will be updated to point beyond the end of the
4268 * found token. Note that this occurs even if the token buffer is
4269 * too small to hold it.
4271 static inline size_t copy_token(const char **buf,
4277 len = next_token(buf);
4278 if (len < token_size) {
4279 memcpy(token, *buf, len);
4280 *(token + len) = '\0';
4288 * Finds the next token in *buf, dynamically allocates a buffer big
4289 * enough to hold a copy of it, and copies the token into the new
4290 * buffer. The copy is guaranteed to be terminated with '\0'. Note
4291 * that a duplicate buffer is created even for a zero-length token.
4293 * Returns a pointer to the newly-allocated duplicate, or a null
4294 * pointer if memory for the duplicate was not available. If
4295 * the lenp argument is a non-null pointer, the length of the token
4296 * (not including the '\0') is returned in *lenp.
4298 * If successful, the *buf pointer will be updated to point beyond
4299 * the end of the found token.
4301 * Note: uses GFP_KERNEL for allocation.
4303 static inline char *dup_token(const char **buf, size_t *lenp)
4308 len = next_token(buf);
	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
	if (!dup)
		return NULL;
	*(dup + len) = '\0';
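
/*
 * Illustrative sketch, not part of the driver: how next_token() and
 * dup_token() carve up an "rbd add" buffer.  The helper and the
 * input string are assumptions of this example.
 */
static void rbd_token_sketch(void)
{
	const char *buf = "1.2.3.4:6789 name=admin rbd myimage";
	char *options;
	char *pool;
	size_t len;

	len = next_token(&buf);		/* 12: "1.2.3.4:6789" */
	buf += len;			/* skip past the monitor list */

	options = dup_token(&buf, NULL);	/* "name=admin" */
	pool = dup_token(&buf, NULL);		/* "rbd" */
	/* The next token would be the image name, "myimage" */

	kfree(options);				/* kfree(NULL) is safe */
	kfree(pool);
}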
4322 * Parse the options provided for an "rbd add" (i.e., rbd image
4323 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
4324 * and the data written is passed here via a NUL-terminated buffer.
4325 * Returns 0 if successful or an error code otherwise.
 * The information extracted from these options is recorded in
 * the other parameters which return dynamically-allocated
 * structures:
 *  ceph_opts
 *      The address of a pointer that will refer to a ceph options
 *      structure.  Caller must release the returned pointer using
 *      ceph_destroy_options() when it is no longer needed.
 *  opts
 *      Address of an rbd options pointer.  Fully initialized by
 *      this function; caller must release with kfree().
 *  rbd_spec
 *      Address of an rbd image specification pointer.  Fully
 *      initialized by this function based on parsed options.
 *      Caller must release with rbd_spec_put().
 *
 * The options passed take this form:
 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
 * where:
 *  <mon_addrs>
 *      A comma-separated list of one or more monitor addresses.
 *      A monitor address is an ip address, optionally followed
 *      by a port number (separated by a colon).
 *      I.e.:  ip1[:port1][,ip2[:port2]...]
 *  <options>
 *      A comma-separated list of ceph and/or rbd options.
 *  <pool_name>
 *      The name of the rados pool containing the rbd image.
 *  <image_name>
 *      The name of the image in that pool to map.
 *  <snap_id>
 *      An optional snapshot id.  If provided, the mapping will
 *      present data from the image at the time that snapshot was
 *      created.  The image head is used if no snapshot id is
 *      provided.  Snapshot mappings are always read-only.
4362 static int rbd_add_parse_args(const char *buf,
4363 struct ceph_options **ceph_opts,
4364 struct rbd_options **opts,
4365 struct rbd_spec **rbd_spec)
4369 const char *mon_addrs;
4371 size_t mon_addrs_size;
4372 struct rbd_spec *spec = NULL;
4373 struct rbd_options *rbd_opts = NULL;
4374 struct ceph_options *copts;
4377 /* The first four tokens are required */
4379 len = next_token(&buf);
4381 rbd_warn(NULL, "no monitor address(es) provided");
4385 mon_addrs_size = len + 1;
4389 options = dup_token(&buf, NULL);
4393 rbd_warn(NULL, "no options provided");
4397 spec = rbd_spec_alloc();
4401 spec->pool_name = dup_token(&buf, NULL);
4402 if (!spec->pool_name)
4404 if (!*spec->pool_name) {
4405 rbd_warn(NULL, "no pool name provided");
4409 spec->image_name = dup_token(&buf, NULL);
4410 if (!spec->image_name)
4412 if (!*spec->image_name) {
4413 rbd_warn(NULL, "no image name provided");
4418 * Snapshot name is optional; default is to use "-"
4419 * (indicating the head/no snapshot).
4421 len = next_token(&buf);
	if (!len) {
		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
4424 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
4425 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
4426 ret = -ENAMETOOLONG;
4429 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
4432 *(snap_name + len) = '\0';
4433 spec->snap_name = snap_name;
4435 /* Initialize all rbd options to the defaults */
4437 rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
4441 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
4443 copts = ceph_parse_options(options, mon_addrs,
4444 mon_addrs + mon_addrs_size - 1,
4445 parse_rbd_opts_token, rbd_opts);
4446 if (IS_ERR(copts)) {
4447 ret = PTR_ERR(copts);
4468 * An rbd format 2 image has a unique identifier, distinct from the
4469 * name given to it by the user. Internally, that identifier is
4470 * what's used to specify the names of objects related to the image.
4472 * A special "rbd id" object is used to map an rbd image name to its
4473 * id. If that object doesn't exist, then there is no v2 rbd image
4474 * with the supplied name.
4476 * This function will record the given rbd_dev's image_id field if
4477 * it can be determined, and in that case will return 0. If any
4478 * errors occur a negative errno will be returned and the rbd_dev's
4479 * image_id field will be unchanged (and should be NULL).
4481 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
4490 * When probing a parent image, the image id is already
4491 * known (and the image name likely is not). There's no
4492 * need to fetch the image id again in this case. We
4493 * do still need to set the image format though.
4495 if (rbd_dev->spec->image_id) {
4496 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
	 * First, see if the format 2 image id object exists, and if
	 * so, get the image's persistent id from it.
4505 size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
4506 object_name = kmalloc(size, GFP_NOIO);
4509 sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
4510 dout("rbd id object name is %s\n", object_name);
4512 /* Response will be an encoded string, which includes a length */
4514 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
4515 response = kzalloc(size, GFP_NOIO);
4521 /* If it doesn't exist we'll assume it's a format 1 image */
4523 ret = rbd_obj_method_sync(rbd_dev, object_name,
4524 "rbd", "get_id", NULL, 0,
4525 response, RBD_IMAGE_ID_LEN_MAX);
4526 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4527 if (ret == -ENOENT) {
4528 image_id = kstrdup("", GFP_KERNEL);
4529 ret = image_id ? 0 : -ENOMEM;
		if (!ret)
			rbd_dev->image_format = 1;
	} else if (ret > sizeof (__le32)) {
		void *p = response;

		image_id = ceph_extract_encoded_string(&p, p + ret,
							NULL, GFP_NOIO);
		ret = IS_ERR(image_id) ? PTR_ERR(image_id) : 0;
		if (!ret)
			rbd_dev->image_format = 2;
4545 rbd_dev->spec->image_id = image_id;
4546 dout("image_id is %s\n", image_id);
4555 /* Undo whatever state changes are made by v1 or v2 image probe */
4557 static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
4559 struct rbd_image_header *header;
4561 rbd_dev_remove_parent(rbd_dev);
4562 rbd_spec_put(rbd_dev->parent_spec);
4563 rbd_dev->parent_spec = NULL;
4564 rbd_dev->parent_overlap = 0;
4566 /* Free dynamic fields from the header, then zero it out */
4568 header = &rbd_dev->header;
4569 ceph_put_snap_context(header->snapc);
4570 kfree(header->snap_sizes);
4571 kfree(header->snap_names);
4572 kfree(header->object_prefix);
4573 memset(header, 0, sizeof (*header));
4576 static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
4580 /* Populate rbd image metadata */
4582 ret = rbd_read_header(rbd_dev, &rbd_dev->header);
4586 /* Version 1 images have no parent (no layering) */
4588 rbd_dev->parent_spec = NULL;
4589 rbd_dev->parent_overlap = 0;
4591 dout("discovered version 1 image, header name is %s\n",
4592 rbd_dev->header_name);
4597 kfree(rbd_dev->header_name);
4598 rbd_dev->header_name = NULL;
4599 kfree(rbd_dev->spec->image_id);
4600 rbd_dev->spec->image_id = NULL;
4605 static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
4609 ret = rbd_dev_v2_image_size(rbd_dev);
4613 /* Get the object prefix (a.k.a. block_name) for the image */
4615 ret = rbd_dev_v2_object_prefix(rbd_dev);
	/* Get and check the features for the image */
4621 ret = rbd_dev_v2_features(rbd_dev);
4625 /* If the image supports layering, get the parent info */
4627 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
4628 ret = rbd_dev_v2_parent_info(rbd_dev);
			 * Don't print a warning for parent images.  We can
			 * tell we have a parent at this point because we
			 * won't know its pool name yet (just its pool id).
4637 if (rbd_dev->spec->pool_name)
4638 rbd_warn(rbd_dev, "WARNING: kernel layering "
4639 "is EXPERIMENTAL!");
4642 /* If the image supports fancy striping, get its parameters */
4644 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
4645 ret = rbd_dev_v2_striping_info(rbd_dev);
4650 /* crypto and compression type aren't (yet) supported for v2 images */
4652 rbd_dev->header.crypt_type = 0;
4653 rbd_dev->header.comp_type = 0;
	/* Get the snapshot context */
4657 ret = rbd_dev_v2_snap_context(rbd_dev);
4661 dout("discovered version 2 image, header name is %s\n",
4662 rbd_dev->header_name);
4666 rbd_dev->parent_overlap = 0;
4667 rbd_spec_put(rbd_dev->parent_spec);
4668 rbd_dev->parent_spec = NULL;
4669 kfree(rbd_dev->header_name);
4670 rbd_dev->header_name = NULL;
4671 kfree(rbd_dev->header.object_prefix);
4672 rbd_dev->header.object_prefix = NULL;
static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
{
	struct rbd_device *parent = NULL;
	struct rbd_spec *parent_spec;
	struct rbd_client *rbdc;
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;
	/*
	 * We need to pass a reference to the client and the parent
	 * spec when creating the parent rbd_dev.  Images related by
	 * parent/child relationships always share both.
	 */
	parent_spec = rbd_spec_get(rbd_dev->parent_spec);
	rbdc = __rbd_get_client(rbd_dev->rbd_client);

	ret = -ENOMEM;
	parent = rbd_dev_create(rbdc, parent_spec);
	if (!parent)
		goto out_err;

	ret = rbd_dev_image_probe(parent);
	if (ret < 0)
		goto out_err;
	rbd_dev->parent = parent;
	return 0;
out_err:
	if (parent) {
		rbd_spec_put(rbd_dev->parent_spec);
		kfree(rbd_dev->header_name);
		rbd_dev_destroy(parent);
	} else {
		rbd_put_client(rbdc);
		rbd_spec_put(parent_spec);
	}

	return ret;
}
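/*
 * Illustrative note: for a clone chain child -> parent ->
 * grandparent, probing the child discovers a parent_spec, so this
 * function creates and probes a device for the parent; that probe
 * calls back into rbd_dev_probe_parent(), recursing until an image
 * with no parent_spec terminates the chain.
 */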
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		return ret;

	/* generate unique id: find highest unique id, add one */
	rbd_dev_id_get(rbd_dev);

	/* Fill in the device name, now that we have its id. */
	BUILD_BUG_ON(DEV_NAME_LEN
			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);

	/* Get our block major device number. */
	ret = register_blkdev(0, rbd_dev->name);
	if (ret < 0)
		goto err_out_id;
	rbd_dev->major = ret;

	/* Set up the blkdev mapping. */
	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_bus_add_dev(rbd_dev);
	if (ret)
		goto err_out_disk;

	/* Everything's ready.  Announce the disk to the world. */
	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	add_disk(rbd_dev->disk);

	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long) rbd_dev->mapping.size);
	return ret;

err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);

	return ret;
}
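/*
 * Illustrative example: the first mapped image typically becomes
 * disk "rbd0" with a dynamically allocated major number, and a
 * 1 GiB mapping (0x40000000 bytes) is announced to the block layer
 * as 2097152 sectors of SECTOR_SIZE bytes.  Names and numbers vary
 * by host.
 */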
static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	size_t size;

	/* Record the header object name for this rbd image. */

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
	else
		size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);

	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name)
		return -ENOMEM;

	if (rbd_dev->image_format == 1)
		sprintf(rbd_dev->header_name, "%s%s",
			spec->image_name, RBD_SUFFIX);
	else
		sprintf(rbd_dev->header_name, "%s%s",
			RBD_HEADER_PREFIX, spec->image_id);

	return 0;
}
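/*
 * Illustrative example, assuming RBD_SUFFIX is ".rbd" and
 * RBD_HEADER_PREFIX is "rbd_header." (see rbd_types.h): a format 1
 * image named "foo" gets header object "foo.rbd", while a format 2
 * image with id "10052ae8944a" (a made-up value) gets
 * "rbd_header.10052ae8944a".
 */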
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	int ret;

	rbd_remove_all_snaps(rbd_dev);
	rbd_dev_unprobe(rbd_dev);
	ret = rbd_dev_header_watch_sync(rbd_dev, 0);
	if (ret)
		rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	rbd_dev_destroy(rbd_dev);
}
/*
 * Probe for the existence of the header object for the given rbd
 * device.  For format 2 images this includes determining the image
 * id.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev)
{
	int ret;
	int tmp;

	/*
	 * Get the id from the image id object.  If it's not a
	 * format 2 image, we'll get ENOENT back, and we'll assume
	 * it's a format 1 image.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;
	rbd_assert(rbd_dev->spec->image_id);
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	ret = rbd_dev_header_watch_sync(rbd_dev, 1);
	if (ret)
		goto out_header_name;

	if (rbd_dev->image_format == 1)
		ret = rbd_dev_v1_probe(rbd_dev);
	else
		ret = rbd_dev_v2_probe(rbd_dev);
	if (ret)
		goto err_out_watch;

	ret = rbd_dev_snaps_update(rbd_dev);
	if (ret)
		goto err_out_probe;

	ret = rbd_dev_spec_update(rbd_dev);
	if (ret)
		goto err_out_snaps;

	ret = rbd_dev_probe_parent(rbd_dev);
	if (!ret)
		return 0;

err_out_snaps:
	rbd_remove_all_snaps(rbd_dev);
err_out_probe:
	rbd_dev_unprobe(rbd_dev);
err_out_watch:
	tmp = rbd_dev_header_watch_sync(rbd_dev, 0);
	if (tmp)
		rbd_warn(rbd_dev, "unable to tear down watch request\n");
out_header_name:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	dout("probe failed, returning %d\n", ret);
	return ret;
}
static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	struct ceph_osd_client *osdc;
	int rc = -ENOMEM;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto err_out_module;

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}
	ceph_opts = NULL;	/* rbd_dev client now owns this */

	/* pick the pool */
	osdc = &rbdc->client->osdc;
	rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
	if (rc < 0)
		goto err_out_client;
	spec->pool_id = (u64)rc;

	/* The ceph file layout needs to fit pool id in 32 bits */
	if (spec->pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "pool id too large (%llu > %u)\n",
				(unsigned long long)spec->pool_id, U32_MAX);
		rc = -EIO;
		goto err_out_client;
	}

	rc = -ENOMEM;
	rbd_dev = rbd_dev_create(rbdc, spec);
	if (!rbd_dev)
		goto err_out_client;
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */

	rbd_dev->mapping.read_only = rbd_opts->read_only;
	kfree(rbd_opts);
	rbd_opts = NULL;	/* done with this */

	rc = rbd_dev_image_probe(rbd_dev);
	if (rc < 0)
		goto err_out_rbd_dev;

	rc = rbd_dev_device_setup(rbd_dev);
	if (!rc)
		return count;

	/* rbd_dev_image_release() destroys rbd_dev; don't do it twice */
	rbd_dev_image_release(rbd_dev);
	goto err_out_module;

err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	kfree(rbd_opts);
	rbd_spec_put(spec);
err_out_module:
	module_put(THIS_MODULE);

	dout("Error adding device %s\n", buf);
	return (ssize_t)rc;
}
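/*
 * Usage sketch (Documentation/ABI/testing/sysfs-bus-rbd is the
 * authoritative reference).  A map request written to the bus looks
 * roughly like:
 *
 *   $ echo "1.2.3.4:6789 name=admin,secret=<key> rbd foo" \
 *         > /sys/bus/rbd/add
 *
 * i.e. monitor address(es), options, pool name, image name, and an
 * optional snapshot name.  The address, key, pool, and image names
 * above are placeholders.
 */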
static struct rbd_device *__rbd_get_dev(unsigned long dev_id)
{
	struct list_head *tmp;
	struct rbd_device *rbd_dev;

	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			spin_unlock(&rbd_dev_list_lock);
			return rbd_dev;
		}
	}
	spin_unlock(&rbd_dev_list_lock);
	return NULL;
}
static void rbd_dev_device_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	rbd_free_disk(rbd_dev);
	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	rbd_dev_clear_mapping(rbd_dev);
	unregister_blkdev(rbd_dev->major, rbd_dev->name);
	rbd_dev->major = 0;
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);
}
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		first->parent = NULL;
		first->parent_overlap = 0;

		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}
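/*
 * Illustrative walk-through: for a chain A -> B -> C (C being the
 * ultimate ancestor), the inner loop stops with first == B and
 * second == C, so C is released first; the next outer iteration
 * releases B, and finally A's parent linkage is cleared.  Teardown
 * is bottom-up, the reverse of probe order.
 */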
static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	int target_id;
	unsigned long ul;
	int ret;

	ret = strict_strtoul(buf, 10, &ul);
	if (ret)
		return ret;

	/* convert to int; abort if we lost anything in the conversion */
	target_id = (int) ul;
	if (target_id != ul)
		return -EINVAL;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	ret = -ENOENT;
	rbd_dev = __rbd_get_dev(target_id);
	if (!rbd_dev)
		goto done;

	ret = 0;
	spin_lock_irq(&rbd_dev->lock);
	if (rbd_dev->open_count)
		ret = -EBUSY;
	else
		set_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
	spin_unlock_irq(&rbd_dev->lock);
	if (ret < 0)
		goto done;

	ret = count;
	rbd_bus_del_dev(rbd_dev);
	rbd_dev_image_release(rbd_dev);
	module_put(THIS_MODULE);
done:
	mutex_unlock(&ctl_mutex);
	return ret;
}
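/*
 * Usage sketch: writing a device id to the remove file unmaps the
 * corresponding device, e.g.
 *
 *   $ echo 0 > /sys/bus/rbd/remove
 *
 * for /dev/rbd0.  The write fails with -EBUSY while the block
 * device is still held open.
 */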
/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}
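/*
 * Note: registering the bus is what creates the control files used
 * by rbd_add() and rbd_remove() above, i.e. /sys/bus/rbd/add and
 * /sys/bus/rbd/remove, with per-device attributes appearing under
 * /sys/bus/rbd/devices/.
 */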
static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}
	rc = rbd_sysfs_init();
	if (rc)
		return rc;
	pr_info("loaded " RBD_DRV_NAME_LONG "\n");
	return 0;
}

static void __exit rbd_exit(void)
{
	rbd_sysfs_cleanup();
}

module_init(rbd_init);
module_exit(rbd_exit);
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_DESCRIPTION("rados block device");

/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_LICENSE("GPL");