/*
   rbd.c -- Export ceph rados objects as a Linux block device


   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.


   For usage instructions, please refer to:

                 Documentation/ABI/testing/sysfs-bus-rbd

 */
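
/*
 * Illustrative usage (example values only; the ABI document above is
 * authoritative for the add/remove syntax):
 *
 *      # map image "foo" from pool "rbd" as /dev/rbd<id>
 *      $ echo "1.2.3.4:6789 name=admin,secret=<key> rbd foo" \
 *              > /sys/bus/rbd/add
 *
 *      # unmap device id 0 again
 *      $ echo 0 > /sys/bus/rbd/remove
 */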

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

#include "rbd_types.h"

#define RBD_DEBUG       /* Activate rbd_assert() calls */

/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define SECTOR_SHIFT    9
#define SECTOR_SIZE     (1ULL << SECTOR_SHIFT)

/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
        unsigned int counter;

        counter = (unsigned int)__atomic_add_unless(v, 1, 0);
        if (counter <= (unsigned int)INT_MAX)
                return (int)counter;

        atomic_dec(v);

        return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
        int counter;

        counter = atomic_dec_return(v);
        if (counter >= 0)
                return counter;

        atomic_inc(v);

        return -EINVAL;
}
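
/*
 * Usage sketch (illustrative): these saturating helpers back the
 * parent_ref counter below (see rbd_dev_parent_get() and
 * rbd_dev_parent_put()).  Callers treat a non-positive return as
 * "reference not taken":
 *
 *      if (atomic_inc_return_safe(&rbd_dev->parent_ref) > 0)
 *              ... reference held ...
 *
 * so a counter that has dropped to 0, or been pushed into the
 * INT_MAX range, stays pinned instead of wrapping.
 */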

#define RBD_DRV_NAME "rbd"
#define RBD_DRV_NAME_LONG "rbd (rados block device)"

#define RBD_MINORS_PER_MAJOR    256             /* max minors per blkdev */

#define RBD_SNAP_DEV_NAME_PREFIX        "snap_"
#define RBD_MAX_SNAP_NAME_LEN   \
                        (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT      510     /* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME      "-"

#define BAD_SNAP_INDEX  U32_MAX         /* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX  (PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX    64

#define RBD_OBJ_PREFIX_LEN_MAX  64

/* Feature bits */

#define RBD_FEATURE_LAYERING    (1<<0)
#define RBD_FEATURE_STRIPINGV2  (1<<1)
#define RBD_FEATURES_ALL \
            (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED  (RBD_FEATURES_ALL)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN            32
#define MAX_INT_FORMAT_WIDTH    ((5 * sizeof (int)) / 2 + 1)
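
/*
 * Worked example (comment only): with a 4-byte int this evaluates to
 * (5 * 4) / 2 + 1 == 11, covering the 10 decimal digits of UINT_MAX
 * (4294967295) plus a spare byte.  The 5/2 factor over-approximates
 * log10(256), i.e. at most ~2.41 decimal digits per byte.
 */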

/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
        /* These six fields never change for a given rbd image */
        char *object_prefix;
        __u8 obj_order;
        __u8 crypt_type;
        __u8 comp_type;
        u64 stripe_unit;
        u64 stripe_count;
        u64 features;           /* Might be changeable someday? */

        /* The remaining fields need to be updated occasionally */
        u64 image_size;
        struct ceph_snap_context *snapc;
        char *snap_names;       /* format 1 only */
        u64 *snap_sizes;        /* format 1 only */
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
        u64             pool_id;
        const char      *pool_name;

        const char      *image_id;
        const char      *image_name;

        u64             snap_id;
        const char      *snap_name;

        struct kref     kref;
};

/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
        struct ceph_client      *client;
        struct kref             kref;
        struct list_head        node;
};

struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define BAD_WHICH       U32_MAX         /* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
        OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_req_flags {
        OBJ_REQ_DONE,           /* completion flag: not done = 0, done = 1 */
        OBJ_REQ_IMG_DATA,       /* object usage: standalone = 0, image = 1 */
        OBJ_REQ_KNOWN,          /* EXISTS flag valid: no = 0, yes = 1 */
        OBJ_REQ_EXISTS,         /* target exists: no = 0, yes = 1 */
};

struct rbd_obj_request {
        const char              *object_name;
        u64                     offset;         /* object start byte */
        u64                     length;         /* bytes from offset */
        unsigned long           flags;

        /*
         * An object request associated with an image will have its
         * img_data flag set; a standalone object request will not.
         *
         * A standalone object request will have which == BAD_WHICH
         * and a null obj_request pointer.
         *
         * An object request initiated in support of a layered image
         * object (to check for its existence before a write) will
         * have which == BAD_WHICH and a non-null obj_request pointer.
         *
         * Finally, an object request for rbd image data will have
         * which != BAD_WHICH, and will have a non-null img_request
         * pointer.  The value of which will be in the range
         * 0..(img_request->obj_request_count-1).
         */
        union {
                struct rbd_obj_request  *obj_request;   /* STAT op */
                struct {
                        struct rbd_img_request  *img_request;
                        u64                     img_offset;
                        /* links for img_request->obj_requests list */
                        struct list_head        links;
                };
        };
        u32                     which;          /* posn image request list */

        enum obj_request_type   type;
        union {
                struct bio      *bio_list;
                struct {
                        struct page     **pages;
                        u32             page_count;
                };
        };
        struct page             **copyup_pages;
        u32                     copyup_page_count;

        struct ceph_osd_request *osd_req;

        u64                     xferred;        /* bytes transferred */
        int                     result;

        rbd_obj_callback_t      callback;
        struct completion       completion;

        struct kref             kref;
};

enum img_req_flags {
        IMG_REQ_WRITE,          /* I/O direction: read = 0, write = 1 */
        IMG_REQ_CHILD,          /* initiator: block = 0, child image = 1 */
        IMG_REQ_LAYERED,        /* ENOENT handling: normal = 0, layered = 1 */
};

struct rbd_img_request {
        struct rbd_device       *rbd_dev;
        u64                     offset; /* starting image byte offset */
        u64                     length; /* byte count from offset */
        unsigned long           flags;
        union {
                u64                     snap_id;        /* for reads */
                struct ceph_snap_context *snapc;        /* for writes */
        };
        union {
                struct request          *rq;            /* block request */
                struct rbd_obj_request  *obj_request;   /* obj req initiator */
        };
        struct page             **copyup_pages;
        u32                     copyup_page_count;
        spinlock_t              completion_lock;/* protects next_completion */
        u32                     next_completion;
        rbd_img_callback_t      callback;
        u64                     xferred;/* aggregate bytes transferred */
        int                     result; /* first nonzero obj_request result */

        u32                     obj_request_count;
        struct list_head        obj_requests;   /* rbd_obj_request structs */

        struct kref             kref;
};

#define for_each_obj_request(ireq, oreq) \
        list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
        list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
        list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
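
/*
 * Usage sketch (illustrative): summing transfer counts the way
 * rbd_img_request_complete() does below:
 *
 *      struct rbd_obj_request *obj_request;
 *      u64 xferred = 0;
 *
 *      for_each_obj_request(img_request, obj_request)
 *              xferred += obj_request->xferred;
 *
 * The _safe variant walks the list in reverse and tolerates removal
 * of the current entry, which is what the teardown paths need.
 */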

struct rbd_mapping {
        u64                     size;
        u64                     features;
        bool                    read_only;
};

/*
 * a single device
 */
struct rbd_device {
        int                     dev_id;         /* blkdev unique id */

        int                     major;          /* blkdev assigned major */
        struct gendisk          *disk;          /* blkdev's gendisk and rq */

        u32                     image_format;   /* Either 1 or 2 */
        struct rbd_client       *rbd_client;

        char                    name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

        spinlock_t              lock;           /* queue, flags, open_count */

        struct rbd_image_header header;
        unsigned long           flags;          /* possibly lock protected */
        struct rbd_spec         *spec;

        char                    *header_name;

        struct ceph_file_layout layout;

        struct ceph_osd_event   *watch_event;
        struct rbd_obj_request  *watch_request;

        struct rbd_spec         *parent_spec;
        u64                     parent_overlap;
        atomic_t                parent_ref;
        struct rbd_device       *parent;

        /* protects updating the header */
        struct rw_semaphore     header_rwsem;

        struct rbd_mapping      mapping;

        struct list_head        node;

        /* sysfs related */
        struct device           dev;
        unsigned long           open_count;     /* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
        RBD_DEV_FLAG_EXISTS,    /* mapped snapshot has not been deleted */
        RBD_DEV_FLAG_REMOVING,  /* this mapping is being removed */
};

static DEFINE_MUTEX(client_mutex);      /* Serialize client creation */

static LIST_HEAD(rbd_dev_list);         /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);      /* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache        *rbd_img_request_cache;
static struct kmem_cache        *rbd_obj_request_cache;
static struct kmem_cache        *rbd_segment_name_cache;

static int rbd_img_request_submit(struct rbd_img_request *img_request);

static void rbd_dev_device_release(struct device *dev);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
                       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
                          size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
static void rbd_spec_put(struct rbd_spec *spec);

static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);

static struct attribute *rbd_bus_attrs[] = {
        &bus_attr_add.attr,
        &bus_attr_remove.attr,
        NULL,
};
ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
        .name           = "rbd",
        .bus_groups     = rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
        .init_name =    "rbd",
        .release =      rbd_root_dev_release,
};

static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;

        if (!rbd_dev)
                printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
        else if (rbd_dev->disk)
                printk(KERN_WARNING "%s: %s: %pV\n",
                        RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
        else if (rbd_dev->spec && rbd_dev->spec->image_name)
                printk(KERN_WARNING "%s: image %s: %pV\n",
                        RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
        else if (rbd_dev->spec && rbd_dev->spec->image_id)
                printk(KERN_WARNING "%s: id %s: %pV\n",
                        RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
        else    /* punt if we cannot figure out what to print */
                printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
                        RBD_DRV_NAME, rbd_dev, &vaf);
        va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)                                                \
                if (unlikely(!(expr))) {                                \
                        printk(KERN_ERR "\nAssertion failure in %s() "  \
                                                "at line %d:\n\n"       \
                                        "\trbd_assert(%s);\n\n",        \
                                        __func__, __LINE__, #expr);     \
                        BUG();                                          \
                }
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)      ((void) 0)
#endif /* !RBD_DEBUG */

static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
                                        u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
                u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
                u64 *snap_features);
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);

static int rbd_open(struct block_device *bdev, fmode_t mode)
{
        struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
        bool removing = false;

        if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
                return -EROFS;

        spin_lock_irq(&rbd_dev->lock);
        if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
                removing = true;
        else
                rbd_dev->open_count++;
        spin_unlock_irq(&rbd_dev->lock);
        if (removing)
                return -ENOENT;

        (void) get_device(&rbd_dev->dev);
        set_device_ro(bdev, rbd_dev->mapping.read_only);

        return 0;
}

static void rbd_release(struct gendisk *disk, fmode_t mode)
{
        struct rbd_device *rbd_dev = disk->private_data;
        unsigned long open_count_before;

        spin_lock_irq(&rbd_dev->lock);
        open_count_before = rbd_dev->open_count--;
        spin_unlock_irq(&rbd_dev->lock);
        rbd_assert(open_count_before > 0);

        put_device(&rbd_dev->dev);
}

static const struct block_device_operations rbd_bd_ops = {
        .owner                  = THIS_MODULE,
        .open                   = rbd_open,
        .release                = rbd_release,
};

/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
        struct rbd_client *rbdc;
        int ret = -ENOMEM;

        dout("%s:\n", __func__);
        rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
        if (!rbdc)
                goto out_opt;

        kref_init(&rbdc->kref);
        INIT_LIST_HEAD(&rbdc->node);

        rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
        if (IS_ERR(rbdc->client))
                goto out_rbdc;
        ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

        ret = ceph_open_session(rbdc->client);
        if (ret < 0)
                goto out_client;

        spin_lock(&rbd_client_list_lock);
        list_add_tail(&rbdc->node, &rbd_client_list);
        spin_unlock(&rbd_client_list_lock);

        dout("%s: rbdc %p\n", __func__, rbdc);

        return rbdc;
out_client:
        ceph_destroy_client(rbdc->client);
out_rbdc:
        kfree(rbdc);
out_opt:
        if (ceph_opts)
                ceph_destroy_options(ceph_opts);
        dout("%s: error %d\n", __func__, ret);

        return ERR_PTR(ret);
}

static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
        kref_get(&rbdc->kref);

        return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
        struct rbd_client *client_node;
        bool found = false;

        if (ceph_opts->flags & CEPH_OPT_NOSHARE)
                return NULL;

        spin_lock(&rbd_client_list_lock);
        list_for_each_entry(client_node, &rbd_client_list, node) {
                if (!ceph_compare_options(ceph_opts, client_node->client)) {
                        __rbd_get_client(client_node);

                        found = true;
                        break;
                }
        }
        spin_unlock(&rbd_client_list_lock);

        return found ? client_node : NULL;
}

/*
 * mount options
 */
enum {
        Opt_last_int,
        /* int args above */
        Opt_last_string,
        /* string args above */
        Opt_read_only,
        Opt_read_write,
        /* Boolean args above */
        Opt_last_bool,
};

static match_table_t rbd_opts_tokens = {
        /* int args above */
        /* string args above */
        {Opt_read_only, "read_only"},
        {Opt_read_only, "ro"},          /* Alternate spelling */
        {Opt_read_write, "read_write"},
        {Opt_read_write, "rw"},         /* Alternate spelling */
        /* Boolean args above */
        {-1, NULL}
};

struct rbd_options {
        bool    read_only;
};

#define RBD_READ_ONLY_DEFAULT   false

static int parse_rbd_opts_token(char *c, void *private)
{
        struct rbd_options *rbd_opts = private;
        substring_t argstr[MAX_OPT_ARGS];
        int token, intval, ret;

        token = match_token(c, rbd_opts_tokens, argstr);
        if (token < 0)
                return -EINVAL;

        if (token < Opt_last_int) {
                ret = match_int(&argstr[0], &intval);
                if (ret < 0) {
                        pr_err("bad mount option arg (not int) "
                               "at '%s'\n", c);
                        return ret;
                }
                dout("got int token %d val %d\n", token, intval);
        } else if (token > Opt_last_int && token < Opt_last_string) {
                dout("got string token %d val %s\n", token,
                     argstr[0].from);
        } else if (token > Opt_last_string && token < Opt_last_bool) {
                dout("got Boolean token %d\n", token);
        } else {
                dout("got token %d\n", token);
        }

        switch (token) {
        case Opt_read_only:
                rbd_opts->read_only = true;
                break;
        case Opt_read_write:
                rbd_opts->read_only = false;
                break;
        default:
                rbd_assert(false);
                break;
        }
        return 0;
}

/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
        struct rbd_client *rbdc;

        mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
        rbdc = rbd_client_find(ceph_opts);
        if (rbdc)       /* using an existing client */
                ceph_destroy_options(ceph_opts);
        else
                rbdc = rbd_client_create(ceph_opts);
        mutex_unlock(&client_mutex);

        return rbdc;
}

/*
 * Destroy a ceph client.  Drops the client from rbd_client_list,
 * taking rbd_client_list_lock itself, before destroying it.
 */
static void rbd_client_release(struct kref *kref)
{
        struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

        dout("%s: rbdc %p\n", __func__, rbdc);
        spin_lock(&rbd_client_list_lock);
        list_del(&rbdc->node);
        spin_unlock(&rbd_client_list_lock);

        ceph_destroy_client(rbdc->client);
        kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
        if (rbdc)
                kref_put(&rbdc->kref, rbd_client_release);
}

static bool rbd_image_format_valid(u32 image_format)
{
        return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
        size_t size;
        u32 snap_count;

        /* The header has to start with the magic rbd header text */
        if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
                return false;

        /* The bio layer requires at least sector-sized I/O */

        if (ondisk->options.order < SECTOR_SHIFT)
                return false;

        /* If we use u64 in a few spots we may be able to loosen this */

        if (ondisk->options.order > 8 * sizeof (int) - 1)
                return false;

        /*
         * The size of a snapshot header has to fit in a size_t, and
         * that limits the number of snapshots.
         */
        snap_count = le32_to_cpu(ondisk->snap_count);
        size = SIZE_MAX - sizeof (struct ceph_snap_context);
        if (snap_count > size / sizeof (__le64))
                return false;

        /*
         * Not only that, but the size of the entire snapshot
         * header must also be representable in a size_t.
         */
        size -= snap_count * sizeof (__le64);
        if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
                return false;

        return true;
}

/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
                                 struct rbd_image_header_ondisk *ondisk)
{
        struct rbd_image_header *header = &rbd_dev->header;
        bool first_time = header->object_prefix == NULL;
        struct ceph_snap_context *snapc;
        char *object_prefix = NULL;
        char *snap_names = NULL;
        u64 *snap_sizes = NULL;
        u32 snap_count;
        size_t size;
        int ret = -ENOMEM;
        u32 i;

        /* Allocate this now to avoid having to handle failure below */

        if (first_time) {
                size_t len;

                len = strnlen(ondisk->object_prefix,
                                sizeof (ondisk->object_prefix));
                object_prefix = kmalloc(len + 1, GFP_KERNEL);
                if (!object_prefix)
                        return -ENOMEM;
                memcpy(object_prefix, ondisk->object_prefix, len);
                object_prefix[len] = '\0';
        }

        /* Allocate the snapshot context and fill it in */

        snap_count = le32_to_cpu(ondisk->snap_count);
        snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
        if (!snapc)
                goto out_err;
        snapc->seq = le64_to_cpu(ondisk->snap_seq);
        if (snap_count) {
                struct rbd_image_snap_ondisk *snaps;
                u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

                /* We'll keep a copy of the snapshot names... */

                if (snap_names_len > (u64)SIZE_MAX)
                        goto out_2big;
                snap_names = kmalloc(snap_names_len, GFP_KERNEL);
                if (!snap_names)
                        goto out_err;

                /* ...as well as the array of their sizes. */

                size = snap_count * sizeof (*header->snap_sizes);
                snap_sizes = kmalloc(size, GFP_KERNEL);
                if (!snap_sizes)
                        goto out_err;

                /*
                 * Copy the names, and fill in each snapshot's id
                 * and size.
                 *
                 * Note that rbd_dev_v1_header_info() guarantees the
                 * ondisk buffer we're working with has
                 * snap_names_len bytes beyond the end of the
                 * snapshot id array, so this memcpy() is safe.
                 */
                memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
                snaps = ondisk->snaps;
                for (i = 0; i < snap_count; i++) {
                        snapc->snaps[i] = le64_to_cpu(snaps[i].id);
                        snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
                }
        }

        /* We won't fail any more, fill in the header */

        if (first_time) {
                header->object_prefix = object_prefix;
                header->obj_order = ondisk->options.order;
                header->crypt_type = ondisk->options.crypt_type;
                header->comp_type = ondisk->options.comp_type;
                /* The rest aren't used for format 1 images */
                header->stripe_unit = 0;
                header->stripe_count = 0;
                header->features = 0;
        } else {
                ceph_put_snap_context(header->snapc);
                kfree(header->snap_names);
                kfree(header->snap_sizes);
        }

        /* The remaining fields always get updated (when we refresh) */

        header->image_size = le64_to_cpu(ondisk->image_size);
        header->snapc = snapc;
        header->snap_names = snap_names;
        header->snap_sizes = snap_sizes;

        /* Make sure mapping size is consistent with header info */

        if (rbd_dev->spec->snap_id == CEPH_NOSNAP || first_time)
                if (rbd_dev->mapping.size != header->image_size)
                        rbd_dev->mapping.size = header->image_size;

        return 0;
out_2big:
        ret = -EIO;
out_err:
        kfree(snap_sizes);
        kfree(snap_names);
        ceph_put_snap_context(snapc);
        kfree(object_prefix);

        return ret;
}

static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
        const char *snap_name;

        rbd_assert(which < rbd_dev->header.snapc->num_snaps);

        /* Skip over names until we find the one we are looking for */

        snap_name = rbd_dev->header.snap_names;
        while (which--)
                snap_name += strlen(snap_name) + 1;

        return kstrdup(snap_name, GFP_KERNEL);
}

/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
        u64 snap_id1 = *(u64 *)s1;
        u64 snap_id2 = *(u64 *)s2;

        if (snap_id1 < snap_id2)
                return 1;
        return snap_id1 == snap_id2 ? 0 : -1;
}

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
        struct ceph_snap_context *snapc = rbd_dev->header.snapc;
        u64 *found;

        found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
                                sizeof (snap_id), snapid_compare_reverse);

        return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
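
/*
 * Illustrative example (comment only): with snapc->snaps holding
 * { 12, 7, 3 } (newest id first), looking up id 7 returns index 1,
 * while looking up id 5 finds no match and yields BAD_SNAP_INDEX.
 * The reversed comparison above is what lets bsearch() work on this
 * descending array.
 */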

static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
                                        u64 snap_id)
{
        u32 which;

        which = rbd_dev_snap_index(rbd_dev, snap_id);
        if (which == BAD_SNAP_INDEX)
                return NULL;

        return _rbd_dev_v1_snap_name(rbd_dev, which);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
        if (snap_id == CEPH_NOSNAP)
                return RBD_SNAP_HEAD_NAME;

        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
        if (rbd_dev->image_format == 1)
                return rbd_dev_v1_snap_name(rbd_dev, snap_id);

        return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}

static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
                                u64 *snap_size)
{
        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
        if (snap_id == CEPH_NOSNAP) {
                *snap_size = rbd_dev->header.image_size;
        } else if (rbd_dev->image_format == 1) {
                u32 which;

                which = rbd_dev_snap_index(rbd_dev, snap_id);
                if (which == BAD_SNAP_INDEX)
                        return -ENOENT;

                *snap_size = rbd_dev->header.snap_sizes[which];
        } else {
                u64 size = 0;
                int ret;

                ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
                if (ret)
                        return ret;

                *snap_size = size;
        }
        return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
                        u64 *snap_features)
{
        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
        if (snap_id == CEPH_NOSNAP) {
                *snap_features = rbd_dev->header.features;
        } else if (rbd_dev->image_format == 1) {
                *snap_features = 0;     /* No features for format 1 */
        } else {
                u64 features = 0;
                int ret;

                ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
                if (ret)
                        return ret;

                *snap_features = features;
        }
        return 0;
}

static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
        u64 snap_id = rbd_dev->spec->snap_id;
        u64 size = 0;
        u64 features = 0;
        int ret;

        ret = rbd_snap_size(rbd_dev, snap_id, &size);
        if (ret)
                return ret;
        ret = rbd_snap_features(rbd_dev, snap_id, &features);
        if (ret)
                return ret;

        rbd_dev->mapping.size = size;
        rbd_dev->mapping.features = features;

        return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
        rbd_dev->mapping.size = 0;
        rbd_dev->mapping.features = 0;
}

static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
        char *name;
        u64 segment;
        int ret;
        char *name_format;

        name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
        if (!name)
                return NULL;
        segment = offset >> rbd_dev->header.obj_order;
        name_format = "%s.%012llx";
        if (rbd_dev->image_format == 2)
                name_format = "%s.%016llx";
        ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, name_format,
                        rbd_dev->header.object_prefix, segment);
        if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
                pr_err("error formatting segment name for #%llu (%d)\n",
                        segment, ret);
                kmem_cache_free(rbd_segment_name_cache, name);
                name = NULL;
        }

        return name;
}

static void rbd_segment_name_free(const char *name)
{
        /* The explicit cast here is needed to drop the const qualifier */

        kmem_cache_free(rbd_segment_name_cache, (void *)name);
}

static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
        u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

        return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
                                u64 offset, u64 length)
{
        u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

        offset &= segment_size - 1;

        rbd_assert(length <= U64_MAX - offset);
        if (offset + length > segment_size)
                length = segment_size - offset;

        return length;
}
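
/*
 * Worked example (comment only): for a format 1 image with
 * object_prefix "rb.0.1234" and obj_order 22 (4 MiB objects), image
 * offset 0x1400064 lies in segment 5, so rbd_segment_name() yields
 * "rb.0.1234.000000000005" and rbd_segment_offset() yields 0x64.
 * An I/O of 16 MiB starting there is clipped by rbd_segment_length()
 * to the 0x3fff9c bytes remaining in that segment.
 */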

/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
        return 1 << header->obj_order;
}

/*
 * bio helpers
 */

static void bio_chain_put(struct bio *chain)
{
        struct bio *tmp;

        while (chain) {
                tmp = chain;
                chain = chain->bi_next;
                bio_put(tmp);
        }
}

/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
        struct bio_vec *bv;
        unsigned long flags;
        void *buf;
        int i;
        int pos = 0;

        while (chain) {
                bio_for_each_segment(bv, chain, i) {
                        if (pos + bv->bv_len > start_ofs) {
                                int remainder = max(start_ofs - pos, 0);
                                buf = bvec_kmap_irq(bv, &flags);
                                memset(buf + remainder, 0,
                                       bv->bv_len - remainder);
                                flush_dcache_page(bv->bv_page);
                                bvec_kunmap_irq(buf, &flags);
                        }
                        pos += bv->bv_len;
                }

                chain = chain->bi_next;
        }
}

/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
        struct page **page = &pages[offset >> PAGE_SHIFT];

        rbd_assert(end > offset);
        rbd_assert(end - offset <= (u64)SIZE_MAX);
        while (offset < end) {
                size_t page_offset;
                size_t length;
                unsigned long flags;
                void *kaddr;

                page_offset = offset & ~PAGE_MASK;
                length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
                local_irq_save(flags);
                kaddr = kmap_atomic(*page);
                memset(kaddr + page_offset, 0, length);
                flush_dcache_page(*page);
                kunmap_atomic(kaddr);
                local_irq_restore(flags);

                offset += length;
                page++;
        }
}

/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
                                        unsigned int offset,
                                        unsigned int len,
                                        gfp_t gfpmask)
{
        struct bio_vec *bv;
        unsigned int resid;
        unsigned short idx;
        unsigned int voff;              /* offset into first segment */
        struct bio *bio;
        unsigned short end_idx;
        unsigned short vcnt;

        /* Handle the easy case for the caller */

        if (!offset && len == bio_src->bi_size)
                return bio_clone(bio_src, gfpmask);

        if (WARN_ON_ONCE(!len))
                return NULL;
        if (WARN_ON_ONCE(len > bio_src->bi_size))
                return NULL;
        if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
                return NULL;

        /* Find first affected segment... */

        resid = offset;
        bio_for_each_segment(bv, bio_src, idx) {
                if (resid < bv->bv_len)
                        break;
                resid -= bv->bv_len;
        }
        voff = resid;

        /* ...and the last affected segment */

        resid += len;
        __bio_for_each_segment(bv, bio_src, end_idx, idx) {
                if (resid <= bv->bv_len)
                        break;
                resid -= bv->bv_len;
        }
        vcnt = end_idx - idx + 1;

        /* Build the clone */

        bio = bio_alloc(gfpmask, (unsigned int) vcnt);
        if (!bio)
                return NULL;    /* ENOMEM */

        bio->bi_bdev = bio_src->bi_bdev;
        bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
        bio->bi_rw = bio_src->bi_rw;
        bio->bi_flags |= 1 << BIO_CLONED;

        /*
         * Copy over our part of the bio_vec, then update the first
         * and last (or only) entries.
         */
        memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
                        vcnt * sizeof (struct bio_vec));
        bio->bi_io_vec[0].bv_offset += voff;
        if (vcnt > 1) {
                bio->bi_io_vec[0].bv_len -= voff;
                bio->bi_io_vec[vcnt - 1].bv_len = resid;
        } else {
                bio->bi_io_vec[0].bv_len = len;
        }

        bio->bi_vcnt = vcnt;
        bio->bi_size = len;
        bio->bi_idx = 0;

        return bio;
}

/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
                                        unsigned int *offset,
                                        unsigned int len,
                                        gfp_t gfpmask)
{
        struct bio *bi = *bio_src;
        unsigned int off = *offset;
        struct bio *chain = NULL;
        struct bio **end;

        /* Build up a chain of clone bios up to the limit */

        if (!bi || off >= bi->bi_size || !len)
                return NULL;            /* Nothing to clone */

        end = &chain;
        while (len) {
                unsigned int bi_size;
                struct bio *bio;

                if (!bi) {
                        rbd_warn(NULL, "bio_chain exhausted with %u left", len);
                        goto out_err;   /* EINVAL; ran out of bio's */
                }
                bi_size = min_t(unsigned int, bi->bi_size - off, len);
                bio = bio_clone_range(bi, off, bi_size, gfpmask);
                if (!bio)
                        goto out_err;   /* ENOMEM */

                *end = bio;
                end = &bio->bi_next;

                off += bi_size;
                if (off == bi->bi_size) {
                        bi = bi->bi_next;
                        off = 0;
                }
                len -= bi_size;
        }
        *bio_src = bi;
        *offset = off;

        return chain;
out_err:
        bio_chain_put(chain);

        return NULL;
}
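
/*
 * Usage sketch (illustrative): rbd_img_request_fill() below consumes
 * a block request's bio chain in per-object slices:
 *
 *      struct bio *bio_list = ...;     (head of the source chain)
 *      unsigned int bio_offset = 0;
 *
 *      clone = bio_chain_clone_range(&bio_list, &bio_offset,
 *                                    clone_size, GFP_ATOMIC);
 *
 * After each call, bio_list and bio_offset identify the first byte
 * not yet cloned, so successive calls walk the chain without
 * rescanning it from the start.
 */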

/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
        if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
                struct rbd_device *rbd_dev;

                rbd_dev = obj_request->img_request->rbd_dev;
                rbd_warn(rbd_dev, "obj_request %p already marked img_data\n",
                        obj_request);
        }
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
        smp_mb();
        return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
        if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
                struct rbd_device *rbd_dev = NULL;

                if (obj_request_img_data_test(obj_request))
                        rbd_dev = obj_request->img_request->rbd_dev;
                rbd_warn(rbd_dev, "obj_request %p already marked done\n",
                        obj_request);
        }
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
        smp_mb();
        return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}

/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the responses from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
                                bool exists)
{
        if (exists)
                set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
        set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
        smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
        smp_mb();
        return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
        smp_mb();
        return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}

static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p (was %d)\n", __func__, obj_request,
                atomic_read(&obj_request->kref.refcount));
        kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
        rbd_assert(obj_request != NULL);
        dout("%s: obj %p (was %d)\n", __func__, obj_request,
                atomic_read(&obj_request->kref.refcount));
        kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
        rbd_assert(img_request != NULL);
        dout("%s: img %p (was %d)\n", __func__, img_request,
                atomic_read(&img_request->kref.refcount));
        if (img_request_child_test(img_request))
                kref_put(&img_request->kref, rbd_parent_request_destroy);
        else
                kref_put(&img_request->kref, rbd_img_request_destroy);
}

static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
                                        struct rbd_obj_request *obj_request)
{
        rbd_assert(obj_request->img_request == NULL);

        /* Image request now owns object's original reference */
        obj_request->img_request = img_request;
        obj_request->which = img_request->obj_request_count;
        rbd_assert(!obj_request_img_data_test(obj_request));
        obj_request_img_data_set(obj_request);
        rbd_assert(obj_request->which != BAD_WHICH);
        img_request->obj_request_count++;
        list_add_tail(&obj_request->links, &img_request->obj_requests);
        dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
                obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
                                        struct rbd_obj_request *obj_request)
{
        rbd_assert(obj_request->which != BAD_WHICH);

        dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
                obj_request->which);
        list_del(&obj_request->links);
        rbd_assert(img_request->obj_request_count > 0);
        img_request->obj_request_count--;
        rbd_assert(obj_request->which == img_request->obj_request_count);
        obj_request->which = BAD_WHICH;
        rbd_assert(obj_request_img_data_test(obj_request));
        rbd_assert(obj_request->img_request == img_request);
        obj_request->img_request = NULL;
        obj_request->callback = NULL;
        rbd_obj_request_put(obj_request);
}

static bool obj_request_type_valid(enum obj_request_type type)
{
        switch (type) {
        case OBJ_REQUEST_NODATA:
        case OBJ_REQUEST_BIO:
        case OBJ_REQUEST_PAGES:
                return true;
        default:
                return false;
        }
}

static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
                                struct rbd_obj_request *obj_request)
{
        dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);

        return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}

static void rbd_img_request_complete(struct rbd_img_request *img_request)
{

        dout("%s: img %p\n", __func__, img_request);

        /*
         * If no error occurred, compute the aggregate transfer
         * count for the image request.  We could instead use
         * atomic64_cmpxchg() to update it as each object request
         * completes; not clear which way is better off hand.
         */
        if (!img_request->result) {
                struct rbd_obj_request *obj_request;
                u64 xferred = 0;

                for_each_obj_request(img_request, obj_request)
                        xferred += obj_request->xferred;
                img_request->xferred = xferred;
        }

        if (img_request->callback)
                img_request->callback(img_request);
        else
                rbd_img_request_put(img_request);
}

/* Caller is responsible for rbd_obj_request_destroy(obj_request) */

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p\n", __func__, obj_request);

        return wait_for_completion_interruptible(&obj_request->completion);
}

/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
        set_bit(IMG_REQ_WRITE, &img_request->flags);
        smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
        smp_mb();
        return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
        set_bit(IMG_REQ_CHILD, &img_request->flags);
        smp_mb();
}

static void img_request_child_clear(struct rbd_img_request *img_request)
{
        clear_bit(IMG_REQ_CHILD, &img_request->flags);
        smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
        smp_mb();
        return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
        set_bit(IMG_REQ_LAYERED, &img_request->flags);
        smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
        clear_bit(IMG_REQ_LAYERED, &img_request->flags);
        smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
        smp_mb();
        return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}

static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
        u64 xferred = obj_request->xferred;
        u64 length = obj_request->length;

        dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
                obj_request, obj_request->img_request, obj_request->result,
                xferred, length);
        /*
         * ENOENT means a hole in the image.  We zero-fill the
         * entire length of the request.  A short read also implies
         * zero-fill to the end of the request.  Either way we
         * update the xferred count to indicate the whole request
         * was satisfied.
         */
        rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
        if (obj_request->result == -ENOENT) {
                if (obj_request->type == OBJ_REQUEST_BIO)
                        zero_bio_chain(obj_request->bio_list, 0);
                else
                        zero_pages(obj_request->pages, 0, length);
                obj_request->result = 0;
                obj_request->xferred = length;
        } else if (xferred < length && !obj_request->result) {
                if (obj_request->type == OBJ_REQUEST_BIO)
                        zero_bio_chain(obj_request->bio_list, xferred);
                else
                        zero_pages(obj_request->pages, xferred, length);
                obj_request->xferred = length;
        }
        obj_request_done_set(obj_request);
}
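
/*
 * Illustrative example (comment only): a 4096-byte read hitting an
 * unwritten object (-ENOENT) is returned as 4096 bytes of zeroes;
 * a read that transferred only 1024 bytes has bytes 1024..4095
 * zeroed.  In both cases xferred is reported as 4096, so the block
 * layer sees the request fully satisfied.
 */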

static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p cb %p\n", __func__, obj_request,
                obj_request->callback);
        if (obj_request->callback)
                obj_request->callback(obj_request);
        else
                complete_all(&obj_request->completion);
}

static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p\n", __func__, obj_request);
        obj_request_done_set(obj_request);
}

static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request = NULL;
        struct rbd_device *rbd_dev = NULL;
        bool layered = false;

        if (obj_request_img_data_test(obj_request)) {
                img_request = obj_request->img_request;
                layered = img_request && img_request_layered_test(img_request);
                rbd_dev = img_request->rbd_dev;
        }

        dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
                obj_request, img_request, obj_request->result,
                obj_request->xferred, obj_request->length);
        if (layered && obj_request->result == -ENOENT &&
                        obj_request->img_offset < rbd_dev->parent_overlap)
                rbd_img_parent_read(obj_request);
        else if (img_request)
                rbd_img_obj_request_read_callback(obj_request);
        else
                obj_request_done_set(obj_request);
}

static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p result %d %llu\n", __func__, obj_request,
                obj_request->result, obj_request->length);
        /*
         * There is no such thing as a successful short write.  Set
         * it to our originally-requested length.
         */
        obj_request->xferred = obj_request->length;
        obj_request_done_set(obj_request);
}

/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p\n", __func__, obj_request);
        obj_request_done_set(obj_request);
}

static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
                                struct ceph_msg *msg)
{
        struct rbd_obj_request *obj_request = osd_req->r_priv;
        u16 opcode;

        dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
        rbd_assert(osd_req == obj_request->osd_req);
        if (obj_request_img_data_test(obj_request)) {
                rbd_assert(obj_request->img_request);
                rbd_assert(obj_request->which != BAD_WHICH);
        } else {
                rbd_assert(obj_request->which == BAD_WHICH);
        }

        if (osd_req->r_result < 0)
                obj_request->result = osd_req->r_result;

        BUG_ON(osd_req->r_num_ops > 2);

        /*
         * We support a 64-bit length, but ultimately it has to be
         * passed to blk_end_request(), which takes an unsigned int.
         */
        obj_request->xferred = osd_req->r_reply_op_len[0];
        rbd_assert(obj_request->xferred < (u64)UINT_MAX);
        opcode = osd_req->r_ops[0].op;
        switch (opcode) {
        case CEPH_OSD_OP_READ:
                rbd_osd_read_callback(obj_request);
                break;
        case CEPH_OSD_OP_WRITE:
                rbd_osd_write_callback(obj_request);
                break;
        case CEPH_OSD_OP_STAT:
                rbd_osd_stat_callback(obj_request);
                break;
        case CEPH_OSD_OP_CALL:
        case CEPH_OSD_OP_NOTIFY_ACK:
        case CEPH_OSD_OP_WATCH:
                rbd_osd_trivial_callback(obj_request);
                break;
        default:
                rbd_warn(NULL, "%s: unsupported op %hu\n",
                        obj_request->object_name, (unsigned short) opcode);
                break;
        }

        if (obj_request_done_test(obj_request))
                rbd_obj_request_complete(obj_request);
}

static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request = obj_request->img_request;
        struct ceph_osd_request *osd_req = obj_request->osd_req;
        u64 snap_id;

        rbd_assert(osd_req != NULL);

        snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
        ceph_osdc_build_request(osd_req, obj_request->offset,
                        NULL, snap_id, NULL);
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request = obj_request->img_request;
        struct ceph_osd_request *osd_req = obj_request->osd_req;
        struct ceph_snap_context *snapc;
        struct timespec mtime = CURRENT_TIME;

        rbd_assert(osd_req != NULL);

        snapc = img_request ? img_request->snapc : NULL;
        ceph_osdc_build_request(osd_req, obj_request->offset,
                        snapc, CEPH_NOSNAP, &mtime);
}

static struct ceph_osd_request *rbd_osd_req_create(
                                        struct rbd_device *rbd_dev,
                                        bool write_request,
                                        struct rbd_obj_request *obj_request)
{
        struct ceph_snap_context *snapc = NULL;
        struct ceph_osd_client *osdc;
        struct ceph_osd_request *osd_req;

        if (obj_request_img_data_test(obj_request)) {
                struct rbd_img_request *img_request = obj_request->img_request;

                rbd_assert(write_request ==
                                img_request_write_test(img_request));
                if (write_request)
                        snapc = img_request->snapc;
        }

        /* Allocate and initialize the request, for the single op */

        osdc = &rbd_dev->rbd_client->client->osdc;
        osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC);
        if (!osd_req)
                return NULL;    /* ENOMEM */

        if (write_request)
                osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
        else
                osd_req->r_flags = CEPH_OSD_FLAG_READ;

        osd_req->r_callback = rbd_osd_req_callback;
        osd_req->r_priv = obj_request;

        osd_req->r_oid_len = strlen(obj_request->object_name);
        rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
        memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

        osd_req->r_file_layout = rbd_dev->layout;       /* struct */

        return osd_req;
}

/*
 * Create a copyup osd request based on the information in the
 * object request supplied.  A copyup request has two osd ops,
 * a copyup method call, and a "normal" write request.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request;
        struct ceph_snap_context *snapc;
        struct rbd_device *rbd_dev;
        struct ceph_osd_client *osdc;
        struct ceph_osd_request *osd_req;

        rbd_assert(obj_request_img_data_test(obj_request));
        img_request = obj_request->img_request;
        rbd_assert(img_request);
        rbd_assert(img_request_write_test(img_request));

        /* Allocate and initialize the request, for the two ops */

        snapc = img_request->snapc;
        rbd_dev = img_request->rbd_dev;
        osdc = &rbd_dev->rbd_client->client->osdc;
        osd_req = ceph_osdc_alloc_request(osdc, snapc, 2, false, GFP_ATOMIC);
        if (!osd_req)
                return NULL;    /* ENOMEM */

        osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
        osd_req->r_callback = rbd_osd_req_callback;
        osd_req->r_priv = obj_request;

        osd_req->r_oid_len = strlen(obj_request->object_name);
        rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
        memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

        osd_req->r_file_layout = rbd_dev->layout;       /* struct */

        return osd_req;
}
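
/*
 * Resulting op layout (illustrative; the ops are filled in by
 * rbd_img_obj_parent_read_full_callback() below):
 *
 *      op 0: CEPH_OSD_OP_CALL   class method "rbd" "copyup", carrying
 *            the data read from the parent image as its payload
 *      op 1: CEPH_OSD_OP_WRITE  the original write
 *
 * The copyup method populates the target object from the parent data
 * when the object does not yet exist, after which the write in op 1
 * is applied on top of it.
 */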

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
        ceph_osdc_put_request(osd_req);
}

/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
                                                u64 offset, u64 length,
                                                enum obj_request_type type)
{
        struct rbd_obj_request *obj_request;
        size_t size;
        char *name;

        rbd_assert(obj_request_type_valid(type));

        size = strlen(object_name) + 1;
        name = kmalloc(size, GFP_KERNEL);
        if (!name)
                return NULL;

        obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
        if (!obj_request) {
                kfree(name);
                return NULL;
        }

        obj_request->object_name = memcpy(name, object_name, size);
        obj_request->offset = offset;
        obj_request->length = length;
        obj_request->flags = 0;
        obj_request->which = BAD_WHICH;
        obj_request->type = type;
        INIT_LIST_HEAD(&obj_request->links);
        init_completion(&obj_request->completion);
        kref_init(&obj_request->kref);

        dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
                offset, length, (int)type, obj_request);

        return obj_request;
}

static void rbd_obj_request_destroy(struct kref *kref)
{
        struct rbd_obj_request *obj_request;

        obj_request = container_of(kref, struct rbd_obj_request, kref);

        dout("%s: obj %p\n", __func__, obj_request);

        rbd_assert(obj_request->img_request == NULL);
        rbd_assert(obj_request->which == BAD_WHICH);

        if (obj_request->osd_req)
                rbd_osd_req_destroy(obj_request->osd_req);

        rbd_assert(obj_request_type_valid(obj_request->type));
        switch (obj_request->type) {
        case OBJ_REQUEST_NODATA:
                break;          /* Nothing to do */
        case OBJ_REQUEST_BIO:
                if (obj_request->bio_list)
                        bio_chain_put(obj_request->bio_list);
                break;
        case OBJ_REQUEST_PAGES:
                if (obj_request->pages)
                        ceph_release_page_vector(obj_request->pages,
                                                obj_request->page_count);
                break;
        }

        kfree(obj_request->object_name);
        obj_request->object_name = NULL;
        kmem_cache_free(rbd_obj_request_cache, obj_request);
}

/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
        rbd_dev_remove_parent(rbd_dev);
        rbd_spec_put(rbd_dev->parent_spec);
        rbd_dev->parent_spec = NULL;
        rbd_dev->parent_overlap = 0;
}

/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
        int counter;

        if (!rbd_dev->parent_spec)
                return;

        counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
        if (counter > 0)
                return;

        /* Last reference; clean up parent data structures */

        if (!counter)
                rbd_dev_unparent(rbd_dev);
        else
                rbd_warn(rbd_dev, "parent reference underflow\n");
}

/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * We must get the reference before checking for the overlap to
 * coordinate properly with zeroing the parent overlap in
 * rbd_dev_v2_parent_info() when an image gets flattened.  We
 * drop it again if there is no overlap.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
        int counter;

        if (!rbd_dev->parent_spec)
                return false;

        counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
        if (counter > 0 && rbd_dev->parent_overlap)
                return true;

        /* Image was flattened, but parent is not yet torn down */

        if (counter < 0)
                rbd_warn(rbd_dev, "parent reference overflow\n");

        return false;
}

/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
                                        struct rbd_device *rbd_dev,
                                        u64 offset, u64 length,
                                        bool write_request)
{
        struct rbd_img_request *img_request;

        img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_ATOMIC);
        if (!img_request)
                return NULL;

        if (write_request) {
                down_read(&rbd_dev->header_rwsem);
                ceph_get_snap_context(rbd_dev->header.snapc);
                up_read(&rbd_dev->header_rwsem);
        }

        img_request->rq = NULL;
        img_request->rbd_dev = rbd_dev;
        img_request->offset = offset;
        img_request->length = length;
        img_request->flags = 0;
        if (write_request) {
                img_request_write_set(img_request);
                img_request->snapc = rbd_dev->header.snapc;
        } else {
                img_request->snap_id = rbd_dev->spec->snap_id;
        }
        if (rbd_dev_parent_get(rbd_dev))
                img_request_layered_set(img_request);
        spin_lock_init(&img_request->completion_lock);
        img_request->next_completion = 0;
        img_request->callback = NULL;
        img_request->result = 0;
        img_request->obj_request_count = 0;
        INIT_LIST_HEAD(&img_request->obj_requests);
        kref_init(&img_request->kref);

        dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
                write_request ? "write" : "read", offset, length,
                img_request);

        return img_request;
}

static void rbd_img_request_destroy(struct kref *kref)
{
        struct rbd_img_request *img_request;
        struct rbd_obj_request *obj_request;
        struct rbd_obj_request *next_obj_request;

        img_request = container_of(kref, struct rbd_img_request, kref);

        dout("%s: img %p\n", __func__, img_request);

        for_each_obj_request_safe(img_request, obj_request, next_obj_request)
                rbd_img_obj_request_del(img_request, obj_request);
        rbd_assert(img_request->obj_request_count == 0);

        if (img_request_layered_test(img_request)) {
                img_request_layered_clear(img_request);
                rbd_dev_parent_put(img_request->rbd_dev);
        }

        if (img_request_write_test(img_request))
                ceph_put_snap_context(img_request->snapc);

        kmem_cache_free(rbd_img_request_cache, img_request);
}

static struct rbd_img_request *rbd_parent_request_create(
                                        struct rbd_obj_request *obj_request,
                                        u64 img_offset, u64 length)
{
        struct rbd_img_request *parent_request;
        struct rbd_device *rbd_dev;

        rbd_assert(obj_request->img_request);
        rbd_dev = obj_request->img_request->rbd_dev;

        parent_request = rbd_img_request_create(rbd_dev->parent,
                                                img_offset, length, false);
        if (!parent_request)
                return NULL;

        img_request_child_set(parent_request);
        rbd_obj_request_get(obj_request);
        parent_request->obj_request = obj_request;

        return parent_request;
}

static void rbd_parent_request_destroy(struct kref *kref)
{
        struct rbd_img_request *parent_request;
        struct rbd_obj_request *orig_request;

        parent_request = container_of(kref, struct rbd_img_request, kref);
        orig_request = parent_request->obj_request;

        parent_request->obj_request = NULL;
        rbd_obj_request_put(orig_request);
        img_request_child_clear(parent_request);

        rbd_img_request_destroy(kref);
}

static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request;
        unsigned int xferred;
        int result;
        bool more;

        rbd_assert(obj_request_img_data_test(obj_request));
        img_request = obj_request->img_request;

        rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
        xferred = (unsigned int)obj_request->xferred;
        result = obj_request->result;
        if (result) {
                struct rbd_device *rbd_dev = img_request->rbd_dev;

                rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n",
                        img_request_write_test(img_request) ? "write" : "read",
                        obj_request->length, obj_request->img_offset,
                        obj_request->offset);
                rbd_warn(rbd_dev, "  result %d xferred %x\n",
                        result, xferred);
                if (!img_request->result)
                        img_request->result = result;
        }

        /* Image object requests don't own their page array */

        if (obj_request->type == OBJ_REQUEST_PAGES) {
                obj_request->pages = NULL;
                obj_request->page_count = 0;
        }

        if (img_request_child_test(img_request)) {
                rbd_assert(img_request->obj_request != NULL);
                more = obj_request->which < img_request->obj_request_count - 1;
        } else {
                rbd_assert(img_request->rq != NULL);
                more = blk_end_request(img_request->rq, result, xferred);
        }

        return more;
}

static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request;
        u32 which = obj_request->which;
        bool more = true;

        rbd_assert(obj_request_img_data_test(obj_request));
        img_request = obj_request->img_request;

        dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
        rbd_assert(img_request != NULL);
        rbd_assert(img_request->obj_request_count > 0);
        rbd_assert(which != BAD_WHICH);
        rbd_assert(which < img_request->obj_request_count);
        rbd_assert(which >= img_request->next_completion);

        spin_lock_irq(&img_request->completion_lock);
        if (which != img_request->next_completion)
                goto out;

        for_each_obj_request_from(img_request, obj_request) {
                rbd_assert(more);
                rbd_assert(which < img_request->obj_request_count);

                if (!obj_request_done_test(obj_request))
                        break;
                more = rbd_img_obj_end_request(obj_request);
                which++;
        }

        rbd_assert(more ^ (which == img_request->obj_request_count));
        img_request->next_completion = which;
out:
        spin_unlock_irq(&img_request->completion_lock);

        if (!more)
                rbd_img_request_complete(img_request);
}
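
/*
 * Illustrative completion ordering (comment only): suppose object
 * requests 0..2 are outstanding and request 2 completes first.
 * Since which (2) != next_completion (0), nothing is ended yet.
 * When request 0 completes, the loop above ends request 0, stops at
 * the not-yet-done request 1, and sets next_completion to 1.  When
 * request 1 finally completes, both it and the already-done request
 * 2 are ended, so the block layer sees byte ranges finish in order.
 */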
2155 * Split up an image request into one or more object requests, each
2156 * to a different object. The "type" parameter indicates whether
2157 * "data_desc" is the pointer to the head of a list of bio
2158 * structures, or the base of a page array. In either case this
2159 * function assumes data_desc describes memory sufficient to hold
2160 * all data described by the image request.
2162 static int rbd_img_request_fill(struct rbd_img_request *img_request,
2163 enum obj_request_type type,
2166 struct rbd_device *rbd_dev = img_request->rbd_dev;
2167 struct rbd_obj_request *obj_request = NULL;
2168 struct rbd_obj_request *next_obj_request;
2169 bool write_request = img_request_write_test(img_request);
2170 struct bio *bio_list = 0;
2171 unsigned int bio_offset = 0;
2172 struct page **pages = 0;
2177 dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2178 (int)type, data_desc);
2180 opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ;
2181 img_offset = img_request->offset;
2182 resid = img_request->length;
2183 rbd_assert(resid > 0);
2185 if (type == OBJ_REQUEST_BIO) {
2186 bio_list = data_desc;
2187 rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT);
2189 rbd_assert(type == OBJ_REQUEST_PAGES);
2194 struct ceph_osd_request *osd_req;
2195 const char *object_name;
2199 object_name = rbd_segment_name(rbd_dev, img_offset);
2202 offset = rbd_segment_offset(rbd_dev, img_offset);
2203 length = rbd_segment_length(rbd_dev, img_offset, resid);
2204 obj_request = rbd_obj_request_create(object_name,
2205 offset, length, type);
2206 /* object request has its own copy of the object name */
2207 rbd_segment_name_free(object_name);
2211 if (type == OBJ_REQUEST_BIO) {
2212 unsigned int clone_size;
2214 rbd_assert(length <= (u64)UINT_MAX);
2215 clone_size = (unsigned int)length;
2216 obj_request->bio_list =
2217 bio_chain_clone_range(&bio_list,
2221 if (!obj_request->bio_list)
2224 unsigned int page_count;
2226 obj_request->pages = pages;
2227 page_count = (u32)calc_pages_for(offset, length);
2228 obj_request->page_count = page_count;
2229 if ((offset + length) & ~PAGE_MASK)
2230 page_count--; /* more on last page */
2231 pages += page_count;
2234 osd_req = rbd_osd_req_create(rbd_dev, write_request,
2238 obj_request->osd_req = osd_req;
2239 obj_request->callback = rbd_img_obj_callback;
2241 osd_req_op_extent_init(osd_req, 0, opcode, offset, length,
2243 if (type == OBJ_REQUEST_BIO)
2244 osd_req_op_extent_osd_data_bio(osd_req, 0,
2245 obj_request->bio_list, length);
2247 osd_req_op_extent_osd_data_pages(osd_req, 0,
2248 obj_request->pages, length,
2249 offset & ~PAGE_MASK, false, false);
2252 * set obj_request->img_request before formatting
2253 * the osd_request so that it gets the right snapc
2255 rbd_img_obj_request_add(img_request, obj_request);
2257 rbd_osd_req_format_write(obj_request);
2259 rbd_osd_req_format_read(obj_request);
2261 obj_request->img_offset = img_offset;
2263 img_offset += length;
2270 rbd_obj_request_put(obj_request);
2272 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2273 rbd_obj_request_put(obj_request);
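/*
 * Minimal sketch (not part of the driver): the fill loop above relies
 * on rbd_segment_name()/rbd_segment_offset()/rbd_segment_length() to
 * map a linear image offset onto fixed-size RADOS objects of
 * (1 << obj_order) bytes each. Assuming power-of-two objects, the
 * helpers below re-derive that arithmetic; the names are made up for
 * illustration.
 */
static inline u64 rbd_sketch_seg_offset(u8 obj_order, u64 img_offset)
{
	u64 seg_size = (u64)1 << obj_order;

	return img_offset & (seg_size - 1);	/* offset within the object */
}

static inline u64 rbd_sketch_seg_length(u8 obj_order, u64 img_offset, u64 resid)
{
	u64 seg_size = (u64)1 << obj_order;
	u64 offset = img_offset & (seg_size - 1);

	/* never let a single object request cross an object boundary */
	return min(resid, seg_size - offset);
}
/*
 * With the default 4 MiB objects (obj_order 22), a 6 MiB request at
 * image offset 3 MiB is thus split into three object requests of
 * 1 MiB, 4 MiB and 1 MiB.
 */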
2279 rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
2281 struct rbd_img_request *img_request;
2282 struct rbd_device *rbd_dev;
2283 struct page **pages;
2286 rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2287 rbd_assert(obj_request_img_data_test(obj_request));
2288 img_request = obj_request->img_request;
2289 rbd_assert(img_request);
2291 rbd_dev = img_request->rbd_dev;
2292 rbd_assert(rbd_dev);
2294 pages = obj_request->copyup_pages;
2295 rbd_assert(pages != NULL);
2296 obj_request->copyup_pages = NULL;
2297 page_count = obj_request->copyup_page_count;
2298 rbd_assert(page_count);
2299 obj_request->copyup_page_count = 0;
2300 ceph_release_page_vector(pages, page_count);
2303 * We want the transfer count to reflect the size of the
2304 * original write request. There is no such thing as a
2305 * successful short write, so if the request was successful
2306 * we can just set it to the originally-requested length.
2308 if (!obj_request->result)
2309 obj_request->xferred = obj_request->length;
2311 /* Finish up with the normal image object callback */
2313 rbd_img_obj_callback(obj_request);
2317 rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2319 struct rbd_obj_request *orig_request;
2320 struct ceph_osd_request *osd_req;
2321 struct ceph_osd_client *osdc;
2322 struct rbd_device *rbd_dev;
2323 struct page **pages;
2330 rbd_assert(img_request_child_test(img_request));
2332 /* First get what we need from the image request */
2334 pages = img_request->copyup_pages;
2335 rbd_assert(pages != NULL);
2336 img_request->copyup_pages = NULL;
2337 page_count = img_request->copyup_page_count;
2338 rbd_assert(page_count);
2339 img_request->copyup_page_count = 0;
2341 orig_request = img_request->obj_request;
2342 rbd_assert(orig_request != NULL);
2343 rbd_assert(obj_request_type_valid(orig_request->type));
2344 img_result = img_request->result;
2345 parent_length = img_request->length;
2346 rbd_assert(parent_length == img_request->xferred);
2347 rbd_img_request_put(img_request);
2349 rbd_assert(orig_request->img_request);
2350 rbd_dev = orig_request->img_request->rbd_dev;
2351 rbd_assert(rbd_dev);
2354 * If the overlap has become 0 (most likely because the
2355 * image has been flattened) we need to free the pages
2356 * and re-submit the original write request.
2358 if (!rbd_dev->parent_overlap) {
2359 struct ceph_osd_client *osdc;
2361 ceph_release_page_vector(pages, page_count);
2362 osdc = &rbd_dev->rbd_client->client->osdc;
2363 img_result = rbd_obj_request_submit(osdc, orig_request);
2372 * The original osd request is of no use to us any more.
2373 * We need a new one that can hold the two ops in a copyup
2374 * request. Allocate the new copyup osd request for the
2375 * original request, and release the old one.
2377 img_result = -ENOMEM;
2378 osd_req = rbd_osd_req_create_copyup(orig_request);
2381 rbd_osd_req_destroy(orig_request->osd_req);
2382 orig_request->osd_req = osd_req;
2383 orig_request->copyup_pages = pages;
2384 orig_request->copyup_page_count = page_count;
2386 /* Initialize the copyup op */
2388 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2389 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
2390 false, false);
2392 /* Then the original write request op */
2394 offset = orig_request->offset;
2395 length = orig_request->length;
2396 osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE,
2397 offset, length, 0, 0);
2398 if (orig_request->type == OBJ_REQUEST_BIO)
2399 osd_req_op_extent_osd_data_bio(osd_req, 1,
2400 orig_request->bio_list, length);
2402 osd_req_op_extent_osd_data_pages(osd_req, 1,
2403 orig_request->pages, length,
2404 offset & ~PAGE_MASK, false, false);
2406 rbd_osd_req_format_write(orig_request);
2408 /* All set, send it off. */
2410 orig_request->callback = rbd_img_obj_copyup_callback;
2411 osdc = &rbd_dev->rbd_client->client->osdc;
2412 img_result = rbd_obj_request_submit(osdc, orig_request);
2416 /* Record the error code and complete the request */
2418 orig_request->result = img_result;
2419 orig_request->xferred = 0;
2420 obj_request_done_set(orig_request);
2421 rbd_obj_request_complete(orig_request);
2425 * Read from the parent image the range of data that covers the
2426 * entire target of the given object request. This is used for
2427 * satisfying a layered image write request when the target of an
2428 * object request from the image request does not exist.
2430 * A page array big enough to hold the returned data is allocated
2431 * and supplied to rbd_img_request_fill() as the "data descriptor."
2432 * When the read completes, this page array will be transferred to
2433 * the original object request for the copyup operation.
2435 * If an error occurs, record it as the result of the original
2436 * object request and mark it done so it gets completed.
2438 static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2440 struct rbd_img_request *img_request = NULL;
2441 struct rbd_img_request *parent_request = NULL;
2442 struct rbd_device *rbd_dev;
2445 struct page **pages = NULL;
2449 rbd_assert(obj_request_img_data_test(obj_request));
2450 rbd_assert(obj_request_type_valid(obj_request->type));
2452 img_request = obj_request->img_request;
2453 rbd_assert(img_request != NULL);
2454 rbd_dev = img_request->rbd_dev;
2455 rbd_assert(rbd_dev->parent != NULL);
2458 * Determine the byte range covered by the object in the
2459 * child image to which the original request was to be sent.
2461 img_offset = obj_request->img_offset - obj_request->offset;
2462 length = (u64)1 << rbd_dev->header.obj_order;
2465 * There is no defined parent data beyond the parent
2466 * overlap, so limit what we read at that boundary if
2467 * necessary.
2469 if (img_offset + length > rbd_dev->parent_overlap) {
2470 rbd_assert(img_offset < rbd_dev->parent_overlap);
2471 length = rbd_dev->parent_overlap - img_offset;
2475 * Allocate a page array big enough to receive the data read
2478 page_count = (u32)calc_pages_for(0, length);
2479 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2480 if (IS_ERR(pages)) {
2481 result = PTR_ERR(pages);
2487 parent_request = rbd_parent_request_create(obj_request,
2488 img_offset, length);
2489 if (!parent_request)
2492 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2495 parent_request->copyup_pages = pages;
2496 parent_request->copyup_page_count = page_count;
2498 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2499 result = rbd_img_request_submit(parent_request);
2503 parent_request->copyup_pages = NULL;
2504 parent_request->copyup_page_count = 0;
2505 parent_request->obj_request = NULL;
2506 rbd_obj_request_put(obj_request);
2509 ceph_release_page_vector(pages, page_count);
2511 rbd_img_request_put(parent_request);
2512 obj_request->result = result;
2513 obj_request->xferred = 0;
2514 obj_request_done_set(obj_request);
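/*
 * Worked example (illustrative): with 4 MiB objects (obj_order 22), a
 * layered write to the object covering image range [8 MiB, 12 MiB) on
 * a clone whose parent_overlap is 10 MiB reads only [8 MiB, 10 MiB)
 * from the parent, since no parent data is defined past the overlap:
 *
 *	img_offset = 8 MiB (object start), length = 4 MiB
 *	img_offset + length (12 MiB) > parent_overlap (10 MiB)
 *		=> length = 10 MiB - 8 MiB = 2 MiB
 *
 * The copyup then carries just those 2 MiB; the remainder of the
 * object is left unwritten.
 */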
2519 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2521 struct rbd_obj_request *orig_request;
2522 struct rbd_device *rbd_dev;
2525 rbd_assert(!obj_request_img_data_test(obj_request));
2528 * All we need from the object request is the original
2529 * request and the result of the STAT op. Grab those, then
2530 * we're done with the request.
2532 orig_request = obj_request->obj_request;
2533 obj_request->obj_request = NULL;
2534 rbd_obj_request_put(orig_request);
2535 rbd_assert(orig_request);
2536 rbd_assert(orig_request->img_request);
2538 result = obj_request->result;
2539 obj_request->result = 0;
2541 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2542 obj_request, orig_request, result,
2543 obj_request->xferred, obj_request->length);
2544 rbd_obj_request_put(obj_request);
2547 * If the overlap has become 0 (most likely because the
2548 * image has been flattened) we need to free the pages
2549 * and re-submit the original write request.
2551 rbd_dev = orig_request->img_request->rbd_dev;
2552 if (!rbd_dev->parent_overlap) {
2553 struct ceph_osd_client *osdc;
2555 osdc = &rbd_dev->rbd_client->client->osdc;
2556 result = rbd_obj_request_submit(osdc, orig_request);
2562 * Our only purpose here is to determine whether the object
2563 * exists, and we don't want to treat the non-existence as
2564 * an error. If something else comes back, transfer the
2565 * error to the original request and complete it now.
2568 obj_request_existence_set(orig_request, true);
2569 } else if (result == -ENOENT) {
2570 obj_request_existence_set(orig_request, false);
2571 } else if (result) {
2572 orig_request->result = result;
2577 * Resubmit the original request now that we have recorded
2578 * whether the target object exists.
2580 orig_request->result = rbd_img_obj_request_submit(orig_request);
2582 if (orig_request->result)
2583 rbd_obj_request_complete(orig_request);
2586 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2588 struct rbd_obj_request *stat_request;
2589 struct rbd_device *rbd_dev;
2590 struct ceph_osd_client *osdc;
2591 struct page **pages = NULL;
2597 * The response data for a STAT call consists of:
2598 *     le64 length;
2599 *     struct {
2600 *         le32 tv_sec;
2601 *         le32 tv_nsec;
2602 *     } mtime;
2604 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2605 page_count = (u32)calc_pages_for(0, size);
2606 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2608 return PTR_ERR(pages);
2611 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2616 rbd_obj_request_get(obj_request);
2617 stat_request->obj_request = obj_request;
2618 stat_request->pages = pages;
2619 stat_request->page_count = page_count;
2621 rbd_assert(obj_request->img_request);
2622 rbd_dev = obj_request->img_request->rbd_dev;
2623 stat_request->osd_req = rbd_osd_req_create(rbd_dev, false,
2625 if (!stat_request->osd_req)
2627 stat_request->callback = rbd_img_obj_exists_callback;
2629 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
2630 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2631 false, false);
2632 rbd_osd_req_format_read(stat_request);
2634 osdc = &rbd_dev->rbd_client->client->osdc;
2635 ret = rbd_obj_request_submit(osdc, stat_request);
2638 rbd_obj_request_put(obj_request);
2643 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2645 struct rbd_img_request *img_request;
2646 struct rbd_device *rbd_dev;
2649 rbd_assert(obj_request_img_data_test(obj_request));
2651 img_request = obj_request->img_request;
2652 rbd_assert(img_request);
2653 rbd_dev = img_request->rbd_dev;
2656 * Only writes to layered images need special handling.
2657 * Reads and non-layered writes are simple object requests.
2658 * Layered writes that start beyond the end of the overlap
2659 * with the parent have no parent data, so they too are
2660 * simple object requests. Finally, if the target object is
2661 * known to already exist, its parent data has already been
2662 * copied, so a write to the object can also be handled as a
2663 * simple object request.
2665 if (!img_request_write_test(img_request) ||
2666 !img_request_layered_test(img_request) ||
2667 rbd_dev->parent_overlap <= obj_request->img_offset ||
2668 ((known = obj_request_known_test(obj_request)) &&
2669 obj_request_exists_test(obj_request))) {
2671 struct rbd_device *rbd_dev;
2672 struct ceph_osd_client *osdc;
2674 rbd_dev = obj_request->img_request->rbd_dev;
2675 osdc = &rbd_dev->rbd_client->client->osdc;
2677 return rbd_obj_request_submit(osdc, obj_request);
2681 * It's a layered write. The target object might exist but
2682 * we may not know that yet. If we know it doesn't exist,
2683 * start by reading the data for the full target object from
2684 * the parent so we can use it for a copyup to the target.
2687 return rbd_img_obj_parent_read_full(obj_request);
2689 /* We don't know whether the target exists. Go find out. */
2691 return rbd_img_obj_exists_submit(obj_request);
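/*
 * Minimal sketch (hypothetical helper, not in the driver): the test
 * above reads more naturally inverted -- an object request needs the
 * layered-write machinery only when all of the following hold:
 */
static inline bool rbd_sketch_needs_layering(struct rbd_img_request *img_request,
					     struct rbd_obj_request *obj_request)
{
	struct rbd_device *rbd_dev = img_request->rbd_dev;

	return img_request_write_test(img_request) &&	/* a write... */
	       img_request_layered_test(img_request) &&	/* ...to a clone... */
	       obj_request->img_offset < rbd_dev->parent_overlap &&
							/* ...inside the overlap... */
	       !(obj_request_known_test(obj_request) &&	/* ...whose target isn't */
		 obj_request_exists_test(obj_request));	/* known to exist */
}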
2694 static int rbd_img_request_submit(struct rbd_img_request *img_request)
2696 struct rbd_obj_request *obj_request;
2697 struct rbd_obj_request *next_obj_request;
2699 dout("%s: img %p\n", __func__, img_request);
2700 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
2703 ret = rbd_img_obj_request_submit(obj_request);
2711 static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2713 struct rbd_obj_request *obj_request;
2714 struct rbd_device *rbd_dev;
2719 rbd_assert(img_request_child_test(img_request));
2721 /* First get what we need from the image request and release it */
2723 obj_request = img_request->obj_request;
2724 img_xferred = img_request->xferred;
2725 img_result = img_request->result;
2726 rbd_img_request_put(img_request);
2729 * If the overlap has become 0 (most likely because the
2730 * image has been flattened) we need to re-submit the
2733 rbd_assert(obj_request);
2734 rbd_assert(obj_request->img_request);
2735 rbd_dev = obj_request->img_request->rbd_dev;
2736 if (!rbd_dev->parent_overlap) {
2737 struct ceph_osd_client *osdc;
2739 osdc = &rbd_dev->rbd_client->client->osdc;
2740 img_result = rbd_obj_request_submit(osdc, obj_request);
2745 obj_request->result = img_result;
2746 if (obj_request->result)
2750 * We need to zero anything beyond the parent overlap
2751 * boundary. Since rbd_img_obj_request_read_callback()
2752 * will zero anything beyond the end of a short read, an
2753 * easy way to do this is to pretend the data from the
2754 * parent came up short--ending at the overlap boundary.
2756 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
2757 obj_end = obj_request->img_offset + obj_request->length;
2758 if (obj_end > rbd_dev->parent_overlap) {
2761 if (obj_request->img_offset < rbd_dev->parent_overlap)
2762 xferred = rbd_dev->parent_overlap -
2763 obj_request->img_offset;
2765 obj_request->xferred = min(img_xferred, xferred);
2767 obj_request->xferred = img_xferred;
2770 rbd_img_obj_request_read_callback(obj_request);
2771 rbd_obj_request_complete(obj_request);
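/*
 * Worked example (illustrative): a child read of 2 MiB at img_offset
 * 3 MiB against a parent_overlap of 4 MiB gives obj_end = 5 MiB, so
 * xferred is clamped to 4 MiB - 3 MiB = 1 MiB. The read callback then
 * zero-fills the final 1 MiB, exactly as if the parent had returned a
 * short read ending at the overlap boundary.
 */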
2774 static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
2776 struct rbd_img_request *img_request;
2779 rbd_assert(obj_request_img_data_test(obj_request));
2780 rbd_assert(obj_request->img_request != NULL);
2781 rbd_assert(obj_request->result == (s32) -ENOENT);
2782 rbd_assert(obj_request_type_valid(obj_request->type));
2784 /* rbd_read_finish(obj_request, obj_request->length); */
2785 img_request = rbd_parent_request_create(obj_request,
2786 obj_request->img_offset,
2787 obj_request->length);
2792 if (obj_request->type == OBJ_REQUEST_BIO)
2793 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2794 obj_request->bio_list);
2796 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
2797 obj_request->pages);
2801 img_request->callback = rbd_img_parent_read_callback;
2802 result = rbd_img_request_submit(img_request);
2809 rbd_img_request_put(img_request);
2810 obj_request->result = result;
2811 obj_request->xferred = 0;
2812 obj_request_done_set(obj_request);
2815 static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 notify_id)
2817 struct rbd_obj_request *obj_request;
2818 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2821 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2822 OBJ_REQUEST_NODATA);
2827 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2828 if (!obj_request->osd_req)
2830 obj_request->callback = rbd_obj_request_put;
2832 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
2833 notify_id, 0, 0);
2834 rbd_osd_req_format_read(obj_request);
2836 ret = rbd_obj_request_submit(osdc, obj_request);
2839 rbd_obj_request_put(obj_request);
2844 static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2846 struct rbd_device *rbd_dev = (struct rbd_device *)data;
2852 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
2853 rbd_dev->header_name, (unsigned long long)notify_id,
2854 (unsigned int)opcode);
2855 ret = rbd_dev_refresh(rbd_dev);
2857 rbd_warn(rbd_dev, "header refresh error (%d)\n", ret);
2859 rbd_obj_notify_ack(rbd_dev, notify_id);
2863 * Request sync osd watch/unwatch. The value of "start" determines
2864 * whether a watch request is being initiated or torn down.
2866 static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, bool start)
2868 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2869 struct rbd_obj_request *obj_request;
2872 rbd_assert(start ^ !!rbd_dev->watch_event);
2873 rbd_assert(start ^ !!rbd_dev->watch_request);
2876 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
2877 &rbd_dev->watch_event);
2880 rbd_assert(rbd_dev->watch_event != NULL);
2884 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2885 OBJ_REQUEST_NODATA);
2889 obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, obj_request);
2890 if (!obj_request->osd_req)
2894 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
2896 ceph_osdc_unregister_linger_request(osdc,
2897 rbd_dev->watch_request->osd_req);
2899 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
2900 rbd_dev->watch_event->cookie, 0, start ? 1 : 0);
2901 rbd_osd_req_format_write(obj_request);
2903 ret = rbd_obj_request_submit(osdc, obj_request);
2906 ret = rbd_obj_request_wait(obj_request);
2909 ret = obj_request->result;
2914 * A watch request is set to linger, so the underlying osd
2915 * request won't go away until we unregister it. We retain
2916 * a pointer to the object request during that time (in
2917 * rbd_dev->watch_request), so we'll keep a reference to
2918 * it. We'll drop that reference (below) after we've
2919 * unregistered it.
2922 rbd_dev->watch_request = obj_request;
2927 /* We have successfully torn down the watch request */
2929 rbd_obj_request_put(rbd_dev->watch_request);
2930 rbd_dev->watch_request = NULL;
2932 /* Cancel the event if we're tearing down, or on error */
2933 ceph_osdc_cancel_event(rbd_dev->watch_event);
2934 rbd_dev->watch_event = NULL;
2936 rbd_obj_request_put(obj_request);
2942 * Synchronous osd object method call. Returns the number of bytes
2943 * returned in the outbound buffer, or a negative error code.
2945 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
2946 const char *object_name,
2947 const char *class_name,
2948 const char *method_name,
2949 const void *outbound,
2950 size_t outbound_size,
2952 size_t inbound_size)
2954 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2955 struct rbd_obj_request *obj_request;
2956 struct page **pages;
2961 * Method calls are ultimately read operations. The result
2962 * should be placed into the inbound buffer provided. They
2963 * also supply outbound data--parameters for the object
2964 * method. Currently if this is present it will be a
2965 * snapshot id.
2967 page_count = (u32)calc_pages_for(0, inbound_size);
2968 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2970 return PTR_ERR(pages);
2973 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
2978 obj_request->pages = pages;
2979 obj_request->page_count = page_count;
2981 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2982 if (!obj_request->osd_req)
2985 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
2986 class_name, method_name);
2987 if (outbound_size) {
2988 struct ceph_pagelist *pagelist;
2990 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
2994 ceph_pagelist_init(pagelist);
2995 ceph_pagelist_append(pagelist, outbound, outbound_size);
2996 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
2997 pagelist);
2999 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
3000 obj_request->pages, inbound_size,
3001 0, false, false);
3002 rbd_osd_req_format_read(obj_request);
3004 ret = rbd_obj_request_submit(osdc, obj_request);
3007 ret = rbd_obj_request_wait(obj_request);
3011 ret = obj_request->result;
3015 rbd_assert(obj_request->xferred < (u64)INT_MAX);
3016 ret = (int)obj_request->xferred;
3017 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
3020 rbd_obj_request_put(obj_request);
3022 ceph_release_page_vector(pages, page_count);
3027 static void rbd_request_fn(struct request_queue *q)
3028 __releases(q->queue_lock) __acquires(q->queue_lock)
3030 struct rbd_device *rbd_dev = q->queuedata;
3031 bool read_only = rbd_dev->mapping.read_only;
3035 while ((rq = blk_fetch_request(q))) {
3036 bool write_request = rq_data_dir(rq) == WRITE;
3037 struct rbd_img_request *img_request;
3041 /* Ignore any non-FS requests that filter through. */
3043 if (rq->cmd_type != REQ_TYPE_FS) {
3044 dout("%s: non-fs request type %d\n", __func__,
3045 (int) rq->cmd_type);
3046 __blk_end_request_all(rq, 0);
3050 /* Ignore/skip any zero-length requests */
3052 offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
3053 length = (u64) blk_rq_bytes(rq);
3056 dout("%s: zero-length request\n", __func__);
3057 __blk_end_request_all(rq, 0);
3061 spin_unlock_irq(q->queue_lock);
3063 /* Disallow writes to a read-only device */
3065 if (write_request) {
3069 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
3073 * Quit early if the mapped snapshot no longer
3074 * exists. It's still possible the snapshot will
3075 * have disappeared by the time our request arrives
3076 * at the osd, but there's no sense in sending it if
3079 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
3080 dout("request for non-existent snapshot");
3081 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
3087 if (offset && length > U64_MAX - offset + 1) {
3088 rbd_warn(rbd_dev, "bad request range (%llu~%llu)\n",
3089 offset, length);
3090 goto end_request; /* Shouldn't happen */
3094 if (offset + length > rbd_dev->mapping.size) {
3095 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)\n",
3096 offset, length, rbd_dev->mapping.size);
3101 img_request = rbd_img_request_create(rbd_dev, offset, length,
3106 img_request->rq = rq;
3108 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3111 result = rbd_img_request_submit(img_request);
3113 rbd_img_request_put(img_request);
3115 spin_lock_irq(q->queue_lock);
3117 rbd_warn(rbd_dev, "%s %llx at %llx result %d\n",
3118 write_request ? "write" : "read",
3119 length, offset, result);
3121 __blk_end_request_all(rq, result);
3127 * The bio merge callback for the request queue. Makes sure that we
3128 * don't create a bio that spans multiple osd objects. One exception
3129 * would be a single-page bio, which we handle later in bio_chain_clone_range()
3131 static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
3132 struct bio_vec *bvec)
3134 struct rbd_device *rbd_dev = q->queuedata;
3135 sector_t sector_offset;
3136 sector_t sectors_per_obj;
3137 sector_t obj_sector_offset;
3141 * Find how far into its rbd object the bio's start sector falls;
3142 * the partition-relative start sector is first offset to make it
3143 * relative to the enclosing device.
3145 sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
3146 sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
3147 obj_sector_offset = sector_offset & (sectors_per_obj - 1);
3150 * Compute the number of bytes from that offset to the end
3151 * of the object. Account for what's already used by the bio.
3153 ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
3154 if (ret > bmd->bi_size)
3155 ret -= bmd->bi_size;
3160 * Don't send back more than was asked for. And if the bio
3161 * was empty, let the whole thing through because: "Note
3162 * that a block device *must* allow a single page to be
3163 * added to an empty bio."
3165 rbd_assert(bvec->bv_len <= PAGE_SIZE);
3166 if (ret > (int) bvec->bv_len || !bmd->bi_size)
3167 ret = (int) bvec->bv_len;
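/*
 * Worked example (illustrative): with 4 MiB objects (obj_order 22),
 * sectors_per_obj = 1 << (22 - 9) = 8192. For a bio starting at
 * device sector 8000 with 64 KiB (bi_size) already queued:
 *
 *	obj_sector_offset = 8000 & 8191 = 8000
 *	bytes to object end = (8192 - 8000) << 9 = 98304
 *	minus bi_size = 98304 - 65536 = 32768
 *
 * so at most 32 KiB of this bvec may join the bio before the request
 * would spill into the next object.
 */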
3172 static void rbd_free_disk(struct rbd_device *rbd_dev)
3174 struct gendisk *disk = rbd_dev->disk;
3179 rbd_dev->disk = NULL;
3180 if (disk->flags & GENHD_FL_UP) {
3183 blk_cleanup_queue(disk->queue);
3188 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3189 const char *object_name,
3190 u64 offset, u64 length, void *buf)
3193 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3194 struct rbd_obj_request *obj_request;
3195 struct page **pages = NULL;
3200 page_count = (u32) calc_pages_for(offset, length);
3201 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3203 ret = PTR_ERR(pages);
3206 obj_request = rbd_obj_request_create(object_name, offset, length,
3211 obj_request->pages = pages;
3212 obj_request->page_count = page_count;
3214 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
3215 if (!obj_request->osd_req)
3218 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
3219 offset, length, 0, 0);
3220 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
3221 obj_request->pages,
3222 obj_request->length,
3223 obj_request->offset & ~PAGE_MASK,
3224 false, false);
3225 rbd_osd_req_format_read(obj_request);
3227 ret = rbd_obj_request_submit(osdc, obj_request);
3230 ret = rbd_obj_request_wait(obj_request);
3234 ret = obj_request->result;
3238 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
3239 size = (size_t) obj_request->xferred;
3240 ceph_copy_from_page_vector(pages, buf, 0, size);
3241 rbd_assert(size <= (size_t)INT_MAX);
3245 rbd_obj_request_put(obj_request);
3247 ceph_release_page_vector(pages, page_count);
3253 * Read the complete header for the given rbd device. On successful
3254 * return, the rbd_dev->header field will contain up-to-date
3255 * information about the image.
3257 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
3259 struct rbd_image_header_ondisk *ondisk = NULL;
3266 * The complete header will include an array of its 64-bit
3267 * snapshot ids, followed by the names of those snapshots as
3268 * a contiguous block of NUL-terminated strings. Note that
3269 * the number of snapshots could change by the time we read
3270 * it in, in which case we re-read it.
3277 size = sizeof (*ondisk);
3278 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3280 ondisk = kmalloc(size, GFP_KERNEL);
3284 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
3288 if ((size_t)ret < size) {
3290 rbd_warn(rbd_dev, "short header read (want %zu got %d)",
3294 if (!rbd_dev_ondisk_valid(ondisk)) {
3296 rbd_warn(rbd_dev, "invalid header");
3300 names_size = le64_to_cpu(ondisk->snap_names_len);
3301 want_count = snap_count;
3302 snap_count = le32_to_cpu(ondisk->snap_count);
3303 } while (snap_count != want_count);
3305 ret = rbd_header_from_disk(rbd_dev, ondisk);
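/*
 * Illustrative note (layout per the sizing above): the v1 on-disk
 * header is a fixed struct followed by snap_count id/size records and
 * then the snapshot names as one block of NUL-terminated strings. If
 * a snapshot is created or deleted between reads, the freshly-read
 * snap_count differs from want_count and the do/while loop simply
 * resizes the buffer and reads again.
 */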
3313 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3314 * has disappeared from the (just updated) snapshot context.
3316 static void rbd_exists_validate(struct rbd_device *rbd_dev)
3320 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3323 snap_id = rbd_dev->spec->snap_id;
3324 if (snap_id == CEPH_NOSNAP)
3327 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3328 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3331 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3336 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
3337 down_write(&rbd_dev->header_rwsem);
3338 mapping_size = rbd_dev->mapping.size;
3339 if (rbd_dev->image_format == 1)
3340 ret = rbd_dev_v1_header_info(rbd_dev);
3342 ret = rbd_dev_v2_header_info(rbd_dev);
3344 /* If it's a mapped snapshot, validate its EXISTS flag */
3346 rbd_exists_validate(rbd_dev);
3347 up_write(&rbd_dev->header_rwsem);
3349 if (mapping_size != rbd_dev->mapping.size) {
3352 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3353 dout("setting size to %llu sectors", (unsigned long long)size);
3354 set_capacity(rbd_dev->disk, size);
3355 revalidate_disk(rbd_dev->disk);
3361 static int rbd_init_disk(struct rbd_device *rbd_dev)
3363 struct gendisk *disk;
3364 struct request_queue *q;
3367 /* create gendisk info */
3368 disk = alloc_disk(RBD_MINORS_PER_MAJOR);
3372 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
3374 disk->major = rbd_dev->major;
3375 disk->first_minor = 0;
3376 disk->fops = &rbd_bd_ops;
3377 disk->private_data = rbd_dev;
3379 q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
3383 /* We use the default size, but let's be explicit about it. */
3384 blk_queue_physical_block_size(q, SECTOR_SIZE);
3386 /* set io sizes to object size */
3387 segment_size = rbd_obj_bytes(&rbd_dev->header);
3388 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
3389 blk_queue_max_segment_size(q, segment_size);
3390 blk_queue_io_min(q, segment_size);
3391 blk_queue_io_opt(q, segment_size);
3393 blk_queue_merge_bvec(q, rbd_merge_bvec);
3396 q->queuedata = rbd_dev;
3398 rbd_dev->disk = disk;
3411 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3413 return container_of(dev, struct rbd_device, dev);
3416 static ssize_t rbd_size_show(struct device *dev,
3417 struct device_attribute *attr, char *buf)
3419 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3421 return sprintf(buf, "%llu\n",
3422 (unsigned long long)rbd_dev->mapping.size);
3426 * Note this shows the features for whatever's mapped, which is not
3427 * necessarily the base image.
3429 static ssize_t rbd_features_show(struct device *dev,
3430 struct device_attribute *attr, char *buf)
3432 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3434 return sprintf(buf, "0x%016llx\n",
3435 (unsigned long long)rbd_dev->mapping.features);
3438 static ssize_t rbd_major_show(struct device *dev,
3439 struct device_attribute *attr, char *buf)
3441 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3444 return sprintf(buf, "%d\n", rbd_dev->major);
3446 return sprintf(buf, "(none)\n");
3450 static ssize_t rbd_client_id_show(struct device *dev,
3451 struct device_attribute *attr, char *buf)
3453 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3455 return sprintf(buf, "client%lld\n",
3456 ceph_client_id(rbd_dev->rbd_client->client));
3459 static ssize_t rbd_pool_show(struct device *dev,
3460 struct device_attribute *attr, char *buf)
3462 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3464 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
3467 static ssize_t rbd_pool_id_show(struct device *dev,
3468 struct device_attribute *attr, char *buf)
3470 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3472 return sprintf(buf, "%llu\n",
3473 (unsigned long long) rbd_dev->spec->pool_id);
3476 static ssize_t rbd_name_show(struct device *dev,
3477 struct device_attribute *attr, char *buf)
3479 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3481 if (rbd_dev->spec->image_name)
3482 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3484 return sprintf(buf, "(unknown)\n");
3487 static ssize_t rbd_image_id_show(struct device *dev,
3488 struct device_attribute *attr, char *buf)
3490 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3492 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
3496 * Shows the name of the currently-mapped snapshot (or
3497 * RBD_SNAP_HEAD_NAME for the base image).
3499 static ssize_t rbd_snap_show(struct device *dev,
3500 struct device_attribute *attr,
3503 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3505 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
3509 * For an rbd v2 image, shows the pool id, image id, and snapshot id
3510 * for the parent image. If there is no parent, simply shows
3511 * "(no parent image)".
3513 static ssize_t rbd_parent_show(struct device *dev,
3514 struct device_attribute *attr,
3517 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3518 struct rbd_spec *spec = rbd_dev->parent_spec;
3523 return sprintf(buf, "(no parent image)\n");
3525 count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
3526 (unsigned long long) spec->pool_id, spec->pool_name);
3531 count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
3532 spec->image_name ? spec->image_name : "(unknown)");
3537 count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
3538 (unsigned long long) spec->snap_id, spec->snap_name);
3543 count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
3548 return (ssize_t) (bufp - buf);
3551 static ssize_t rbd_image_refresh(struct device *dev,
3552 struct device_attribute *attr,
3556 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3559 ret = rbd_dev_refresh(rbd_dev);
3561 rbd_warn(rbd_dev, "manual header refresh error (%d)\n", ret);
3563 return ret < 0 ? ret : size;
3566 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
3567 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
3568 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
3569 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3570 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
3571 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
3572 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
3573 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
3574 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3575 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
3576 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
3578 static struct attribute *rbd_attrs[] = {
3579 &dev_attr_size.attr,
3580 &dev_attr_features.attr,
3581 &dev_attr_major.attr,
3582 &dev_attr_client_id.attr,
3583 &dev_attr_pool.attr,
3584 &dev_attr_pool_id.attr,
3585 &dev_attr_name.attr,
3586 &dev_attr_image_id.attr,
3587 &dev_attr_current_snap.attr,
3588 &dev_attr_parent.attr,
3589 &dev_attr_refresh.attr,
3593 static struct attribute_group rbd_attr_group = {
3597 static const struct attribute_group *rbd_attr_groups[] = {
3602 static void rbd_sysfs_dev_release(struct device *dev)
3606 static struct device_type rbd_device_type = {
3608 .groups = rbd_attr_groups,
3609 .release = rbd_sysfs_dev_release,
3612 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
3614 kref_get(&spec->kref);
3619 static void rbd_spec_free(struct kref *kref);
3620 static void rbd_spec_put(struct rbd_spec *spec)
3623 kref_put(&spec->kref, rbd_spec_free);
3626 static struct rbd_spec *rbd_spec_alloc(void)
3628 struct rbd_spec *spec;
3630 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
3633 kref_init(&spec->kref);
3638 static void rbd_spec_free(struct kref *kref)
3640 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
3642 kfree(spec->pool_name);
3643 kfree(spec->image_id);
3644 kfree(spec->image_name);
3645 kfree(spec->snap_name);
3649 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
3650 struct rbd_spec *spec)
3652 struct rbd_device *rbd_dev;
3654 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
3658 spin_lock_init(&rbd_dev->lock);
3660 atomic_set(&rbd_dev->parent_ref, 0);
3661 INIT_LIST_HEAD(&rbd_dev->node);
3662 init_rwsem(&rbd_dev->header_rwsem);
3664 rbd_dev->spec = spec;
3665 rbd_dev->rbd_client = rbdc;
3667 /* Initialize the layout used for all rbd requests */
3669 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3670 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
3671 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3672 rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
3677 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
3679 rbd_put_client(rbd_dev->rbd_client);
3680 rbd_spec_put(rbd_dev->spec);
3685 * Get the size and object order for an image snapshot, or if
3686 * snap_id is CEPH_NOSNAP, gets this information for the base
3689 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
3690 u8 *order, u64 *snap_size)
3692 __le64 snapid = cpu_to_le64(snap_id);
3697 } __attribute__ ((packed)) size_buf = { 0 };
3699 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3700 "rbd", "get_size",
3701 &snapid, sizeof (snapid),
3702 &size_buf, sizeof (size_buf));
3703 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3706 if (ret < sizeof (size_buf))
3710 *order = size_buf.order;
3711 *snap_size = le64_to_cpu(size_buf.size);
3713 dout(" snap_id 0x%016llx order = %u, snap_size = %llu\n",
3714 (unsigned long long)snap_id, (unsigned int)*order,
3715 (unsigned long long)*snap_size);
3720 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
3722 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
3723 &rbd_dev->header.obj_order,
3724 &rbd_dev->header.image_size);
3727 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
3733 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
3737 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3738 "rbd", "get_object_prefix", NULL, 0,
3739 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
3740 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3745 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
3746 p + ret, NULL, GFP_NOIO);
3749 if (IS_ERR(rbd_dev->header.object_prefix)) {
3750 ret = PTR_ERR(rbd_dev->header.object_prefix);
3751 rbd_dev->header.object_prefix = NULL;
3753 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
3761 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
3764 __le64 snapid = cpu_to_le64(snap_id);
3768 } __attribute__ ((packed)) features_buf = { 0 };
3772 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3773 "rbd", "get_features",
3774 &snapid, sizeof (snapid),
3775 &features_buf, sizeof (features_buf));
3776 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3779 if (ret < sizeof (features_buf))
3782 incompat = le64_to_cpu(features_buf.incompat);
3783 if (incompat & ~RBD_FEATURES_SUPPORTED)
3786 *snap_features = le64_to_cpu(features_buf.features);
3788 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
3789 (unsigned long long)snap_id,
3790 (unsigned long long)*snap_features,
3791 (unsigned long long)le64_to_cpu(features_buf.incompat));
3796 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
3798 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
3799 &rbd_dev->header.features);
3802 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
3804 struct rbd_spec *parent_spec;
3806 void *reply_buf = NULL;
3816 parent_spec = rbd_spec_alloc();
3820 size = sizeof (__le64) + /* pool_id */
3821 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
3822 sizeof (__le64) + /* snap_id */
3823 sizeof (__le64); /* overlap */
3824 reply_buf = kmalloc(size, GFP_KERNEL);
3830 snapid = cpu_to_le64(CEPH_NOSNAP);
3831 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3832 "rbd", "get_parent",
3833 &snapid, sizeof (snapid),
3834 reply_buf, size);
3835 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3840 end = reply_buf + ret;
3842 ceph_decode_64_safe(&p, end, pool_id, out_err);
3843 if (pool_id == CEPH_NOPOOL) {
3845 * Either the parent never existed, or we have
3846 * record of it but the image got flattened so it no
3847 * longer has a parent. When the parent of a
3848 * layered image disappears we immediately set the
3849 * overlap to 0. The effect of this is that all new
3850 * requests will be treated as if the image had no
3851 * parent.
3853 if (rbd_dev->parent_overlap) {
3854 rbd_dev->parent_overlap = 0;
3856 rbd_dev_parent_put(rbd_dev);
3857 pr_info("%s: clone image has been flattened\n",
3858 rbd_dev->disk->disk_name);
3861 goto out; /* No parent? No problem. */
3864 /* The ceph file layout needs to fit pool id in 32 bits */
3867 if (pool_id > (u64)U32_MAX) {
3868 rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
3869 (unsigned long long)pool_id, U32_MAX);
3873 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
3874 if (IS_ERR(image_id)) {
3875 ret = PTR_ERR(image_id);
3878 ceph_decode_64_safe(&p, end, snap_id, out_err);
3879 ceph_decode_64_safe(&p, end, overlap, out_err);
3882 * The parent won't change (except when the clone is
3883 * flattened, and we already handled that). So we only need
3884 * to record the parent spec if we have not already done so.
3886 if (!rbd_dev->parent_spec) {
3887 parent_spec->pool_id = pool_id;
3888 parent_spec->image_id = image_id;
3889 parent_spec->snap_id = snap_id;
3890 rbd_dev->parent_spec = parent_spec;
3891 parent_spec = NULL; /* rbd_dev now owns this */
3895 * We always update the parent overlap. If it's zero we
3896 * treat it specially.
3898 rbd_dev->parent_overlap = overlap;
3902 /* A null parent_spec indicates it's the initial probe */
3906 * The overlap has become zero, so the clone
3907 * must have been resized down to 0 at some
3908 * point. Treat this the same as a flatten.
3910 rbd_dev_parent_put(rbd_dev);
3911 pr_info("%s: clone image now standalone\n",
3912 rbd_dev->disk->disk_name);
3915 * For the initial probe, if we find the
3916 * overlap is zero we just pretend there was
3917 * no parent image.
3919 rbd_warn(rbd_dev, "ignoring parent of "
3920 "clone with overlap 0\n");
3927 rbd_spec_put(parent_spec);
3932 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
3936 __le64 stripe_count;
3937 } __attribute__ ((packed)) striping_info_buf = { 0 };
3938 size_t size = sizeof (striping_info_buf);
3945 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3946 "rbd", "get_stripe_unit_count", NULL, 0,
3947 (char *)&striping_info_buf, size);
3948 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3955 * We don't actually support the "fancy striping" feature
3956 * (STRIPINGV2) yet, but if the striping sizes are the
3957 * defaults the behavior is the same as before. So find
3958 * out, and only fail if the image has non-default values.
3961 obj_size = (u64)1 << rbd_dev->header.obj_order;
3962 p = &striping_info_buf;
3963 stripe_unit = ceph_decode_64(&p);
3964 if (stripe_unit != obj_size) {
3965 rbd_warn(rbd_dev, "unsupported stripe unit "
3966 "(got %llu want %llu)",
3967 stripe_unit, obj_size);
3970 stripe_count = ceph_decode_64(&p);
3971 if (stripe_count != 1) {
3972 rbd_warn(rbd_dev, "unsupported stripe count "
3973 "(got %llu want 1)", stripe_count);
3976 rbd_dev->header.stripe_unit = stripe_unit;
3977 rbd_dev->header.stripe_count = stripe_count;
3982 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
3984 size_t image_id_size;
3989 void *reply_buf = NULL;
3991 char *image_name = NULL;
3994 rbd_assert(!rbd_dev->spec->image_name);
3996 len = strlen(rbd_dev->spec->image_id);
3997 image_id_size = sizeof (__le32) + len;
3998 image_id = kmalloc(image_id_size, GFP_KERNEL);
4003 end = image_id + image_id_size;
4004 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
4006 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
4007 reply_buf = kmalloc(size, GFP_KERNEL);
4011 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
4012 "rbd", "dir_get_name",
4013 image_id, image_id_size,
4014 reply_buf, size);
4018 end = reply_buf + ret;
4020 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
4021 if (IS_ERR(image_name))
4024 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
4032 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4034 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4035 const char *snap_name;
4038 /* Skip over names until we find the one we are looking for */
4040 snap_name = rbd_dev->header.snap_names;
4041 while (which < snapc->num_snaps) {
4042 if (!strcmp(name, snap_name))
4043 return snapc->snaps[which];
4044 snap_name += strlen(snap_name) + 1;
4050 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4052 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4057 for (which = 0; !found && which < snapc->num_snaps; which++) {
4058 const char *snap_name;
4060 snap_id = snapc->snaps[which];
4061 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
4062 if (IS_ERR(snap_name))
4064 found = !strcmp(name, snap_name);
4067 return found ? snap_id : CEPH_NOSNAP;
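/*
 * Illustrative note: for format 1 the names sit in one buffer parallel
 * to snapc->snaps[], e.g. (hypothetical values)
 *
 *	snaps[]    = { 18, 42, 57 }
 *	snap_names = "backup\0nightly\0weekly\0"
 *
 * so rbd_v1_snap_id_by_name() walks both in lock-step. Format 2 keeps
 * names on the OSD, so rbd_v2_snap_id_by_name() must fetch each name
 * with a method call, one per snapshot, until it finds a match.
 */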
4071 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
4072 * no snapshot by that name is found, or if an error occurs.
4074 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4076 if (rbd_dev->image_format == 1)
4077 return rbd_v1_snap_id_by_name(rbd_dev, name);
4079 return rbd_v2_snap_id_by_name(rbd_dev, name);
4083 * When an rbd image has a parent image, it is identified by the
4084 * pool, image, and snapshot ids (not names). This function fills
4085 * in the names for those ids. (It's OK if we can't figure out the
4086 * name for an image id, but the pool and snapshot ids should always
4087 * exist and have names.) All names in an rbd spec are dynamically
4090 * When an image being mapped (not a parent) is probed, we have the
4091 * pool name and pool id, image name and image id, and the snapshot
4092 * name. The only thing we're missing is the snapshot id.
4094 static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
4096 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4097 struct rbd_spec *spec = rbd_dev->spec;
4098 const char *pool_name;
4099 const char *image_name;
4100 const char *snap_name;
4104 * An image being mapped will have the pool name (etc.), but
4105 * we need to look up the snapshot id.
4107 if (spec->pool_name) {
4108 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
4111 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
4112 if (snap_id == CEPH_NOSNAP)
4114 spec->snap_id = snap_id;
4116 spec->snap_id = CEPH_NOSNAP;
4122 /* Get the pool name; we have to make our own copy of this */
4124 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
4126 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
4129 pool_name = kstrdup(pool_name, GFP_KERNEL);
4133 /* Fetch the image name; tolerate failure here */
4135 image_name = rbd_dev_image_name(rbd_dev);
4137 rbd_warn(rbd_dev, "unable to get image name");
4139 /* Look up the snapshot name, and make a copy */
4141 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
4147 spec->pool_name = pool_name;
4148 spec->image_name = image_name;
4149 spec->snap_name = snap_name;
4159 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
4168 struct ceph_snap_context *snapc;
4172 * We'll need room for the seq value (maximum snapshot id),
4173 * snapshot count, and array of that many snapshot ids.
4174 * For now we have a fixed upper limit on the number we're
4175 * prepared to receive.
4177 size = sizeof (__le64) + sizeof (__le32) +
4178 RBD_MAX_SNAP_COUNT * sizeof (__le64);
4179 reply_buf = kzalloc(size, GFP_KERNEL);
4183 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4184 "rbd", "get_snapcontext", NULL, 0,
4186 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4191 end = reply_buf + ret;
4193 ceph_decode_64_safe(&p, end, seq, out);
4194 ceph_decode_32_safe(&p, end, snap_count, out);
4197 * Make sure the reported number of snapshot ids wouldn't go
4198 * beyond the end of our buffer. But before checking that,
4199 * make sure the computed size of the snapshot context we
4200 * allocate is representable in a size_t.
4202 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
4203 / sizeof (u64)) {
4204 ret = -EINVAL;
4205 goto out;
4207 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
4211 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
4217 for (i = 0; i < snap_count; i++)
4218 snapc->snaps[i] = ceph_decode_64(&p);
4220 ceph_put_snap_context(rbd_dev->header.snapc);
4221 rbd_dev->header.snapc = snapc;
4223 dout(" snap context seq = %llu, snap_count = %u\n",
4224 (unsigned long long)seq, (unsigned int)snap_count);
4231 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
4242 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
4243 reply_buf = kmalloc(size, GFP_KERNEL);
4245 return ERR_PTR(-ENOMEM);
4247 snapid = cpu_to_le64(snap_id);
4248 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4249 "rbd", "get_snapshot_name",
4250 &snapid, sizeof (snapid),
4251 reply_buf, size);
4252 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4254 snap_name = ERR_PTR(ret);
4259 end = reply_buf + ret;
4260 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4261 if (IS_ERR(snap_name))
4264 dout(" snap_id 0x%016llx snap_name = %s\n",
4265 (unsigned long long)snap_id, snap_name);
4272 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
4274 bool first_time = rbd_dev->header.object_prefix == NULL;
4277 ret = rbd_dev_v2_image_size(rbd_dev);
4282 ret = rbd_dev_v2_header_onetime(rbd_dev);
4288 * If the image supports layering, get the parent info. We
4289 * need to probe the first time regardless. Thereafter we
4290 * only need to do so if there's a parent, to see if it has
4291 * disappeared due to the mapped image getting flattened.
4293 if (rbd_dev->header.features & RBD_FEATURE_LAYERING &&
4294 (first_time || rbd_dev->parent_spec)) {
4297 ret = rbd_dev_v2_parent_info(rbd_dev);
4302 * Print a warning if this is the initial probe and
4303 * the image has a parent. Don't print it if the
4304 * image now being probed is itself a parent. We
4305 * can tell at this point because we won't know its
4306 * pool name yet (just its pool id).
4308 warn = rbd_dev->parent_spec && rbd_dev->spec->pool_name;
4309 if (first_time && warn)
4310 rbd_warn(rbd_dev, "WARNING: kernel layering "
4311 "is EXPERIMENTAL!");
4314 if (rbd_dev->spec->snap_id == CEPH_NOSNAP)
4315 if (rbd_dev->mapping.size != rbd_dev->header.image_size)
4316 rbd_dev->mapping.size = rbd_dev->header.image_size;
4318 ret = rbd_dev_v2_snap_context(rbd_dev);
4319 dout("rbd_dev_v2_snap_context returned %d\n", ret);
4324 static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
4329 dev = &rbd_dev->dev;
4330 dev->bus = &rbd_bus_type;
4331 dev->type = &rbd_device_type;
4332 dev->parent = &rbd_root_dev;
4333 dev->release = rbd_dev_device_release;
4334 dev_set_name(dev, "%d", rbd_dev->dev_id);
4335 ret = device_register(dev);
4340 static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
4342 device_unregister(&rbd_dev->dev);
4345 static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);
4348 * Get a unique rbd identifier for the given new rbd_dev, and add
4349 * the rbd_dev to the global list. The minimum rbd id is 1.
4351 static void rbd_dev_id_get(struct rbd_device *rbd_dev)
4353 rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);
4355 spin_lock(&rbd_dev_list_lock);
4356 list_add_tail(&rbd_dev->node, &rbd_dev_list);
4357 spin_unlock(&rbd_dev_list_lock);
4358 dout("rbd_dev %p given dev id %llu\n", rbd_dev,
4359 (unsigned long long) rbd_dev->dev_id);
4363 * Remove an rbd_dev from the global list, and record that its
4364 * identifier is no longer in use.
4366 static void rbd_dev_id_put(struct rbd_device *rbd_dev)
4368 struct list_head *tmp;
4369 int rbd_id = rbd_dev->dev_id;
4372 rbd_assert(rbd_id > 0);
4374 dout("rbd_dev %p released dev id %llu\n", rbd_dev,
4375 (unsigned long long) rbd_dev->dev_id);
4376 spin_lock(&rbd_dev_list_lock);
4377 list_del_init(&rbd_dev->node);
4380 * If the id being "put" is not the current maximum, there
4381 * is nothing special we need to do.
4383 if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
4384 spin_unlock(&rbd_dev_list_lock);
4389 * We need to update the current maximum id. Search the
4390 * list to find out what it is. We're more likely to find
4391 * the maximum at the end, so search the list backward.
4394 list_for_each_prev(tmp, &rbd_dev_list) {
4395 struct rbd_device *rbd_dev;
4397 rbd_dev = list_entry(tmp, struct rbd_device, node);
4398 if (rbd_dev->dev_id > max_id)
4399 max_id = rbd_dev->dev_id;
4401 spin_unlock(&rbd_dev_list_lock);
4404 * The max id could have been updated by rbd_dev_id_get(), in
4405 * which case it now accurately reflects the new maximum.
4406 * Be careful not to overwrite the maximum value in that
4407 * case.
4409 atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
4410 dout(" max dev id has been reset\n");
4414 * Skips over white space at *buf, and updates *buf to point to the
4415 * first found non-space character (if any). Returns the length of
4416 * the token (string of non-white space characters) found. Note
4417 * that *buf must be terminated with '\0'.
4419 static inline size_t next_token(const char **buf)
4422 * These are the characters that produce nonzero for
4423 * isspace() in the "C" and "POSIX" locales.
4425 const char *spaces = " \f\n\r\t\v";
4427 *buf += strspn(*buf, spaces); /* Find start of token */
4429 return strcspn(*buf, spaces); /* Return token length */
4433 * Finds the next token in *buf, and if the provided token buffer is
4434 * big enough, copies the found token into it. The result, if
4435 * copied, is guaranteed to be terminated with '\0'. Note that *buf
4436 * must be terminated with '\0' on entry.
4438 * Returns the length of the token found (not including the '\0').
4439 * Return value will be 0 if no token is found, and it will be >=
4440 * token_size if the token would not fit.
4442 * The *buf pointer will be updated to point beyond the end of the
4443 * found token. Note that this occurs even if the token buffer is
4444 * too small to hold it.
4446 static inline size_t copy_token(const char **buf,
4452 len = next_token(buf);
4453 if (len < token_size) {
4454 memcpy(token, *buf, len);
4455 *(token + len) = '\0';
4463 * Finds the next token in *buf, dynamically allocates a buffer big
4464 * enough to hold a copy of it, and copies the token into the new
4465 * buffer. The copy is guaranteed to be terminated with '\0'. Note
4466 * that a duplicate buffer is created even for a zero-length token.
4468 * Returns a pointer to the newly-allocated duplicate, or a null
4469 * pointer if memory for the duplicate was not available. If
4470 * the lenp argument is a non-null pointer, the length of the token
4471 * (not including the '\0') is returned in *lenp.
4473 * If successful, the *buf pointer will be updated to point beyond
4474 * the end of the found token.
4476 * Note: uses GFP_KERNEL for allocation.
4478 static inline char *dup_token(const char **buf, size_t *lenp)
4483 len = next_token(buf);
4484 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
4487 *(dup + len) = '\0';
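/*
 * Minimal usage sketch (hypothetical input): given
 *
 *	const char *buf = "1.2.3.4:6789 name=admin rbd myimage";
 *
 * next_token(&buf) returns 12 with buf left at the monitor address;
 * the caller records that address, advances buf past it, and three
 * successive dup_token(&buf, NULL) calls then return "name=admin",
 * "rbd" and "myimage" as freshly-allocated, NUL-terminated copies.
 */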
4497 * Parse the options provided for an "rbd add" (i.e., rbd image
4498 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
4499 * and the data written is passed here via a NUL-terminated buffer.
4500 * Returns 0 if successful or an error code otherwise.
4502 * The information extracted from these options is recorded in
4503 * the other parameters which return dynamically-allocated
4506 * The address of a pointer that will refer to a ceph options
4507 * structure. Caller must release the returned pointer using
4508 * ceph_destroy_options() when it is no longer needed.
4510 * Address of an rbd options pointer. Fully initialized by
4511 * this function; caller must release with kfree().
4513 * Address of an rbd image specification pointer. Fully
4514 * initialized by this function based on parsed options.
4515 * Caller must release with rbd_spec_put().
4517 * The options passed take this form:
4518 * <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
4521 * A comma-separated list of one or more monitor addresses.
4522 * A monitor address is an ip address, optionally followed
4523 * by a port number (separated by a colon).
4524 * I.e.: ip1[:port1][,ip2[:port2]...]
4526 * A comma-separated list of ceph and/or rbd options.
4528 * The name of the rados pool containing the rbd image.
4530 * The name of the image in that pool to map.
4532 * An optional snapshot name. If provided, the mapping will
4533 * present data from the image at the time that snapshot was
4534 * created. The image head is used if no snapshot name is
4535 * provided. Snapshot mappings are always read-only.
4537 static int rbd_add_parse_args(const char *buf,
4538 struct ceph_options **ceph_opts,
4539 struct rbd_options **opts,
4540 struct rbd_spec **rbd_spec)
4544 const char *mon_addrs;
4546 size_t mon_addrs_size;
4547 struct rbd_spec *spec = NULL;
4548 struct rbd_options *rbd_opts = NULL;
4549 struct ceph_options *copts;
4552 /* The first four tokens are required */
4554 len = next_token(&buf);
4556 rbd_warn(NULL, "no monitor address(es) provided");
4560 mon_addrs_size = len + 1;
4564 options = dup_token(&buf, NULL);
4568 rbd_warn(NULL, "no options provided");
4572 spec = rbd_spec_alloc();
4576 spec->pool_name = dup_token(&buf, NULL);
4577 if (!spec->pool_name)
4579 if (!*spec->pool_name) {
4580 rbd_warn(NULL, "no pool name provided");
4584 spec->image_name = dup_token(&buf, NULL);
4585 if (!spec->image_name)
4587 if (!*spec->image_name) {
4588 rbd_warn(NULL, "no image name provided");
4593 * Snapshot name is optional; default is to use "-"
4594 * (indicating the head/no snapshot).
4596 len = next_token(&buf);
4598 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
4599 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
4600 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
4601 ret = -ENAMETOOLONG;
4604 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
4607 *(snap_name + len) = '\0';
4608 spec->snap_name = snap_name;
4610 /* Initialize all rbd options to the defaults */
4612 rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
4616 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
4618 copts = ceph_parse_options(options, mon_addrs,
4619 mon_addrs + mon_addrs_size - 1,
4620 parse_rbd_opts_token, rbd_opts);
4621 if (IS_ERR(copts)) {
4622 ret = PTR_ERR(copts);
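/*
 * Illustrative example (hypothetical names): a mapping request written
 * to /sys/bus/rbd/add in the form this parser accepts might be
 *
 *	1.2.3.4:6789,1.2.3.5:6789 name=admin rbd myimage mysnap
 *
 * i.e. monitor address(es), ceph/rbd options, pool, image, and an
 * optional snapshot name (defaulting to "-", the image head).
 */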
/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	char *object_name;
	void *response;
	char *image_id;

	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
	 * need to fetch the image id again in this case.  We
	 * do still need to set the image format though.
	 */
	if (rbd_dev->spec->image_id) {
		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;

		return 0;
	}

	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
	size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
	object_name = kmalloc(size, GFP_NOIO);
	if (!object_name)
		return -ENOMEM;
	sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
	dout("rbd id object name is %s\n", object_name);

	/* Response will be an encoded string, which includes a length */

	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

	/* If it doesn't exist we'll assume it's a format 1 image */

	ret = rbd_obj_method_sync(rbd_dev, object_name,
				"rbd", "get_id", NULL, 0,
				response, RBD_IMAGE_ID_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret == -ENOENT) {
		image_id = kstrdup("", GFP_KERNEL);
		ret = image_id ? 0 : -ENOMEM;
		if (!ret)
			rbd_dev->image_format = 1;
	} else if (ret > sizeof (__le32)) {
		void *p = response;

		image_id = ceph_extract_encoded_string(&p, p + ret,
						NULL, GFP_NOIO);
		ret = IS_ERR(image_id) ? PTR_ERR(image_id) : 0;
		if (!ret)
			rbd_dev->image_format = 2;
	} else {
		ret = -EINVAL;
	}

	if (!ret) {
		rbd_dev->spec->image_id = image_id;
		dout("image_id is %s\n", image_id);
	}
out:
	kfree(response);
	kfree(object_name);

	return ret;
}
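/*
 * For reference, the "get_id" reply decoded above is a ceph encoded
 * string: a little-endian 32-bit length followed by that many bytes,
 * with no NUL terminator on the wire.  For example, image id
 * "abc123" arrives as:
 *
 *   06 00 00 00 'a' 'b' 'c' '1' '2' '3'
 *
 * ceph_extract_encoded_string() turns this into a NUL-terminated,
 * dynamically-allocated C string.
 */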
/*
 * Undo whatever state changes are made by v1 or v2 header info
 * routines.
 */
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
	struct rbd_image_header *header;

	/* Drop parent reference unless it's already been done (or none) */

	if (rbd_dev->parent_overlap)
		rbd_dev_parent_put(rbd_dev);

	/* Free dynamic fields from the header, then zero it out */

	header = &rbd_dev->header;
	ceph_put_snap_context(header->snapc);
	kfree(header->snap_sizes);
	kfree(header->snap_names);
	kfree(header->object_prefix);
	memset(header, 0, sizeof (*header));
}
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret)
		goto out_err;

	/*
	 * Get and check the features for the image.  Currently the
	 * features are assumed to never change.
	 */
	ret = rbd_dev_v2_features(rbd_dev);
	if (ret)
		goto out_err;

	/* If the image supports fancy striping, get its parameters */

	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}

	/* No support for crypto and compression type format 2 images */

	return 0;
out_err:
	rbd_dev->header.features = 0;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;

	return ret;
}
static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
{
	struct rbd_device *parent = NULL;
	struct rbd_spec *parent_spec;
	struct rbd_client *rbdc;
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;
	/*
	 * We need to pass a reference to the client and the parent
	 * spec when creating the parent rbd_dev.  Images related by
	 * parent/child relationships always share both.
	 */
	parent_spec = rbd_spec_get(rbd_dev->parent_spec);
	rbdc = __rbd_get_client(rbd_dev->rbd_client);

	ret = -ENOMEM;
	parent = rbd_dev_create(rbdc, parent_spec);
	if (!parent)
		goto out_err;

	ret = rbd_dev_image_probe(parent, false);
	if (ret < 0)
		goto out_err;
	rbd_dev->parent = parent;
	atomic_set(&rbd_dev->parent_ref, 1);

	return 0;
out_err:
	if (parent) {
		rbd_dev_unparent(rbd_dev);
		kfree(rbd_dev->header_name);
		rbd_dev_destroy(parent);
	} else {
		rbd_put_client(rbdc);
		rbd_spec_put(parent_spec);
	}

	return ret;
}
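/*
 * A layered (format 2) image may itself have a parent, so probing
 * recurses up the chain, e.g.:
 *
 *   mapped image -> parent snapshot -> grandparent snapshot -> ...
 *
 * Each parent is probed with mapping == false, so a watch is only
 * established on the mapped image's header object, never on the
 * headers of its ancestors.
 */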
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;

	/* generate unique id: find highest unique id, add one */
	rbd_dev_id_get(rbd_dev);

	/* Fill in the device name, now that we have its id. */
	BUILD_BUG_ON(DEV_NAME_LEN
			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);

	/* Get our block major device number. */

	ret = register_blkdev(0, rbd_dev->name);
	if (ret < 0)
		goto err_out_id;
	rbd_dev->major = ret;

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		goto err_out_disk;
	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);

	ret = rbd_bus_add_dev(rbd_dev);
	if (ret)
		goto err_out_mapping;

	/* Everything's ready.  Announce the disk to the world. */

	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	add_disk(rbd_dev->disk);

	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long) rbd_dev->mapping.size);

	return ret;

err_out_mapping:
	rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);

	return ret;
}
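/*
 * Illustrative only: the first mapped image typically gets dev_id 0,
 * so its device name becomes "rbd0" and a successful setup logs
 * something like:
 *
 *   rbd0: added with size 0x100000000
 */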
static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	size_t size;

	/* Record the header object name for this rbd image. */

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
	else
		size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);
	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name)
		return -ENOMEM;

	if (rbd_dev->image_format == 1)
		sprintf(rbd_dev->header_name, "%s%s",
			spec->image_name, RBD_SUFFIX);
	else
		sprintf(rbd_dev->header_name, "%s%s",
			RBD_HEADER_PREFIX, spec->image_id);

	return 0;
}
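/*
 * Illustrative only, using the prefixes defined in rbd_types.h: a
 * format 1 image named "foo" gets header object "foo.rbd"
 * (RBD_SUFFIX), while a format 2 image with id "abc123" gets header
 * object "rbd_header.abc123" (RBD_HEADER_PREFIX).
 */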
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	rbd_dev_unprobe(rbd_dev);
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	rbd_dev_destroy(rbd_dev);
}
/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
{
	int ret;
	int tmp;

	/*
	 * Get the id from the image id object.  Unless there's an
	 * error, rbd_dev->spec->image_id will be filled in with
	 * a dynamically-allocated string, and rbd_dev->image_format
	 * will be set to either 1 or 2.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;
	rbd_assert(rbd_dev->spec->image_id);
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	if (mapping) {
		ret = rbd_dev_header_watch_sync(rbd_dev, true);
		if (ret)
			goto out_header_name;
	}

	if (rbd_dev->image_format == 1)
		ret = rbd_dev_v1_header_info(rbd_dev);
	else
		ret = rbd_dev_v2_header_info(rbd_dev);
	if (ret)
		goto err_out_watch;

	ret = rbd_dev_spec_update(rbd_dev);
	if (ret)
		goto err_out_probe;

	ret = rbd_dev_probe_parent(rbd_dev);
	if (ret)
		goto err_out_probe;

	dout("discovered format %u image, header name is %s\n",
		rbd_dev->image_format, rbd_dev->header_name);

	return 0;
err_out_probe:
	rbd_dev_unprobe(rbd_dev);
err_out_watch:
	if (mapping) {
		tmp = rbd_dev_header_watch_sync(rbd_dev, false);
		if (tmp)
			rbd_warn(rbd_dev, "unable to tear down "
					"watch request (%d)\n", tmp);
	}
out_header_name:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	dout("probe failed, returning %d\n", ret);

	return ret;
}
static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	struct ceph_osd_client *osdc;
	bool read_only;
	int rc = -ENOMEM;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto err_out_module;
	read_only = rbd_opts->read_only;
	kfree(rbd_opts);
	rbd_opts = NULL;	/* done with this */

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}

	/* pick the pool */
	osdc = &rbdc->client->osdc;
	rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
	if (rc < 0)
		goto err_out_client;
	spec->pool_id = (u64)rc;

	/* The ceph file layout needs to fit pool id in 32 bits */

	if (spec->pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "pool id too large (%llu > %u)\n",
				(unsigned long long)spec->pool_id, U32_MAX);
		rc = -EIO;
		goto err_out_client;
	}

	/* don't report a stale (non-negative) pool id on create failure */
	rc = -ENOMEM;
	rbd_dev = rbd_dev_create(rbdc, spec);
	if (!rbd_dev)
		goto err_out_client;
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */

	rc = rbd_dev_image_probe(rbd_dev, true);
	if (rc < 0)
		goto err_out_rbd_dev;

	/* If we are mapping a snapshot it must be marked read-only */

	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		read_only = true;
	rbd_dev->mapping.read_only = read_only;

	rc = rbd_dev_device_setup(rbd_dev);
	if (rc) {
		rbd_dev_image_release(rbd_dev);
		goto err_out_module;
	}

	return count;

err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	rbd_spec_put(spec);
err_out_module:
	module_put(THIS_MODULE);

	dout("Error adding device %s\n", buf);

	return (ssize_t)rc;
}
static void rbd_dev_device_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	rbd_free_disk(rbd_dev);
	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	rbd_dev_mapping_clear(rbd_dev);
	unregister_blkdev(rbd_dev->major, rbd_dev->name);
	rbd_dev->major = 0;
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);
}
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		first->parent = NULL;
		first->parent_overlap = 0;

		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}
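/*
 * For a chain "mapped -> parent -> grandparent", the inner loop above
 * walks to the end of the chain and releases the grandparent first;
 * the outer loop then repeats until the mapped device has no parent
 * left.  Ancestors are thus torn down most-distant first, working
 * back toward the mapped image.
 */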
static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct list_head *tmp;
	unsigned long ul;
	int dev_id;
	bool already = false;
	int ret;

	ret = strict_strtoul(buf, 10, &ul);
	if (ret)
		return ret;

	/* convert to int; abort if we lost anything in the conversion */
	dev_id = (int)ul;
	if (dev_id != ul)
		return -EINVAL;

	ret = -ENOENT;
	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			ret = 0;
			break;
		}
	}
	if (!ret) {
		spin_lock_irq(&rbd_dev->lock);
		if (rbd_dev->open_count)
			ret = -EBUSY;
		else
			already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
							&rbd_dev->flags);
		spin_unlock_irq(&rbd_dev->lock);
	}
	spin_unlock(&rbd_dev_list_lock);
	if (ret < 0 || already)
		return ret;

	rbd_bus_del_dev(rbd_dev);
	ret = rbd_dev_header_watch_sync(rbd_dev, false);
	if (ret)
		rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
	rbd_dev_image_release(rbd_dev);
	module_put(THIS_MODULE);

	return count;
}
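/*
 * Illustrative only -- unmapping device rbd1 (device id 1), per
 * Documentation/ABI/testing/sysfs-bus-rbd:
 *
 *   # echo 1 > /sys/bus/rbd/remove
 *
 * The write fails with -EBUSY while the device is still held open.
 */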
/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}
static int rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = kmem_cache_create("rbd_img_request",
					sizeof (struct rbd_img_request),
					__alignof__(struct rbd_img_request),
					0, NULL);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
					sizeof (struct rbd_obj_request),
					__alignof__(struct rbd_obj_request),
					0, NULL);
	if (!rbd_obj_request_cache)
		goto out_err;

	rbd_assert(!rbd_segment_name_cache);
	rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
					MAX_OBJ_NAME_SIZE + 1, 1, 0, NULL);
	if (rbd_segment_name_cache)
		return 0;
out_err:
	if (rbd_obj_request_cache) {
		kmem_cache_destroy(rbd_obj_request_cache);
		rbd_obj_request_cache = NULL;
	}

	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;

	return -ENOMEM;
}
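/*
 * Note: the image and object request caches use the natural alignment
 * of their structures, while rbd_segment_name_cache holds plain
 * object-name strings (MAX_OBJ_NAME_SIZE + 1 bytes each) and so is
 * created with an alignment of 1.
 */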
static void rbd_slab_exit(void)
{
	rbd_assert(rbd_segment_name_cache);
	kmem_cache_destroy(rbd_segment_name_cache);
	rbd_segment_name_cache = NULL;

	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}
static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}
	rc = rbd_slab_init();
	if (rc)
		return rc;
	rc = rbd_sysfs_init();
	if (rc)
		rbd_slab_exit();
	else
		pr_info("loaded " RBD_DRV_NAME_LONG "\n");

	return rc;
}

static void __exit rbd_exit(void)
{
	rbd_sysfs_cleanup();
	rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);
MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_DESCRIPTION("rados block device");

/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_LICENSE("GPL");