/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>

#include <linux/blkdev.h>
#include <linux/slab.h>

#include "rbd_types.h"
#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value, -EINVAL is
 * returned without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}
/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}
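/*
 * Illustrative sketch (editorial, not part of the driver): the
 * saturating helpers above support a get/put pattern like the one
 * used for rbd_dev->parent_ref further below, where a count that has
 * dropped to 0 means "torn down" and must never be revived.
 */
static bool __maybe_unused example_saturating_get(atomic_t *ref)
{
	/* > 0 means we took a reference; 0 or -EINVAL means we did not */
	return atomic_inc_return_safe(ref) > 0;
}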
#define RBD_DRV_NAME "rbd"
#define RBD_DRV_NAME_LONG "rbd (rados block device)"

#define RBD_MINORS_PER_MAJOR	256		/* max minors per blkdev */

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN \
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64
#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
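/*
 * Worked example (editorial note, not in the original source): for
 * 4-byte ints, MAX_INT_FORMAT_WIDTH is (5 * 4) / 2 + 1 == 11, which
 * exactly fits the 11 characters of "-2147483648".  Each byte adds
 * log10(256) ~= 2.41 < 2.5 decimal digits, and the +1 covers the sign.
 */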
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;
	u64 stripe_unit;
	u64 stripe_count;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};
/*
 * An instance of the client; multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};
struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};
struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request	*obj_request;	/* STAT op */
		struct {
			struct rbd_img_request	*img_request;
			u64			img_offset;
			/* links for img_request->obj_requests list */
			struct list_head	links;
		};
	};
	u32			which;		/* posn image request list */

	enum obj_request_type	type;
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};
enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
};
struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};
#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
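/*
 * Illustrative sketch (editorial, not from the driver): summing the
 * per-object transfer counts with the traversal macro above; the same
 * pattern appears in rbd_img_request_complete() below.
 */
static u64 __maybe_unused example_img_bytes_done(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;
	u64 xferred = 0;

	for_each_obj_request(img_request, obj_request)
		xferred += obj_request->xferred;

	return xferred;
}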
struct rbd_mapping {
	u64			size;
	u64			features;
	bool			read_only;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;

	char			*header_name;

	struct ceph_file_layout	layout;

	struct ceph_osd_event   *watch_event;
	struct rbd_obj_request	*watch_request;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* protects updating the header */
	struct rw_semaphore     header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};
/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};
static DEFINE_MUTEX(ctl_mutex);	  /* Serialize open/close/setup/teardown */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;
static struct kmem_cache	*rbd_segment_name_cache;
static int rbd_img_request_submit(struct rbd_img_request *img_request);

static void rbd_dev_device_release(struct device *dev);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
static void rbd_spec_put(struct rbd_spec *spec);

static struct bus_attribute rbd_bus_attrs[] = {
	__ATTR(add, S_IWUSR, NULL, rbd_add),
	__ATTR(remove, S_IWUSR, NULL, rbd_remove),
	__ATTR_NULL
};

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_attrs	= rbd_bus_attrs,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}
#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
		u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -EBUSY;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	(void) get_device(&rbd_dev->dev);
	set_device_ro(bdev, rbd_dev->mapping.read_only);
	mutex_unlock(&ctl_mutex);

	return 0;
}
static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	put_device(&rbd_dev->dev);
	mutex_unlock(&ctl_mutex);
}

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
};
/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_mutex;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_err;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	mutex_unlock(&ctl_mutex);
	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;

out_err:
	ceph_destroy_client(rbdc->client);
out_mutex:
	mutex_unlock(&ctl_mutex);
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}
static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}
enum {
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	/* Boolean args above */
	Opt_last_bool,
};

static match_table_t rbd_opts_tokens = {
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	/* Boolean args above */
	{-1, NULL}
};

struct rbd_options {
	bool	read_only;
};

#define RBD_READ_ONLY_DEFAULT	false
static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < 0)
		return -EINVAL;

	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) "
			       "at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token,
		     argstr[0].from);
	} else if (token > Opt_last_string && token < Opt_last_bool) {
		dout("got Boolean token %d\n", token);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	default:
		rbd_assert(false);
		break;
	}
	return 0;
}
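/*
 * Illustrative usage (editorial sketch; the authoritative syntax is in
 * Documentation/ABI/testing/sysfs-bus-rbd): rbd-specific options such
 * as "ro"/"read_only" ride along in the options field of the add
 * string and reach parse_rbd_opts_token() as options the ceph parser
 * does not recognize, e.g.:
 *
 *   # echo "1.2.3.4:6789 name=admin,ro rbd myimage -" > /sys/bus/rbd/add
 */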
/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);

	return rbdc;
}
/*
 * Destroy ceph client.  The rbd_client_list_lock is taken here to
 * unlink the client from the client list.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node.  If it's not referenced anymore,
 * release it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}

static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}
static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}
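/*
 * Worked example (editorial note): on a 32-bit kernel, SIZE_MAX is
 * 2^32 - 1, so after subtracting sizeof (struct ceph_snap_context)
 * the snap_count bound is roughly 2^32 / 8, about 536 million ids.
 * The check matters there; on 64-bit it is effectively unreachable.
 */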
/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	size_t size;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		size_t len;

		len = strnlen(ondisk->object_prefix,
				sizeof (ondisk->object_prefix));
		object_prefix = kmalloc(len + 1, GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
		memcpy(object_prefix, ondisk->object_prefix, len);
		object_prefix[len] = '\0';
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */

		size = snap_count * sizeof (*header->snap_sizes);
		snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	down_write(&rbd_dev->header_rwsem);
	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		header->crypt_type = ondisk->options.crypt_type;
		header->comp_type = ondisk->options.comp_type;
		/* The rest aren't used for format 1 images */
		header->stripe_unit = 0;
		header->stripe_count = 0;
		header->features = 0;
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	/* Make sure mapping size is consistent with header info */

	if (rbd_dev->spec->snap_id == CEPH_NOSNAP || first_time)
		if (rbd_dev->mapping.size != header->image_size)
			rbd_dev->mapping.size = header->image_size;

	up_write(&rbd_dev->header_rwsem);

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}
/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}
/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
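/*
 * Worked example (editorial note): with snapc->snaps holding
 * { 40, 30, 10 } (newest first, as the osd keeps it), looking up
 * snap_id 30 returns index 1, while snap_id 20 is not present and
 * yields BAD_SNAP_INDEX.
 */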
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return NULL;

	return _rbd_dev_v1_snap_name(rbd_dev, which);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}
static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}
static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}
static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;
	char *name_format;

	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	name_format = "%s.%012llx";
	if (rbd_dev->image_format == 2)
		name_format = "%s.%016llx";
	ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, name_format,
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		kmem_cache_free(rbd_segment_name_cache, name);
		name = NULL;
	}

	return name;
}

static void rbd_segment_name_free(const char *name)
{
	/* The explicit cast here is needed to drop the const qualifier */

	kmem_cache_free(rbd_segment_name_cache, (void *)name);
}

static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}
static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}

/*
 * Returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}
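/*
 * Worked example (editorial note): with the default obj_order of 22
 * (4 MB objects) on a format 2 image, image offset 0x401200 falls in
 * segment 0x401200 >> 22 == 1, named "<prefix>.0000000000000001".
 * rbd_segment_offset() yields 0x1200, and a 4 MB request starting
 * there is truncated by rbd_segment_length() to 0x400000 - 0x1200
 * bytes so that it ends exactly at the object boundary.
 */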
static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}

/*
 * Zeros a bio chain, starting at a specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec *bv;
	unsigned long flags;
	void *buf;
	int i;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, i) {
			if (pos + bv->bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(bv, &flags);
				memset(buf + remainder, 0,
				       bv->bv_len - remainder);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv->bv_len;
		}

		chain = chain->bi_next;
	}
}
/*
 * Similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

		page_offset = (size_t)(offset & ~PAGE_MASK);
		length = min(PAGE_SIZE - page_offset, (size_t)(end - offset));
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}
/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio_vec *bv;
	unsigned int resid;
	unsigned short idx;
	unsigned int voff;
	unsigned short end_idx;
	unsigned short vcnt;
	struct bio *bio;

	/* Handle the easy case for the caller */

	if (!offset && len == bio_src->bi_size)
		return bio_clone(bio_src, gfpmask);

	if (WARN_ON_ONCE(!len))
		return NULL;
	if (WARN_ON_ONCE(len > bio_src->bi_size))
		return NULL;
	if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
		return NULL;

	/* Find first affected segment... */

	resid = offset;
	bio_for_each_segment(bv, bio_src, idx) {
		if (resid < bv->bv_len)
			break;
		resid -= bv->bv_len;
	}
	voff = resid;

	/* ...and the last affected segment */

	resid += len;
	__bio_for_each_segment(bv, bio_src, end_idx, idx) {
		if (resid <= bv->bv_len)
			break;
		resid -= bv->bv_len;
	}
	vcnt = end_idx - idx + 1;

	/* Build the clone */

	bio = bio_alloc(gfpmask, (unsigned int) vcnt);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio->bi_bdev = bio_src->bi_bdev;
	bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
	bio->bi_rw = bio_src->bi_rw;
	bio->bi_flags |= 1 << BIO_CLONED;

	/*
	 * Copy over our part of the bio_vec, then update the first
	 * and last (or only) entries.
	 */
	memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
			vcnt * sizeof (struct bio_vec));
	bio->bi_io_vec[0].bv_offset += voff;
	if (vcnt > 1) {
		bio->bi_io_vec[0].bv_len -= voff;
		bio->bi_io_vec[vcnt - 1].bv_len = resid;
	} else {
		bio->bi_io_vec[0].bv_len = len;
	}

	bio->bi_vcnt = vcnt;
	bio->bi_size = len;

	return bio;
}
/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}
/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
		struct rbd_device *rbd_dev;

		rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked img_data\n",
			obj_request);
	}
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
		struct rbd_device *rbd_dev = NULL;

		if (obj_request_img_data_test(obj_request))
			rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked done\n",
			obj_request);
	}
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}

/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the responses from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
				bool exists)
{
	if (exists)
		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}
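/*
 * Illustrative sketch (editorial, not from the driver): a caller in a
 * layered-write path could consult the flags above like this, treating
 * "not yet known" separately from "known not to exist".
 */
static bool __maybe_unused example_target_known_to_exist(
				struct rbd_obj_request *obj_request)
{
	if (!obj_request_known_test(obj_request))
		return false;	/* no existence (STAT) result seen yet */

	return obj_request_exists_test(obj_request);
}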
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	if (img_request_child_test(img_request))
		kref_put(&img_request->kref, rbd_parent_request_destroy);
	else
		kref_put(&img_request->kref, rbd_img_request_destroy);
}
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}
static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}

static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
				struct rbd_obj_request *obj_request)
{
	dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);

	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}
static void rbd_img_request_complete(struct rbd_img_request *img_request)
{

	dout("%s: img %p\n", __func__, img_request);

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; it's not clear offhand which way is better.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;
		u64 xferred = 0;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;
	}

	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}

/* Caller is responsible for rbd_obj_request_destroy(obj_request) */

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);

	return wait_for_completion_interruptible(&obj_request->completion);
}
/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_WRITE, &img_request->flags);
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_CHILD, &img_request->flags);
}

static void img_request_child_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_CHILD, &img_request->flags);
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}
static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
	u64 xferred = obj_request->xferred;
	u64 length = obj_request->length;

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
		xferred, length);
	/*
	 * ENOENT means a hole in the image.  We zero-fill the
	 * entire length of the request.  A short read also implies
	 * zero-fill to the end of the request.  Either way we
	 * update the xferred count to indicate the whole request
	 * was satisfied.
	 */
	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
	if (obj_request->result == -ENOENT) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, 0);
		else
			zero_pages(obj_request->pages, 0, length);
		obj_request->result = 0;
		obj_request->xferred = length;
	} else if (xferred < length && !obj_request->result) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, xferred);
		else
			zero_pages(obj_request->pages, xferred, length);
		obj_request->xferred = length;
	}
	obj_request_done_set(obj_request);
}
static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}

static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}
static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_device *rbd_dev = NULL;
	bool layered = false;

	if (obj_request_img_data_test(obj_request)) {
		img_request = obj_request->img_request;
		layered = img_request && img_request_layered_test(img_request);
		rbd_dev = img_request->rbd_dev;
	}

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
	if (layered && obj_request->result == -ENOENT &&
			obj_request->img_offset < rbd_dev->parent_overlap)
		rbd_img_parent_read(obj_request);
	else if (img_request)
		rbd_img_obj_request_read_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}
static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short write.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	obj_request_done_set(obj_request);
}

/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
				struct ceph_msg *msg)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	u16 opcode;

	dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
	rbd_assert(osd_req == obj_request->osd_req);
	if (obj_request_img_data_test(obj_request)) {
		rbd_assert(obj_request->img_request);
		rbd_assert(obj_request->which != BAD_WHICH);
	} else {
		rbd_assert(obj_request->which == BAD_WHICH);
	}

	if (osd_req->r_result < 0)
		obj_request->result = osd_req->r_result;

	BUG_ON(osd_req->r_num_ops > 2);

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to blk_end_request(), which takes an unsigned int.
	 */
	obj_request->xferred = osd_req->r_reply_op_len[0];
	rbd_assert(obj_request->xferred < (u64)UINT_MAX);
	opcode = osd_req->r_ops[0].op;
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request);
		break;
	case CEPH_OSD_OP_WRITE:
		rbd_osd_write_callback(obj_request);
		break;
	case CEPH_OSD_OP_STAT:
		rbd_osd_stat_callback(obj_request);
		break;
	case CEPH_OSD_OP_CALL:
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		rbd_osd_trivial_callback(obj_request);
		break;
	default:
		rbd_warn(NULL, "%s: unsupported op %hu\n",
			obj_request->object_name, (unsigned short) opcode);
		break;
	}

	if (obj_request_done_test(obj_request))
		rbd_obj_request_complete(obj_request);
}
static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	u64 snap_id;

	rbd_assert(osd_req != NULL);

	snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			NULL, snap_id, NULL);
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	struct ceph_snap_context *snapc;
	struct timespec mtime = CURRENT_TIME;

	rbd_assert(osd_req != NULL);

	snapc = img_request ? img_request->snapc : NULL;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			snapc, CEPH_NOSNAP, &mtime);
}
static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					bool write_request,
					struct rbd_obj_request *obj_request)
{
	struct ceph_snap_context *snapc = NULL;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	if (obj_request_img_data_test(obj_request)) {
		struct rbd_img_request *img_request = obj_request->img_request;

		rbd_assert(write_request ==
				img_request_write_test(img_request));
		if (write_request)
			snapc = img_request->snapc;
	}

	/* Allocate and initialize the request, for the single op */

	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	if (write_request)
		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	else
		osd_req->r_flags = CEPH_OSD_FLAG_READ;

	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_oid_len = strlen(obj_request->object_name);
	rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
	memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

	osd_req->r_file_layout = rbd_dev->layout;	/* struct */

	return osd_req;
}
/*
 * Create a copyup osd request based on the information in the
 * object request supplied.  A copyup request has two osd ops,
 * a copyup method call, and a "normal" write request.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_assert(img_request_write_test(img_request));

	/* Allocate and initialize the request, for the two ops */

	snapc = img_request->snapc;
	rbd_dev = img_request->rbd_dev;
	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, 2, false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_oid_len = strlen(obj_request->object_name);
	rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
	memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

	osd_req->r_file_layout = rbd_dev->layout;	/* struct */

	return osd_req;
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}
/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
						u64 offset, u64 length,
						enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;
	size_t size;
	char *name;

	rbd_assert(obj_request_type_valid(type));

	size = strlen(object_name) + 1;
	name = kmalloc(size, GFP_KERNEL);
	if (!name)
		return NULL;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
	if (!obj_request) {
		kfree(name);
		return NULL;
	}

	obj_request->object_name = memcpy(name, object_name, size);
	obj_request->offset = offset;
	obj_request->length = length;
	obj_request->flags = 0;
	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
		offset, length, (int)type, obj_request);

	return obj_request;
}
static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
	case OBJ_REQUEST_PAGES:
		if (obj_request->pages)
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
	}

	kfree(obj_request->object_name);
	obj_request->object_name = NULL;
	kmem_cache_free(rbd_obj_request_cache, obj_request);
}
/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}

/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow\n");
}

/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * We must get the reference before checking for the overlap to
 * coordinate properly with zeroing the parent overlap in
 * rbd_dev_v2_parent_info() when an image gets flattened.  We
 * drop it again if there is no overlap.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return false;

	counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	if (counter > 0 && rbd_dev->parent_overlap)
		return true;

	/* Image was flattened, but parent is not yet torn down */

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow\n");

	return false;
}
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					u64 offset, u64 length,
					bool write_request)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_ATOMIC);
	if (!img_request)
		return NULL;

	if (write_request) {
		down_read(&rbd_dev->header_rwsem);
		ceph_get_snap_context(rbd_dev->header.snapc);
		up_read(&rbd_dev->header_rwsem);
	}

	img_request->rq = NULL;
	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
	img_request->flags = 0;
	if (write_request) {
		img_request_write_set(img_request);
		img_request->snapc = rbd_dev->header.snapc;
	} else {
		img_request->snap_id = rbd_dev->spec->snap_id;
	}
	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);
	spin_lock_init(&img_request->completion_lock);
	img_request->next_completion = 0;
	img_request->callback = NULL;
	img_request->result = 0;
	img_request->obj_request_count = 0;
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
		write_request ? "write" : "read", offset, length,
		img_request);

	return img_request;
}
static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (img_request_write_test(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}
static struct rbd_img_request *rbd_parent_request_create(
					struct rbd_obj_request *obj_request,
					u64 img_offset, u64 length)
{
	struct rbd_img_request *parent_request;
	struct rbd_device *rbd_dev;

	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;

	parent_request = rbd_img_request_create(rbd_dev->parent,
						img_offset, length, false);
	if (!parent_request)
		return NULL;

	img_request_child_set(parent_request);
	rbd_obj_request_get(obj_request);
	parent_request->obj_request = obj_request;

	return parent_request;
}

static void rbd_parent_request_destroy(struct kref *kref)
{
	struct rbd_img_request *parent_request;
	struct rbd_obj_request *orig_request;

	parent_request = container_of(kref, struct rbd_img_request, kref);
	orig_request = parent_request->obj_request;

	parent_request->obj_request = NULL;
	rbd_obj_request_put(orig_request);
	img_request_child_clear(parent_request);

	rbd_img_request_destroy(kref);
}
static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	unsigned int xferred;
	int result;
	bool more;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
	xferred = (unsigned int)obj_request->xferred;
	result = obj_request->result;
	if (result) {
		struct rbd_device *rbd_dev = img_request->rbd_dev;

		rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n",
			img_request_write_test(img_request) ? "write" : "read",
			obj_request->length, obj_request->img_offset,
			obj_request->offset);
		rbd_warn(rbd_dev, "  result %d xferred %x\n",
			result, xferred);
		if (!img_request->result)
			img_request->result = result;
	}

	/* Image object requests don't own their page array */

	if (obj_request->type == OBJ_REQUEST_PAGES) {
		obj_request->pages = NULL;
		obj_request->page_count = 0;
	}

	if (img_request_child_test(img_request)) {
		rbd_assert(img_request->obj_request != NULL);
		more = obj_request->which < img_request->obj_request_count - 1;
	} else {
		rbd_assert(img_request->rq != NULL);
		more = blk_end_request(img_request->rq, result, xferred);
	}

	return more;
}
static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	u32 which = obj_request->which;
	bool more = true;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	rbd_assert(img_request != NULL);
	rbd_assert(img_request->obj_request_count > 0);
	rbd_assert(which != BAD_WHICH);
	rbd_assert(which < img_request->obj_request_count);
	rbd_assert(which >= img_request->next_completion);

	spin_lock_irq(&img_request->completion_lock);
	if (which != img_request->next_completion)
		goto out;

	for_each_obj_request_from(img_request, obj_request) {
		rbd_assert(more);
		rbd_assert(which < img_request->obj_request_count);

		if (!obj_request_done_test(obj_request))
			break;
		more = rbd_img_obj_end_request(obj_request);
		which++;
	}

	rbd_assert(more ^ (which == img_request->obj_request_count));
	img_request->next_completion = which;
out:
	spin_unlock_irq(&img_request->completion_lock);

	if (!more)
		rbd_img_request_complete(img_request);
}
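/*
 * Editorial note on the ordering above: object requests may finish in
 * any order, but completion is reported to the block layer in list
 * order.  If objects 0..2 are outstanding and 2 finishes first, its
 * callback stops at next_completion == 0; when 0 later finishes, the
 * loop sweeps forward through every already-done request (0, then 1
 * and 2 if done), advancing next_completion as it goes.
 */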
2159 * Split up an image request into one or more object requests, each
2160 * to a different object. The "type" parameter indicates whether
2161 * "data_desc" is the pointer to the head of a list of bio
2162 * structures, or the base of a page array. In either case this
2163 * function assumes data_desc describes memory sufficient to hold
2164 * all data described by the image request.
2166 static int rbd_img_request_fill(struct rbd_img_request *img_request,
2167 enum obj_request_type type,
2170 struct rbd_device *rbd_dev = img_request->rbd_dev;
2171 struct rbd_obj_request *obj_request = NULL;
2172 struct rbd_obj_request *next_obj_request;
2173 bool write_request = img_request_write_test(img_request);
2174 struct bio *bio_list;
2175 unsigned int bio_offset = 0;
2176 struct page **pages;
2181 dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2182 (int)type, data_desc);
2184 opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ;
2185 img_offset = img_request->offset;
2186 resid = img_request->length;
2187 rbd_assert(resid > 0);
2189 if (type == OBJ_REQUEST_BIO) {
2190 bio_list = data_desc;
2191 rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT);
2193 rbd_assert(type == OBJ_REQUEST_PAGES);
2198 struct ceph_osd_request *osd_req;
2199 const char *object_name;
2203 object_name = rbd_segment_name(rbd_dev, img_offset);
2206 offset = rbd_segment_offset(rbd_dev, img_offset);
2207 length = rbd_segment_length(rbd_dev, img_offset, resid);
2208 obj_request = rbd_obj_request_create(object_name,
2209 offset, length, type);
2210 /* object request has its own copy of the object name */
2211 rbd_segment_name_free(object_name);
2215 if (type == OBJ_REQUEST_BIO) {
2216 unsigned int clone_size;
2218 rbd_assert(length <= (u64)UINT_MAX);
2219 clone_size = (unsigned int)length;
2220 obj_request->bio_list =
2221 bio_chain_clone_range(&bio_list,
2225 if (!obj_request->bio_list)
2228 unsigned int page_count;
2230 obj_request->pages = pages;
2231 page_count = (u32)calc_pages_for(offset, length);
2232 obj_request->page_count = page_count;
2233 if ((offset + length) & ~PAGE_MASK)
2234 page_count--; /* more on last page */
2235 pages += page_count;
2238 osd_req = rbd_osd_req_create(rbd_dev, write_request,
2242 obj_request->osd_req = osd_req;
2243 obj_request->callback = rbd_img_obj_callback;
2245 osd_req_op_extent_init(osd_req, 0, opcode, offset, length,
2247 if (type == OBJ_REQUEST_BIO)
2248 osd_req_op_extent_osd_data_bio(osd_req, 0,
2249 obj_request->bio_list, length);
2251 osd_req_op_extent_osd_data_pages(osd_req, 0,
2252 obj_request->pages, length,
2253 offset & ~PAGE_MASK, false, false);
2256 rbd_osd_req_format_write(obj_request);
2258 rbd_osd_req_format_read(obj_request);
2260 obj_request->img_offset = img_offset;
2261 rbd_img_obj_request_add(img_request, obj_request);
2263 img_offset += length;
2270 rbd_obj_request_put(obj_request);
2272 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2273 rbd_obj_request_put(obj_request);
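/*
 * Worked example (illustrative, assuming the default object order of
 * 22, i.e. 4 MB objects): an image request for img_offset 0x3ff000
 * and length 0x2000 straddles an object boundary, so the loop above
 * produces two object requests:
 *
 *	object 0: offset 0x3ff000, length 0x1000
 *	object 1: offset 0x000000, length 0x1000
 *
 * rbd_segment_name() derives each object's name from the image's
 * object prefix and the segment number, and each object request
 * carries a clone of its share of the bio chain (or page array).
 */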
2279 rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
2281 struct rbd_img_request *img_request;
2282 struct rbd_device *rbd_dev;
2283 struct page **pages;
2286 rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2287 rbd_assert(obj_request_img_data_test(obj_request));
2288 img_request = obj_request->img_request;
2289 rbd_assert(img_request);
2291 rbd_dev = img_request->rbd_dev;
2292 rbd_assert(rbd_dev);
2294 pages = obj_request->copyup_pages;
2295 rbd_assert(pages != NULL);
2296 obj_request->copyup_pages = NULL;
2297 page_count = obj_request->copyup_page_count;
2298 rbd_assert(page_count);
2299 obj_request->copyup_page_count = 0;
2300 ceph_release_page_vector(pages, page_count);
2303 * We want the transfer count to reflect the size of the
2304 * original write request. There is no such thing as a
2305 * successful short write, so if the request was successful
2306 * we can just set it to the originally-requested length.
2308 if (!obj_request->result)
2309 obj_request->xferred = obj_request->length;
2311 /* Finish up with the normal image object callback */
2313 rbd_img_obj_callback(obj_request);
2317 rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2319 struct rbd_obj_request *orig_request;
2320 struct ceph_osd_request *osd_req;
2321 struct ceph_osd_client *osdc;
2322 struct rbd_device *rbd_dev;
2323 struct page **pages;
2330 rbd_assert(img_request_child_test(img_request));
2332 /* First get what we need from the image request */
2334 pages = img_request->copyup_pages;
2335 rbd_assert(pages != NULL);
2336 img_request->copyup_pages = NULL;
2337 page_count = img_request->copyup_page_count;
2338 rbd_assert(page_count);
2339 img_request->copyup_page_count = 0;
2341 orig_request = img_request->obj_request;
2342 rbd_assert(orig_request != NULL);
2343 rbd_assert(obj_request_type_valid(orig_request->type));
2344 img_result = img_request->result;
2345 parent_length = img_request->length;
2346 rbd_assert(parent_length == img_request->xferred);
2347 rbd_img_request_put(img_request);
2349 rbd_assert(orig_request->img_request);
2350 rbd_dev = orig_request->img_request->rbd_dev;
2351 rbd_assert(rbd_dev);
2354 * If the overlap has become 0 (most likely because the
2355 * image has been flattened) we need to free the pages
2356 * and re-submit the original write request.
2358 if (!rbd_dev->parent_overlap) {
2359 struct ceph_osd_client *osdc;
2361 ceph_release_page_vector(pages, page_count);
2362 osdc = &rbd_dev->rbd_client->client->osdc;
2363 img_result = rbd_obj_request_submit(osdc, orig_request);
2372 * The original osd request is of no use to us any more.
2373 * We need a new one that can hold the two ops in a copyup
2374 * request. Allocate the new copyup osd request for the
2375 * original request, and release the old one.
2377 img_result = -ENOMEM;
2378 osd_req = rbd_osd_req_create_copyup(orig_request);
2381 rbd_osd_req_destroy(orig_request->osd_req);
2382 orig_request->osd_req = osd_req;
2383 orig_request->copyup_pages = pages;
2384 orig_request->copyup_page_count = page_count;
2386 /* Initialize the copyup op */
2388 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2389 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
2392 /* Then the original write request op */
2394 offset = orig_request->offset;
2395 length = orig_request->length;
2396 osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE,
2397 offset, length, 0, 0);
2398 if (orig_request->type == OBJ_REQUEST_BIO)
2399 osd_req_op_extent_osd_data_bio(osd_req, 1,
2400 orig_request->bio_list, length);
2402 osd_req_op_extent_osd_data_pages(osd_req, 1,
2403 orig_request->pages, length,
2404 offset & ~PAGE_MASK, false, false);
2406 rbd_osd_req_format_write(orig_request);
2408 /* All set, send it off. */
2410 orig_request->callback = rbd_img_obj_copyup_callback;
2411 osdc = &rbd_dev->rbd_client->client->osdc;
2412 img_result = rbd_obj_request_submit(osdc, orig_request);
2416 /* Record the error code and complete the request */
2418 orig_request->result = img_result;
2419 orig_request->xferred = 0;
2420 obj_request_done_set(orig_request);
2421 rbd_obj_request_complete(orig_request);
2425 * Read from the parent image the range of data that covers the
2426 * entire target of the given object request. This is used for
2427 * satisfying a layered image write request when the target of an
2428 * object request from the image request does not exist.
2430 * A page array big enough to hold the returned data is allocated
2431 * and supplied to rbd_img_request_fill() as the "data descriptor."
2432 * When the read completes, this page array will be transferred to
2433 * the original object request for the copyup operation.
2435 * If an error occurs, record it as the result of the original
2436 * object request and mark it done so it gets completed.
2438 static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2440 struct rbd_img_request *img_request = NULL;
2441 struct rbd_img_request *parent_request = NULL;
2442 struct rbd_device *rbd_dev;
2445 struct page **pages = NULL;
2449 rbd_assert(obj_request_img_data_test(obj_request));
2450 rbd_assert(obj_request_type_valid(obj_request->type));
2452 img_request = obj_request->img_request;
2453 rbd_assert(img_request != NULL);
2454 rbd_dev = img_request->rbd_dev;
2455 rbd_assert(rbd_dev->parent != NULL);
2458 * Determine the byte range covered by the object in the
2459 * child image to which the original request was to be sent.
2461 img_offset = obj_request->img_offset - obj_request->offset;
2462 length = (u64)1 << rbd_dev->header.obj_order;
2465 * There is no defined parent data beyond the parent
2466 * overlap, so limit what we read at that boundary, if necessary.
2469 if (img_offset + length > rbd_dev->parent_overlap) {
2470 rbd_assert(img_offset < rbd_dev->parent_overlap);
2471 length = rbd_dev->parent_overlap - img_offset;
2475 * Allocate a page array big enough to receive the data read
2478 page_count = (u32)calc_pages_for(0, length);
2479 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2480 if (IS_ERR(pages)) {
2481 result = PTR_ERR(pages);
2487 parent_request = rbd_parent_request_create(obj_request,
2488 img_offset, length);
2489 if (!parent_request)
2492 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2495 parent_request->copyup_pages = pages;
2496 parent_request->copyup_page_count = page_count;
2498 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2499 result = rbd_img_request_submit(parent_request);
2503 parent_request->copyup_pages = NULL;
2504 parent_request->copyup_page_count = 0;
2505 parent_request->obj_request = NULL;
2506 rbd_obj_request_put(obj_request);
2509 ceph_release_page_vector(pages, page_count);
2511 rbd_img_request_put(parent_request);
2512 obj_request->result = result;
2513 obj_request->xferred = 0;
2514 obj_request_done_set(obj_request);
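/*
 * To summarize the copyup machinery above (a sketch of the flow, not
 * literal code): a layered write to an object known not to exist is
 * handled in three steps:
 *
 *	1. rbd_img_obj_parent_read_full() reads the full object range
 *	   from the parent image into a freshly allocated page array.
 *	2. rbd_img_obj_parent_read_full_callback() builds a two-op OSD
 *	   request: a "copyup" class method call carrying the parent
 *	   data, followed by the original write.
 *	3. rbd_img_obj_copyup_callback() releases the copyup pages and
 *	   completes the original object request.
 */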
2519 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2521 struct rbd_obj_request *orig_request;
2522 struct rbd_device *rbd_dev;
2525 rbd_assert(!obj_request_img_data_test(obj_request));
2528 * All we need from the object request is the original
2529 * request and the result of the STAT op. Grab those, then
2530 * we're done with the request.
2532 orig_request = obj_request->obj_request;
2533 obj_request->obj_request = NULL;
2534 rbd_assert(orig_request);
2535 rbd_assert(orig_request->img_request);
2537 result = obj_request->result;
2538 obj_request->result = 0;
2540 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2541 obj_request, orig_request, result,
2542 obj_request->xferred, obj_request->length);
2543 rbd_obj_request_put(obj_request);
2546 * If the overlap has become 0 (most likely because the
2547 * image has been flattened) we need to free the pages
2548 * and re-submit the original write request.
2550 rbd_dev = orig_request->img_request->rbd_dev;
2551 if (!rbd_dev->parent_overlap) {
2552 struct ceph_osd_client *osdc;
2554 rbd_obj_request_put(orig_request);
2555 osdc = &rbd_dev->rbd_client->client->osdc;
2556 result = rbd_obj_request_submit(osdc, orig_request);
2562 * Our only purpose here is to determine whether the object
2563 * exists, and we don't want to treat the non-existence as
2564 * an error. If something else comes back, transfer the
2565 * error to the original request and complete it now.
2568 obj_request_existence_set(orig_request, true);
2569 } else if (result == -ENOENT) {
2570 obj_request_existence_set(orig_request, false);
2571 } else if (result) {
2572 orig_request->result = result;
2577 * Resubmit the original request now that we have recorded
2578 * whether the target object exists.
2580 orig_request->result = rbd_img_obj_request_submit(orig_request);
2582 if (orig_request->result)
2583 rbd_obj_request_complete(orig_request);
2584 rbd_obj_request_put(orig_request);
2587 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2589 struct rbd_obj_request *stat_request;
2590 struct rbd_device *rbd_dev;
2591 struct ceph_osd_client *osdc;
2592 struct page **pages = NULL;
2598 * The response data for a STAT call consists of a le64 length followed by an mtime (le32 tv_sec, le32 tv_nsec).
2605 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2606 page_count = (u32)calc_pages_for(0, size);
2607 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2609 return PTR_ERR(pages);
2612 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2617 rbd_obj_request_get(obj_request);
2618 stat_request->obj_request = obj_request;
2619 stat_request->pages = pages;
2620 stat_request->page_count = page_count;
2622 rbd_assert(obj_request->img_request);
2623 rbd_dev = obj_request->img_request->rbd_dev;
2624 stat_request->osd_req = rbd_osd_req_create(rbd_dev, false,
2626 if (!stat_request->osd_req)
2628 stat_request->callback = rbd_img_obj_exists_callback;
2630 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
2631 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2633 rbd_osd_req_format_read(stat_request);
2635 osdc = &rbd_dev->rbd_client->client->osdc;
2636 ret = rbd_obj_request_submit(osdc, stat_request);
2639 rbd_obj_request_put(obj_request);
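/*
 * For illustration only: the driver never decodes the STAT payload
 * (the request's result is all it needs), but if it did, a sketch of
 * the decoding would be:
 *
 *	void *p = page_address(pages[0]);
 *	u64 length = ceph_decode_64(&p);	-- object size
 *	u32 tv_sec = ceph_decode_32(&p);	-- mtime seconds
 *	u32 tv_nsec = ceph_decode_32(&p);	-- mtime nanoseconds
 */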
2644 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2646 struct rbd_img_request *img_request;
2647 struct rbd_device *rbd_dev;
2650 rbd_assert(obj_request_img_data_test(obj_request));
2652 img_request = obj_request->img_request;
2653 rbd_assert(img_request);
2654 rbd_dev = img_request->rbd_dev;
2657 * Only writes to layered images need special handling.
2658 * Reads and non-layered writes are simple object requests.
2659 * Layered writes that start beyond the end of the overlap
2660 * with the parent have no parent data, so they too are
2661 * simple object requests. Finally, if the target object is
2662 * known to already exist, its parent data has already been
2663 * copied, so a write to the object can also be handled as a
2664 * simple object request.
2666 if (!img_request_write_test(img_request) ||
2667 !img_request_layered_test(img_request) ||
2668 rbd_dev->parent_overlap <= obj_request->img_offset ||
2669 ((known = obj_request_known_test(obj_request)) &&
2670 obj_request_exists_test(obj_request))) {
2672 struct rbd_device *rbd_dev;
2673 struct ceph_osd_client *osdc;
2675 rbd_dev = obj_request->img_request->rbd_dev;
2676 osdc = &rbd_dev->rbd_client->client->osdc;
2678 return rbd_obj_request_submit(osdc, obj_request);
2682 * It's a layered write. The target object might exist but
2683 * we may not know that yet. If we know it doesn't exist,
2684 * start by reading the data for the full target object from
2685 * the parent so we can use it for a copyup to the target.
2688 return rbd_img_obj_parent_read_full(obj_request);
2690 /* We don't know whether the target exists. Go find out. */
2692 return rbd_img_obj_exists_submit(obj_request);
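/*
 * Dispatch summary for the function above (illustrative):
 *
 *	read, non-layered write, write beyond
 *	the parent overlap, or target known
 *	to exist			-> plain object request
 *	target known not to exist	-> parent read, then copyup
 *	target existence unknown	-> STAT first, then resubmit
 */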
2695 static int rbd_img_request_submit(struct rbd_img_request *img_request)
2697 struct rbd_obj_request *obj_request;
2698 struct rbd_obj_request *next_obj_request;
2700 dout("%s: img %p\n", __func__, img_request);
2701 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
2704 ret = rbd_img_obj_request_submit(obj_request);
2712 static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2714 struct rbd_obj_request *obj_request;
2715 struct rbd_device *rbd_dev;
2720 rbd_assert(img_request_child_test(img_request));
2722 /* First get what we need from the image request and release it */
2724 obj_request = img_request->obj_request;
2725 img_xferred = img_request->xferred;
2726 img_result = img_request->result;
2727 rbd_img_request_put(img_request);
2730 * If the overlap has become 0 (most likely because the
2731 * image has been flattened) we need to re-submit the original request.
2734 rbd_assert(obj_request);
2735 rbd_assert(obj_request->img_request);
2736 rbd_dev = obj_request->img_request->rbd_dev;
2737 if (!rbd_dev->parent_overlap) {
2738 struct ceph_osd_client *osdc;
2740 osdc = &rbd_dev->rbd_client->client->osdc;
2741 img_result = rbd_obj_request_submit(osdc, obj_request);
2746 obj_request->result = img_result;
2747 if (obj_request->result)
2751 * We need to zero anything beyond the parent overlap
2752 * boundary. Since rbd_img_obj_request_read_callback()
2753 * will zero anything beyond the end of a short read, an
2754 * easy way to do this is to pretend the data from the
2755 * parent came up short--ending at the overlap boundary.
2757 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
2758 obj_end = obj_request->img_offset + obj_request->length;
2759 if (obj_end > rbd_dev->parent_overlap) {
2762 if (obj_request->img_offset < rbd_dev->parent_overlap)
2763 xferred = rbd_dev->parent_overlap -
2764 obj_request->img_offset;
2766 obj_request->xferred = min(img_xferred, xferred);
2768 obj_request->xferred = img_xferred;
2771 rbd_img_obj_request_read_callback(obj_request);
2772 rbd_obj_request_complete(obj_request);
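/*
 * Worked example (illustrative numbers): with parent_overlap at 6 MB,
 * an object request for img_offset 4 MB and length 4 MB is backed by
 * only 2 MB of parent data. xferred is clamped to 2 MB above, and
 * rbd_img_obj_request_read_callback() zeroes the remaining 2 MB just
 * as it would for any short read.
 */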
2775 static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
2777 struct rbd_img_request *img_request;
2780 rbd_assert(obj_request_img_data_test(obj_request));
2781 rbd_assert(obj_request->img_request != NULL);
2782 rbd_assert(obj_request->result == (s32) -ENOENT);
2783 rbd_assert(obj_request_type_valid(obj_request->type));
2785 /* rbd_read_finish(obj_request, obj_request->length); */
2786 img_request = rbd_parent_request_create(obj_request,
2787 obj_request->img_offset,
2788 obj_request->length);
2793 if (obj_request->type == OBJ_REQUEST_BIO)
2794 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2795 obj_request->bio_list);
2797 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
2798 obj_request->pages);
2802 img_request->callback = rbd_img_parent_read_callback;
2803 result = rbd_img_request_submit(img_request);
2810 rbd_img_request_put(img_request);
2811 obj_request->result = result;
2812 obj_request->xferred = 0;
2813 obj_request_done_set(obj_request);
2816 static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 notify_id)
2818 struct rbd_obj_request *obj_request;
2819 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2822 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2823 OBJ_REQUEST_NODATA);
2828 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2829 if (!obj_request->osd_req)
2831 obj_request->callback = rbd_obj_request_put;
2833 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
2835 rbd_osd_req_format_read(obj_request);
2837 ret = rbd_obj_request_submit(osdc, obj_request);
2840 rbd_obj_request_put(obj_request);
2845 static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2847 struct rbd_device *rbd_dev = (struct rbd_device *)data;
2853 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
2854 rbd_dev->header_name, (unsigned long long)notify_id,
2855 (unsigned int)opcode);
2856 ret = rbd_dev_refresh(rbd_dev);
2858 rbd_warn(rbd_dev, "header refresh error (%d)", ret);
2860 rbd_obj_notify_ack(rbd_dev, notify_id);
2864 * Request sync osd watch/unwatch. The value of "start" determines
2865 * whether a watch request is being initiated or torn down.
2867 static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, bool start)
2869 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2870 struct rbd_obj_request *obj_request;
2873 rbd_assert(start ^ !!rbd_dev->watch_event);
2874 rbd_assert(start ^ !!rbd_dev->watch_request);
2877 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
2878 &rbd_dev->watch_event);
2881 rbd_assert(rbd_dev->watch_event != NULL);
2885 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2886 OBJ_REQUEST_NODATA);
2890 obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, obj_request);
2891 if (!obj_request->osd_req)
2895 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
2897 ceph_osdc_unregister_linger_request(osdc,
2898 rbd_dev->watch_request->osd_req);
2900 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
2901 rbd_dev->watch_event->cookie, 0, start ? 1 : 0);
2902 rbd_osd_req_format_write(obj_request);
2904 ret = rbd_obj_request_submit(osdc, obj_request);
2907 ret = rbd_obj_request_wait(obj_request);
2910 ret = obj_request->result;
2915 * A watch request is set to linger, so the underlying osd
2916 * request won't go away until we unregister it. We retain
2917 * a pointer to the object request during that time (in
2918 * rbd_dev->watch_request), so we'll keep a reference to
2919 * it. We'll drop that reference (below) after we've unregistered it.
2923 rbd_dev->watch_request = obj_request;
2928 /* We have successfully torn down the watch request */
2930 rbd_obj_request_put(rbd_dev->watch_request);
2931 rbd_dev->watch_request = NULL;
2933 /* Cancel the event if we're tearing down, or on error */
2934 ceph_osdc_cancel_event(rbd_dev->watch_event);
2935 rbd_dev->watch_event = NULL;
2937 rbd_obj_request_put(obj_request);
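/*
 * Usage sketch for the helper above: a watch is registered once the
 * header object is known, and torn down when the mapping goes away:
 *
 *	ret = rbd_dev_header_watch_sync(rbd_dev, true);	  -- register
 *	...
 *	ret = rbd_dev_header_watch_sync(rbd_dev, false);  -- tear down
 *
 * While registered, notifications on the header object arrive via
 * rbd_watch_cb(), which refreshes the header and acknowledges the
 * notification.
 */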
2943 * Synchronous osd object method call. Returns the number of bytes
2944 * returned in the inbound (response) buffer, or a negative error code.
2946 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
2947 const char *object_name,
2948 const char *class_name,
2949 const char *method_name,
2950 const void *outbound,
2951 size_t outbound_size,
2953 size_t inbound_size)
2955 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2956 struct rbd_obj_request *obj_request;
2957 struct page **pages;
2962 * Method calls are ultimately read operations. The result
2963 * should be placed into the inbound buffer provided. They
2964 * also supply outbound data--parameters for the object
2965 * method. Currently if this is present it will be a snapshot id.
2968 page_count = (u32)calc_pages_for(0, inbound_size);
2969 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2971 return PTR_ERR(pages);
2974 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
2979 obj_request->pages = pages;
2980 obj_request->page_count = page_count;
2982 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2983 if (!obj_request->osd_req)
2986 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
2987 class_name, method_name);
2988 if (outbound_size) {
2989 struct ceph_pagelist *pagelist;
2991 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
2995 ceph_pagelist_init(pagelist);
2996 ceph_pagelist_append(pagelist, outbound, outbound_size);
2997 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
3000 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
3001 obj_request->pages, inbound_size,
3003 rbd_osd_req_format_read(obj_request);
3005 ret = rbd_obj_request_submit(osdc, obj_request);
3008 ret = rbd_obj_request_wait(obj_request);
3012 ret = obj_request->result;
3016 rbd_assert(obj_request->xferred < (u64)INT_MAX);
3017 ret = (int)obj_request->xferred;
3018 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
3021 rbd_obj_request_put(obj_request);
3023 ceph_release_page_vector(pages, page_count);
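/*
 * Example call (a sketch mirroring the v2 metadata helpers below):
 * fetching the size of the base image from its header object via the
 * "rbd" class "get_size" method:
 *
 *	__le64 snapid = cpu_to_le64(CEPH_NOSNAP);
 *	struct {
 *		u8 order;
 *		__le64 size;
 *	} __attribute__ ((packed)) size_buf = { 0 };
 *
 *	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
 *				  "rbd", "get_size",
 *				  &snapid, sizeof (snapid),
 *				  &size_buf, sizeof (size_buf));
 *
 * A non-negative return value is the number of bytes placed into
 * size_buf, which the caller then decodes.
 */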
3028 static void rbd_request_fn(struct request_queue *q)
3029 __releases(q->queue_lock) __acquires(q->queue_lock)
3031 struct rbd_device *rbd_dev = q->queuedata;
3032 bool read_only = rbd_dev->mapping.read_only;
3036 while ((rq = blk_fetch_request(q))) {
3037 bool write_request = rq_data_dir(rq) == WRITE;
3038 struct rbd_img_request *img_request;
3042 /* Ignore any non-FS requests that filter through. */
3044 if (rq->cmd_type != REQ_TYPE_FS) {
3045 dout("%s: non-fs request type %d\n", __func__,
3046 (int) rq->cmd_type);
3047 __blk_end_request_all(rq, 0);
3051 /* Ignore/skip any zero-length requests */
3053 offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
3054 length = (u64) blk_rq_bytes(rq);
3057 dout("%s: zero-length request\n", __func__);
3058 __blk_end_request_all(rq, 0);
3062 spin_unlock_irq(q->queue_lock);
3064 /* Disallow writes to a read-only device */
3066 if (write_request) {
3070 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
3074 * Quit early if the mapped snapshot no longer
3075 * exists. It's still possible the snapshot will
3076 * have disappeared by the time our request arrives
3077 * at the osd, but there's no sense in sending it if we know it has.
3080 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
3081 dout("request for non-existent snapshot\n");
3082 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
3088 if (offset && length > U64_MAX - offset + 1) {
3089 rbd_warn(rbd_dev, "bad request range (%llu~%llu)",
3091 goto end_request; /* Shouldn't happen */
3095 if (offset + length > rbd_dev->mapping.size) {
3096 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)",
3097 offset, length, rbd_dev->mapping.size);
3102 img_request = rbd_img_request_create(rbd_dev, offset, length,
3107 img_request->rq = rq;
3109 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3112 result = rbd_img_request_submit(img_request);
3114 rbd_img_request_put(img_request);
3116 spin_lock_irq(q->queue_lock);
3118 rbd_warn(rbd_dev, "%s %llx at %llx result %d\n",
3119 write_request ? "write" : "read",
3120 length, offset, result);
3122 __blk_end_request_all(rq, result);
3128 * A queue callback (rbd_merge_bvec() below) that makes sure we don't create a bio spanning
3129 * multiple osd objects. One exception would be a single-page bio,
3130 * which we handle later in bio_chain_clone_range().
3132 static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
3133 struct bio_vec *bvec)
3135 struct rbd_device *rbd_dev = q->queuedata;
3136 sector_t sector_offset;
3137 sector_t sectors_per_obj;
3138 sector_t obj_sector_offset;
3142 * Find how far into its rbd object the partition-relative
3143 * bio start sector falls, as an offset relative to the enclosing device.
3146 sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
3147 sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
3148 obj_sector_offset = sector_offset & (sectors_per_obj - 1);
3151 * Compute the number of bytes from that offset to the end
3152 * of the object. Account for what's already used by the bio.
3154 ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
3155 if (ret > bmd->bi_size)
3156 ret -= bmd->bi_size;
3161 * Don't send back more than was asked for. And if the bio
3162 * was empty, let the whole thing through because: "Note
3163 * that a block device *must* allow a single page to be
3164 * added to an empty bio."
3166 rbd_assert(bvec->bv_len <= PAGE_SIZE);
3167 if (ret > (int) bvec->bv_len || !bmd->bi_size)
3168 ret = (int) bvec->bv_len;
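/*
 * Worked example (illustrative): with 4 MB objects there are 8192
 * 512-byte sectors per object. For a bio starting at object-relative
 * sector 8190 that already holds 512 bytes, the room left in the
 * object is 2 sectors (1024 bytes) minus the 512 bytes already in
 * the bio, so the callback reports only 512 bytes and a 4 KB bvec
 * cannot be merged--unless the bio were still empty, in which case
 * the full bvec length is returned, as required.
 */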
3173 static void rbd_free_disk(struct rbd_device *rbd_dev)
3175 struct gendisk *disk = rbd_dev->disk;
3180 rbd_dev->disk = NULL;
3181 if (disk->flags & GENHD_FL_UP) {
3184 blk_cleanup_queue(disk->queue);
3189 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3190 const char *object_name,
3191 u64 offset, u64 length, void *buf)
3194 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3195 struct rbd_obj_request *obj_request;
3196 struct page **pages = NULL;
3201 page_count = (u32) calc_pages_for(offset, length);
3202 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3204 ret = PTR_ERR(pages);
3207 obj_request = rbd_obj_request_create(object_name, offset, length,
3212 obj_request->pages = pages;
3213 obj_request->page_count = page_count;
3215 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
3216 if (!obj_request->osd_req)
3219 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
3220 offset, length, 0, 0);
3221 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
3223 obj_request->length,
3224 obj_request->offset & ~PAGE_MASK,
3226 rbd_osd_req_format_read(obj_request);
3228 ret = rbd_obj_request_submit(osdc, obj_request);
3231 ret = rbd_obj_request_wait(obj_request);
3235 ret = obj_request->result;
3239 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
3240 size = (size_t) obj_request->xferred;
3241 ceph_copy_from_page_vector(pages, buf, 0, size);
3242 rbd_assert(size <= (size_t)INT_MAX);
3246 rbd_obj_request_put(obj_request);
3248 ceph_release_page_vector(pages, page_count);
3254 * Read the complete header for the given rbd device. On successful
3255 * return, the rbd_dev->header field will contain up-to-date
3256 * information about the image.
3258 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
3260 struct rbd_image_header_ondisk *ondisk = NULL;
3267 * The complete header will include an array of its 64-bit
3268 * snapshot ids, followed by the names of those snapshots as
3269 * a contiguous block of NUL-terminated strings. Note that
3270 * the number of snapshots could change by the time we read
3271 * it in, in which case we re-read it.
3278 size = sizeof (*ondisk);
3279 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3281 ondisk = kmalloc(size, GFP_KERNEL);
3285 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
3289 if ((size_t)ret < size) {
3291 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3295 if (!rbd_dev_ondisk_valid(ondisk)) {
3297 rbd_warn(rbd_dev, "invalid header");
3301 names_size = le64_to_cpu(ondisk->snap_names_len);
3302 want_count = snap_count;
3303 snap_count = le32_to_cpu(ondisk->snap_count);
3304 } while (snap_count != want_count);
3306 ret = rbd_header_from_disk(rbd_dev, ondisk);
3314 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3315 * has disappeared from the (just updated) snapshot context.
3317 static void rbd_exists_validate(struct rbd_device *rbd_dev)
3321 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3324 snap_id = rbd_dev->spec->snap_id;
3325 if (snap_id == CEPH_NOSNAP)
3328 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3329 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3332 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3337 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
3338 mapping_size = rbd_dev->mapping.size;
3339 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
3340 if (rbd_dev->image_format == 1)
3341 ret = rbd_dev_v1_header_info(rbd_dev);
3343 ret = rbd_dev_v2_header_info(rbd_dev);
3345 /* If it's a mapped snapshot, validate its EXISTS flag */
3347 rbd_exists_validate(rbd_dev);
3348 mutex_unlock(&ctl_mutex);
3349 if (mapping_size != rbd_dev->mapping.size) {
3352 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3353 dout("setting size to %llu sectors", (unsigned long long)size);
3354 set_capacity(rbd_dev->disk, size);
3355 revalidate_disk(rbd_dev->disk);
3361 static int rbd_init_disk(struct rbd_device *rbd_dev)
3363 struct gendisk *disk;
3364 struct request_queue *q;
3367 /* create gendisk info */
3368 disk = alloc_disk(RBD_MINORS_PER_MAJOR);
3372 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
3374 disk->major = rbd_dev->major;
3375 disk->first_minor = 0;
3376 disk->fops = &rbd_bd_ops;
3377 disk->private_data = rbd_dev;
3379 q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
3383 /* We use the default size, but let's be explicit about it. */
3384 blk_queue_physical_block_size(q, SECTOR_SIZE);
3386 /* set io sizes to object size */
3387 segment_size = rbd_obj_bytes(&rbd_dev->header);
3388 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
3389 blk_queue_max_segment_size(q, segment_size);
3390 blk_queue_io_min(q, segment_size);
3391 blk_queue_io_opt(q, segment_size);
3393 blk_queue_merge_bvec(q, rbd_merge_bvec);
3396 q->queuedata = rbd_dev;
3398 rbd_dev->disk = disk;
3411 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3413 return container_of(dev, struct rbd_device, dev);
3416 static ssize_t rbd_size_show(struct device *dev,
3417 struct device_attribute *attr, char *buf)
3419 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3421 return sprintf(buf, "%llu\n",
3422 (unsigned long long)rbd_dev->mapping.size);
3426 * Note this shows the features for whatever's mapped, which is not
3427 * necessarily the base image.
3429 static ssize_t rbd_features_show(struct device *dev,
3430 struct device_attribute *attr, char *buf)
3432 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3434 return sprintf(buf, "0x%016llx\n",
3435 (unsigned long long)rbd_dev->mapping.features);
3438 static ssize_t rbd_major_show(struct device *dev,
3439 struct device_attribute *attr, char *buf)
3441 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3444 return sprintf(buf, "%d\n", rbd_dev->major);
3446 return sprintf(buf, "(none)\n");
3450 static ssize_t rbd_client_id_show(struct device *dev,
3451 struct device_attribute *attr, char *buf)
3453 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3455 return sprintf(buf, "client%lld\n",
3456 ceph_client_id(rbd_dev->rbd_client->client));
3459 static ssize_t rbd_pool_show(struct device *dev,
3460 struct device_attribute *attr, char *buf)
3462 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3464 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
3467 static ssize_t rbd_pool_id_show(struct device *dev,
3468 struct device_attribute *attr, char *buf)
3470 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3472 return sprintf(buf, "%llu\n",
3473 (unsigned long long) rbd_dev->spec->pool_id);
3476 static ssize_t rbd_name_show(struct device *dev,
3477 struct device_attribute *attr, char *buf)
3479 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3481 if (rbd_dev->spec->image_name)
3482 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3484 return sprintf(buf, "(unknown)\n");
3487 static ssize_t rbd_image_id_show(struct device *dev,
3488 struct device_attribute *attr, char *buf)
3490 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3492 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
3496 * Shows the name of the currently-mapped snapshot (or
3497 * RBD_SNAP_HEAD_NAME for the base image).
3499 static ssize_t rbd_snap_show(struct device *dev,
3500 struct device_attribute *attr,
3503 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3505 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
3509 * For an rbd v2 image, shows the pool id, image id, and snapshot id
3510 * for the parent image. If there is no parent, simply shows
3511 * "(no parent image)".
3513 static ssize_t rbd_parent_show(struct device *dev,
3514 struct device_attribute *attr,
3517 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3518 struct rbd_spec *spec = rbd_dev->parent_spec;
3523 return sprintf(buf, "(no parent image)\n");
3525 count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
3526 (unsigned long long) spec->pool_id, spec->pool_name);
3531 count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
3532 spec->image_name ? spec->image_name : "(unknown)");
3537 count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
3538 (unsigned long long) spec->snap_id, spec->snap_name);
3543 count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
3548 return (ssize_t) (bufp - buf);
3551 static ssize_t rbd_image_refresh(struct device *dev,
3552 struct device_attribute *attr,
3556 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3559 ret = rbd_dev_refresh(rbd_dev);
3561 rbd_warn(rbd_dev, "manual header refresh error (%d)", ret);
3563 return ret < 0 ? ret : size;
3566 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
3567 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
3568 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
3569 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3570 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
3571 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
3572 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
3573 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
3574 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3575 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
3576 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
3578 static struct attribute *rbd_attrs[] = {
3579 &dev_attr_size.attr,
3580 &dev_attr_features.attr,
3581 &dev_attr_major.attr,
3582 &dev_attr_client_id.attr,
3583 &dev_attr_pool.attr,
3584 &dev_attr_pool_id.attr,
3585 &dev_attr_name.attr,
3586 &dev_attr_image_id.attr,
3587 &dev_attr_current_snap.attr,
3588 &dev_attr_parent.attr,
3589 &dev_attr_refresh.attr,
3593 static struct attribute_group rbd_attr_group = {
3597 static const struct attribute_group *rbd_attr_groups[] = {
3602 static void rbd_sysfs_dev_release(struct device *dev)
3606 static struct device_type rbd_device_type = {
3608 .groups = rbd_attr_groups,
3609 .release = rbd_sysfs_dev_release,
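/*
 * Usage sketch: once registered, the attribute group above is
 * visible under sysfs for each mapped device (paths assume the
 * "%d" device naming used in rbd_bus_add_dev() below), e.g. for
 * device id 0:
 *
 *	cat /sys/bus/rbd/devices/0/size
 *	cat /sys/bus/rbd/devices/0/pool
 *	cat /sys/bus/rbd/devices/0/current_snap
 *	echo 1 > /sys/bus/rbd/devices/0/refresh	  -- manual refresh
 */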
3612 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
3614 kref_get(&spec->kref);
3619 static void rbd_spec_free(struct kref *kref);
3620 static void rbd_spec_put(struct rbd_spec *spec)
3623 kref_put(&spec->kref, rbd_spec_free);
3626 static struct rbd_spec *rbd_spec_alloc(void)
3628 struct rbd_spec *spec;
3630 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
3633 kref_init(&spec->kref);
3638 static void rbd_spec_free(struct kref *kref)
3640 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
3642 kfree(spec->pool_name);
3643 kfree(spec->image_id);
3644 kfree(spec->image_name);
3645 kfree(spec->snap_name);
3649 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
3650 struct rbd_spec *spec)
3652 struct rbd_device *rbd_dev;
3654 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
3658 spin_lock_init(&rbd_dev->lock);
3660 atomic_set(&rbd_dev->parent_ref, 0);
3661 INIT_LIST_HEAD(&rbd_dev->node);
3662 init_rwsem(&rbd_dev->header_rwsem);
3664 rbd_dev->spec = spec;
3665 rbd_dev->rbd_client = rbdc;
3667 /* Initialize the layout used for all rbd requests */
3669 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3670 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
3671 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3672 rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
3677 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
3679 rbd_put_client(rbd_dev->rbd_client);
3680 rbd_spec_put(rbd_dev->spec);
3685 * Get the size and object order for an image snapshot, or if
3686 * snap_id is CEPH_NOSNAP, gets this information for the base
3689 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
3690 u8 *order, u64 *snap_size)
3692 __le64 snapid = cpu_to_le64(snap_id);
3697 } __attribute__ ((packed)) size_buf = { 0 };
3699 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3701 &snapid, sizeof (snapid),
3702 &size_buf, sizeof (size_buf));
3703 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3706 if (ret < sizeof (size_buf))
3710 *order = size_buf.order;
3711 *snap_size = le64_to_cpu(size_buf.size);
3713 dout(" snap_id 0x%016llx order = %u, snap_size = %llu\n",
3714 (unsigned long long)snap_id, (unsigned int)*order,
3715 (unsigned long long)*snap_size);
3720 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
3722 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
3723 &rbd_dev->header.obj_order,
3724 &rbd_dev->header.image_size);
3727 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
3733 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
3737 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3738 "rbd", "get_object_prefix", NULL, 0,
3739 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
3740 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3745 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
3746 p + ret, NULL, GFP_NOIO);
3749 if (IS_ERR(rbd_dev->header.object_prefix)) {
3750 ret = PTR_ERR(rbd_dev->header.object_prefix);
3751 rbd_dev->header.object_prefix = NULL;
3753 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
3761 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
3764 __le64 snapid = cpu_to_le64(snap_id);
3768 } __attribute__ ((packed)) features_buf = { 0 };
3772 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3773 "rbd", "get_features",
3774 &snapid, sizeof (snapid),
3775 &features_buf, sizeof (features_buf));
3776 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3779 if (ret < sizeof (features_buf))
3782 incompat = le64_to_cpu(features_buf.incompat);
3783 if (incompat & ~RBD_FEATURES_SUPPORTED)
3786 *snap_features = le64_to_cpu(features_buf.features);
3788 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
3789 (unsigned long long)snap_id,
3790 (unsigned long long)*snap_features,
3791 (unsigned long long)le64_to_cpu(features_buf.incompat));
3796 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
3798 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
3799 &rbd_dev->header.features);
3802 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
3804 struct rbd_spec *parent_spec;
3806 void *reply_buf = NULL;
3815 parent_spec = rbd_spec_alloc();
3819 size = sizeof (__le64) + /* pool_id */
3820 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
3821 sizeof (__le64) + /* snap_id */
3822 sizeof (__le64); /* overlap */
3823 reply_buf = kmalloc(size, GFP_KERNEL);
3829 snapid = cpu_to_le64(CEPH_NOSNAP);
3830 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3831 "rbd", "get_parent",
3832 &snapid, sizeof (snapid),
3834 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3839 end = reply_buf + ret;
3841 ceph_decode_64_safe(&p, end, pool_id, out_err);
3842 if (pool_id == CEPH_NOPOOL) {
3844 * Either the parent never existed, or we have a
3845 * record of it but the image got flattened so it no
3846 * longer has a parent. When the parent of a
3847 * layered image disappears we immediately set the
3848 * overlap to 0. The effect of this is that all new
3849 * requests will be treated as if the image had no parent.
3852 if (rbd_dev->parent_overlap) {
3853 rbd_dev->parent_overlap = 0;
3855 rbd_dev_parent_put(rbd_dev);
3856 pr_info("%s: clone image has been flattened\n",
3857 rbd_dev->disk->disk_name);
3860 goto out; /* No parent? No problem. */
3863 /* The ceph file layout needs to fit pool id in 32 bits */
3866 if (pool_id > (u64)U32_MAX) {
3867 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
3868 (unsigned long long)pool_id, U32_MAX);
3871 parent_spec->pool_id = pool_id;
3873 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
3874 if (IS_ERR(image_id)) {
3875 ret = PTR_ERR(image_id);
3878 parent_spec->image_id = image_id;
3879 ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
3880 ceph_decode_64_safe(&p, end, overlap, out_err);
3883 rbd_spec_put(rbd_dev->parent_spec);
3884 rbd_dev->parent_spec = parent_spec;
3885 parent_spec = NULL; /* rbd_dev now owns this */
3886 rbd_dev->parent_overlap = overlap;
3888 rbd_warn(rbd_dev, "ignoring parent of clone with overlap 0");
3894 rbd_spec_put(parent_spec);
3899 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
3903 __le64 stripe_count;
3904 } __attribute__ ((packed)) striping_info_buf = { 0 };
3905 size_t size = sizeof (striping_info_buf);
3912 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3913 "rbd", "get_stripe_unit_count", NULL, 0,
3914 (char *)&striping_info_buf, size);
3915 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3922 * We don't actually support the "fancy striping" feature
3923 * (STRIPINGV2) yet, but if the striping sizes are the
3924 * defaults the behavior is the same as before. So find
3925 * out, and only fail if the image has non-default values.
3928 obj_size = (u64)1 << rbd_dev->header.obj_order;
3929 p = &striping_info_buf;
3930 stripe_unit = ceph_decode_64(&p);
3931 if (stripe_unit != obj_size) {
3932 rbd_warn(rbd_dev, "unsupported stripe unit "
3933 "(got %llu want %llu)",
3934 stripe_unit, obj_size);
3937 stripe_count = ceph_decode_64(&p);
3938 if (stripe_count != 1) {
3939 rbd_warn(rbd_dev, "unsupported stripe count "
3940 "(got %llu want 1)", stripe_count);
3943 rbd_dev->header.stripe_unit = stripe_unit;
3944 rbd_dev->header.stripe_count = stripe_count;
3949 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
3951 size_t image_id_size;
3956 void *reply_buf = NULL;
3958 char *image_name = NULL;
3961 rbd_assert(!rbd_dev->spec->image_name);
3963 len = strlen(rbd_dev->spec->image_id);
3964 image_id_size = sizeof (__le32) + len;
3965 image_id = kmalloc(image_id_size, GFP_KERNEL);
3970 end = image_id + image_id_size;
3971 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
3973 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
3974 reply_buf = kmalloc(size, GFP_KERNEL);
3978 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
3979 "rbd", "dir_get_name",
3980 image_id, image_id_size,
3985 end = reply_buf + ret;
3987 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
3988 if (IS_ERR(image_name))
3991 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
3999 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4001 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4002 const char *snap_name;
4005 /* Skip over names until we find the one we are looking for */
4007 snap_name = rbd_dev->header.snap_names;
4008 while (which < snapc->num_snaps) {
4009 if (!strcmp(name, snap_name))
4010 return snapc->snaps[which];
4011 snap_name += strlen(snap_name) + 1;
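/*
 * For reference, the v1 snap_names buffer walked above is one
 * contiguous block of NUL-terminated strings, in the same order as
 * snapc->snaps[]. For snapshots "one" and "two" it would contain:
 *
 *	"one\0two\0"
 *
 * which is why advancing by strlen() + 1 steps to the next name.
 */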
4017 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4019 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4024 for (which = 0; !found && which < snapc->num_snaps; which++) {
4025 const char *snap_name;
4027 snap_id = snapc->snaps[which];
4028 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
4029 if (IS_ERR(snap_name))
4031 found = !strcmp(name, snap_name);
4034 return found ? snap_id : CEPH_NOSNAP;
4038 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
4039 * no snapshot by that name is found, or if an error occurs.
4041 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4043 if (rbd_dev->image_format == 1)
4044 return rbd_v1_snap_id_by_name(rbd_dev, name);
4046 return rbd_v2_snap_id_by_name(rbd_dev, name);
4050 * When an rbd image has a parent image, it is identified by the
4051 * pool, image, and snapshot ids (not names). This function fills
4052 * in the names for those ids. (It's OK if we can't figure out the
4053 * name for an image id, but the pool and snapshot ids should always
4054 * exist and have names.) All names in an rbd spec are dynamically
4057 * When an image being mapped (not a parent) is probed, we have the
4058 * pool name and pool id, image name and image id, and the snapshot
4059 * name. The only thing we're missing is the snapshot id.
4061 static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
4063 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4064 struct rbd_spec *spec = rbd_dev->spec;
4065 const char *pool_name;
4066 const char *image_name;
4067 const char *snap_name;
4071 * An image being mapped will have the pool name (etc.), but
4072 * we need to look up the snapshot id.
4074 if (spec->pool_name) {
4075 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
4078 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
4079 if (snap_id == CEPH_NOSNAP)
4081 spec->snap_id = snap_id;
4083 spec->snap_id = CEPH_NOSNAP;
4089 /* Get the pool name; we have to make our own copy of this */
4091 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
4093 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
4096 pool_name = kstrdup(pool_name, GFP_KERNEL);
4100 /* Fetch the image name; tolerate failure here */
4102 image_name = rbd_dev_image_name(rbd_dev);
4104 rbd_warn(rbd_dev, "unable to get image name");
4106 /* Look up the snapshot name, and make a copy */
4108 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
4114 spec->pool_name = pool_name;
4115 spec->image_name = image_name;
4116 spec->snap_name = snap_name;
4126 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
4135 struct ceph_snap_context *snapc;
4139 * We'll need room for the seq value (maximum snapshot id),
4140 * snapshot count, and array of that many snapshot ids.
4141 * For now we have a fixed upper limit on the number we're
4142 * prepared to receive.
4144 size = sizeof (__le64) + sizeof (__le32) +
4145 RBD_MAX_SNAP_COUNT * sizeof (__le64);
4146 reply_buf = kzalloc(size, GFP_KERNEL);
4150 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4151 "rbd", "get_snapcontext", NULL, 0,
4153 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4158 end = reply_buf + ret;
4160 ceph_decode_64_safe(&p, end, seq, out);
4161 ceph_decode_32_safe(&p, end, snap_count, out);
4164 * Make sure the reported number of snapshot ids wouldn't go
4165 * beyond the end of our buffer. But before checking that,
4166 * make sure the computed size of the snapshot context we
4167 * allocate is representable in a size_t.
4169 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
4174 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
4178 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
4184 for (i = 0; i < snap_count; i++)
4185 snapc->snaps[i] = ceph_decode_64(&p);
4187 ceph_put_snap_context(rbd_dev->header.snapc);
4188 rbd_dev->header.snapc = snapc;
4190 dout(" snap context seq = %llu, snap_count = %u\n",
4191 (unsigned long long)seq, (unsigned int)snap_count);
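/*
 * Sizing note (worked arithmetic): with RBD_MAX_SNAP_COUNT snapshots
 * the reply buffer allocated above comes to
 *
 *	sizeof (__le64) + sizeof (__le32) + 510 * sizeof (__le64)
 *	    = 8 + 4 + 4080 = 4092 bytes
 *
 * so even the largest permitted snapshot context fits in one 4 KB
 * page.
 */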
4198 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
4209 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
4210 reply_buf = kmalloc(size, GFP_KERNEL);
4212 return ERR_PTR(-ENOMEM);
4214 snapid = cpu_to_le64(snap_id);
4215 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4216 "rbd", "get_snapshot_name",
4217 &snapid, sizeof (snapid),
4219 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4221 snap_name = ERR_PTR(ret);
4226 end = reply_buf + ret;
4227 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4228 if (IS_ERR(snap_name))
4231 dout(" snap_id 0x%016llx snap_name = %s\n",
4232 (unsigned long long)snap_id, snap_name);
4239 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
4241 bool first_time = rbd_dev->header.object_prefix == NULL;
4244 down_write(&rbd_dev->header_rwsem);
4247 ret = rbd_dev_v2_header_onetime(rbd_dev);
4253 * If the image supports layering, get the parent info. We
4254 * need to probe the first time regardless. Thereafter we
4255 * only need to do so if there's a parent, to see if it has
4256 * disappeared due to the mapped image getting flattened.
4258 if (rbd_dev->header.features & RBD_FEATURE_LAYERING &&
4259 (first_time || rbd_dev->parent_spec)) {
4262 ret = rbd_dev_v2_parent_info(rbd_dev);
4267 * Print a warning if this is the initial probe and
4268 * the image has a parent. Don't print it if the
4269 * image now being probed is itself a parent. We
4270 * can tell at this point because we won't know its
4271 * pool name yet (just its pool id).
4273 warn = rbd_dev->parent_spec && rbd_dev->spec->pool_name;
4274 if (first_time && warn)
4275 rbd_warn(rbd_dev, "WARNING: kernel layering "
4276 "is EXPERIMENTAL!");
4279 ret = rbd_dev_v2_image_size(rbd_dev);
4283 if (rbd_dev->spec->snap_id == CEPH_NOSNAP)
4284 if (rbd_dev->mapping.size != rbd_dev->header.image_size)
4285 rbd_dev->mapping.size = rbd_dev->header.image_size;
4287 ret = rbd_dev_v2_snap_context(rbd_dev);
4288 dout("rbd_dev_v2_snap_context returned %d\n", ret);
4290 up_write(&rbd_dev->header_rwsem);
4295 static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
4300 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
4302 dev = &rbd_dev->dev;
4303 dev->bus = &rbd_bus_type;
4304 dev->type = &rbd_device_type;
4305 dev->parent = &rbd_root_dev;
4306 dev->release = rbd_dev_device_release;
4307 dev_set_name(dev, "%d", rbd_dev->dev_id);
4308 ret = device_register(dev);
4310 mutex_unlock(&ctl_mutex);
4315 static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
4317 device_unregister(&rbd_dev->dev);
4320 static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);
4323 * Get a unique rbd identifier for the given new rbd_dev, and add
4324 * the rbd_dev to the global list. The minimum rbd id is 1.
4326 static void rbd_dev_id_get(struct rbd_device *rbd_dev)
4328 rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);
4330 spin_lock(&rbd_dev_list_lock);
4331 list_add_tail(&rbd_dev->node, &rbd_dev_list);
4332 spin_unlock(&rbd_dev_list_lock);
4333 dout("rbd_dev %p given dev id %llu\n", rbd_dev,
4334 (unsigned long long) rbd_dev->dev_id);
4338 * Remove an rbd_dev from the global list, and record that its
4339 * identifier is no longer in use.
4341 static void rbd_dev_id_put(struct rbd_device *rbd_dev)
4343 struct list_head *tmp;
4344 int rbd_id = rbd_dev->dev_id;
4347 rbd_assert(rbd_id > 0);
4349 dout("rbd_dev %p released dev id %llu\n", rbd_dev,
4350 (unsigned long long) rbd_dev->dev_id);
4351 spin_lock(&rbd_dev_list_lock);
4352 list_del_init(&rbd_dev->node);
4355 * If the id being "put" is not the current maximum, there
4356 * is nothing special we need to do.
4358 if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
4359 spin_unlock(&rbd_dev_list_lock);
4364 * We need to update the current maximum id. Search the
4365 * list to find out what it is. We're more likely to find
4366 * the maximum at the end, so search the list backward.
4369 list_for_each_prev(tmp, &rbd_dev_list) {
4370 struct rbd_device *rbd_dev;
4372 rbd_dev = list_entry(tmp, struct rbd_device, node);
4373 if (rbd_dev->dev_id > max_id)
4374 max_id = rbd_dev->dev_id;
4376 spin_unlock(&rbd_dev_list_lock);
4379 * The max id could have been updated by rbd_dev_id_get(), in
4380 * which case it now accurately reflects the new maximum.
4381 * Be careful not to overwrite the maximum value in that case.
4384 atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
4385 dout(" max dev id has been reset\n");
4389 * Skips over white space at *buf, and updates *buf to point to the
4390 * first found non-space character (if any). Returns the length of
4391 * the token (string of non-white space characters) found. Note
4392 * that *buf must be terminated with '\0'.
4394 static inline size_t next_token(const char **buf)
4397 * These are the characters that produce nonzero for
4398 * isspace() in the "C" and "POSIX" locales.
4400 const char *spaces = " \f\n\r\t\v";
4402 *buf += strspn(*buf, spaces); /* Find start of token */
4404 return strcspn(*buf, spaces); /* Return token length */
4408 * Finds the next token in *buf, and if the provided token buffer is
4409 * big enough, copies the found token into it. The result, if
4410 * copied, is guaranteed to be terminated with '\0'. Note that *buf
4411 * must be terminated with '\0' on entry.
4413 * Returns the length of the token found (not including the '\0').
4414 * Return value will be 0 if no token is found, and it will be >=
4415 * token_size if the token would not fit.
4417 * The *buf pointer will be updated to point beyond the end of the
4418 * found token. Note that this occurs even if the token buffer is
4419 * too small to hold it.
4421 static inline size_t copy_token(const char **buf,
4427 len = next_token(buf);
4428 if (len < token_size) {
4429 memcpy(token, *buf, len);
4430 *(token + len) = '\0';
4438 * Finds the next token in *buf, dynamically allocates a buffer big
4439 * enough to hold a copy of it, and copies the token into the new
4440 * buffer. The copy is guaranteed to be terminated with '\0'. Note
4441 * that a duplicate buffer is created even for a zero-length token.
4443 * Returns a pointer to the newly-allocated duplicate, or a null
4444 * pointer if memory for the duplicate was not available. If
4445 * the lenp argument is a non-null pointer, the length of the token
4446 * (not including the '\0') is returned in *lenp.
4448 * If successful, the *buf pointer will be updated to point beyond
4449 * the end of the found token.
4451 * Note: uses GFP_KERNEL for allocation.
4453 static inline char *dup_token(const char **buf, size_t *lenp)
4458 len = next_token(buf);
4459 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
4462 *(dup + len) = '\0';
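/*
 * Illustrative walk-through of the token helpers on a hypothetical
 * buffer:
 *
 *	const char *buf = "1.2.3.4:6789 name=admin rbd";
 *
 *	next_token(&buf) returns 12 and leaves buf pointing at
 *	"1.2.3.4:6789 ..." (the token itself is not consumed);
 *	dup_token(&buf, &len) additionally copies the token into a
 *	freshly allocated, NUL-terminated string and advances buf
 *	past it, ready for the next call.
 */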
4472 * Parse the options provided for an "rbd add" (i.e., rbd image
4473 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
4474 * and the data written is passed here via a NUL-terminated buffer.
4475 * Returns 0 if successful or an error code otherwise.
4477 * The information extracted from these options is recorded in
4478 * the other parameters, which return dynamically-allocated structures:
4481 * The address of a pointer that will refer to a ceph options
4482 * structure. Caller must release the returned pointer using
4483 * ceph_destroy_options() when it is no longer needed.
4485 * Address of an rbd options pointer. Fully initialized by
4486 * this function; caller must release with kfree().
4488 * Address of an rbd image specification pointer. Fully
4489 * initialized by this function based on parsed options.
4490 * Caller must release with rbd_spec_put().
4492 * The options passed take this form:
4493 * <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
4496 * A comma-separated list of one or more monitor addresses.
4497 * A monitor address is an ip address, optionally followed
4498 * by a port number (separated by a colon).
4499 * I.e.: ip1[:port1][,ip2[:port2]...]
4501 * A comma-separated list of ceph and/or rbd options.
4503 * The name of the rados pool containing the rbd image.
4505 * The name of the image in that pool to map.
4507 * An optional snapshot name. If provided, the mapping will
4508 * present data from the image at the time that snapshot was
4509 * created. The image head is used if no snapshot name is
4510 * provided. Snapshot mappings are always read-only.
4512 static int rbd_add_parse_args(const char *buf,
4513 struct ceph_options **ceph_opts,
4514 struct rbd_options **opts,
4515 struct rbd_spec **rbd_spec)
4519 const char *mon_addrs;
4521 size_t mon_addrs_size;
4522 struct rbd_spec *spec = NULL;
4523 struct rbd_options *rbd_opts = NULL;
4524 struct ceph_options *copts;
4527 /* The first four tokens are required */
4529 len = next_token(&buf);
4531 rbd_warn(NULL, "no monitor address(es) provided");
4535 mon_addrs_size = len + 1;
4539 options = dup_token(&buf, NULL);
4543 rbd_warn(NULL, "no options provided");
4547 spec = rbd_spec_alloc();
4551 spec->pool_name = dup_token(&buf, NULL);
4552 if (!spec->pool_name)
4554 if (!*spec->pool_name) {
4555 rbd_warn(NULL, "no pool name provided");
4559 spec->image_name = dup_token(&buf, NULL);
4560 if (!spec->image_name)
4562 if (!*spec->image_name) {
4563 rbd_warn(NULL, "no image name provided");
4568 * Snapshot name is optional; default is to use "-"
4569 * (indicating the head/no snapshot).
4571 len = next_token(&buf);
4573 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
4574 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
4575 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
4576 ret = -ENAMETOOLONG;
4579 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
4582 *(snap_name + len) = '\0';
4583 spec->snap_name = snap_name;
4585 /* Initialize all rbd options to the defaults */
4587 rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
4591 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
4593 copts = ceph_parse_options(options, mon_addrs,
4594 mon_addrs + mon_addrs_size - 1,
4595 parse_rbd_opts_token, rbd_opts);
4596 if (IS_ERR(copts)) {
4597 ret = PTR_ERR(copts);
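/*
 * Example (hypothetical values): writing
 *
 *	1.2.3.4:6789 name=admin rbd myimage mysnap
 *
 * to /sys/bus/rbd/add is parsed by the function above as monitor
 * address "1.2.3.4:6789", option string "name=admin", pool "rbd",
 * image "myimage" and snapshot "mysnap" (the snapshot name being
 * optional, with RBD_SNAP_HEAD_NAME substituted if absent).
 */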

/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	char *object_name;
	void *response;
	char *image_id;

	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
	 * need to fetch the image id again in this case.  We
	 * do still need to set the image format though.
	 */
	if (rbd_dev->spec->image_id) {
		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
		return 0;
	}

	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
	size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
	object_name = kmalloc(size, GFP_NOIO);
	if (!object_name)
		return -ENOMEM;
	sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
	dout("rbd id object name is %s\n", object_name);

	/* Response will be an encoded string, which includes a length */

	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

	/* If it doesn't exist we'll assume it's a format 1 image */

	ret = rbd_obj_method_sync(rbd_dev, object_name,
				"rbd", "get_id", NULL, 0,
				response, RBD_IMAGE_ID_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret == -ENOENT) {
		image_id = kstrdup("", GFP_KERNEL);
		ret = image_id ? 0 : -ENOMEM;
		if (!ret)
			rbd_dev->image_format = 1;
	} else if (ret > sizeof (__le32)) {
		void *p = response;

		image_id = ceph_extract_encoded_string(&p, p + ret,
						NULL, GFP_NOIO);
		ret = IS_ERR(image_id) ? PTR_ERR(image_id) : 0;
		if (!ret)
			rbd_dev->image_format = 2;
	} else {
		ret = -EINVAL;
	}

	if (!ret) {
		rbd_dev->spec->image_id = image_id;
		dout("image_id is %s\n", image_id);
	}
out:
	kfree(response);
	kfree(object_name);
	return ret;
}
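
/*
 * For illustration (the image name is hypothetical): if RBD_ID_PREFIX
 * is "rbd_id.", an image named "foo" has its id recorded in an object
 * named "rbd_id.foo", and the "get_id" class method returns that id
 * as a length-prefixed encoded string.  A format 1 image has no such
 * object, which is why -ENOENT above maps to image_format 1 with an
 * empty image_id.
 */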

/*
 * Undo whatever state changes are made by v1 or v2 header info
 * routines.
 */
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
	struct rbd_image_header *header;

	/* Drop parent reference unless it's already been done (or none) */

	if (rbd_dev->parent_overlap)
		rbd_dev_parent_put(rbd_dev);

	/* Free dynamic fields from the header, then zero it out */

	header = &rbd_dev->header;
	ceph_put_snap_context(header->snapc);
	kfree(header->snap_sizes);
	kfree(header->snap_names);
	kfree(header->object_prefix);
	memset(header, 0, sizeof (*header));
}

static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret)
		goto out_err;

	/*
	 * Get and check the features for the image.  Currently the
	 * features are assumed to never change.
	 */
	ret = rbd_dev_v2_features(rbd_dev);
	if (ret)
		goto out_err;

	/* If the image supports fancy striping, get its parameters */

	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}

	/* No support for crypto and compression type format 2 images */

	return 0;
out_err:
	rbd_dev->header.features = 0;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;
	return ret;
}

static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
{
	struct rbd_device *parent = NULL;
	struct rbd_spec *parent_spec;
	struct rbd_client *rbdc;
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;
	/*
	 * We need to pass a reference to the client and the parent
	 * spec when creating the parent rbd_dev.  Images related by
	 * parent/child relationships always share both.
	 */
	parent_spec = rbd_spec_get(rbd_dev->parent_spec);
	rbdc = __rbd_get_client(rbd_dev->rbd_client);

	ret = -ENOMEM;
	parent = rbd_dev_create(rbdc, parent_spec);
	if (!parent)
		goto out_err;

	ret = rbd_dev_image_probe(parent, false);
	if (ret < 0)
		goto out_err;
	rbd_dev->parent = parent;
	atomic_set(&rbd_dev->parent_ref, 1);
	return 0;
out_err:
	if (parent) {
		rbd_dev_unparent(rbd_dev);
		kfree(rbd_dev->header_name);
		rbd_dev_destroy(parent);
	} else {
		rbd_put_client(rbdc);
		rbd_spec_put(parent_spec);
	}
	return ret;
}
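
/*
 * Note that rbd_dev_image_probe() recurses back through this function,
 * so a chain of clones is probed all the way up.  For example (a
 * hypothetical chain): mapping a clone "child" of image "base" probes
 * "child" with mapping set, then "base" with mapping false; only the
 * image actually being mapped gets a watch on its header object.
 */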

static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;

	/* generate unique id: find highest unique id, add one */
	rbd_dev_id_get(rbd_dev);

	/* Fill in the device name, now that we have its id. */
	BUILD_BUG_ON(DEV_NAME_LEN
			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);

	/* Get our block major device number. */

	ret = register_blkdev(0, rbd_dev->name);
	if (ret < 0)
		goto err_out_id;
	rbd_dev->major = ret;

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		goto err_out_disk;
	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);

	ret = rbd_bus_add_dev(rbd_dev);
	if (ret)
		goto err_out_mapping;

	/* Everything's ready.  Announce the disk to the world. */

	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	add_disk(rbd_dev->disk);

	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long) rbd_dev->mapping.size);
	return ret;

err_out_mapping:
	rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);
	return ret;
}
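
/*
 * Illustration: the first image mapped gets dev_id 0 and shows up as
 * block device "rbd0".  Passing 0 to register_blkdev() asks the kernel
 * to assign an unused major number dynamically.
 */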

static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	size_t size;

	/* Record the header object name for this rbd image. */

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
	else
		size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);

	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name)
		return -ENOMEM;

	if (rbd_dev->image_format == 1)
		sprintf(rbd_dev->header_name, "%s%s",
			spec->image_name, RBD_SUFFIX);
	else
		sprintf(rbd_dev->header_name, "%s%s",
			RBD_HEADER_PREFIX, spec->image_id);
	return 0;
}
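
/*
 * For illustration (names hypothetical): if RBD_SUFFIX is ".rbd" and
 * RBD_HEADER_PREFIX is "rbd_header.", a format 1 image "foo" gets the
 * header object "foo.rbd", while a format 2 image with id "abc123"
 * gets "rbd_header.abc123".
 */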

static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	rbd_dev_unprobe(rbd_dev);
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	rbd_dev_destroy(rbd_dev);
}

/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
{
	int ret;
	int tmp;

	/*
	 * Get the id from the image id object.  Unless there's an
	 * error, rbd_dev->spec->image_id will be filled in with
	 * a dynamically-allocated string, and rbd_dev->image_format
	 * will be set to either 1 or 2.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;
	rbd_assert(rbd_dev->spec->image_id);
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	if (mapping) {
		ret = rbd_dev_header_watch_sync(rbd_dev, true);
		if (ret)
			goto out_header_name;
	}

	if (rbd_dev->image_format == 1)
		ret = rbd_dev_v1_header_info(rbd_dev);
	else
		ret = rbd_dev_v2_header_info(rbd_dev);
	if (ret)
		goto err_out_watch;

	ret = rbd_dev_spec_update(rbd_dev);
	if (ret)
		goto err_out_probe;

	ret = rbd_dev_probe_parent(rbd_dev);
	if (ret)
		goto err_out_probe;

	dout("discovered format %u image, header name is %s\n",
		rbd_dev->image_format, rbd_dev->header_name);
	return 0;

err_out_probe:
	rbd_dev_unprobe(rbd_dev);
err_out_watch:
	if (mapping) {
		tmp = rbd_dev_header_watch_sync(rbd_dev, false);
		if (tmp)
			rbd_warn(rbd_dev, "unable to tear down "
				"watch request (%d)\n", tmp);
	}
out_header_name:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	dout("probe failed, returning %d\n", ret);
	return ret;
}

static ssize_t rbd_add(struct bus_type *bus,
			const char *buf,
			size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	struct ceph_osd_client *osdc;
	bool read_only;
	int rc = -ENOMEM;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto err_out_module;
	read_only = rbd_opts->read_only;
	kfree(rbd_opts);
	rbd_opts = NULL;	/* done with this */

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}

	/* pick the pool */
	osdc = &rbdc->client->osdc;
	rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
	if (rc < 0)
		goto err_out_client;
	spec->pool_id = (u64)rc;

	/* The ceph file layout needs to fit pool id in 32 bits */

	if (spec->pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "pool id too large (%llu > %u)\n",
			(unsigned long long)spec->pool_id, U32_MAX);
		rc = -EIO;
		goto err_out_client;
	}

	rc = -ENOMEM;
	rbd_dev = rbd_dev_create(rbdc, spec);
	if (!rbd_dev)
		goto err_out_client;
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */

	rc = rbd_dev_image_probe(rbd_dev, true);
	if (rc < 0)
		goto err_out_rbd_dev;

	/* If we are mapping a snapshot it must be marked read-only */

	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		read_only = true;
	rbd_dev->mapping.read_only = read_only;

	rc = rbd_dev_device_setup(rbd_dev);
	if (rc) {
		rbd_dev_image_release(rbd_dev);
		goto err_out_module;
	}

	return count;

err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	rbd_spec_put(spec);
err_out_module:
	module_put(THIS_MODULE);

	dout("Error adding device %s\n", buf);

	return (ssize_t)rc;
}
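
/*
 * From userspace, mapping is triggered by writing the option string to
 * the bus "add" file (addresses, credentials and names hypothetical):
 *
 *	$ echo "1.2.3.4:6789 name=admin,secret=<key> rbd myimage" \
 *		> /sys/bus/rbd/add
 *
 * On success the write consumes the whole buffer and a new rbd<id>
 * block device appears.
 */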

static struct rbd_device *__rbd_get_dev(unsigned long dev_id)
{
	struct list_head *tmp;
	struct rbd_device *rbd_dev;

	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			spin_unlock(&rbd_dev_list_lock);
			return rbd_dev;
		}
	}
	spin_unlock(&rbd_dev_list_lock);
	return NULL;
}

static void rbd_dev_device_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	rbd_free_disk(rbd_dev);
	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	rbd_dev_mapping_clear(rbd_dev);
	unregister_blkdev(rbd_dev->major, rbd_dev->name);
	rbd_dev->major = 0;
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);
}

static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		first->parent = NULL;
		first->parent_overlap = 0;
		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}
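
/*
 * Illustration: for a hypothetical chain a -> b -> c (c the eldest
 * ancestor), the inner loop walks down to the pair (b, c) and releases
 * c; the outer loop then repeats with the pair (a, b), so ancestors
 * are torn down eldest-first.
 */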

static ssize_t rbd_remove(struct bus_type *bus,
			const char *buf,
			size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	int target_id;
	unsigned long ul;
	int ret;

	ret = strict_strtoul(buf, 10, &ul);
	if (ret)
		return ret;

	/* convert to int; abort if we lost anything in the conversion */
	target_id = (int) ul;
	if (target_id != ul)
		return -EINVAL;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	rbd_dev = __rbd_get_dev(target_id);
	if (!rbd_dev) {
		ret = -ENOENT;
		goto done;
	}

	spin_lock_irq(&rbd_dev->lock);
	if (rbd_dev->open_count)
		ret = -EBUSY;
	else
		set_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
	spin_unlock_irq(&rbd_dev->lock);
	if (ret < 0)
		goto done;
	rbd_bus_del_dev(rbd_dev);
	ret = rbd_dev_header_watch_sync(rbd_dev, false);
	if (ret)
		rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
	rbd_dev_image_release(rbd_dev);
	module_put(THIS_MODULE);
	ret = count;
done:
	mutex_unlock(&ctl_mutex);
	return ret;
}
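
/*
 * The matching userspace operation (device id hypothetical):
 *
 *	$ echo 0 > /sys/bus/rbd/remove
 *
 * The write fails with -EBUSY while the block device is still open.
 */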

/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}

static int rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = kmem_cache_create("rbd_img_request",
					sizeof (struct rbd_img_request),
					__alignof__(struct rbd_img_request),
					0, NULL);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
					sizeof (struct rbd_obj_request),
					__alignof__(struct rbd_obj_request),
					0, NULL);
	if (!rbd_obj_request_cache)
		goto out_err;

	rbd_assert(!rbd_segment_name_cache);
	rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
					MAX_OBJ_NAME_SIZE + 1, 1, 0, NULL);
	if (rbd_segment_name_cache)
		return 0;
out_err:
	if (rbd_obj_request_cache) {
		kmem_cache_destroy(rbd_obj_request_cache);
		rbd_obj_request_cache = NULL;
	}

	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;

	return -ENOMEM;
}
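
/*
 * Unlike the two request caches, which hold structures at their
 * natural alignment, the segment name cache holds NUL-terminated
 * object name strings, so it is byte-aligned and sized for the
 * longest possible name plus its terminator.
 */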

static void rbd_slab_exit(void)
{
	rbd_assert(rbd_segment_name_cache);
	kmem_cache_destroy(rbd_segment_name_cache);
	rbd_segment_name_cache = NULL;

	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}

static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}
	rc = rbd_slab_init();
	if (rc)
		return rc;
	rc = rbd_sysfs_init();
	if (rc)
		rbd_slab_exit();
	else
		pr_info("loaded " RBD_DRV_NAME_LONG "\n");

	return rc;
}

static void __exit rbd_exit(void)
{
	rbd_sysfs_cleanup();
	rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_DESCRIPTION("rados block device");

/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_LICENSE("GPL");