/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

		Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>

#include "rbd_types.h"
#define RBD_DEBUG	/* Activate rbd_assert() calls */
/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
#define RBD_DRV_NAME "rbd"
#define RBD_DRV_NAME_LONG "rbd (rados block device)"

#define RBD_MINORS_PER_MAJOR	256		/* max minors per blkdev */

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN \
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */
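/*
 * Worked numbers (illustrative, assuming the snap context header is
 * roughly 16 bytes): a snapshot context carries one __le64 id per
 * snapshot, so 510 ids occupy 510 * 8 = 4080 bytes, which together
 * with the header still fits in a single 4 KB page; 511 would not.
 */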
#define RBD_SNAP_HEAD_NAME	"-"

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64
#define RBD_FEATURE_LAYERING	1

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_ALL	(0)
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
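/*
 * Worked example (illustrative): with 4-byte ints this evaluates to
 * (5 * 4) / 2 + 1 = 11 characters, exactly enough for "-2147483648".
 * Each byte contributes at most log10(256) ~= 2.41 decimal digits,
 * so 5/2 digits per byte plus one for a sign is safe for any int
 * size, and DEV_NAME_LEN (32) comfortably holds "rbd" plus the id.
 */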
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These fields never change for a given rbd image */
	char *object_prefix;
	u64 features;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;
	u64 *snap_sizes;

	u64 obj_version;
};
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	char		*pool_name;

	char		*image_id;
	char		*image_name;

	u64		snap_id;
	char		*snap_name;

	struct kref	kref;
};
/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};
struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};
struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */

	struct rbd_img_request	*img_request;
	struct list_head	links;		/* img_request->obj_requests */
	u32			which;		/* posn image request list */

	enum obj_request_type	type;
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	u64			version;
	int			result;
	atomic_t		done;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};
struct rbd_img_request {
	struct request		*rq;
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	bool			write_request;	/* false for read */
	union {
		struct ceph_snap_context *snapc;	/* for writes */
		u64		snap_id;		/* for reads */
	};
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
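/*
 * Usage sketch (not from the original source): walking an image
 * request's object requests with the helpers above.
 *
 *	struct rbd_obj_request *obj_request;
 *
 *	for_each_obj_request(img_request, obj_request)
 *		dout("obj %p which %u\n", obj_request, obj_request->which);
 *
 * The _safe variant walks the list in reverse and tolerates removal
 * of the current entry, which is why teardown paths such as
 * rbd_img_request_destroy() use it.
 */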
struct rbd_snap {
	struct device		dev;
	const char		*name;
	u64			size;
	struct list_head	node;
	u64			id;
	u64			features;
};

struct rbd_mapping {
	u64			size;
	u64			features;
	bool			read_only;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;

	char			*header_name;

	struct ceph_file_layout	layout;

	struct ceph_osd_event   *watch_event;
	struct rbd_obj_request	*watch_request;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;

	/* protects updating the header */
	struct rw_semaphore     header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* list of snapshots */
	struct list_head	snaps;

	/* sysfs related */
	struct device		dev;

	unsigned long		open_count;	/* protected by lock */
};
/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};
static DEFINE_MUTEX(ctl_mutex);	  /* Serialize open/close/setup/teardown */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

static int rbd_dev_snaps_update(struct rbd_device *rbd_dev);
static int rbd_dev_snaps_register(struct rbd_device *rbd_dev);

static void rbd_dev_release(struct device *dev);
static void rbd_remove_snap_dev(struct rbd_snap *snap);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);

static struct bus_attribute rbd_bus_attrs[] = {
	__ATTR(add, S_IWUSR, NULL, rbd_add),
	__ATTR(remove, S_IWUSR, NULL, rbd_remove),
	__ATTR_NULL
};
static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_attrs	= rbd_bus_attrs,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}
#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver);
static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	(void) get_device(&rbd_dev->dev);
	set_device_ro(bdev, rbd_dev->mapping.read_only);
	mutex_unlock(&ctl_mutex);

	return 0;
}
static int rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	put_device(&rbd_dev->dev);
	mutex_unlock(&ctl_mutex);

	return 0;
}
static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
};
/*
 * Initialize an rbd client instance.
 * We own *ceph_opts.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_mutex;
	ceph_opts = NULL;	/* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_err;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	mutex_unlock(&ctl_mutex);
	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;

out_err:
	ceph_destroy_client(rbdc->client);
out_mutex:
	mutex_unlock(&ctl_mutex);
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}
/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			kref_get(&client_node->kref);
			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}
/*
 * mount options
 */
enum {
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	/* Boolean args above */
	Opt_last_bool,
};

static match_table_t rbd_opts_tokens = {
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	/* Boolean args above */
	{-1, NULL}
};

struct rbd_options {
	bool	read_only;
};

#define RBD_READ_ONLY_DEFAULT	false
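/*
 * Example (an assumption based on Documentation/ABI/testing/
 * sysfs-bus-rbd): rbd options travel in the same option string as
 * the libceph options given to the add interface, e.g.
 *
 *	echo "1.2.3.4:6789 name=admin,ro rbd myimage -" \
 *		> /sys/bus/rbd/add
 *
 * Tokens libceph does not itself recognize (here "ro") are handed
 * to parse_rbd_opts_token() below.
 */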
static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < 0)
		return -EINVAL;

	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) "
			       "at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token,
		     argstr[0].from);
	} else if (token > Opt_last_string && token < Opt_last_bool) {
		dout("got Boolean token %d\n", token);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	default:
		rbd_assert(false);
		break;
	}
	return 0;
}
/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);

	return rbdc;
}
/*
 * Destroy ceph client
 *
 * Removes the client from rbd_client_list, taking
 * rbd_client_list_lock itself (so the caller must not hold it).
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}
static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}
/*
 * Create a new header structure, translate header format from the on-disk
 * header.
 */
static int rbd_header_from_disk(struct rbd_image_header *header,
				 struct rbd_image_header_ondisk *ondisk)
{
	u32 snap_count;
	size_t len;
	size_t size;
	u32 i;

	memset(header, 0, sizeof (*header));

	snap_count = le32_to_cpu(ondisk->snap_count);

	len = strnlen(ondisk->object_prefix, sizeof (ondisk->object_prefix));
	header->object_prefix = kmalloc(len + 1, GFP_KERNEL);
	if (!header->object_prefix)
		return -ENOMEM;
	memcpy(header->object_prefix, ondisk->object_prefix, len);
	header->object_prefix[len] = '\0';

	if (snap_count) {
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* Save a copy of the snapshot names */

		if (snap_names_len > (u64) SIZE_MAX) {
			kfree(header->object_prefix);
			header->object_prefix = NULL;

			return -EIO;
		}
		header->snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!header->snap_names)
			goto out_err;
		/*
		 * Note that rbd_dev_v1_header_read() guarantees that
		 * the ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(header->snap_names, &ondisk->snaps[snap_count],
			snap_names_len);

		/* Record each snapshot's size */

		size = snap_count * sizeof (*header->snap_sizes);
		header->snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!header->snap_sizes)
			goto out_err;
		for (i = 0; i < snap_count; i++)
			header->snap_sizes[i] =
				le64_to_cpu(ondisk->snaps[i].image_size);
	} else {
		WARN_ON(ondisk->snap_names_len);
		header->snap_names = NULL;
		header->snap_sizes = NULL;
	}

	header->features = 0;	/* No features support in v1 images */
	header->obj_order = ondisk->options.order;
	header->crypt_type = ondisk->options.crypt_type;
	header->comp_type = ondisk->options.comp_type;

	/* Allocate and fill in the snapshot context */

	header->image_size = le64_to_cpu(ondisk->image_size);
	size = sizeof (struct ceph_snap_context);
	size += snap_count * sizeof (header->snapc->snaps[0]);
	header->snapc = kzalloc(size, GFP_KERNEL);
	if (!header->snapc)
		goto out_err;

	atomic_set(&header->snapc->nref, 1);
	header->snapc->seq = le64_to_cpu(ondisk->snap_seq);
	header->snapc->num_snaps = snap_count;
	for (i = 0; i < snap_count; i++)
		header->snapc->snaps[i] =
			le64_to_cpu(ondisk->snaps[i].id);

	return 0;

out_err:
	kfree(header->snap_sizes);
	header->snap_sizes = NULL;
	kfree(header->snap_names);
	header->snap_names = NULL;
	kfree(header->object_prefix);
	header->object_prefix = NULL;

	return -ENOMEM;
}
static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct rbd_snap *snap;

	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	list_for_each_entry(snap, &rbd_dev->snaps, node)
		if (snap_id == snap->id)
			return snap->name;

	return NULL;
}

static int snap_by_name(struct rbd_device *rbd_dev, const char *snap_name)
{
	struct rbd_snap *snap;

	list_for_each_entry(snap, &rbd_dev->snaps, node) {
		if (!strcmp(snap_name, snap->name)) {
			rbd_dev->spec->snap_id = snap->id;
			rbd_dev->mapping.size = snap->size;
			rbd_dev->mapping.features = snap->features;

			return 0;
		}
	}

	return -ENOENT;
}
static int rbd_dev_set_mapping(struct rbd_device *rbd_dev)
{
	int ret;

	if (!memcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME,
		    sizeof (RBD_SNAP_HEAD_NAME))) {
		rbd_dev->spec->snap_id = CEPH_NOSNAP;
		rbd_dev->mapping.size = rbd_dev->header.image_size;
		rbd_dev->mapping.features = rbd_dev->header.features;
		ret = 0;
	} else {
		ret = snap_by_name(rbd_dev, rbd_dev->spec->snap_name);
		if (ret < 0)
			goto done;
		rbd_dev->mapping.read_only = true;
	}
	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);

done:
	return ret;
}
static void rbd_header_free(struct rbd_image_header *header)
{
	kfree(header->object_prefix);
	header->object_prefix = NULL;
	kfree(header->snap_sizes);
	header->snap_sizes = NULL;
	kfree(header->snap_names);
	header->snap_names = NULL;
	ceph_put_snap_context(header->snapc);
	header->snapc = NULL;
}
static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;

	name = kmalloc(MAX_OBJ_NAME_SIZE + 1, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx",
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		kfree(name);
		name = NULL;
	}

	return name;
}
static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}

/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}
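/*
 * Worked example (illustrative): with the default 4 MB objects
 * (obj_order 22), image offset 13 MB falls in segment 13 MB >> 22 = 3
 * (object "<object_prefix>.000000000003"), at segment offset
 * 13 MB & (4 MB - 1) = 1 MB; a 5 MB request starting there is
 * clipped by rbd_segment_length() to 4 MB - 1 MB = 3 MB.
 */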
/*
 * bio helpers
 */

static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}

/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec *bv;
	unsigned long flags;
	void *buf;
	int i;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, i) {
			if (pos + bv->bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(bv, &flags);
				memset(buf + remainder, 0,
				       bv->bv_len - remainder);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv->bv_len;
		}

		chain = chain->bi_next;
	}
}
/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio_vec *bv;
	unsigned int resid;
	unsigned short idx;
	unsigned int voff;
	unsigned short end_idx;
	unsigned short vcnt;
	struct bio *bio;

	/* Handle the easy case for the caller */

	if (!offset && len == bio_src->bi_size)
		return bio_clone(bio_src, gfpmask);

	if (WARN_ON_ONCE(!len))
		return NULL;
	if (WARN_ON_ONCE(len > bio_src->bi_size))
		return NULL;
	if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
		return NULL;

	/* Find first affected segment... */

	resid = offset;
	__bio_for_each_segment(bv, bio_src, idx, 0) {
		if (resid < bv->bv_len)
			break;
		resid -= bv->bv_len;
	}
	voff = resid;

	/* ...and the last affected segment */

	resid += len;
	__bio_for_each_segment(bv, bio_src, end_idx, idx) {
		if (resid <= bv->bv_len)
			break;
		resid -= bv->bv_len;
	}
	vcnt = end_idx - idx + 1;

	/* Build the clone */

	bio = bio_alloc(gfpmask, (unsigned int) vcnt);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio->bi_bdev = bio_src->bi_bdev;
	bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
	bio->bi_rw = bio_src->bi_rw;
	bio->bi_flags |= 1 << BIO_CLONED;

	/*
	 * Copy over our part of the bio_vec, then update the first
	 * and last (or only) entries.
	 */
	memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
			vcnt * sizeof (struct bio_vec));
	bio->bi_io_vec[0].bv_offset += voff;
	if (vcnt > 1) {
		bio->bi_io_vec[0].bv_len -= voff;
		bio->bi_io_vec[vcnt - 1].bv_len = resid;
	} else {
		bio->bi_io_vec[0].bv_len = len;
	}

	bio->bi_vcnt = vcnt;
	bio->bi_size = len;
	bio->bi_idx = 0;

	return bio;
}
/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}
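/*
 * Worked example (illustrative): cloning 3 MB starting 1 MB into a
 * chain of two 2 MB bios yields a two-clone chain (the last 1 MB of
 * the first bio, then all 2 MB of the second).  On return *bio_src
 * has advanced past the second bio (here, the end of the chain) and
 * *offset is 0, so a subsequent call continues the split cleanly.
 */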
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	kref_get(&img_request->kref);
}

static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	kref_put(&img_request->kref, rbd_img_request_destroy);
}

static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	rbd_obj_request_get(obj_request);
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}
static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}

static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
				struct rbd_obj_request *obj_request)
{
	dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);

	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}
static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
	dout("%s: img %p\n", __func__, img_request);
	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}

/* Caller is responsible for rbd_obj_request_destroy(obj_request) */

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);

	return wait_for_completion_interruptible(&obj_request->completion);
}

static void obj_request_done_init(struct rbd_obj_request *obj_request)
{
	atomic_set(&obj_request->done, 0);
	smp_wmb();
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
	int done;

	done = atomic_inc_return(&obj_request->done);
	if (done > 1) {
		struct rbd_img_request *img_request = obj_request->img_request;
		struct rbd_device *rbd_dev;

		rbd_dev = img_request ? img_request->rbd_dev : NULL;
		rbd_warn(rbd_dev, "obj_request %p was already done\n",
			obj_request);
	}
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return atomic_read(&obj_request->done) != 0;
}
static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
	/*
	 * ENOENT means a hole in the image.  We zero-fill the
	 * entire length of the request.  A short read also implies
	 * zero-fill to the end of the request.  Either way we
	 * update the xferred count to indicate the whole request
	 * was satisfied.
	 */
	BUG_ON(obj_request->type != OBJ_REQUEST_BIO);
	if (obj_request->result == -ENOENT) {
		zero_bio_chain(obj_request->bio_list, 0);
		obj_request->result = 0;
		obj_request->xferred = obj_request->length;
	} else if (obj_request->xferred < obj_request->length &&
			!obj_request->result) {
		zero_bio_chain(obj_request->bio_list, obj_request->xferred);
		obj_request->xferred = obj_request->length;
	}
	obj_request_done_set(obj_request);
}

static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}

static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}

static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu/%llu\n", __func__, obj_request,
		obj_request->result, obj_request->xferred, obj_request->length);
	if (obj_request->img_request)
		rbd_img_obj_request_read_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}

static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short write.
	 * Our xferred value is the number of bytes transferred
	 * back.  Set it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	obj_request_done_set(obj_request);
}

/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
				struct ceph_msg *msg)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	u16 opcode;

	dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
	rbd_assert(osd_req == obj_request->osd_req);
	rbd_assert(!!obj_request->img_request ^
				(obj_request->which == BAD_WHICH));

	if (osd_req->r_result < 0)
		obj_request->result = osd_req->r_result;
	obj_request->version = le64_to_cpu(osd_req->r_reassert_version.version);

	WARN_ON(osd_req->r_num_ops != 1);	/* For now */

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to blk_end_request(), which takes an unsigned int.
	 */
	obj_request->xferred = osd_req->r_reply_op_len[0];
	rbd_assert(obj_request->xferred < (u64) UINT_MAX);
	opcode = osd_req->r_ops[0].op;
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request);
		break;
	case CEPH_OSD_OP_WRITE:
		rbd_osd_write_callback(obj_request);
		break;
	case CEPH_OSD_OP_STAT:
		rbd_osd_stat_callback(obj_request);
		break;
	case CEPH_OSD_OP_CALL:
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		rbd_osd_trivial_callback(obj_request);
		break;
	default:
		rbd_warn(NULL, "%s: unsupported op %hu\n",
			obj_request->object_name, (unsigned short) opcode);
		break;
	}

	if (obj_request_done_test(obj_request))
		rbd_obj_request_complete(obj_request);
}
static void rbd_osd_req_format(struct rbd_obj_request *obj_request,
					bool write_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	struct ceph_snap_context *snapc = NULL;
	u64 snap_id = CEPH_NOSNAP;
	struct timespec *mtime = NULL;
	struct timespec now;

	rbd_assert(osd_req != NULL);

	if (write_request) {
		now = CURRENT_TIME;
		mtime = &now;
		if (img_request)
			snapc = img_request->snapc;
	} else if (img_request) {
		snap_id = img_request->snap_id;
	}
	ceph_osdc_build_request(osd_req, obj_request->offset,
			snapc, snap_id, mtime);
}
static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					bool write_request,
					struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_snap_context *snapc = NULL;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	if (img_request) {
		rbd_assert(img_request->write_request == write_request);
		if (img_request->write_request)
			snapc = img_request->snapc;
	}

	/* Allocate and initialize the request, for the single op */

	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	if (write_request)
		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	else
		osd_req->r_flags = CEPH_OSD_FLAG_READ;

	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_oid_len = strlen(obj_request->object_name);
	rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
	memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

	osd_req->r_file_layout = rbd_dev->layout;	/* struct */

	return osd_req;
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}
/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
						u64 offset, u64 length,
						enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;
	size_t size;
	char *name;

	rbd_assert(obj_request_type_valid(type));

	size = strlen(object_name) + 1;
	obj_request = kzalloc(sizeof (*obj_request) + size, GFP_KERNEL);
	if (!obj_request)
		return NULL;

	name = (char *)(obj_request + 1);
	obj_request->object_name = memcpy(name, object_name, size);
	obj_request->offset = offset;
	obj_request->length = length;
	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	obj_request_done_init(obj_request);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
		offset, length, (int)type, obj_request);

	return obj_request;
}

static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
	case OBJ_REQUEST_PAGES:
		if (obj_request->pages)
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
	}

	kfree(obj_request);
}
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					u64 offset, u64 length,
					bool write_request)
{
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc = NULL;

	img_request = kmalloc(sizeof (*img_request), GFP_ATOMIC);
	if (!img_request)
		return NULL;

	if (write_request) {
		down_read(&rbd_dev->header_rwsem);
		snapc = ceph_get_snap_context(rbd_dev->header.snapc);
		up_read(&rbd_dev->header_rwsem);
		if (WARN_ON(!snapc)) {
			kfree(img_request);
			return NULL;	/* Shouldn't happen */
		}
	}

	img_request->rq = NULL;
	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
	img_request->write_request = write_request;
	if (write_request)
		img_request->snapc = snapc;
	else
		img_request->snap_id = rbd_dev->spec->snap_id;
	spin_lock_init(&img_request->completion_lock);
	img_request->next_completion = 0;
	img_request->callback = NULL;
	img_request->obj_request_count = 0;
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

	rbd_img_request_get(img_request);	/* Avoid a warning */
	rbd_img_request_put(img_request);	/* TEMPORARY */

	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
		write_request ? "write" : "read", offset, length,
		img_request);

	return img_request;
}
static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request->write_request)
		ceph_put_snap_context(img_request->snapc);

	kfree(img_request);
}
static int rbd_img_request_fill_bio(struct rbd_img_request *img_request,
					struct bio *bio_list)
{
	struct rbd_device *rbd_dev = img_request->rbd_dev;
	struct rbd_obj_request *obj_request = NULL;
	struct rbd_obj_request *next_obj_request;
	bool write_request = img_request->write_request;
	unsigned int bio_offset;
	u64 image_offset;
	u64 resid;
	u16 opcode;

	dout("%s: img %p bio %p\n", __func__, img_request, bio_list);

	opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ;
	bio_offset = 0;
	image_offset = img_request->offset;
	rbd_assert(image_offset == bio_list->bi_sector << SECTOR_SHIFT);
	resid = img_request->length;
	rbd_assert(resid > 0);
	while (resid) {
		struct ceph_osd_request *osd_req;
		struct ceph_osd_data *osd_data;
		const char *object_name;
		unsigned int clone_size;
		u64 offset;
		u64 length;

		object_name = rbd_segment_name(rbd_dev, image_offset);
		if (!object_name)
			goto out_unwind;
		offset = rbd_segment_offset(rbd_dev, image_offset);
		length = rbd_segment_length(rbd_dev, image_offset, resid);
		obj_request = rbd_obj_request_create(object_name,
						offset, length,
						OBJ_REQUEST_BIO);
		kfree(object_name);	/* object request has its own copy */
		if (!obj_request)
			goto out_unwind;

		rbd_assert(length <= (u64) UINT_MAX);
		clone_size = (unsigned int) length;
		obj_request->bio_list = bio_chain_clone_range(&bio_list,
						&bio_offset, clone_size,
						GFP_ATOMIC);
		if (!obj_request->bio_list)
			goto out_partial;

		osd_req = rbd_osd_req_create(rbd_dev, write_request,
						obj_request);
		if (!osd_req)
			goto out_partial;
		obj_request->osd_req = osd_req;

		osd_data = write_request ? &osd_req->r_data_out
					 : &osd_req->r_data_in;
		osd_req_op_extent_init(osd_req, 0, opcode, offset, length,
						0, 0);
		ceph_osd_data_bio_init(osd_data, obj_request->bio_list,
				obj_request->length);
		osd_req_op_extent_osd_data(osd_req, 0, osd_data);
		rbd_osd_req_format(obj_request, write_request);

		/* status and version are initially zero-filled */

		rbd_img_obj_request_add(img_request, obj_request);

		image_offset += length;
		resid -= length;
	}

	return 0;

out_partial:
	rbd_obj_request_put(obj_request);
out_unwind:
	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_obj_request_put(obj_request);

	return -ENOMEM;
}
static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	u32 which = obj_request->which;
	bool more = true;

	img_request = obj_request->img_request;

	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	rbd_assert(img_request != NULL);
	rbd_assert(img_request->rq != NULL);
	rbd_assert(img_request->obj_request_count > 0);
	rbd_assert(which != BAD_WHICH);
	rbd_assert(which < img_request->obj_request_count);
	rbd_assert(which >= img_request->next_completion);

	spin_lock_irq(&img_request->completion_lock);
	if (which != img_request->next_completion)
		goto out;

	for_each_obj_request_from(img_request, obj_request) {
		unsigned int xferred;
		int result;

		rbd_assert(more);
		rbd_assert(which < img_request->obj_request_count);

		if (!obj_request_done_test(obj_request))
			break;

		rbd_assert(obj_request->xferred <= (u64) UINT_MAX);
		xferred = (unsigned int) obj_request->xferred;
		result = (int) obj_request->result;
		if (result)
			rbd_warn(NULL, "obj_request %s result %d xferred %u\n",
				img_request->write_request ? "write" : "read",
				result, xferred);

		more = blk_end_request(img_request->rq, result, xferred);
		which++;
	}

	rbd_assert(more ^ (which == img_request->obj_request_count));
	img_request->next_completion = which;
out:
	spin_unlock_irq(&img_request->completion_lock);

	if (!more)
		rbd_img_request_complete(img_request);
}
static int rbd_img_request_submit(struct rbd_img_request *img_request)
{
	struct rbd_device *rbd_dev = img_request->rbd_dev;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	dout("%s: img %p\n", __func__, img_request);
	for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
		int ret;

		obj_request->callback = rbd_img_obj_callback;
		ret = rbd_obj_request_submit(osdc, obj_request);
		if (ret)
			return ret;
		/*
		 * The image request has its own reference to each
		 * of its object requests, so we can safely drop the
		 * initial one here.
		 */
		rbd_obj_request_put(obj_request);
	}

	return 0;
}
static int rbd_obj_notify_ack(struct rbd_device *rbd_dev,
				   u64 ver, u64 notify_id)
{
	struct rbd_obj_request *obj_request;
	struct ceph_osd_client *osdc;
	int ret;

	obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
							OBJ_REQUEST_NODATA);
	if (!obj_request)
		return -ENOMEM;

	ret = -ENOMEM;
	obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
	if (!obj_request->osd_req)
		goto out;

	osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
					notify_id, ver, 0);
	rbd_osd_req_format(obj_request, false);

	osdc = &rbd_dev->rbd_client->client->osdc;
	obj_request->callback = rbd_obj_request_put;
	ret = rbd_obj_request_submit(osdc, obj_request);
out:
	if (ret)
		rbd_obj_request_put(obj_request);

	return ret;
}
static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
{
	struct rbd_device *rbd_dev = (struct rbd_device *)data;
	u64 hver;
	int rc;

	if (!rbd_dev)
		return;

	dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
		rbd_dev->header_name, (unsigned long long) notify_id,
		(unsigned int) opcode);
	rc = rbd_dev_refresh(rbd_dev, &hver);
	if (rc)
		rbd_warn(rbd_dev, "got notification but failed to "
			   "update snaps: %d\n", rc);

	rbd_obj_notify_ack(rbd_dev, hver, notify_id);
}
/*
 * Request sync osd watch/unwatch.  The value of "start" determines
 * whether a watch request is being initiated or torn down.
 */
static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;
	int ret;

	rbd_assert(start ^ !!rbd_dev->watch_event);
	rbd_assert(start ^ !!rbd_dev->watch_request);

	if (start) {
		ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
						&rbd_dev->watch_event);
		if (ret < 0)
			return ret;
		rbd_assert(rbd_dev->watch_event != NULL);
	}

	ret = -ENOMEM;
	obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
							OBJ_REQUEST_NODATA);
	if (!obj_request)
		goto out_cancel;

	obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, obj_request);
	if (!obj_request->osd_req)
		goto out_cancel;

	osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
				rbd_dev->watch_event->cookie,
				rbd_dev->header.obj_version, start);
	rbd_osd_req_format(obj_request, true);

	if (start)
		ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
	else
		ceph_osdc_unregister_linger_request(osdc,
					rbd_dev->watch_request->osd_req);
	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out_cancel;
	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out_cancel;
	ret = obj_request->result;
	if (ret)
		goto out_cancel;

	/*
	 * A watch request is set to linger, so the underlying osd
	 * request won't go away until we unregister it.  We retain
	 * a pointer to the object request during that time (in
	 * rbd_dev->watch_request), so we'll keep a reference to
	 * it.  We'll drop that reference (below) after we've
	 * unregistered it.
	 */
	if (start) {
		rbd_dev->watch_request = obj_request;

		return 0;
	}

	/* We have successfully torn down the watch request */

	rbd_obj_request_put(rbd_dev->watch_request);
	rbd_dev->watch_request = NULL;
out_cancel:
	/* Cancel the event if we're tearing down, or on error */
	ceph_osdc_cancel_event(rbd_dev->watch_event);
	rbd_dev->watch_event = NULL;
	if (obj_request)
		rbd_obj_request_put(obj_request);

	return ret;
}
/*
 * Synchronous osd object method call
 */
static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
			     const char *object_name,
			     const char *class_name,
			     const char *method_name,
			     const char *outbound,
			     size_t outbound_size,
			     char *inbound,
			     size_t inbound_size,
			     u64 *version)
{
	struct rbd_obj_request *obj_request;
	struct ceph_osd_data *osd_data;
	struct ceph_osd_client *osdc;
	struct page **pages;
	u32 page_count;
	int ret;

	/*
	 * Method calls are ultimately read operations.  The result
	 * should be placed into the inbound buffer provided.  They
	 * also supply outbound data--parameters for the object
	 * method.  Currently if this is present it will be a
	 * snapshot id.
	 */
	page_count = (u32) calc_pages_for(0, inbound_size);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
	obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
							OBJ_REQUEST_PAGES);
	if (!obj_request)
		goto out;

	obj_request->pages = pages;
	obj_request->page_count = page_count;

	obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
	if (!obj_request->osd_req)
		goto out;

	osd_data = &obj_request->osd_req->r_data_in;
	osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
					class_name, method_name,
					outbound, outbound_size);
	ceph_osd_data_pages_init(osd_data, obj_request->pages, inbound_size,
					0, false, false);
	osd_req_op_cls_response_data(obj_request->osd_req, 0, osd_data);
	rbd_osd_req_format(obj_request, false);

	osdc = &rbd_dev->rbd_client->client->osdc;
	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out;

	ret = obj_request->result;
	if (ret < 0)
		goto out;
	ret = 0;
	ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
	if (version)
		*version = obj_request->version;
out:
	if (obj_request)
		rbd_obj_request_put(obj_request);
	else
		ceph_release_page_vector(pages, page_count);

	return ret;
}
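/*
 * Usage sketch (an assumption, modeled on the format 2 header
 * helpers later in this file): fetching an image's size via the
 * "rbd" object class.
 *
 *	__le64 snapid = cpu_to_le64(CEPH_NOSNAP);
 *	__le64 size_buf;
 *	int ret;
 *
 *	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
 *				"rbd", "get_size",
 *				(char *) &snapid, sizeof (snapid),
 *				(char *) &size_buf, sizeof (size_buf),
 *				NULL);
 *
 * (The actual "get_size" reply also carries the object order; this
 * sketch only illustrates the calling convention.)
 */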
static void rbd_request_fn(struct request_queue *q)
		__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct rbd_device *rbd_dev = q->queuedata;
	bool read_only = rbd_dev->mapping.read_only;
	struct request *rq;
	int result;

	while ((rq = blk_fetch_request(q))) {
		bool write_request = rq_data_dir(rq) == WRITE;
		struct rbd_img_request *img_request;
		u64 offset;
		u64 length;

		/* Ignore any non-FS requests that filter through. */

		if (rq->cmd_type != REQ_TYPE_FS) {
			dout("%s: non-fs request type %d\n", __func__,
				(int) rq->cmd_type);
			__blk_end_request_all(rq, 0);
			continue;
		}

		/* Ignore/skip any zero-length requests */

		offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
		length = (u64) blk_rq_bytes(rq);

		if (!length) {
			dout("%s: zero-length request\n", __func__);
			__blk_end_request_all(rq, 0);
			continue;
		}

		spin_unlock_irq(q->queue_lock);

		/* Disallow writes to a read-only device */

		if (write_request) {
			result = -EROFS;
			if (read_only)
				goto end_request;
			rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
		}

		/*
		 * Quit early if the mapped snapshot no longer
		 * exists.  It's still possible the snapshot will
		 * have disappeared by the time our request arrives
		 * at the osd, but there's no sense in sending it if
		 * we already know.
		 */
		if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
			dout("request for non-existent snapshot");
			rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
			result = -ENXIO;
			goto end_request;
		}

		result = -EINVAL;
		if (WARN_ON(offset && length > U64_MAX - offset + 1))
			goto end_request;	/* Shouldn't happen */

		result = -ENOMEM;
		img_request = rbd_img_request_create(rbd_dev, offset, length,
							write_request);
		if (!img_request)
			goto end_request;

		img_request->rq = rq;

		result = rbd_img_request_fill_bio(img_request, rq->bio);
		if (!result)
			result = rbd_img_request_submit(img_request);
		if (result)
			rbd_img_request_put(img_request);
end_request:
		spin_lock_irq(q->queue_lock);
		if (result < 0) {
			rbd_warn(rbd_dev, "obj_request %s result %d\n",
				write_request ? "write" : "read", result);
			__blk_end_request_all(rq, result);
		}
	}
}
/*
 * a queue callback.  Makes sure that we don't create a bio that spans across
 * multiple osd objects.  One exception would be a single-page bio, which we
 * handle later at bio_chain_clone_range().
 */
static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
			  struct bio_vec *bvec)
{
	struct rbd_device *rbd_dev = q->queuedata;
	sector_t sector_offset;
	sector_t sectors_per_obj;
	sector_t obj_sector_offset;
	int ret;

	/*
	 * Find how far the partition-relative bio start sector falls
	 * into its rbd object, relative to the enclosing device.
	 */
	sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
	sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
	obj_sector_offset = sector_offset & (sectors_per_obj - 1);

	/*
	 * Compute the number of bytes from that offset to the end
	 * of the object.  Account for what's already used by the bio.
	 */
	ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
	if (ret > bmd->bi_size)
		ret -= bmd->bi_size;
	else
		ret = 0;

	/*
	 * Don't send back more than was asked for.  And if the bio
	 * was empty, let the whole thing through because:  "Note
	 * that a block device *must* allow a single page to be
	 * added to an empty bio."
	 */
	rbd_assert(bvec->bv_len <= PAGE_SIZE);
	if (ret > (int) bvec->bv_len || !bmd->bi_size)
		ret = (int) bvec->bv_len;

	return ret;
}
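/*
 * Worked example (illustrative): with 4 MB objects, sectors_per_obj
 * is 1 << (22 - 9) = 8192.  A bio starting at device sector 8190
 * with bmd->bi_size == 0 may take (8192 - 8190) << 9 = 1024 bytes
 * before crossing an object boundary -- except that an empty bio is
 * always allowed one full page, per the rule quoted in the function
 * above.
 */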
static void rbd_free_disk(struct rbd_device *rbd_dev)
{
	struct gendisk *disk = rbd_dev->disk;

	if (!disk)
		return;

	if (disk->flags & GENHD_FL_UP)
		del_gendisk(disk);
	if (disk->queue)
		blk_cleanup_queue(disk->queue);
	put_disk(disk);
}
static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
				const char *object_name,
				u64 offset, u64 length,
				char *buf, u64 *version)
{
	struct rbd_obj_request *obj_request;
	struct ceph_osd_data *osd_data;
	struct ceph_osd_client *osdc;
	struct page **pages = NULL;
	u32 page_count;
	size_t size;
	int ret;

	page_count = (u32) calc_pages_for(offset, length);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
	obj_request = rbd_obj_request_create(object_name, offset, length,
							OBJ_REQUEST_PAGES);
	if (!obj_request)
		goto out;

	obj_request->pages = pages;
	obj_request->page_count = page_count;

	obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
	if (!obj_request->osd_req)
		goto out;

	osd_data = &obj_request->osd_req->r_data_in;
	osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
					offset, length, 0, 0);
	ceph_osd_data_pages_init(osd_data, obj_request->pages,
					obj_request->length,
					obj_request->offset & ~PAGE_MASK,
					false, false);
	osd_req_op_extent_osd_data(obj_request->osd_req, 0, osd_data);
	rbd_osd_req_format(obj_request, false);

	osdc = &rbd_dev->rbd_client->client->osdc;
	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out;

	ret = obj_request->result;
	if (ret < 0)
		goto out;

	rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
	size = (size_t) obj_request->xferred;
	ceph_copy_from_page_vector(pages, buf, 0, size);
	rbd_assert(size <= (size_t) INT_MAX);
	ret = (int) size;
	if (version)
		*version = obj_request->version;
out:
	if (obj_request)
		rbd_obj_request_put(obj_request);
	else
		ceph_release_page_vector(pages, page_count);

	return ret;
}
/*
 * Read the complete header for the given rbd device.
 *
 * Returns a pointer to a dynamically-allocated buffer containing
 * the complete and validated header.  Caller can pass the address
 * of a variable that will be filled in with the version of the
 * header object at the time it was read.
 *
 * Returns a pointer-coded errno if a failure occurs.
 */
static struct rbd_image_header_ondisk *
rbd_dev_v1_header_read(struct rbd_device *rbd_dev, u64 *version)
{
	struct rbd_image_header_ondisk *ondisk = NULL;
	u32 snap_count = 0;
	u64 names_size = 0;
	u32 want_count;
	int ret;

	/*
	 * The complete header will include an array of its 64-bit
	 * snapshot ids, followed by the names of those snapshots as
	 * a contiguous block of NUL-terminated strings.  Note that
	 * the number of snapshots could change by the time we read
	 * it in, in which case we re-read it.
	 */
	do {
		size_t size;

		kfree(ondisk);

		size = sizeof (*ondisk);
		size += snap_count * sizeof (struct rbd_image_snap_ondisk);
		size += names_size;
		ondisk = kmalloc(size, GFP_KERNEL);
		if (!ondisk)
			return ERR_PTR(-ENOMEM);

		ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
					0, size,
					(char *) ondisk, version);
		if (ret < 0)
			goto out_err;
		if (WARN_ON((size_t) ret < size)) {
			rbd_warn(rbd_dev, "short header read (want %zd got %d)",
				size, ret);
			ret = -ENXIO;
			goto out_err;
		}
		if (!rbd_dev_ondisk_valid(ondisk)) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "invalid header");
			goto out_err;
		}

		names_size = le64_to_cpu(ondisk->snap_names_len);
		want_count = snap_count;
		snap_count = le32_to_cpu(ondisk->snap_count);
	} while (snap_count != want_count);

	return ondisk;

out_err:
	kfree(ondisk);

	return ERR_PTR(ret);
}
/*
 * reload the on-disk header
 */
static int rbd_read_header(struct rbd_device *rbd_dev,
			   struct rbd_image_header *header)
{
	struct rbd_image_header_ondisk *ondisk;
	u64 ver = 0;
	int ret;

	ondisk = rbd_dev_v1_header_read(rbd_dev, &ver);
	if (IS_ERR(ondisk))
		return PTR_ERR(ondisk);
	ret = rbd_header_from_disk(header, ondisk);
	if (ret >= 0)
		header->obj_version = ver;
	kfree(ondisk);

	return ret;
}
static void rbd_remove_all_snaps(struct rbd_device *rbd_dev)
{
	struct rbd_snap *snap;
	struct rbd_snap *next;

	list_for_each_entry_safe(snap, next, &rbd_dev->snaps, node)
		rbd_remove_snap_dev(snap);
}

static void rbd_update_mapping_size(struct rbd_device *rbd_dev)
{
	sector_t size;

	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		return;

	size = (sector_t) rbd_dev->header.image_size / SECTOR_SIZE;
	dout("setting size to %llu sectors", (unsigned long long) size);
	rbd_dev->mapping.size = (u64) size;
	set_capacity(rbd_dev->disk, size);
}
/*
 * only read the first part of the ondisk header, without the snaps info
 */
static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev, u64 *hver)
{
	int ret;
	struct rbd_image_header h;

	ret = rbd_read_header(rbd_dev, &h);
	if (ret < 0)
		return ret;

	down_write(&rbd_dev->header_rwsem);

	/* Update image size, and check for resize of mapped image */
	rbd_dev->header.image_size = h.image_size;
	rbd_update_mapping_size(rbd_dev);

	/* rbd_dev->header.object_prefix shouldn't change */
	kfree(rbd_dev->header.snap_sizes);
	kfree(rbd_dev->header.snap_names);
	/* osd requests may still refer to snapc */
	ceph_put_snap_context(rbd_dev->header.snapc);

	if (hver)
		*hver = h.obj_version;
	rbd_dev->header.obj_version = h.obj_version;
	rbd_dev->header.image_size = h.image_size;
	rbd_dev->header.snapc = h.snapc;
	rbd_dev->header.snap_names = h.snap_names;
	rbd_dev->header.snap_sizes = h.snap_sizes;
	/* Free the extra copy of the object prefix */
	WARN_ON(strcmp(rbd_dev->header.object_prefix, h.object_prefix));
	kfree(h.object_prefix);

	ret = rbd_dev_snaps_update(rbd_dev);
	if (!ret)
		ret = rbd_dev_snaps_register(rbd_dev);

	up_write(&rbd_dev->header_rwsem);

	return ret;
}
static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver)
{
	int ret;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	if (rbd_dev->image_format == 1)
		ret = rbd_dev_v1_refresh(rbd_dev, hver);
	else
		ret = rbd_dev_v2_refresh(rbd_dev, hver);
	mutex_unlock(&ctl_mutex);

	return ret;
}
static int rbd_init_disk(struct rbd_device *rbd_dev)
{
	struct gendisk *disk;
	struct request_queue *q;
	u64 segment_size;

	/* create gendisk info */
	disk = alloc_disk(RBD_MINORS_PER_MAJOR);
	if (!disk)
		return -ENOMEM;

	snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
		 rbd_dev->dev_id);
	disk->major = rbd_dev->major;
	disk->first_minor = 0;
	disk->fops = &rbd_bd_ops;
	disk->private_data = rbd_dev;

	q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
	if (!q)
		goto out_disk;

	/* We use the default size, but let's be explicit about it. */
	blk_queue_physical_block_size(q, SECTOR_SIZE);

	/* set io sizes to object size */
	segment_size = rbd_obj_bytes(&rbd_dev->header);
	blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
	blk_queue_max_segment_size(q, segment_size);
	blk_queue_io_min(q, segment_size);
	blk_queue_io_opt(q, segment_size);

	blk_queue_merge_bvec(q, rbd_merge_bvec);
	disk->queue = q;

	q->queuedata = rbd_dev;

	rbd_dev->disk = disk;

	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);

	return 0;
out_disk:
	put_disk(disk);

	return -ENOMEM;
}

/*
 * sysfs
 */

static struct rbd_device *dev_to_rbd_dev(struct device *dev)
{
	return container_of(dev, struct rbd_device, dev);
}
static ssize_t rbd_size_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	sector_t size;

	down_read(&rbd_dev->header_rwsem);
	size = get_capacity(rbd_dev->disk);
	up_read(&rbd_dev->header_rwsem);

	return sprintf(buf, "%llu\n", (unsigned long long) size * SECTOR_SIZE);
}

/*
 * Note this shows the features for whatever's mapped, which is not
 * necessarily the base image.
 */
static ssize_t rbd_features_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "0x%016llx\n",
			(unsigned long long) rbd_dev->mapping.features);
}

static ssize_t rbd_major_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%d\n", rbd_dev->major);
}

static ssize_t rbd_client_id_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "client%lld\n",
			ceph_client_id(rbd_dev->rbd_client->client));
}

static ssize_t rbd_pool_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
}

static ssize_t rbd_pool_id_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n",
			(unsigned long long) rbd_dev->spec->pool_id);
}

static ssize_t rbd_name_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->spec->image_name)
		return sprintf(buf, "%s\n", rbd_dev->spec->image_name);

	return sprintf(buf, "(unknown)\n");
}

static ssize_t rbd_image_id_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
}

/*
 * Shows the name of the currently-mapped snapshot (or
 * RBD_SNAP_HEAD_NAME for the base image).
 */
static ssize_t rbd_snap_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
}

/*
 * For an rbd v2 image, shows the pool id, image id, and snapshot id
 * for the parent image.  If there is no parent, simply shows
 * "(no parent image)".
 */
static ssize_t rbd_parent_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	struct rbd_spec *spec = rbd_dev->parent_spec;
	int count;
	char *bufp = buf;

	if (!spec)
		return sprintf(buf, "(no parent image)\n");

	count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
			(unsigned long long) spec->pool_id, spec->pool_name);
	if (count < 0)
		return count;
	bufp += count;

	count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
			spec->image_name ? spec->image_name : "(unknown)");
	if (count < 0)
		return count;
	bufp += count;

	count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
			(unsigned long long) spec->snap_id, spec->snap_name);
	if (count < 0)
		return count;
	bufp += count;

	count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
	if (count < 0)
		return count;
	bufp += count;

	return (ssize_t) (bufp - buf);
}

static ssize_t rbd_image_refresh(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t size)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	int ret;

	ret = rbd_dev_refresh(rbd_dev, NULL);

	return ret < 0 ? ret : size;
}
2480 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
2481 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
2482 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
2483 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
2484 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
2485 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
2486 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
2487 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
2488 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
2489 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
2490 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
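/*
 * These attributes are exposed under /sys/bus/rbd/devices/<id>/.
 * A hypothetical session for a device with id 0:
 *
 *	$ cat /sys/bus/rbd/devices/0/size
 *	$ echo 1 > /sys/bus/rbd/devices/0/refresh
 */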
2492 static struct attribute *rbd_attrs[] = {
2493 &dev_attr_size.attr,
2494 &dev_attr_features.attr,
2495 &dev_attr_major.attr,
2496 &dev_attr_client_id.attr,
2497 &dev_attr_pool.attr,
2498 &dev_attr_pool_id.attr,
2499 &dev_attr_name.attr,
2500 &dev_attr_image_id.attr,
2501 &dev_attr_current_snap.attr,
2502 &dev_attr_parent.attr,
2503 &dev_attr_refresh.attr,
2507 static struct attribute_group rbd_attr_group = {
2511 static const struct attribute_group *rbd_attr_groups[] = {
2516 static void rbd_sysfs_dev_release(struct device *dev)
2520 static struct device_type rbd_device_type = {
2522 .groups = rbd_attr_groups,
2523 .release = rbd_sysfs_dev_release,
2531 static ssize_t rbd_snap_size_show(struct device *dev,
2532 struct device_attribute *attr,
2535 struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
2537 return sprintf(buf, "%llu\n", (unsigned long long)snap->size);
2540 static ssize_t rbd_snap_id_show(struct device *dev,
2541 struct device_attribute *attr,
2544 struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
2546 return sprintf(buf, "%llu\n", (unsigned long long)snap->id);
2549 static ssize_t rbd_snap_features_show(struct device *dev,
2550 struct device_attribute *attr,
2553 struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
2555 return sprintf(buf, "0x%016llx\n",
2556 (unsigned long long) snap->features);
2559 static DEVICE_ATTR(snap_size, S_IRUGO, rbd_snap_size_show, NULL);
2560 static DEVICE_ATTR(snap_id, S_IRUGO, rbd_snap_id_show, NULL);
2561 static DEVICE_ATTR(snap_features, S_IRUGO, rbd_snap_features_show, NULL);
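/*
 * Snapshot attributes appear under a child device of the mapped
 * image, named with the "snap_" prefix (see rbd_register_snap_dev()
 * below). For a hypothetical snapshot "mysnap" of device 0:
 *
 *	$ cat /sys/bus/rbd/devices/0/snap_mysnap/snap_size
 */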
2563 static struct attribute *rbd_snap_attrs[] = {
2564 &dev_attr_snap_size.attr,
2565 &dev_attr_snap_id.attr,
2566 &dev_attr_snap_features.attr,
2570 static struct attribute_group rbd_snap_attr_group = {
2571 .attrs = rbd_snap_attrs,
2574 static void rbd_snap_dev_release(struct device *dev)
2576 struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
2581 static const struct attribute_group *rbd_snap_attr_groups[] = {
2582 &rbd_snap_attr_group,
2586 static struct device_type rbd_snap_device_type = {
2587 .groups = rbd_snap_attr_groups,
2588 .release = rbd_snap_dev_release,
2591 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
2593 kref_get(&spec->kref);
2598 static void rbd_spec_free(struct kref *kref);
2599 static void rbd_spec_put(struct rbd_spec *spec)
2602 kref_put(&spec->kref, rbd_spec_free);
2605 static struct rbd_spec *rbd_spec_alloc(void)
2607 struct rbd_spec *spec;
2609 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
2612 kref_init(&spec->kref);
2614 rbd_spec_put(rbd_spec_get(spec)); /* TEMPORARY */
2619 static void rbd_spec_free(struct kref *kref)
2621 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
2623 kfree(spec->pool_name);
2624 kfree(spec->image_id);
2625 kfree(spec->image_name);
2626 kfree(spec->snap_name);
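/*
 * Reference-counting sketch: rbd_spec_alloc() returns a spec holding
 * one reference; holders take additional references with
 * rbd_spec_get() and drop them with rbd_spec_put(). The last put
 * triggers rbd_spec_free() above, which frees the id/name strings
 * and then the spec itself.
 */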
2630 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
2631 struct rbd_spec *spec)
2633 struct rbd_device *rbd_dev;
2635 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
2639 spin_lock_init(&rbd_dev->lock);
2641 INIT_LIST_HEAD(&rbd_dev->node);
2642 INIT_LIST_HEAD(&rbd_dev->snaps);
2643 init_rwsem(&rbd_dev->header_rwsem);
2645 rbd_dev->spec = spec;
2646 rbd_dev->rbd_client = rbdc;
2648 /* Initialize the layout used for all rbd requests */
2650 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
2651 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
2652 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
2653 rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
2658 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
2660 rbd_spec_put(rbd_dev->parent_spec);
2661 kfree(rbd_dev->header_name);
2662 rbd_put_client(rbd_dev->rbd_client);
2663 rbd_spec_put(rbd_dev->spec);
2667 static bool rbd_snap_registered(struct rbd_snap *snap)
2669 bool ret = snap->dev.type == &rbd_snap_device_type;
2670 bool reg = device_is_registered(&snap->dev);
2672 rbd_assert(!ret ^ reg);
2677 static void rbd_remove_snap_dev(struct rbd_snap *snap)
2679 list_del(&snap->node);
2680 if (device_is_registered(&snap->dev))
2681 device_unregister(&snap->dev);
2684 static int rbd_register_snap_dev(struct rbd_snap *snap,
2685 struct device *parent)
2687 struct device *dev = &snap->dev;
2690 dev->type = &rbd_snap_device_type;
2691 dev->parent = parent;
2692 dev->release = rbd_snap_dev_release;
2693 dev_set_name(dev, "%s%s", RBD_SNAP_DEV_NAME_PREFIX, snap->name);
2694 dout("%s: registering device for snapshot %s\n", __func__, snap->name);
2696 ret = device_register(dev);
2701 static struct rbd_snap *__rbd_add_snap_dev(struct rbd_device *rbd_dev,
2702 const char *snap_name,
2703 u64 snap_id, u64 snap_size,
2706 struct rbd_snap *snap;
2709 snap = kzalloc(sizeof (*snap), GFP_KERNEL);
2711 return ERR_PTR(-ENOMEM);
2714 snap->name = kstrdup(snap_name, GFP_KERNEL);
2719 snap->size = snap_size;
2720 snap->features = snap_features;
2728 return ERR_PTR(ret);
2731 static char *rbd_dev_v1_snap_info(struct rbd_device *rbd_dev, u32 which,
2732 u64 *snap_size, u64 *snap_features)
2736 rbd_assert(which < rbd_dev->header.snapc->num_snaps);
2738 *snap_size = rbd_dev->header.snap_sizes[which];
2739 *snap_features = 0; /* No features for v1 */
2741 /* Skip over names until we find the one we are looking for */
2743 snap_name = rbd_dev->header.snap_names;
2745 snap_name += strlen(snap_name) + 1;
2751 * Get the size and object order for an image snapshot, or, if
2752 * snap_id is CEPH_NOSNAP, for the base image.
2755 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
2756 u8 *order, u64 *snap_size)
2758 __le64 snapid = cpu_to_le64(snap_id);
2763 } __attribute__ ((packed)) size_buf = { 0 };
2765 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
2767 (char *) &snapid, sizeof (snapid),
2768 (char *) &size_buf, sizeof (size_buf), NULL);
2769 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
2773 *order = size_buf.order;
2774 *snap_size = le64_to_cpu(size_buf.size);
2776 dout(" snap_id 0x%016llx order = %u, snap_size = %llu\n",
2777 (unsigned long long) snap_id, (unsigned int) *order,
2778 (unsigned long long) *snap_size);
2783 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
2785 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
2786 &rbd_dev->header.obj_order,
2787 &rbd_dev->header.image_size);
2790 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
2796 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
2800 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
2801 "rbd", "get_object_prefix",
2803 reply_buf, RBD_OBJ_PREFIX_LEN_MAX, NULL);
2804 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
2809 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
2810 p + RBD_OBJ_PREFIX_LEN_MAX,
2813 if (IS_ERR(rbd_dev->header.object_prefix)) {
2814 ret = PTR_ERR(rbd_dev->header.object_prefix);
2815 rbd_dev->header.object_prefix = NULL;
2817 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
2826 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
2829 __le64 snapid = cpu_to_le64(snap_id);
2833 } features_buf = { 0 };
2837 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
2838 "rbd", "get_features",
2839 (char *) &snapid, sizeof (snapid),
2840 (char *) &features_buf, sizeof (features_buf),
2842 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
2846 incompat = le64_to_cpu(features_buf.incompat);
2847 if (incompat & ~RBD_FEATURES_ALL)
2850 *snap_features = le64_to_cpu(features_buf.features);
2852 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
2853 (unsigned long long) snap_id,
2854 (unsigned long long) *snap_features,
2855 (unsigned long long) le64_to_cpu(features_buf.incompat));
2860 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
2862 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
2863 &rbd_dev->header.features);
2866 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
2868 struct rbd_spec *parent_spec;
2870 void *reply_buf = NULL;
2878 parent_spec = rbd_spec_alloc();
2882 size = sizeof (__le64) + /* pool_id */
2883 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
2884 sizeof (__le64) + /* snap_id */
2885 sizeof (__le64); /* overlap */
2886 reply_buf = kmalloc(size, GFP_KERNEL);
2892 snapid = cpu_to_le64(CEPH_NOSNAP);
2893 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
2894 "rbd", "get_parent",
2895 (char *) &snapid, sizeof (snapid),
2896 (char *) reply_buf, size, NULL);
2897 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
2903 end = (char *) reply_buf + size;
2904 ceph_decode_64_safe(&p, end, parent_spec->pool_id, out_err);
2905 if (parent_spec->pool_id == CEPH_NOPOOL)
2906 goto out; /* No parent? No problem. */
2908 /* The ceph file layout needs to fit pool id in 32 bits */
2911 if (WARN_ON(parent_spec->pool_id > (u64) U32_MAX))
2914 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
2915 if (IS_ERR(image_id)) {
2916 ret = PTR_ERR(image_id);
2919 parent_spec->image_id = image_id;
2920 ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
2921 ceph_decode_64_safe(&p, end, overlap, out_err);
2923 rbd_dev->parent_overlap = overlap;
2924 rbd_dev->parent_spec = parent_spec;
2925 parent_spec = NULL; /* rbd_dev now owns this */
2930 rbd_spec_put(parent_spec);
2935 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
2937 size_t image_id_size;
2942 void *reply_buf = NULL;
2944 char *image_name = NULL;
2947 rbd_assert(!rbd_dev->spec->image_name);
2949 len = strlen(rbd_dev->spec->image_id);
2950 image_id_size = sizeof (__le32) + len;
2951 image_id = kmalloc(image_id_size, GFP_KERNEL);
2956 end = (char *) image_id + image_id_size;
2957 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32) len);
2959 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
2960 reply_buf = kmalloc(size, GFP_KERNEL);
2964 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
2965 "rbd", "dir_get_name",
2966 image_id, image_id_size,
2967 (char *) reply_buf, size, NULL);
2971 end = (char *) reply_buf + size;
2972 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
2973 if (IS_ERR(image_name))
2976 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
2985 * When a parent image gets probed, we only have the pool, image,
2986 * and snapshot ids but not the names of any of them. This call
2987 * is made later to fill in those names. It has to be done after
2988 * rbd_dev_snaps_update() has completed because some of the
2989 * information (in particular, the snapshot name) is not available until then.
2992 static int rbd_dev_probe_update_spec(struct rbd_device *rbd_dev)
2994 struct ceph_osd_client *osdc;
2996 void *reply_buf = NULL;
2999 if (rbd_dev->spec->pool_name)
3000 return 0; /* Already have the names */
3002 /* Look up the pool name */
3004 osdc = &rbd_dev->rbd_client->client->osdc;
3005 name = ceph_pg_pool_name_by_id(osdc->osdmap, rbd_dev->spec->pool_id);
3007 rbd_warn(rbd_dev, "there is no pool with id %llu",
3008 rbd_dev->spec->pool_id); /* Really a BUG() */
3012 rbd_dev->spec->pool_name = kstrdup(name, GFP_KERNEL);
3013 if (!rbd_dev->spec->pool_name)
3016 /* Fetch the image name; tolerate failure here */
3018 name = rbd_dev_image_name(rbd_dev);
3020 rbd_dev->spec->image_name = (char *) name;
3022 rbd_warn(rbd_dev, "unable to get image name");
3024 /* Look up the snapshot name. */
3026 name = rbd_snap_name(rbd_dev, rbd_dev->spec->snap_id);
3028 rbd_warn(rbd_dev, "no snapshot with id %llu",
3029 rbd_dev->spec->snap_id); /* Really a BUG() */
3033 rbd_dev->spec->snap_name = kstrdup(name, GFP_KERNEL);
3034 if (!rbd_dev->spec->snap_name)
3040 kfree(rbd_dev->spec->pool_name);
3041 rbd_dev->spec->pool_name = NULL;
3046 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, u64 *ver)
3055 struct ceph_snap_context *snapc;
3059 * We'll need room for the seq value (maximum snapshot id),
3060 * snapshot count, and array of that many snapshot ids.
3061 * For now we have a fixed upper limit on the number we're
3062 * prepared to receive.
3064 size = sizeof (__le64) + sizeof (__le32) +
3065 RBD_MAX_SNAP_COUNT * sizeof (__le64);
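/*
 * Layout of the encoded snapshot context being sized here, as implied
 * by the decode logic below: a __le64 seq (the maximum snapshot id),
 * a __le32 snap_count, then snap_count __le64 snapshot ids, highest
 * id first.
 */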
3066 reply_buf = kzalloc(size, GFP_KERNEL);
3070 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3071 "rbd", "get_snapcontext",
3073 reply_buf, size, ver);
3074 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3080 end = (char *) reply_buf + size;
3081 ceph_decode_64_safe(&p, end, seq, out);
3082 ceph_decode_32_safe(&p, end, snap_count, out);
3085 * Make sure the reported number of snapshot ids wouldn't go
3086 * beyond the end of our buffer. But before checking that,
3087 * make sure the computed size of the snapshot context we
3088 * allocate is representable in a size_t.
3090 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
3095 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
3098 size = sizeof (struct ceph_snap_context) +
3099 snap_count * sizeof (snapc->snaps[0]);
3100 snapc = kmalloc(size, GFP_KERNEL);
3106 atomic_set(&snapc->nref, 1);
3108 snapc->num_snaps = snap_count;
3109 for (i = 0; i < snap_count; i++)
3110 snapc->snaps[i] = ceph_decode_64(&p);
3112 rbd_dev->header.snapc = snapc;
3114 dout(" snap context seq = %llu, snap_count = %u\n",
3115 (unsigned long long) seq, (unsigned int) snap_count);
3123 static char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which)
3133 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
3134 reply_buf = kmalloc(size, GFP_KERNEL);
3136 return ERR_PTR(-ENOMEM);
3138 snap_id = cpu_to_le64(rbd_dev->header.snapc->snaps[which]);
3139 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3140 "rbd", "get_snapshot_name",
3141 (char *) &snap_id, sizeof (snap_id),
3142 reply_buf, size, NULL);
3143 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3148 end = (char *) reply_buf + size;
3149 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
3150 if (IS_ERR(snap_name)) {
3151 ret = PTR_ERR(snap_name);
3154 dout(" snap_id 0x%016llx snap_name = %s\n",
3155 (unsigned long long) le64_to_cpu(snap_id), snap_name);
3163 return ERR_PTR(ret);
3166 static char *rbd_dev_v2_snap_info(struct rbd_device *rbd_dev, u32 which,
3167 u64 *snap_size, u64 *snap_features)
3173 snap_id = rbd_dev->header.snapc->snaps[which];
3174 ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, &order, snap_size);
3176 return ERR_PTR(ret);
3177 ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, snap_features);
3179 return ERR_PTR(ret);
3181 return rbd_dev_v2_snap_name(rbd_dev, which);
3184 static char *rbd_dev_snap_info(struct rbd_device *rbd_dev, u32 which,
3185 u64 *snap_size, u64 *snap_features)
3187 if (rbd_dev->image_format == 1)
3188 return rbd_dev_v1_snap_info(rbd_dev, which,
3189 snap_size, snap_features);
3190 if (rbd_dev->image_format == 2)
3191 return rbd_dev_v2_snap_info(rbd_dev, which,
3192 snap_size, snap_features);
3193 return ERR_PTR(-EINVAL);
3196 static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver)
3201 down_write(&rbd_dev->header_rwsem);
3203 /* Grab old order first, to see if it changes */
3205 obj_order = rbd_dev->header.obj_order;
3206 ret = rbd_dev_v2_image_size(rbd_dev);
3209 if (rbd_dev->header.obj_order != obj_order) {
3213 rbd_update_mapping_size(rbd_dev);
3215 ret = rbd_dev_v2_snap_context(rbd_dev, hver);
3216 dout("rbd_dev_v2_snap_context returned %d\n", ret);
3219 ret = rbd_dev_snaps_update(rbd_dev);
3220 dout("rbd_dev_snaps_update returned %d\n", ret);
3223 ret = rbd_dev_snaps_register(rbd_dev);
3224 dout("rbd_dev_snaps_register returned %d\n", ret);
3226 up_write(&rbd_dev->header_rwsem);
3232 * Scan the rbd device's current snapshot list and compare it to the
3233 * newly-received snapshot context. Remove any existing snapshots
3234 * not present in the new snapshot context. Add a new snapshot for
3235 * any snapshots in the snapshot context not in the current list.
3236 * And verify there are no changes to snapshots we already know about.
3239 * Assumes the snapshots in the snapshot context are sorted by
3240 * snapshot id, highest id first. (Snapshots in the rbd_dev's list
3241 * are also maintained in that order.)
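/*
 * For example, given an existing list with snapshot ids { 12, 7, 3 }
 * and a new context { 12, 9, 3 }: 7 is removed, 9 is added, and 12
 * and 3 are verified to be unchanged.
 */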
3243 static int rbd_dev_snaps_update(struct rbd_device *rbd_dev)
3245 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
3246 const u32 snap_count = snapc->num_snaps;
3247 struct list_head *head = &rbd_dev->snaps;
3248 struct list_head *links = head->next;
3251 dout("%s: snap count is %u\n", __func__, (unsigned int) snap_count);
3252 while (index < snap_count || links != head) {
3254 struct rbd_snap *snap;
3257 u64 snap_features = 0;
3259 snap_id = index < snap_count ? snapc->snaps[index]
3261 snap = links != head ? list_entry(links, struct rbd_snap, node)
3263 rbd_assert(!snap || snap->id != CEPH_NOSNAP);
3265 if (snap_id == CEPH_NOSNAP || (snap && snap->id > snap_id)) {
3266 struct list_head *next = links->next;
3269 * A previously-existing snapshot is not in
3270 * the new snap context.
3272 * If the now missing snapshot is the one the
3273 * image is mapped to, clear its exists flag
3274 * so we can avoid sending any more requests to it.
3277 if (rbd_dev->spec->snap_id == snap->id)
3278 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3279 rbd_remove_snap_dev(snap);
3280 dout("%ssnap id %llu has been removed\n",
3281 rbd_dev->spec->snap_id == snap->id ?
3283 (unsigned long long) snap->id);
3285 /* Done with this list entry; advance */
3291 snap_name = rbd_dev_snap_info(rbd_dev, index,
3292 &snap_size, &snap_features);
3293 if (IS_ERR(snap_name))
3294 return PTR_ERR(snap_name);
3296 dout("entry %u: snap_id = %llu\n", (unsigned int) index,
3297 (unsigned long long) snap_id);
3298 if (!snap || (snap_id != CEPH_NOSNAP && snap->id < snap_id)) {
3299 struct rbd_snap *new_snap;
3301 /* We haven't seen this snapshot before */
3303 new_snap = __rbd_add_snap_dev(rbd_dev, snap_name,
3304 snap_id, snap_size, snap_features);
3305 if (IS_ERR(new_snap)) {
3306 int err = PTR_ERR(new_snap);
3308 dout(" failed to add dev, error %d\n", err);
3313 /* New goes before existing, or at end of list */
3315 dout(" added dev%s\n", snap ? "" : " at end");
3317 list_add_tail(&new_snap->node, &snap->node);
3319 list_add_tail(&new_snap->node, head);
3321 /* Already have this one */
3323 dout(" already present\n");
3325 rbd_assert(snap->size == snap_size);
3326 rbd_assert(!strcmp(snap->name, snap_name));
3327 rbd_assert(snap->features == snap_features);
3329 /* Done with this list entry; advance */
3331 links = links->next;
3334 /* Advance to the next entry in the snapshot context */
3338 dout("%s: done\n", __func__);
3344 * Scan the list of snapshots and register the devices for any that
3345 * have not already been registered.
3347 static int rbd_dev_snaps_register(struct rbd_device *rbd_dev)
3349 struct rbd_snap *snap;
3352 dout("%s:\n", __func__);
3353 if (WARN_ON(!device_is_registered(&rbd_dev->dev)))
3356 list_for_each_entry(snap, &rbd_dev->snaps, node) {
3357 if (!rbd_snap_registered(snap)) {
3358 ret = rbd_register_snap_dev(snap, &rbd_dev->dev);
3363 dout("%s: returning %d\n", __func__, ret);
3368 static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
3373 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
3375 dev = &rbd_dev->dev;
3376 dev->bus = &rbd_bus_type;
3377 dev->type = &rbd_device_type;
3378 dev->parent = &rbd_root_dev;
3379 dev->release = rbd_dev_release;
3380 dev_set_name(dev, "%d", rbd_dev->dev_id);
3381 ret = device_register(dev);
3383 mutex_unlock(&ctl_mutex);
3388 static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
3390 device_unregister(&rbd_dev->dev);
3393 static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);
3396 * Get a unique rbd identifier for the given new rbd_dev, and add
3397 * the rbd_dev to the global list. The minimum rbd id is 1.
3399 static void rbd_dev_id_get(struct rbd_device *rbd_dev)
3401 rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);
3403 spin_lock(&rbd_dev_list_lock);
3404 list_add_tail(&rbd_dev->node, &rbd_dev_list);
3405 spin_unlock(&rbd_dev_list_lock);
3406 dout("rbd_dev %p given dev id %llu\n", rbd_dev,
3407 (unsigned long long) rbd_dev->dev_id);
3411 * Remove an rbd_dev from the global list, and record that its
3412 * identifier is no longer in use.
3414 static void rbd_dev_id_put(struct rbd_device *rbd_dev)
3416 struct list_head *tmp;
3417 int rbd_id = rbd_dev->dev_id;
3420 rbd_assert(rbd_id > 0);
3422 dout("rbd_dev %p released dev id %llu\n", rbd_dev,
3423 (unsigned long long) rbd_dev->dev_id);
3424 spin_lock(&rbd_dev_list_lock);
3425 list_del_init(&rbd_dev->node);
3428 * If the id being "put" is not the current maximum, there
3429 * is nothing special we need to do.
3431 if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
3432 spin_unlock(&rbd_dev_list_lock);
3437 * We need to update the current maximum id. Search the
3438 * list to find out what it is. We're more likely to find
3439 * the maximum at the end, so search the list backward.
3442 list_for_each_prev(tmp, &rbd_dev_list) {
3443 struct rbd_device *rbd_dev;
3445 rbd_dev = list_entry(tmp, struct rbd_device, node);
3446 if (rbd_dev->dev_id > max_id)
3447 max_id = rbd_dev->dev_id;
3449 spin_unlock(&rbd_dev_list_lock);
3452 * The max id could have been updated by rbd_dev_id_get(), in
3453 * which case it now accurately reflects the new maximum.
3454 * Be careful not to overwrite the maximum value in that case.
3457 atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
3458 dout(" max dev id has been reset\n");
3462 * Skips over white space at *buf, and updates *buf to point to the
3463 * first found non-space character (if any). Returns the length of
3464 * the token (string of non-white space characters) found. Note
3465 * that *buf must be terminated with '\0'.
3467 static inline size_t next_token(const char **buf)
3470 * These are the characters that produce nonzero for
3471 * isspace() in the "C" and "POSIX" locales.
3473 const char *spaces = " \f\n\r\t\v";
3475 *buf += strspn(*buf, spaces); /* Find start of token */
3477 return strcspn(*buf, spaces); /* Return token length */
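/*
 * For example, with *buf pointing at "  foo bar", next_token()
 * advances *buf to point at "foo bar" and returns 3, the length
 * of "foo".
 */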
3481 * Finds the next token in *buf, and if the provided token buffer is
3482 * big enough, copies the found token into it. The result, if
3483 * copied, is guaranteed to be terminated with '\0'. Note that *buf
3484 * must be terminated with '\0' on entry.
3486 * Returns the length of the token found (not including the '\0').
3487 * Return value will be 0 if no token is found, and it will be >=
3488 * token_size if the token would not fit.
3490 * The *buf pointer will be updated to point beyond the end of the
3491 * found token. Note that this occurs even if the token buffer is
3492 * too small to hold it.
3494 static inline size_t copy_token(const char **buf,
3500 len = next_token(buf);
3501 if (len < token_size) {
3502 memcpy(token, *buf, len);
3503 *(token + len) = '\0';
3511 * Finds the next token in *buf, dynamically allocates a buffer big
3512 * enough to hold a copy of it, and copies the token into the new
3513 * buffer. The copy is guaranteed to be terminated with '\0'. Note
3514 * that a duplicate buffer is created even for a zero-length token.
3516 * Returns a pointer to the newly-allocated duplicate, or a null
3517 * pointer if memory for the duplicate was not available. If
3518 * the lenp argument is a non-null pointer, the length of the token
3519 * (not including the '\0') is returned in *lenp.
3521 * If successful, the *buf pointer will be updated to point beyond
3522 * the end of the found token.
3524 * Note: uses GFP_KERNEL for allocation.
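 *
 * For example, with *buf pointing at "rbd foo", dup_token() returns
 * a newly-allocated, NUL-terminated copy "rbd" and advances *buf
 * past the token, leaving it pointing at " foo".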
3526 static inline char *dup_token(const char **buf, size_t *lenp)
3531 len = next_token(buf);
3532 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
3535 *(dup + len) = '\0';
3545 * Parse the options provided for an "rbd add" (i.e., rbd image
3546 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
3547 * and the data written is passed here via a NUL-terminated buffer.
3548 * Returns 0 if successful or an error code otherwise.
3550 * The information extracted from these options is recorded in
3551 * the other parameters which return dynamically-allocated structures:
3554 * The address of a pointer that will refer to a ceph options
3555 * structure. Caller must release the returned pointer using
3556 * ceph_destroy_options() when it is no longer needed.
3558 * Address of an rbd options pointer. Fully initialized by
3559 * this function; caller must release with kfree().
3561 * Address of an rbd image specification pointer. Fully
3562 * initialized by this function based on parsed options.
3563 * Caller must release with rbd_spec_put().
3565 * The options passed take this form:
3566 * <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
3569 * A comma-separated list of one or more monitor addresses.
3570 * A monitor address is an ip address, optionally followed
3571 * by a port number (separated by a colon).
3572 * I.e.: ip1[:port1][,ip2[:port2]...]
3574 * A comma-separated list of ceph and/or rbd options.
3576 * The name of the rados pool containing the rbd image.
3578 * The name of the image in that pool to map.
3580 * An optional snapshot id. If provided, the mapping will
3581 * present data from the image at the time that snapshot was
3582 * created. The image head is used if no snapshot id is
3583 * provided. Snapshot mappings are always read-only.
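 *
 * A made-up example of a buffer written to /sys/bus/rbd/add:
 *
 *	1.2.3.4:6789 name=admin rbd myimage mysnap
 *
 * This maps snapshot "mysnap" of image "myimage" in pool "rbd",
 * using the monitor at 1.2.3.4:6789 and authenticating as "admin".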
3585 static int rbd_add_parse_args(const char *buf,
3586 struct ceph_options **ceph_opts,
3587 struct rbd_options **opts,
3588 struct rbd_spec **rbd_spec)
3592 const char *mon_addrs;
3593 size_t mon_addrs_size;
3594 struct rbd_spec *spec = NULL;
3595 struct rbd_options *rbd_opts = NULL;
3596 struct ceph_options *copts;
3599 /* The first four tokens are required */
3601 len = next_token(&buf);
3603 rbd_warn(NULL, "no monitor address(es) provided");
3607 mon_addrs_size = len + 1;
3611 options = dup_token(&buf, NULL);
3615 rbd_warn(NULL, "no options provided");
3619 spec = rbd_spec_alloc();
3623 spec->pool_name = dup_token(&buf, NULL);
3624 if (!spec->pool_name)
3626 if (!*spec->pool_name) {
3627 rbd_warn(NULL, "no pool name provided");
3631 spec->image_name = dup_token(&buf, NULL);
3632 if (!spec->image_name)
3634 if (!*spec->image_name) {
3635 rbd_warn(NULL, "no image name provided");
3640 * Snapshot name is optional; default is to use "-"
3641 * (indicating the head/no snapshot).
3643 len = next_token(&buf);
3645 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
3646 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
3647 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
3648 ret = -ENAMETOOLONG;
3651 spec->snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
3652 if (!spec->snap_name)
3654 *(spec->snap_name + len) = '\0';
3656 /* Initialize all rbd options to the defaults */
3658 rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
3662 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
3664 copts = ceph_parse_options(options, mon_addrs,
3665 mon_addrs + mon_addrs_size - 1,
3666 parse_rbd_opts_token, rbd_opts);
3667 if (IS_ERR(copts)) {
3668 ret = PTR_ERR(copts);
3689 * An rbd format 2 image has a unique identifier, distinct from the
3690 * name given to it by the user. Internally, that identifier is
3691 * what's used to specify the names of objects related to the image.
3693 * A special "rbd id" object is used to map an rbd image name to its
3694 * id. If that object doesn't exist, then there is no v2 rbd image
3695 * with the supplied name.
3697 * This function will record the given rbd_dev's image_id field if
3698 * it can be determined, and in that case will return 0. If any
3699 * errors occur a negative errno will be returned and the rbd_dev's
3700 * image_id field will be unchanged (and should be NULL).
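 *
 * For example (hypothetical image name "foo"): the sprintf() below
 * builds the id object name by prefixing the image name with
 * RBD_ID_PREFIX, yielding something like "rbd_id.foo".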
3702 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
3711 * When probing a parent image, the image id is already
3712 * known (and the image name likely is not). There's no
3713 * need to fetch the image id again in this case.
3715 if (rbd_dev->spec->image_id)
3719 * First, see if the format 2 image id object exists, and if
3720 * so, get the image's persistent id from it.
3722 size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
3723 object_name = kmalloc(size, GFP_NOIO);
3726 sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
3727 dout("rbd id object name is %s\n", object_name);
3729 /* Response will be an encoded string, which includes a length */
3731 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
3732 response = kzalloc(size, GFP_NOIO);
3738 ret = rbd_obj_method_sync(rbd_dev, object_name,
3741 response, RBD_IMAGE_ID_LEN_MAX, NULL);
3742 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3747 rbd_dev->spec->image_id = ceph_extract_encoded_string(&p,
3748 p + RBD_IMAGE_ID_LEN_MAX,
3750 if (IS_ERR(rbd_dev->spec->image_id)) {
3751 ret = PTR_ERR(rbd_dev->spec->image_id);
3752 rbd_dev->spec->image_id = NULL;
3754 dout("image_id is %s\n", rbd_dev->spec->image_id);
3763 static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
3768 /* Version 1 images have no id; empty string is used */
3770 rbd_dev->spec->image_id = kstrdup("", GFP_KERNEL);
3771 if (!rbd_dev->spec->image_id)
3774 /* Record the header object name for this rbd image. */
3776 size = strlen(rbd_dev->spec->image_name) + sizeof (RBD_SUFFIX);
3777 rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
3778 if (!rbd_dev->header_name) {
3782 sprintf(rbd_dev->header_name, "%s%s",
3783 rbd_dev->spec->image_name, RBD_SUFFIX);
3785 /* Populate rbd image metadata */
3787 ret = rbd_read_header(rbd_dev, &rbd_dev->header);
3791 /* Version 1 images have no parent (no layering) */
3793 rbd_dev->parent_spec = NULL;
3794 rbd_dev->parent_overlap = 0;
3796 rbd_dev->image_format = 1;
3798 dout("discovered version 1 image, header name is %s\n",
3799 rbd_dev->header_name);
3804 kfree(rbd_dev->header_name);
3805 rbd_dev->header_name = NULL;
3806 kfree(rbd_dev->spec->image_id);
3807 rbd_dev->spec->image_id = NULL;
3812 static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
3819 * Image id was filled in by the caller. Record the header
3820 * object name for this rbd image.
3822 size = sizeof (RBD_HEADER_PREFIX) + strlen(rbd_dev->spec->image_id);
3823 rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
3824 if (!rbd_dev->header_name)
3826 sprintf(rbd_dev->header_name, "%s%s",
3827 RBD_HEADER_PREFIX, rbd_dev->spec->image_id);
3829 /* Get the size and object order for the image */
3831 ret = rbd_dev_v2_image_size(rbd_dev);
3835 /* Get the object prefix (a.k.a. block_name) for the image */
3837 ret = rbd_dev_v2_object_prefix(rbd_dev);
3841 /* Get and check the features for the image */
3843 ret = rbd_dev_v2_features(rbd_dev);
3847 /* If the image supports layering, get the parent info */
3849 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
3850 ret = rbd_dev_v2_parent_info(rbd_dev);
3855 /* crypto and compression type aren't (yet) supported for v2 images */
3857 rbd_dev->header.crypt_type = 0;
3858 rbd_dev->header.comp_type = 0;
3860 /* Get the snapshot context, plus the header version */
3862 ret = rbd_dev_v2_snap_context(rbd_dev, &ver);
3865 rbd_dev->header.obj_version = ver;
3867 rbd_dev->image_format = 2;
3869 dout("discovered version 2 image, header name is %s\n",
3870 rbd_dev->header_name);
3874 rbd_dev->parent_overlap = 0;
3875 rbd_spec_put(rbd_dev->parent_spec);
3876 rbd_dev->parent_spec = NULL;
3877 kfree(rbd_dev->header_name);
3878 rbd_dev->header_name = NULL;
3879 kfree(rbd_dev->header.object_prefix);
3880 rbd_dev->header.object_prefix = NULL;
3885 static int rbd_dev_probe_finish(struct rbd_device *rbd_dev)
3889 /* no need to lock here, as rbd_dev is not registered yet */
3890 ret = rbd_dev_snaps_update(rbd_dev);
3894 ret = rbd_dev_probe_update_spec(rbd_dev);
3898 ret = rbd_dev_set_mapping(rbd_dev);
3902 /* generate unique id: find highest unique id, add one */
3903 rbd_dev_id_get(rbd_dev);
3905 /* Fill in the device name, now that we have its id. */
3906 BUILD_BUG_ON(DEV_NAME_LEN
3907 < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
3908 sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);
3910 /* Get our block major device number. */
3912 ret = register_blkdev(0, rbd_dev->name);
3915 rbd_dev->major = ret;
3917 /* Set up the blkdev mapping. */
3919 ret = rbd_init_disk(rbd_dev);
3921 goto err_out_blkdev;
3923 ret = rbd_bus_add_dev(rbd_dev);
3928 * At this point cleanup in the event of an error is the job
3929 * of the sysfs code (initiated by rbd_bus_del_dev()).
3931 down_write(&rbd_dev->header_rwsem);
3932 ret = rbd_dev_snaps_register(rbd_dev);
3933 up_write(&rbd_dev->header_rwsem);
3937 ret = rbd_dev_header_watch_sync(rbd_dev, 1);
3941 /* Everything's ready. Announce the disk to the world. */
3943 add_disk(rbd_dev->disk);
3945 pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
3946 (unsigned long long) rbd_dev->mapping.size);
3950 /* this will also clean up the rest of the rbd_dev state */
3952 rbd_bus_del_dev(rbd_dev);
3956 rbd_free_disk(rbd_dev);
3958 unregister_blkdev(rbd_dev->major, rbd_dev->name);
3960 rbd_dev_id_put(rbd_dev);
3962 rbd_remove_all_snaps(rbd_dev);
3968 * Probe for the existence of the header object for the given rbd
3969 * device. For format 2 images this includes determining the image id.
3972 static int rbd_dev_probe(struct rbd_device *rbd_dev)
3977 * Get the id from the image id object. If it's not a
3978 * format 2 image, we'll get ENOENT back, and we'll assume
3979 * it's a format 1 image.
3981 ret = rbd_dev_image_id(rbd_dev);
3983 ret = rbd_dev_v1_probe(rbd_dev);
3985 ret = rbd_dev_v2_probe(rbd_dev);
3987 dout("probe failed, returning %d\n", ret);
3992 ret = rbd_dev_probe_finish(rbd_dev);
3994 rbd_header_free(&rbd_dev->header);
3999 static ssize_t rbd_add(struct bus_type *bus,
4003 struct rbd_device *rbd_dev = NULL;
4004 struct ceph_options *ceph_opts = NULL;
4005 struct rbd_options *rbd_opts = NULL;
4006 struct rbd_spec *spec = NULL;
4007 struct rbd_client *rbdc;
4008 struct ceph_osd_client *osdc;
4011 if (!try_module_get(THIS_MODULE))
4014 /* parse add command */
4015 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
4017 goto err_out_module;
4019 rbdc = rbd_get_client(ceph_opts);
4024 ceph_opts = NULL; /* rbd_dev client now owns this */
4027 osdc = &rbdc->client->osdc;
4028 rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
4030 goto err_out_client;
4031 spec->pool_id = (u64) rc;
4033 /* The ceph file layout needs to fit pool id in 32 bits */
4035 if (WARN_ON(spec->pool_id > (u64) U32_MAX)) {
4037 goto err_out_client;
4040 rbd_dev = rbd_dev_create(rbdc, spec);
4042 goto err_out_client;
4043 rbdc = NULL; /* rbd_dev now owns this */
4044 spec = NULL; /* rbd_dev now owns this */
4046 rbd_dev->mapping.read_only = rbd_opts->read_only;
4048 rbd_opts = NULL; /* done with this */
4050 rc = rbd_dev_probe(rbd_dev);
4052 goto err_out_rbd_dev;
4056 rbd_dev_destroy(rbd_dev);
4058 rbd_put_client(rbdc);
4061 ceph_destroy_options(ceph_opts);
4065 module_put(THIS_MODULE);
4067 dout("Error adding device %s\n", buf);
4069 return (ssize_t) rc;
4072 static struct rbd_device *__rbd_get_dev(unsigned long dev_id)
4074 struct list_head *tmp;
4075 struct rbd_device *rbd_dev;
4077 spin_lock(&rbd_dev_list_lock);
4078 list_for_each(tmp, &rbd_dev_list) {
4079 rbd_dev = list_entry(tmp, struct rbd_device, node);
4080 if (rbd_dev->dev_id == dev_id) {
4081 spin_unlock(&rbd_dev_list_lock);
4085 spin_unlock(&rbd_dev_list_lock);
4089 static void rbd_dev_release(struct device *dev)
4091 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4093 if (rbd_dev->watch_event)
4094 rbd_dev_header_watch_sync(rbd_dev, 0);
4096 /* clean up and free blkdev */
4097 rbd_free_disk(rbd_dev);
4098 unregister_blkdev(rbd_dev->major, rbd_dev->name);
4100 /* release allocated disk header fields */
4101 rbd_header_free(&rbd_dev->header);
4103 /* done with the id, and with the rbd_dev */
4104 rbd_dev_id_put(rbd_dev);
4105 rbd_assert(rbd_dev->rbd_client != NULL);
4106 rbd_dev_destroy(rbd_dev);
4108 /* release module ref */
4109 module_put(THIS_MODULE);
4112 static ssize_t rbd_remove(struct bus_type *bus,
4116 struct rbd_device *rbd_dev = NULL;
4121 rc = strict_strtoul(buf, 10, &ul);
4125 /* convert to int; abort if we lost anything in the conversion */
4126 target_id = (int) ul;
4127 if (target_id != ul)
4130 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
4132 rbd_dev = __rbd_get_dev(target_id);
4138 spin_lock_irq(&rbd_dev->lock);
4139 if (rbd_dev->open_count)
4142 set_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
4143 spin_unlock_irq(&rbd_dev->lock);
4147 rbd_remove_all_snaps(rbd_dev);
4148 rbd_bus_del_dev(rbd_dev);
4151 mutex_unlock(&ctl_mutex);
4157 * create control files in sysfs
4160 static int rbd_sysfs_init(void)
4164 ret = device_register(&rbd_root_dev);
4168 ret = bus_register(&rbd_bus_type);
4170 device_unregister(&rbd_root_dev);
4175 static void rbd_sysfs_cleanup(void)
4177 bus_unregister(&rbd_bus_type);
4178 device_unregister(&rbd_root_dev);
4181 static int __init rbd_init(void)
4185 if (!libceph_compatible(NULL)) {
4186 rbd_warn(NULL, "libceph incompatibility (quitting)");
4190 rc = rbd_sysfs_init();
4193 pr_info("loaded " RBD_DRV_NAME_LONG "\n");
4197 static void __exit rbd_exit(void)
4199 rbd_sysfs_cleanup();
4202 module_init(rbd_init);
4203 module_exit(rbd_exit);
4205 MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
4206 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
4207 MODULE_DESCRIPTION("rados block device");
4209 /* following authorship retained from original osdblk.c */
4210 MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
4212 MODULE_LICENSE("GPL");