#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>

#define OSD_OP_FRONT_LEN	4096
#define OSD_OPREPLY_FRONT_LEN	512

static const struct ceph_connection_operations osd_con_ops;
static void __send_queued(struct ceph_osd_client *osdc);
static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd);
static void __register_request(struct ceph_osd_client *osdc,
			       struct ceph_osd_request *req);
static void __unregister_linger_request(struct ceph_osd_client *osdc,
					struct ceph_osd_request *req);
static void __send_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req);
static int op_has_extent(int op)
{
	return (op == CEPH_OSD_OP_READ ||
		op == CEPH_OSD_OP_WRITE);
}
/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * object storage within the cluster.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * change, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */
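/*
 * Rough request lifecycle, as a reader's orientation (an illustrative
 * summary, not authoritative documentation):
 *
 *	req = ceph_osdc_new_request(...);	  allocate + encode message
 *	ceph_osdc_start_request(osdc, req, ...);  map to a PG/OSD and send
 *	ceph_osdc_wait_request(osdc, req);	  wait for handle_reply()
 *	ceph_osdc_put_request(req);		  drop the caller's reference
 */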
/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
		       struct ceph_osd_req_op *op, u64 *bno)
{
	u64 orig_len = *plen;
	u64 objoff = 0;
	u64 objlen = 0;
	int r;

	/* object extent? */
	r = ceph_calc_file_object_mapping(layout, off, orig_len, bno,
					  &objoff, &objlen);
	if (r < 0)
		return r;
	if (objlen < orig_len) {
		*plen = objlen;
		dout(" skipping last %llu, final file extent %llu~%llu\n",
		     orig_len - *plen, off, *plen);
	}

	if (op_has_extent(op->op)) {
		u32 osize = le32_to_cpu(layout->fl_object_size);
		op->extent.offset = objoff;
		op->extent.length = objlen;
		if (op->extent.truncate_size <= off - objoff) {
			op->extent.truncate_size = 0;
		} else {
			op->extent.truncate_size -= off - objoff;
			if (op->extent.truncate_size > osize)
				op->extent.truncate_size = osize;
		}
	}
	if (op->op == CEPH_OSD_OP_WRITE)
		op->payload_len = *plen;

	dout("calc_layout bno=%llx %llu~%llu\n", *bno, objoff, objlen);

	return 0;
}
/*
 * requests
 */
void ceph_osdc_release_request(struct kref *kref)
{
	struct ceph_osd_request *req = container_of(kref,
						    struct ceph_osd_request,
						    r_kref);

	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_con_filling_msg) {
		dout("%s revoking msg %p from con %p\n", __func__,
		     req->r_reply, req->r_con_filling_msg);
		ceph_msg_revoke_incoming(req->r_reply);
		req->r_con_filling_msg->ops->put(req->r_con_filling_msg);
		req->r_con_filling_msg = NULL;
	}
	if (req->r_reply)
		ceph_msg_put(req->r_reply);
	if (req->r_own_pages)
		ceph_release_page_vector(req->r_pages,
					 req->r_num_pages);
	ceph_put_snap_context(req->r_snapc);
	ceph_pagelist_release(&req->r_trail);
	if (req->r_mempool)
		mempool_free(req, req->r_osdc->req_mempool);
	else
		kfree(req);
}
EXPORT_SYMBOL(ceph_osdc_release_request);
struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
					       struct ceph_snap_context *snapc,
					       unsigned int num_ops,
					       bool use_mempool,
					       gfp_t gfp_flags)
{
	struct ceph_osd_request *req;
	struct ceph_msg *msg;
	size_t msg_size;

	msg_size = 4 + 4 + 8 + 8 + 4+8;
	msg_size += 2 + 4 + 8 + 4 + 4; /* oloc */
	msg_size += 1 + 8 + 4 + 4;     /* pg_t */
	msg_size += 4 + MAX_OBJ_NAME_SIZE;
	msg_size += 2 + num_ops*sizeof(struct ceph_osd_op);
	msg_size += 8;  /* snapid */
	msg_size += 8;  /* snap_seq */
	msg_size += 8 * (snapc ? snapc->num_snaps : 0);  /* snaps */
	msg_size += 4;  /* retry attempt count */

	if (use_mempool) {
		req = mempool_alloc(osdc->req_mempool, gfp_flags);
		memset(req, 0, sizeof(*req));
	} else {
		req = kzalloc(sizeof(*req), gfp_flags);
	}
	if (req == NULL)
		return NULL;

	req->r_osdc = osdc;
	req->r_mempool = use_mempool;

	kref_init(&req->r_kref);
	init_completion(&req->r_completion);
	init_completion(&req->r_safe_completion);
	RB_CLEAR_NODE(&req->r_node);
	INIT_LIST_HEAD(&req->r_unsafe_item);
	INIT_LIST_HEAD(&req->r_linger_item);
	INIT_LIST_HEAD(&req->r_linger_osd);
	INIT_LIST_HEAD(&req->r_req_lru_item);
	INIT_LIST_HEAD(&req->r_osd_item);

	/* create reply message */
	if (use_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY,
				   OSD_OPREPLY_FRONT_LEN, gfp_flags, true);
	if (!msg) {
		ceph_osdc_put_request(req);
		return NULL;
	}
	req->r_reply = msg;

	ceph_pagelist_init(&req->r_trail);

	/* create request message; allow space for oid */
	if (use_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp_flags, true);
	if (!msg) {
		ceph_osdc_put_request(req);
		return NULL;
	}

	memset(msg->front.iov_base, 0, msg->front.iov_len);

	req->r_request = msg;

	return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);
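/*
 * Note that the reply message is allocated (or taken from the msgpool)
 * at request-allocation time above, rather than when the reply arrives;
 * this way receiving a reply never has to allocate memory, which
 * presumably matters when writeback is being used to relieve memory
 * pressure.
 */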
static void osd_req_encode_op(struct ceph_osd_request *req,
			      struct ceph_osd_op *dst,
			      struct ceph_osd_req_op *src)
{
	dst->op = cpu_to_le16(src->op);

	switch (src->op) {
	case CEPH_OSD_OP_STAT:
		break;
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
		dst->extent.offset =
			cpu_to_le64(src->extent.offset);
		dst->extent.length =
			cpu_to_le64(src->extent.length);
		dst->extent.truncate_size =
			cpu_to_le64(src->extent.truncate_size);
		dst->extent.truncate_seq =
			cpu_to_le32(src->extent.truncate_seq);
		break;
	case CEPH_OSD_OP_CALL:
		dst->cls.class_len = src->cls.class_len;
		dst->cls.method_len = src->cls.method_len;
		dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);

		ceph_pagelist_append(&req->r_trail, src->cls.class_name,
				     src->cls.class_len);
		ceph_pagelist_append(&req->r_trail, src->cls.method_name,
				     src->cls.method_len);
		ceph_pagelist_append(&req->r_trail, src->cls.indata,
				     src->cls.indata_len);
		break;
	case CEPH_OSD_OP_STARTSYNC:
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		dst->watch.cookie = cpu_to_le64(src->watch.cookie);
		dst->watch.ver = cpu_to_le64(src->watch.ver);
		dst->watch.flag = src->watch.flag;
		break;
	default:
		pr_err("unrecognized osd opcode %d\n", dst->op);
		WARN_ON(1);
		break;
	case CEPH_OSD_OP_MAPEXT:
	case CEPH_OSD_OP_MASKTRUNC:
	case CEPH_OSD_OP_SPARSE_READ:
	case CEPH_OSD_OP_NOTIFY:
	case CEPH_OSD_OP_ASSERT_VER:
	case CEPH_OSD_OP_WRITEFULL:
	case CEPH_OSD_OP_TRUNCATE:
	case CEPH_OSD_OP_ZERO:
	case CEPH_OSD_OP_DELETE:
	case CEPH_OSD_OP_APPEND:
	case CEPH_OSD_OP_SETTRUNC:
	case CEPH_OSD_OP_TRIMTRUNC:
	case CEPH_OSD_OP_TMAPUP:
	case CEPH_OSD_OP_TMAPPUT:
	case CEPH_OSD_OP_TMAPGET:
	case CEPH_OSD_OP_CREATE:
	case CEPH_OSD_OP_ROLLBACK:
	case CEPH_OSD_OP_OMAPGETKEYS:
	case CEPH_OSD_OP_OMAPGETVALS:
	case CEPH_OSD_OP_OMAPGETHEADER:
	case CEPH_OSD_OP_OMAPGETVALSBYKEYS:
	case CEPH_OSD_OP_MODE_RD:
	case CEPH_OSD_OP_OMAPSETVALS:
	case CEPH_OSD_OP_OMAPSETHEADER:
	case CEPH_OSD_OP_OMAPCLEAR:
	case CEPH_OSD_OP_OMAPRMKEYS:
	case CEPH_OSD_OP_OMAP_CMP:
	case CEPH_OSD_OP_CLONERANGE:
	case CEPH_OSD_OP_ASSERT_SRC_VERSION:
	case CEPH_OSD_OP_SRC_CMPXATTR:
	case CEPH_OSD_OP_GETXATTR:
	case CEPH_OSD_OP_GETXATTRS:
	case CEPH_OSD_OP_CMPXATTR:
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_SETXATTRS:
	case CEPH_OSD_OP_RESETXATTRS:
	case CEPH_OSD_OP_RMXATTR:
	case CEPH_OSD_OP_PULL:
	case CEPH_OSD_OP_PUSH:
	case CEPH_OSD_OP_BALANCEREADS:
	case CEPH_OSD_OP_UNBALANCEREADS:
	case CEPH_OSD_OP_SCRUB:
	case CEPH_OSD_OP_SCRUB_RESERVE:
	case CEPH_OSD_OP_SCRUB_UNRESERVE:
	case CEPH_OSD_OP_SCRUB_STOP:
	case CEPH_OSD_OP_SCRUB_MAP:
	case CEPH_OSD_OP_WRLOCK:
	case CEPH_OSD_OP_WRUNLOCK:
	case CEPH_OSD_OP_RDLOCK:
	case CEPH_OSD_OP_RDUNLOCK:
	case CEPH_OSD_OP_UPLOCK:
	case CEPH_OSD_OP_DNLOCK:
	case CEPH_OSD_OP_PGLS:
	case CEPH_OSD_OP_PGLS_FILTER:
		pr_err("unsupported osd opcode %s\n",
			ceph_osd_op_name(dst->op));
		WARN_ON(1);
		break;
	}
	dst->payload_len = cpu_to_le32(src->payload_len);
}
/*
 * build new request AND message
 *
 */
void ceph_osdc_build_request(struct ceph_osd_request *req,
			     u64 off, u64 len, unsigned int num_ops,
			     struct ceph_osd_req_op *src_ops,
			     struct ceph_snap_context *snapc, u64 snap_id,
			     struct timespec *mtime)
{
	struct ceph_msg *msg = req->r_request;
	struct ceph_osd_req_op *src_op;
	void *p;
	size_t msg_size;
	int flags = req->r_flags;
	u64 data_len;
	int i;

	req->r_num_ops = num_ops;
	req->r_snapid = snap_id;
	req->r_snapc = ceph_get_snap_context(snapc);

	/* encode request */
	msg->hdr.version = cpu_to_le16(4);

	p = msg->front.iov_base;
	ceph_encode_32(&p, 1);   /* client_inc is always 1 */
	req->r_request_osdmap_epoch = p;
	p += 4;
	req->r_request_flags = p;
	p += 4;
	if (req->r_flags & CEPH_OSD_FLAG_WRITE)
		ceph_encode_timespec(p, mtime);
	p += sizeof(struct ceph_timespec);
	req->r_request_reassert_version = p;
	p += sizeof(struct ceph_eversion); /* will get filled in */

	/* oloc */
	ceph_encode_8(&p, 4);
	ceph_encode_8(&p, 4);
	ceph_encode_32(&p, 8 + 4 + 4);
	req->r_request_pool = p;
	p += 8;
	ceph_encode_32(&p, -1);  /* preferred */
	ceph_encode_32(&p, 0);   /* key len */

	ceph_encode_8(&p, 1);
	req->r_request_pgid = p;
	p += 8 + 4;
	ceph_encode_32(&p, -1);  /* preferred */

	/* oid */
	ceph_encode_32(&p, req->r_oid_len);
	memcpy(p, req->r_oid, req->r_oid_len);
	dout("oid '%.*s' len %d\n", req->r_oid_len, req->r_oid, req->r_oid_len);
	p += req->r_oid_len;

	/* ops */
	ceph_encode_16(&p, num_ops);
	src_op = src_ops;
	req->r_request_ops = p;
	for (i = 0; i < num_ops; i++, src_op++) {
		osd_req_encode_op(req, p, src_op);
		p += sizeof(struct ceph_osd_op);
	}

	/* snaps */
	ceph_encode_64(&p, req->r_snapid);
	ceph_encode_64(&p, req->r_snapc ? req->r_snapc->seq : 0);
	ceph_encode_32(&p, req->r_snapc ? req->r_snapc->num_snaps : 0);
	if (req->r_snapc) {
		for (i = 0; i < snapc->num_snaps; i++) {
			ceph_encode_64(&p, req->r_snapc->snaps[i]);
		}
	}

	req->r_request_attempts = p;
	p += 4;

	data_len = req->r_trail.length;
	if (flags & CEPH_OSD_FLAG_WRITE) {
		req->r_request->hdr.data_off = cpu_to_le16(off);
		data_len += len;
	}
	req->r_request->hdr.data_len = cpu_to_le32(data_len);
	req->r_request->page_alignment = req->r_page_alignment;

	BUG_ON(p > msg->front.iov_base + msg->front.iov_len);
	msg_size = p - msg->front.iov_base;
	msg->front.iov_len = msg_size;
	msg->hdr.front_len = cpu_to_le32(msg_size);

	dout("build_request msg_size was %d num_ops %d\n", (int)msg_size,
	     num_ops);
	return;
}
EXPORT_SYMBOL(ceph_osdc_build_request);
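/*
 * For reference, the front section encoded above is, in order:
 * client_inc (4), osdmap epoch (4), flags (4), mtime (ceph_timespec),
 * reassert_version (ceph_eversion), object locator (2 + 4 + 8 + 4 + 4),
 * raw pg_t (1 + 8 + 4 + 4), oid (4 + len), op count (2) plus the ops
 * themselves, snapid (8), snap seq (8), snap count (4) and the snaps,
 * and the retry attempt count (4).  This mirrors the msg_size estimate
 * in ceph_osdc_alloc_request() above.
 */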
/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 *
 * if @do_sync, include a 'startsync' command so that the osd will flush
 * data quickly.
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
					       struct ceph_file_layout *layout,
					       struct ceph_vino vino,
					       u64 off, u64 *plen,
					       int opcode, int flags,
					       struct ceph_snap_context *snapc,
					       int do_sync,
					       u32 truncate_seq,
					       u64 truncate_size,
					       struct timespec *mtime,
					       bool use_mempool,
					       int page_align)
{
	struct ceph_osd_req_op ops[2];
	struct ceph_osd_request *req;
	unsigned int num_op = 1;
	u64 bno;
	int r;

	memset(&ops, 0, sizeof ops);

	ops[0].op = opcode;
	ops[0].extent.truncate_seq = truncate_seq;
	ops[0].extent.truncate_size = truncate_size;

	if (do_sync) {
		ops[1].op = CEPH_OSD_OP_STARTSYNC;
		num_op++;
	}

	req = ceph_osdc_alloc_request(osdc, snapc, num_op, use_mempool,
				      GFP_NOFS);
	if (!req)
		return ERR_PTR(-ENOMEM);
	req->r_flags = flags;

	/* calculate max write size */
	r = calc_layout(layout, off, plen, ops, &bno);
	if (r < 0) {
		ceph_osdc_put_request(req);
		return ERR_PTR(r);
	}

	req->r_file_layout = *layout;  /* keep a copy */

	snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx", vino.ino, bno);
	req->r_oid_len = strlen(req->r_oid);

	/* The alignment may differ from the natural (file) alignment */

	req->r_num_pages = calc_pages_for(page_align, *plen);
	req->r_page_alignment = page_align;

	ceph_osdc_build_request(req, off, *plen, num_op, ops,
				snapc, vino.snap, mtime);

	return req;
}
EXPORT_SYMBOL(ceph_osdc_new_request);
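/*
 * Illustrative call (hypothetical values), reading 4096 bytes at file
 * offset 0 with no truncation state to carry:
 *
 *	u64 len = 4096;
 *	req = ceph_osdc_new_request(osdc, layout, vino, 0, &len,
 *				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 *				    NULL, 0, 0, 0, NULL, false, 0);
 *
 * On return, len may have been shortened at an object boundary; see
 * ceph_osdc_readpages() below for a real caller.
 */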
/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
static void __insert_request(struct ceph_osd_client *osdc,
			     struct ceph_osd_request *new)
{
	struct rb_node **p = &osdc->requests.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_osd_request *req = NULL;

	while (*p) {
		parent = *p;
		req = rb_entry(parent, struct ceph_osd_request, r_node);
		if (new->r_tid < req->r_tid)
			p = &(*p)->rb_left;
		else if (new->r_tid > req->r_tid)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->r_node, parent, p);
	rb_insert_color(&new->r_node, &osdc->requests);
}

static struct ceph_osd_request *__lookup_request(struct ceph_osd_client *osdc,
						 u64 tid)
{
	struct ceph_osd_request *req;
	struct rb_node *n = osdc->requests.rb_node;

	while (n) {
		req = rb_entry(n, struct ceph_osd_request, r_node);
		if (tid < req->r_tid)
			n = n->rb_left;
		else if (tid > req->r_tid)
			n = n->rb_right;
		else
			return req;
	}
	return NULL;
}

static struct ceph_osd_request *
__lookup_request_ge(struct ceph_osd_client *osdc,
		    u64 tid)
{
	struct ceph_osd_request *req;
	struct rb_node *n = osdc->requests.rb_node;

	while (n) {
		req = rb_entry(n, struct ceph_osd_request, r_node);
		if (tid < req->r_tid) {
			if (!n->rb_left)
				return req;
			n = n->rb_left;
		} else if (tid > req->r_tid) {
			n = n->rb_right;
		} else {
			return req;
		}
	}
	return NULL;
}
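/*
 * __lookup_request_ge() returns the registered request with the
 * smallest tid that is >= the given tid.  ceph_osdc_sync() below relies
 * on it to walk in-flight requests in tid order while repeatedly
 * dropping and retaking request_mutex.
 */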
/*
 * Resubmit requests pending on the given osd.
 */
static void __kick_osd_requests(struct ceph_osd_client *osdc,
				struct ceph_osd *osd)
{
	struct ceph_osd_request *req, *nreq;
	int err;

	dout("__kick_osd_requests osd%d\n", osd->o_osd);
	err = __reset_osd(osdc, osd);
	if (err)
		return;

	list_for_each_entry(req, &osd->o_requests, r_osd_item) {
		list_move(&req->r_req_lru_item, &osdc->req_unsent);
		dout("requeued %p tid %llu osd%d\n", req, req->r_tid,
		     osd->o_osd);
		if (!req->r_linger)
			req->r_flags |= CEPH_OSD_FLAG_RETRY;
	}

	list_for_each_entry_safe(req, nreq, &osd->o_linger_requests,
				 r_linger_osd) {
		/*
		 * reregister request prior to unregistering linger so
		 * that r_osd is preserved.
		 */
		BUG_ON(!list_empty(&req->r_req_lru_item));
		__register_request(osdc, req);
		list_add(&req->r_req_lru_item, &osdc->req_unsent);
		list_add(&req->r_osd_item, &req->r_osd->o_requests);
		__unregister_linger_request(osdc, req);
		dout("requeued lingering %p tid %llu osd%d\n", req, req->r_tid,
		     osd->o_osd);
	}
}
/*
 * If the osd connection drops, we need to resubmit all requests.
 */
static void osd_reset(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc;

	if (!osd)
		return;
	dout("osd_reset osd%d\n", osd->o_osd);
	osdc = osd->o_osdc;
	down_read(&osdc->map_sem);
	mutex_lock(&osdc->request_mutex);
	__kick_osd_requests(osdc, osd);
	__send_queued(osdc);
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
}
/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
	struct ceph_osd *osd;

	osd = kzalloc(sizeof(*osd), GFP_NOFS);
	if (!osd)
		return NULL;

	atomic_set(&osd->o_ref, 1);
	osd->o_osdc = osdc;
	osd->o_osd = onum;
	RB_CLEAR_NODE(&osd->o_node);
	INIT_LIST_HEAD(&osd->o_requests);
	INIT_LIST_HEAD(&osd->o_linger_requests);
	INIT_LIST_HEAD(&osd->o_osd_lru);
	osd->o_incarnation = 1;

	ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);

	INIT_LIST_HEAD(&osd->o_keepalive_item);
	return osd;
}

static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
	if (atomic_inc_not_zero(&osd->o_ref)) {
		dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1,
		     atomic_read(&osd->o_ref));
		return osd;
	} else {
		dout("get_osd %p FAIL\n", osd);
		return NULL;
	}
}

static void put_osd(struct ceph_osd *osd)
{
	dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
	     atomic_read(&osd->o_ref) - 1);
	if (atomic_dec_and_test(&osd->o_ref) && osd->o_auth.authorizer) {
		struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth;

		if (ac->ops && ac->ops->destroy_authorizer)
			ac->ops->destroy_authorizer(ac, osd->o_auth.authorizer);
		kfree(osd);
	}
}
/*
 * remove an osd from our map
 */
static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
	dout("__remove_osd %p\n", osd);
	BUG_ON(!list_empty(&osd->o_requests));
	rb_erase(&osd->o_node, &osdc->osds);
	list_del_init(&osd->o_osd_lru);
	ceph_con_close(&osd->o_con);
	put_osd(osd);
}

static void remove_all_osds(struct ceph_osd_client *osdc)
{
	dout("%s %p\n", __func__, osdc);
	mutex_lock(&osdc->request_mutex);
	while (!RB_EMPTY_ROOT(&osdc->osds)) {
		struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
						struct ceph_osd, o_node);
		__remove_osd(osdc, osd);
	}
	mutex_unlock(&osdc->request_mutex);
}

static void __move_osd_to_lru(struct ceph_osd_client *osdc,
			      struct ceph_osd *osd)
{
	dout("__move_osd_to_lru %p\n", osd);
	BUG_ON(!list_empty(&osd->o_osd_lru));
	list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
	osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl * HZ;
}

static void __remove_osd_from_lru(struct ceph_osd *osd)
{
	dout("__remove_osd_from_lru %p\n", osd);
	if (!list_empty(&osd->o_osd_lru))
		list_del_init(&osd->o_osd_lru);
}

static void remove_old_osds(struct ceph_osd_client *osdc)
{
	struct ceph_osd *osd, *nosd;

	dout("__remove_old_osds %p\n", osdc);
	mutex_lock(&osdc->request_mutex);
	list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
		if (time_before(jiffies, osd->lru_ttl))
			break;
		__remove_osd(osdc, osd);
	}
	mutex_unlock(&osdc->request_mutex);
}
/*
 * reset osd connect
 */
static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
	struct ceph_entity_addr *peer_addr;

	dout("__reset_osd %p osd%d\n", osd, osd->o_osd);
	if (list_empty(&osd->o_requests) &&
	    list_empty(&osd->o_linger_requests)) {
		__remove_osd(osdc, osd);

		return -ENODEV;
	}

	peer_addr = &osdc->osdmap->osd_addr[osd->o_osd];
	if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
	    !ceph_con_opened(&osd->o_con)) {
		struct ceph_osd_request *req;

		dout(" osd addr hasn't changed and connection never opened,"
		     " letting msgr retry\n");
		/* touch each r_stamp for handle_timeout()'s benefit */
		list_for_each_entry(req, &osd->o_requests, r_osd_item)
			req->r_stamp = jiffies;

		return -EAGAIN;
	}

	ceph_con_close(&osd->o_con);
	ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
	osd->o_incarnation++;

	return 0;
}
static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
{
	struct rb_node **p = &osdc->osds.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_osd *osd = NULL;

	dout("__insert_osd %p osd%d\n", new, new->o_osd);
	while (*p) {
		parent = *p;
		osd = rb_entry(parent, struct ceph_osd, o_node);
		if (new->o_osd < osd->o_osd)
			p = &(*p)->rb_left;
		else if (new->o_osd > osd->o_osd)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->o_node, parent, p);
	rb_insert_color(&new->o_node, &osdc->osds);
}

static struct ceph_osd *__lookup_osd(struct ceph_osd_client *osdc, int o)
{
	struct ceph_osd *osd;
	struct rb_node *n = osdc->osds.rb_node;

	while (n) {
		osd = rb_entry(n, struct ceph_osd, o_node);
		if (o < osd->o_osd)
			n = n->rb_left;
		else if (o > osd->o_osd)
			n = n->rb_right;
		else
			return osd;
	}
	return NULL;
}
static void __schedule_osd_timeout(struct ceph_osd_client *osdc)
{
	schedule_delayed_work(&osdc->timeout_work,
			osdc->client->options->osd_keepalive_timeout * HZ);
}

static void __cancel_osd_timeout(struct ceph_osd_client *osdc)
{
	cancel_delayed_work(&osdc->timeout_work);
}
/*
 * Register request, assign tid.  If this is the first request, set up
 * the timeout event.
 */
static void __register_request(struct ceph_osd_client *osdc,
			       struct ceph_osd_request *req)
{
	req->r_tid = ++osdc->last_tid;
	req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
	dout("__register_request %p tid %lld\n", req, req->r_tid);
	__insert_request(osdc, req);
	ceph_osdc_get_request(req);
	osdc->num_requests++;
	if (osdc->num_requests == 1) {
		dout(" first request, scheduling timeout\n");
		__schedule_osd_timeout(osdc);
	}
}

static void register_request(struct ceph_osd_client *osdc,
			     struct ceph_osd_request *req)
{
	mutex_lock(&osdc->request_mutex);
	__register_request(osdc, req);
	mutex_unlock(&osdc->request_mutex);
}
/*
 * called under osdc->request_mutex
 */
static void __unregister_request(struct ceph_osd_client *osdc,
				 struct ceph_osd_request *req)
{
	if (RB_EMPTY_NODE(&req->r_node)) {
		dout("__unregister_request %p tid %lld not registered\n",
			req, req->r_tid);
		return;
	}

	dout("__unregister_request %p tid %lld\n", req, req->r_tid);
	rb_erase(&req->r_node, &osdc->requests);
	osdc->num_requests--;

	if (req->r_osd) {
		/* make sure the original request isn't in flight. */
		ceph_msg_revoke(req->r_request);

		list_del_init(&req->r_osd_item);
		if (list_empty(&req->r_osd->o_requests) &&
		    list_empty(&req->r_osd->o_linger_requests)) {
			dout("moving osd to %p lru\n", req->r_osd);
			__move_osd_to_lru(osdc, req->r_osd);
		}
		if (list_empty(&req->r_linger_item))
			req->r_osd = NULL;
	}

	list_del_init(&req->r_req_lru_item);
	ceph_osdc_put_request(req);

	if (osdc->num_requests == 0) {
		dout(" no requests, canceling timeout\n");
		__cancel_osd_timeout(osdc);
	}
}
/*
 * Cancel a previously queued request message
 */
static void __cancel_request(struct ceph_osd_request *req)
{
	if (req->r_sent && req->r_osd) {
		ceph_msg_revoke(req->r_request);
		req->r_sent = 0;
	}
}
static void __register_linger_request(struct ceph_osd_client *osdc,
				      struct ceph_osd_request *req)
{
	dout("__register_linger_request %p\n", req);
	list_add_tail(&req->r_linger_item, &osdc->req_linger);
	if (req->r_osd)
		list_add_tail(&req->r_linger_osd,
			      &req->r_osd->o_linger_requests);
}

static void __unregister_linger_request(struct ceph_osd_client *osdc,
					struct ceph_osd_request *req)
{
	dout("__unregister_linger_request %p\n", req);
	list_del_init(&req->r_linger_item);
	if (req->r_osd) {
		list_del_init(&req->r_linger_osd);

		if (list_empty(&req->r_osd->o_requests) &&
		    list_empty(&req->r_osd->o_linger_requests)) {
			dout("moving osd to %p lru\n", req->r_osd);
			__move_osd_to_lru(osdc, req->r_osd);
		}
		if (list_empty(&req->r_osd_item))
			req->r_osd = NULL;
	}
}
void ceph_osdc_unregister_linger_request(struct ceph_osd_client *osdc,
					 struct ceph_osd_request *req)
{
	mutex_lock(&osdc->request_mutex);
	if (req->r_linger) {
		__unregister_linger_request(osdc, req);
		ceph_osdc_put_request(req);
	}
	mutex_unlock(&osdc->request_mutex);
}
EXPORT_SYMBOL(ceph_osdc_unregister_linger_request);
void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
				  struct ceph_osd_request *req)
{
	if (!req->r_linger) {
		dout("set_request_linger %p\n", req);
		req->r_linger = 1;
		/*
		 * caller is now responsible for calling
		 * unregister_linger_request
		 */
		ceph_osdc_get_request(req);
	}
}
EXPORT_SYMBOL(ceph_osdc_set_request_linger);
/*
 * Pick an osd (the first 'up' osd in the pg), allocate the osd struct
 * (as needed), and set the request r_osd appropriately.  If there is
 * no up osd, set r_osd to NULL.  Move the request to the appropriate list
 * (unsent, homeless) or leave on in-flight lru.
 *
 * Return 0 if unchanged, 1 if changed, or negative on error.
 *
 * Caller should hold map_sem for read and request_mutex.
 */
static int __map_request(struct ceph_osd_client *osdc,
			 struct ceph_osd_request *req, int force_resend)
{
	struct ceph_pg pgid;
	int acting[CEPH_PG_MAX_SIZE];
	int o = -1, num = 0;
	int err;

	dout("map_request %p tid %lld\n", req, req->r_tid);
	err = ceph_calc_object_layout(&pgid, req->r_oid,
				      &req->r_file_layout, osdc->osdmap);
	if (err) {
		list_move(&req->r_req_lru_item, &osdc->req_notarget);
		return err;
	}
	req->r_pgid = pgid;

	err = ceph_calc_pg_acting(osdc->osdmap, pgid, acting);
	if (err > 0) {
		o = acting[0];
		num = err;
	}

	if ((!force_resend &&
	     req->r_osd && req->r_osd->o_osd == o &&
	     req->r_sent >= req->r_osd->o_incarnation &&
	     req->r_num_pg_osds == num &&
	     memcmp(req->r_pg_osds, acting, sizeof(acting[0])*num) == 0) ||
	    (req->r_osd == NULL && o == -1))
		return 0;  /* no change */

	dout("map_request tid %llu pgid %lld.%x osd%d (was osd%d)\n",
	     req->r_tid, pgid.pool, pgid.seed, o,
	     req->r_osd ? req->r_osd->o_osd : -1);

	/* record full pg acting set */
	memcpy(req->r_pg_osds, acting, sizeof(acting[0]) * num);
	req->r_num_pg_osds = num;

	if (req->r_osd) {
		__cancel_request(req);
		list_del_init(&req->r_osd_item);
		req->r_osd = NULL;
	}

	req->r_osd = __lookup_osd(osdc, o);
	if (!req->r_osd && o >= 0) {
		err = -ENOMEM;
		req->r_osd = create_osd(osdc, o);
		if (!req->r_osd) {
			list_move(&req->r_req_lru_item, &osdc->req_notarget);
			goto out;
		}

		dout("map_request osd %p is osd%d\n", req->r_osd, o);
		__insert_osd(osdc, req->r_osd);

		ceph_con_open(&req->r_osd->o_con,
			      CEPH_ENTITY_TYPE_OSD, o,
			      &osdc->osdmap->osd_addr[o]);
	}

	if (req->r_osd) {
		__remove_osd_from_lru(req->r_osd);
		list_add(&req->r_osd_item, &req->r_osd->o_requests);
		list_move(&req->r_req_lru_item, &osdc->req_unsent);
	} else {
		list_move(&req->r_req_lru_item, &osdc->req_notarget);
	}
	err = 1;   /* osd or pg changed */

out:
	return err;
}
/*
 * caller should hold map_sem (for read) and request_mutex
 */
static void __send_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req)
{
	void *p;

	dout("send_request %p tid %llu to osd%d flags %d pg %lld.%x\n",
	     req, req->r_tid, req->r_osd->o_osd, req->r_flags,
	     (unsigned long long)req->r_pgid.pool, req->r_pgid.seed);

	/* fill in message content that changes each time we send it */
	put_unaligned_le32(osdc->osdmap->epoch, req->r_request_osdmap_epoch);
	put_unaligned_le32(req->r_flags, req->r_request_flags);
	put_unaligned_le64(req->r_pgid.pool, req->r_request_pool);
	p = req->r_request_pgid;
	ceph_encode_64(&p, req->r_pgid.pool);
	ceph_encode_32(&p, req->r_pgid.seed);
	put_unaligned_le64(1, req->r_request_attempts);  /* FIXME */
	memcpy(req->r_request_reassert_version, &req->r_reassert_version,
	       sizeof(req->r_reassert_version));

	req->r_stamp = jiffies;
	list_move_tail(&req->r_req_lru_item, &osdc->req_lru);

	ceph_msg_get(req->r_request); /* send consumes a ref */
	ceph_con_send(&req->r_osd->o_con, req->r_request);
	req->r_sent = req->r_osd->o_incarnation;
}
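/*
 * r_sent records the osd connection incarnation at send time.  If the
 * connection is later reset, o_incarnation is bumped and the
 * r_sent >= o_incarnation test in __map_request() fails, which forces
 * the request back onto req_unsent for a resend on the new connection.
 */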
/*
 * Send any requests in the queue (req_unsent).
 */
static void __send_queued(struct ceph_osd_client *osdc)
{
	struct ceph_osd_request *req, *tmp;

	dout("__send_queued\n");
	list_for_each_entry_safe(req, tmp, &osdc->req_unsent, r_req_lru_item)
		__send_request(osdc, req);
}
/*
 * Timeout callback, called every N seconds when 1 or more osd
 * requests have been active for more than N seconds.  When this
 * happens, we ping all OSDs with requests that have timed out to
 * ensure any communications channel reset is detected.  Reset the
 * request timeouts another N seconds in the future as we go.
 * Reschedule the timeout event another N seconds in future (unless
 * there are no open requests).
 */
static void handle_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
		container_of(work, struct ceph_osd_client, timeout_work.work);
	struct ceph_osd_request *req;
	struct ceph_osd *osd;
	unsigned long keepalive =
		osdc->client->options->osd_keepalive_timeout * HZ;
	struct list_head slow_osds;

	dout("timeout\n");
	down_read(&osdc->map_sem);

	ceph_monc_request_next_osdmap(&osdc->client->monc);

	mutex_lock(&osdc->request_mutex);

	/*
	 * ping osds that are a bit slow.  this ensures that if there
	 * is a break in the TCP connection we will notice, and reopen
	 * a connection with that osd (from the fault callback).
	 */
	INIT_LIST_HEAD(&slow_osds);
	list_for_each_entry(req, &osdc->req_lru, r_req_lru_item) {
		if (time_before(jiffies, req->r_stamp + keepalive))
			break;

		osd = req->r_osd;
		BUG_ON(!osd);
		dout(" tid %llu is slow, will send keepalive on osd%d\n",
		     req->r_tid, osd->o_osd);
		list_move_tail(&osd->o_keepalive_item, &slow_osds);
	}
	while (!list_empty(&slow_osds)) {
		osd = list_entry(slow_osds.next, struct ceph_osd,
				 o_keepalive_item);
		list_del_init(&osd->o_keepalive_item);
		ceph_con_keepalive(&osd->o_con);
	}

	__schedule_osd_timeout(osdc);
	__send_queued(osdc);
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
}
static void handle_osds_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
		container_of(work, struct ceph_osd_client,
			     osds_timeout_work.work);
	unsigned long delay =
		osdc->client->options->osd_idle_ttl * HZ >> 2;

	dout("osds timeout\n");
	down_read(&osdc->map_sem);
	remove_old_osds(osdc);
	up_read(&osdc->map_sem);

	schedule_delayed_work(&osdc->osds_timeout_work,
			      round_jiffies_relative(delay));
}
static void complete_request(struct ceph_osd_request *req)
{
	if (req->r_safe_callback)
		req->r_safe_callback(req, NULL);
	complete_all(&req->r_safe_completion);  /* fsync waiter */
}
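/*
 * Writes are acknowledged twice: r_completion fires on the first ack
 * (applied, but not necessarily durable), while r_safe_completion above
 * fires only once the OSD reports ONDISK; fsync-style waiters such as
 * ceph_osdc_sync() block on the latter.
 */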
static int __decode_pgid(void **p, void *end, struct ceph_pg *pgid)
{
	u8 v;

	ceph_decode_need(p, end, 1 + 8 + 4 + 4, bad);
	v = ceph_decode_8(p);
	if (v > 1) {
		pr_warning("do not understand pg encoding %d > 1\n", v);
		return -EINVAL;
	}

	pgid->pool = ceph_decode_64(p);
	pgid->seed = ceph_decode_32(p);
	*p += 4;

	return 0;

bad:
	pr_warning("incomplete pg encoding\n");

	return -EINVAL;
}
/*
 * handle osd op reply.  either call the callback if it is specified,
 * or do the completion to wake up the waiting thread.
 */
static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
			 struct ceph_connection *con)
{
	void *p, *end;
	struct ceph_osd_request *req;
	u64 tid;
	int object_len;
	int numops, payload_len, flags;
	s32 result;
	s32 retry_attempt;
	struct ceph_pg pg;
	int err;
	u32 reassert_epoch;
	u64 reassert_version;
	u32 osdmap_epoch;
	int i;

	tid = le64_to_cpu(msg->hdr.tid);
	dout("handle_reply %p tid %llu\n", msg, tid);

	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	ceph_decode_need(&p, end, 4, bad);
	object_len = ceph_decode_32(&p);
	ceph_decode_need(&p, end, object_len, bad);
	p += object_len;

	err = __decode_pgid(&p, end, &pg);
	if (err)
		goto bad;

	ceph_decode_need(&p, end, 8 + 4 + 4 + 8 + 4, bad);
	flags = ceph_decode_64(&p);
	result = ceph_decode_32(&p);
	reassert_epoch = ceph_decode_32(&p);
	reassert_version = ceph_decode_64(&p);
	osdmap_epoch = ceph_decode_32(&p);

	/* lookup */
	mutex_lock(&osdc->request_mutex);
	req = __lookup_request(osdc, tid);
	if (req == NULL) {
		dout("handle_reply tid %llu dne\n", tid);
		mutex_unlock(&osdc->request_mutex);
		return;
	}
	ceph_osdc_get_request(req);

	dout("handle_reply %p tid %llu req %p result %d\n", msg, tid,
	     req, result);

	ceph_decode_need(&p, end, 4, bad);
	numops = ceph_decode_32(&p);
	if (numops > CEPH_OSD_MAX_OP)
		goto bad_put;
	if (numops != req->r_num_ops)
		goto bad_put;
	payload_len = 0;
	ceph_decode_need(&p, end, numops * sizeof(struct ceph_osd_op), bad);
	for (i = 0; i < numops; i++) {
		struct ceph_osd_op *op = p;
		int len;

		len = le32_to_cpu(op->payload_len);
		req->r_reply_op_len[i] = len;
		dout(" op %d has %d bytes\n", i, len);
		payload_len += len;
		p += sizeof(*op);
	}
	if (payload_len != le32_to_cpu(msg->hdr.data_len)) {
		pr_warning("sum of op payload lens %d != data_len %d\n",
			   payload_len, le32_to_cpu(msg->hdr.data_len));
		goto bad_put;
	}

	ceph_decode_need(&p, end, 4 + numops * 4, bad);
	retry_attempt = ceph_decode_32(&p);
	for (i = 0; i < numops; i++)
		req->r_reply_op_result[i] = ceph_decode_32(&p);

	/*
	 * if this connection filled our message, drop our reference now, to
	 * avoid a (safe but slower) revoke later.
	 */
	if (req->r_con_filling_msg == con && req->r_reply == msg) {
		dout(" dropping con_filling_msg ref %p\n", con);
		req->r_con_filling_msg = NULL;
		con->ops->put(con);
	}

	if (!req->r_got_reply) {
		unsigned int bytes;

		req->r_result = result;
		bytes = le32_to_cpu(msg->hdr.data_len);
		dout("handle_reply result %d bytes %d\n", req->r_result,
		     bytes);
		if (req->r_result == 0)
			req->r_result = bytes;

		/* in case this is a write and we need to replay, */
		req->r_reassert_version.epoch = cpu_to_le32(reassert_epoch);
		req->r_reassert_version.version = cpu_to_le64(reassert_version);

		req->r_got_reply = 1;
	} else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) {
		dout("handle_reply tid %llu dup ack\n", tid);
		mutex_unlock(&osdc->request_mutex);
		goto done;
	}

	dout("handle_reply tid %llu flags %d\n", tid, flags);

	if (req->r_linger && (flags & CEPH_OSD_FLAG_ONDISK))
		__register_linger_request(osdc, req);

	/* either this is a read, or we got the safe response */
	if (result < 0 ||
	    (flags & CEPH_OSD_FLAG_ONDISK) ||
	    ((flags & CEPH_OSD_FLAG_WRITE) == 0))
		__unregister_request(osdc, req);

	mutex_unlock(&osdc->request_mutex);

	if (req->r_callback)
		req->r_callback(req, msg);
	else
		complete_all(&req->r_completion);

	if (flags & CEPH_OSD_FLAG_ONDISK)
		complete_request(req);

done:
	dout("req=%p req->r_linger=%d\n", req, req->r_linger);
	ceph_osdc_put_request(req);
	return;

bad_put:
	ceph_osdc_put_request(req);
bad:
	pr_err("corrupt osd_op_reply got %d %d\n",
	       (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len));
	ceph_msg_dump(msg);
}
static void reset_changed_osds(struct ceph_osd_client *osdc)
{
	struct rb_node *p, *n;

	for (p = rb_first(&osdc->osds); p; p = n) {
		struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node);

		n = rb_next(p);
		if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
		    memcmp(&osd->o_con.peer_addr,
			   ceph_osd_addr(osdc->osdmap,
					 osd->o_osd),
			   sizeof(struct ceph_entity_addr)) != 0)
			__reset_osd(osdc, osd);
	}
}
/*
 * Requeue requests whose mapping to an OSD has changed.  If requests map to
 * no osd, request a new map.
 *
 * Caller should hold map_sem for read.
 */
static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
{
	struct ceph_osd_request *req, *nreq;
	struct rb_node *p;
	int needmap = 0;
	int err;

	dout("kick_requests %s\n", force_resend ? " (force resend)" : "");
	mutex_lock(&osdc->request_mutex);
	for (p = rb_first(&osdc->requests); p; ) {
		req = rb_entry(p, struct ceph_osd_request, r_node);
		p = rb_next(p);

		/*
		 * For linger requests that have not yet been
		 * registered, move them to the linger list; they'll
		 * be sent to the osd in the loop below.  Unregister
		 * the request before re-registering it as a linger
		 * request to ensure the __map_request() below
		 * will decide it needs to be sent.
		 */
		if (req->r_linger && list_empty(&req->r_linger_item)) {
			dout("%p tid %llu restart on osd%d\n",
			     req, req->r_tid,
			     req->r_osd ? req->r_osd->o_osd : -1);
			__unregister_request(osdc, req);
			__register_linger_request(osdc, req);
			continue;
		}

		err = __map_request(osdc, req, force_resend);
		if (err < 0)
			continue;  /* error */
		if (req->r_osd == NULL) {
			dout("%p tid %llu maps to no osd\n", req, req->r_tid);
			needmap++;  /* request a newer map */
		} else if (err > 0) {
			if (!req->r_linger) {
				dout("%p tid %llu requeued on osd%d\n", req,
				     req->r_tid,
				     req->r_osd ? req->r_osd->o_osd : -1);
				req->r_flags |= CEPH_OSD_FLAG_RETRY;
			}
		}
	}

	list_for_each_entry_safe(req, nreq, &osdc->req_linger,
				 r_linger_item) {
		dout("linger req=%p req->r_osd=%p\n", req, req->r_osd);

		err = __map_request(osdc, req, force_resend);
		dout("__map_request returned %d\n", err);
		if (err == 0)
			continue;  /* no change and no osd was specified */
		if (err < 0)
			continue;  /* hrm! */
		if (req->r_osd == NULL) {
			dout("tid %llu maps to no valid osd\n", req->r_tid);
			needmap++;  /* request a newer map */
			continue;
		}

		dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid,
		     req->r_osd ? req->r_osd->o_osd : -1);
		__register_request(osdc, req);
		__unregister_linger_request(osdc, req);
	}
	mutex_unlock(&osdc->request_mutex);

	if (needmap) {
		dout("%d requests for down osds, need new map\n", needmap);
		ceph_monc_request_next_osdmap(&osdc->client->monc);
	}
	reset_changed_osds(osdc);
}
/*
 * Process updated osd map.
 *
 * The message contains any number of incremental and full maps, normally
 * indicating some sort of topology change in the cluster.  Kick requests
 * off to different OSDs as needed.
 */
void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
{
	void *p, *end, *next;
	u32 nr_maps, maplen;
	u32 epoch;
	struct ceph_osdmap *newmap = NULL, *oldmap;
	int err;
	struct ceph_fsid fsid;

	dout("handle_map have %u\n", osdc->osdmap ? osdc->osdmap->epoch : 0);
	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	/* verify fsid */
	ceph_decode_need(&p, end, sizeof(fsid), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	if (ceph_check_fsid(osdc->client, &fsid) < 0)
		return;

	down_write(&osdc->map_sem);

	/* incremental maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d inc maps\n", nr_maps);
	while (nr_maps > 0) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		next = p + maplen;
		if (osdc->osdmap && osdc->osdmap->epoch+1 == epoch) {
			dout("applying incremental map %u len %d\n",
			     epoch, maplen);
			newmap = osdmap_apply_incremental(&p, next,
							  osdc->osdmap,
							  &osdc->client->msgr);
			if (IS_ERR(newmap)) {
				err = PTR_ERR(newmap);
				goto bad;
			}
			BUG_ON(!newmap);
			if (newmap != osdc->osdmap) {
				ceph_osdmap_destroy(osdc->osdmap);
				osdc->osdmap = newmap;
			}
			kick_requests(osdc, 0);
		} else {
			dout("ignoring incremental map %u len %d\n",
			     epoch, maplen);
		}
		p = next;
		nr_maps--;
	}
	if (newmap)
		goto done;

	/* full maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d full maps\n", nr_maps);
	while (nr_maps) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		if (nr_maps > 1) {
			dout("skipping non-latest full map %u len %d\n",
			     epoch, maplen);
		} else if (osdc->osdmap && osdc->osdmap->epoch >= epoch) {
			dout("skipping full map %u len %d, "
			     "older than our %u\n", epoch, maplen,
			     osdc->osdmap->epoch);
		} else {
			int skipped_map = 0;

			dout("taking full map %u len %d\n", epoch, maplen);
			newmap = osdmap_decode(&p, p+maplen);
			if (IS_ERR(newmap)) {
				err = PTR_ERR(newmap);
				goto bad;
			}
			BUG_ON(!newmap);
			oldmap = osdc->osdmap;
			osdc->osdmap = newmap;
			if (oldmap) {
				if (oldmap->epoch + 1 < newmap->epoch)
					skipped_map = 1;
				ceph_osdmap_destroy(oldmap);
			}
			kick_requests(osdc, skipped_map);
		}
		p += maplen;
		nr_maps--;
	}

done:
	downgrade_write(&osdc->map_sem);
	ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch);

	/*
	 * subscribe to subsequent osdmap updates if full to ensure
	 * we find out when we are no longer full and stop returning
	 * ENOSPC.
	 */
	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
		ceph_monc_request_next_osdmap(&osdc->client->monc);

	mutex_lock(&osdc->request_mutex);
	__send_queued(osdc);
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
	wake_up_all(&osdc->client->auth_wq);
	return;

bad:
	pr_err("osdc handle_map corrupt msg\n");
	ceph_msg_dump(msg);
	up_write(&osdc->map_sem);
	return;
}
/*
 * watch/notify callback event infrastructure
 *
 * These callbacks are used both for watch and notify operations.
 */
static void __release_event(struct kref *kref)
{
	struct ceph_osd_event *event =
		container_of(kref, struct ceph_osd_event, kref);

	dout("__release_event %p\n", event);
	kfree(event);
}

static void get_event(struct ceph_osd_event *event)
{
	kref_get(&event->kref);
}

void ceph_osdc_put_event(struct ceph_osd_event *event)
{
	kref_put(&event->kref, __release_event);
}
EXPORT_SYMBOL(ceph_osdc_put_event);
static void __insert_event(struct ceph_osd_client *osdc,
			   struct ceph_osd_event *new)
{
	struct rb_node **p = &osdc->event_tree.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_osd_event *event = NULL;

	while (*p) {
		parent = *p;
		event = rb_entry(parent, struct ceph_osd_event, node);
		if (new->cookie < event->cookie)
			p = &(*p)->rb_left;
		else if (new->cookie > event->cookie)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, &osdc->event_tree);
}

static struct ceph_osd_event *__find_event(struct ceph_osd_client *osdc,
					   u64 cookie)
{
	struct rb_node **p = &osdc->event_tree.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_osd_event *event = NULL;

	while (*p) {
		parent = *p;
		event = rb_entry(parent, struct ceph_osd_event, node);
		if (cookie < event->cookie)
			p = &(*p)->rb_left;
		else if (cookie > event->cookie)
			p = &(*p)->rb_right;
		else
			return event;
	}
	return NULL;
}

static void __remove_event(struct ceph_osd_event *event)
{
	struct ceph_osd_client *osdc = event->osdc;

	if (!RB_EMPTY_NODE(&event->node)) {
		dout("__remove_event removed %p\n", event);
		rb_erase(&event->node, &osdc->event_tree);
		ceph_osdc_put_event(event);
	} else {
		dout("__remove_event didn't remove %p\n", event);
	}
}
int ceph_osdc_create_event(struct ceph_osd_client *osdc,
			   void (*event_cb)(u64, u64, u8, void *),
			   void *data, struct ceph_osd_event **pevent)
{
	struct ceph_osd_event *event;

	event = kmalloc(sizeof(*event), GFP_NOIO);
	if (!event)
		return -ENOMEM;

	dout("create_event %p\n", event);
	event->cb = event_cb;
	event->one_shot = 0;
	event->data = data;
	event->osdc = osdc;
	INIT_LIST_HEAD(&event->osd_node);
	RB_CLEAR_NODE(&event->node);
	kref_init(&event->kref);   /* one ref for us */
	kref_get(&event->kref);    /* one ref for the caller */

	spin_lock(&osdc->event_lock);
	event->cookie = ++osdc->event_count;
	__insert_event(osdc, event);
	spin_unlock(&osdc->event_lock);

	*pevent = event;
	return 0;
}
EXPORT_SYMBOL(ceph_osdc_create_event);
void ceph_osdc_cancel_event(struct ceph_osd_event *event)
{
	struct ceph_osd_client *osdc = event->osdc;

	dout("cancel_event %p\n", event);
	spin_lock(&osdc->event_lock);
	__remove_event(event);
	spin_unlock(&osdc->event_lock);
	ceph_osdc_put_event(event); /* caller's */
}
EXPORT_SYMBOL(ceph_osdc_cancel_event);
static void do_event_work(struct work_struct *work)
{
	struct ceph_osd_event_work *event_work =
		container_of(work, struct ceph_osd_event_work, work);
	struct ceph_osd_event *event = event_work->event;
	u64 ver = event_work->ver;
	u64 notify_id = event_work->notify_id;
	u8 opcode = event_work->opcode;

	dout("do_event_work completing %p\n", event);
	event->cb(ver, notify_id, opcode, event->data);
	dout("do_event_work completed %p\n", event);
	ceph_osdc_put_event(event);
	kfree(event_work);
}
/*
 * Process osd watch notifications
 */
static void handle_watch_notify(struct ceph_osd_client *osdc,
				struct ceph_msg *msg)
{
	void *p, *end;
	u8 proto_ver;
	u64 cookie, ver, notify_id;
	u8 opcode;
	struct ceph_osd_event *event;
	struct ceph_osd_event_work *event_work;

	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	ceph_decode_8_safe(&p, end, proto_ver, bad);
	ceph_decode_8_safe(&p, end, opcode, bad);
	ceph_decode_64_safe(&p, end, cookie, bad);
	ceph_decode_64_safe(&p, end, ver, bad);
	ceph_decode_64_safe(&p, end, notify_id, bad);

	spin_lock(&osdc->event_lock);
	event = __find_event(osdc, cookie);
	if (event) {
		BUG_ON(event->one_shot);
		get_event(event);
	}
	spin_unlock(&osdc->event_lock);
	dout("handle_watch_notify cookie %lld ver %lld event %p\n",
	     cookie, ver, event);
	if (event) {
		event_work = kmalloc(sizeof(*event_work), GFP_NOIO);
		if (!event_work) {
			dout("ERROR: could not allocate event_work\n");
			goto done_err;
		}
		INIT_WORK(&event_work->work, do_event_work);
		event_work->event = event;
		event_work->ver = ver;
		event_work->notify_id = notify_id;
		event_work->opcode = opcode;
		if (!queue_work(osdc->notify_wq, &event_work->work)) {
			dout("WARNING: failed to queue notify event work\n");
			goto done_err;
		}
	}

	return;

done_err:
	ceph_osdc_put_event(event);
	return;

bad:
	pr_err("osdc handle_watch_notify corrupt msg\n");
	return;
}
/*
 * Register request, send initial attempt.
 */
int ceph_osdc_start_request(struct ceph_osd_client *osdc,
			    struct ceph_osd_request *req,
			    bool nofail)
{
	int rc = 0;

	req->r_request->pages = req->r_pages;
	req->r_request->page_count = req->r_num_pages;
#ifdef CONFIG_BLOCK
	req->r_request->bio = req->r_bio;
#endif
	req->r_request->trail = &req->r_trail;

	register_request(osdc, req);

	down_read(&osdc->map_sem);
	mutex_lock(&osdc->request_mutex);
	/*
	 * a racing kick_requests() may have sent the message for us
	 * while we dropped request_mutex above, so only send now if
	 * the request still hasn't been touched yet.
	 */
	if (req->r_sent == 0) {
		rc = __map_request(osdc, req, 0);
		if (rc < 0) {
			if (nofail) {
				dout("osdc_start_request failed map, "
				     " will retry %lld\n", req->r_tid);
				rc = 0;
			}
			goto out_unlock;
		}
		if (req->r_osd == NULL) {
			dout("send_request %p no up osds in pg\n", req);
			ceph_monc_request_next_osdmap(&osdc->client->monc);
		} else {
			__send_request(osdc, req);
		}
		rc = 0;
	}

out_unlock:
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_start_request);
/*
 * wait for a request to complete
 */
int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req)
{
	int rc;

	rc = wait_for_completion_interruptible(&req->r_completion);
	if (rc < 0) {
		mutex_lock(&osdc->request_mutex);
		__cancel_request(req);
		__unregister_request(osdc, req);
		mutex_unlock(&osdc->request_mutex);
		complete_request(req);
		dout("wait_request tid %llu canceled/timed out\n", req->r_tid);
		return rc;
	}

	dout("wait_request tid %llu result %d\n", req->r_tid, req->r_result);
	return req->r_result;
}
EXPORT_SYMBOL(ceph_osdc_wait_request);
/*
 * sync - wait for all in-flight requests to flush.  avoid starvation.
 */
void ceph_osdc_sync(struct ceph_osd_client *osdc)
{
	struct ceph_osd_request *req;
	u64 last_tid, next_tid = 0;

	mutex_lock(&osdc->request_mutex);
	last_tid = osdc->last_tid;
	while (1) {
		req = __lookup_request_ge(osdc, next_tid);
		if (!req)
			break;
		if (req->r_tid > last_tid)
			break;

		next_tid = req->r_tid + 1;
		if ((req->r_flags & CEPH_OSD_FLAG_WRITE) == 0)
			continue;

		ceph_osdc_get_request(req);
		mutex_unlock(&osdc->request_mutex);
		dout("sync waiting on tid %llu (last is %llu)\n",
		     req->r_tid, last_tid);
		wait_for_completion(&req->r_safe_completion);
		mutex_lock(&osdc->request_mutex);
		ceph_osdc_put_request(req);
	}
	mutex_unlock(&osdc->request_mutex);
	dout("sync done (thru tid %llu)\n", last_tid);
}
EXPORT_SYMBOL(ceph_osdc_sync);
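/*
 * Starvation is avoided above by sampling last_tid once, up front:
 * writes submitted after ceph_osdc_sync() begins carry larger tids and
 * are never waited on, so a steady stream of new writes cannot keep
 * the sync from completing.
 */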
/*
 * init, shutdown
 */
int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
{
	int err;

	dout("init\n");
	osdc->client = client;
	osdc->osdmap = NULL;
	init_rwsem(&osdc->map_sem);
	init_completion(&osdc->map_waiters);
	osdc->last_requested_map = 0;
	mutex_init(&osdc->request_mutex);
	osdc->last_tid = 0;
	osdc->osds = RB_ROOT;
	INIT_LIST_HEAD(&osdc->osd_lru);
	osdc->requests = RB_ROOT;
	INIT_LIST_HEAD(&osdc->req_lru);
	INIT_LIST_HEAD(&osdc->req_unsent);
	INIT_LIST_HEAD(&osdc->req_notarget);
	INIT_LIST_HEAD(&osdc->req_linger);
	osdc->num_requests = 0;
	INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
	INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
	spin_lock_init(&osdc->event_lock);
	osdc->event_tree = RB_ROOT;
	osdc->event_count = 0;

	schedule_delayed_work(&osdc->osds_timeout_work,
	    round_jiffies_relative(osdc->client->options->osd_idle_ttl * HZ));

	err = -ENOMEM;
	osdc->req_mempool = mempool_create_kmalloc_pool(10,
					sizeof(struct ceph_osd_request));
	if (!osdc->req_mempool)
		goto out;

	err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
				OSD_OP_FRONT_LEN, 10, true,
				"osd_op");
	if (err < 0)
		goto out_mempool;
	err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
				OSD_OPREPLY_FRONT_LEN, 10, true,
				"osd_op_reply");
	if (err < 0)
		goto out_msgpool;

	osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
	if (IS_ERR(osdc->notify_wq)) {
		err = PTR_ERR(osdc->notify_wq);
		osdc->notify_wq = NULL;
		goto out_msgpool;
	}
	return 0;

out_msgpool:
	ceph_msgpool_destroy(&osdc->msgpool_op);
out_mempool:
	mempool_destroy(osdc->req_mempool);
out:
	return err;
}
void ceph_osdc_stop(struct ceph_osd_client *osdc)
{
	flush_workqueue(osdc->notify_wq);
	destroy_workqueue(osdc->notify_wq);
	cancel_delayed_work_sync(&osdc->timeout_work);
	cancel_delayed_work_sync(&osdc->osds_timeout_work);
	if (osdc->osdmap) {
		ceph_osdmap_destroy(osdc->osdmap);
		osdc->osdmap = NULL;
	}
	remove_all_osds(osdc);
	mempool_destroy(osdc->req_mempool);
	ceph_msgpool_destroy(&osdc->msgpool_op);
	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
}
/*
 * Read some contiguous pages.  If we cross a stripe boundary, shorten
 * *plen.  Return number of bytes read, or error.
 */
int ceph_osdc_readpages(struct ceph_osd_client *osdc,
			struct ceph_vino vino, struct ceph_file_layout *layout,
			u64 off, u64 *plen,
			u32 truncate_seq, u64 truncate_size,
			struct page **pages, int num_pages, int page_align)
{
	struct ceph_osd_request *req;
	int rc = 0;

	dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
	     vino.snap, off, *plen);
	req = ceph_osdc_new_request(osdc, layout, vino, off, plen,
				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
				    NULL, 0, truncate_seq, truncate_size, NULL,
				    false, page_align);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short read due to an object boundary */
	req->r_pages = pages;

	dout("readpages final extent is %llu~%llu (%d pages align %d)\n",
	     off, *plen, req->r_num_pages, page_align);

	rc = ceph_osdc_start_request(osdc, req, false);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	dout("readpages result %d\n", rc);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_readpages);
/*
 * do a synchronous write on N pages
 */
int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
			 struct ceph_file_layout *layout,
			 struct ceph_snap_context *snapc,
			 u64 off, u64 len,
			 u32 truncate_seq, u64 truncate_size,
			 struct timespec *mtime,
			 struct page **pages, int num_pages)
{
	struct ceph_osd_request *req;
	int rc = 0;
	int page_align = off & ~PAGE_MASK;

	BUG_ON(vino.snap != CEPH_NOSNAP);
	req = ceph_osdc_new_request(osdc, layout, vino, off, &len,
				    CEPH_OSD_OP_WRITE,
				    CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
				    snapc, 0,
				    truncate_seq, truncate_size, mtime,
				    true, page_align);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short write due to an object boundary */
	req->r_pages = pages;
	dout("writepages %llu~%llu (%d pages)\n", off, len,
	     req->r_num_pages);

	rc = ceph_osdc_start_request(osdc, req, true);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	if (rc == 0)
		rc = len;
	dout("writepages result %d\n", rc);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_writepages);
/*
 * handle incoming message
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc;
	int type = le16_to_cpu(msg->hdr.type);

	if (!osd)
		goto out;
	osdc = osd->o_osdc;

	switch (type) {
	case CEPH_MSG_OSD_MAP:
		ceph_osdc_handle_map(osdc, msg);
		break;
	case CEPH_MSG_OSD_OPREPLY:
		handle_reply(osdc, msg, con);
		break;
	case CEPH_MSG_WATCH_NOTIFY:
		handle_watch_notify(osdc, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}
out:
	ceph_msg_put(msg);
}
/*
 * lookup and return message for incoming reply.  set up reply message
 * pages.
 */
static struct ceph_msg *get_reply(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct ceph_msg *m;
	struct ceph_osd_request *req;
	int front = le32_to_cpu(hdr->front_len);
	int data_len = le32_to_cpu(hdr->data_len);
	u64 tid;

	tid = le64_to_cpu(hdr->tid);
	mutex_lock(&osdc->request_mutex);
	req = __lookup_request(osdc, tid);
	if (!req) {
		*skip = 1;
		m = NULL;
		dout("get_reply unknown tid %llu from osd%d\n", tid,
		     osd->o_osd);
		goto out;
	}

	if (req->r_con_filling_msg) {
		dout("%s revoking msg %p from old con %p\n", __func__,
		     req->r_reply, req->r_con_filling_msg);
		ceph_msg_revoke_incoming(req->r_reply);
		req->r_con_filling_msg->ops->put(req->r_con_filling_msg);
		req->r_con_filling_msg = NULL;
	}

	if (front > req->r_reply->front.iov_len) {
		pr_warning("get_reply front %d > preallocated %d\n",
			   front, (int)req->r_reply->front.iov_len);
		m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front, GFP_NOFS, false);
		if (!m)
			goto out;
		ceph_msg_put(req->r_reply);
		req->r_reply = m;
	}
	m = ceph_msg_get(req->r_reply);

	if (data_len > 0) {
		int want = calc_pages_for(req->r_page_alignment, data_len);

		if (req->r_pages && unlikely(req->r_num_pages < want)) {
			pr_warning("tid %lld reply has %d bytes %d pages, we"
				   " had only %d pages ready\n", tid, data_len,
				   want, req->r_num_pages);
			*skip = 1;
			ceph_msg_put(m);
			m = NULL;
			goto out;
		}
		m->pages = req->r_pages;
		m->page_count = req->r_num_pages;
		m->page_alignment = req->r_page_alignment;
#ifdef CONFIG_BLOCK
		m->bio = req->r_bio;
#endif
	}
	*skip = 0;
	req->r_con_filling_msg = con->ops->get(con);
	dout("get_reply tid %lld %p\n", tid, m);

out:
	mutex_unlock(&osdc->request_mutex);
	return m;
}
static struct ceph_msg *alloc_msg(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	int type = le16_to_cpu(hdr->type);
	int front = le32_to_cpu(hdr->front_len);

	*skip = 0;
	switch (type) {
	case CEPH_MSG_OSD_MAP:
	case CEPH_MSG_WATCH_NOTIFY:
		return ceph_msg_new(type, front, GFP_NOFS, false);
	case CEPH_MSG_OSD_OPREPLY:
		return get_reply(con, hdr, skip);
	default:
		pr_info("alloc_msg unexpected msg type %d from osd%d\n", type,
			osd->o_osd);
		*skip = 1;
		return NULL;
	}
}
/*
 * Wrappers to refcount containing ceph_osd struct
 */
static struct ceph_connection *get_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	if (get_osd(osd))
		return con;
	return NULL;
}

static void put_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	put_osd(osd);
}
/*
 * Note: returned pointer is the address of a structure that's
 * managed separately.  Caller must *not* attempt to free it.
 */
static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
						  int *proto, int force_new)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;
	struct ceph_auth_handshake *auth = &o->o_auth;

	if (force_new && auth->authorizer) {
		if (ac->ops && ac->ops->destroy_authorizer)
			ac->ops->destroy_authorizer(ac, auth->authorizer);
		auth->authorizer = NULL;
	}
	if (!auth->authorizer && ac->ops && ac->ops->create_authorizer) {
		int ret = ac->ops->create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
						     auth);
		if (ret)
			return ERR_PTR(ret);
	}
	*proto = ac->protocol;

	return auth;
}
static int verify_authorizer_reply(struct ceph_connection *con, int len)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	/*
	 * XXX If ac->ops or ac->ops->verify_authorizer_reply is null,
	 * XXX which do we do:  succeed or fail?
	 */
	return ac->ops->verify_authorizer_reply(ac, o->o_auth.authorizer, len);
}
static int invalidate_authorizer(struct ceph_connection *con)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	if (ac->ops && ac->ops->invalidate_authorizer)
		ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);

	return ceph_monc_validate_auth(&osdc->client->monc);
}
static const struct ceph_connection_operations osd_con_ops = {
	.get = get_osd_con,
	.put = put_osd_con,
	.dispatch = dispatch,
	.get_authorizer = get_authorizer,
	.verify_authorizer_reply = verify_authorizer_reply,
	.invalidate_authorizer = invalidate_authorizer,
	.alloc_msg = alloc_msg,
	.fault = osd_reset,
};