#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/slab.h>
#include <asm/div64.h>

#include <linux/ceph/libceph.h>
#include <linux/ceph/osdmap.h>
#include <linux/ceph/decode.h>
#include <linux/crush/hash.h>
#include <linux/crush/mapper.h>
char *ceph_osdmap_state_str(char *str, int len, int state)
{
        if (!len)
                return str;

        if ((state & CEPH_OSD_EXISTS) && (state & CEPH_OSD_UP))
                snprintf(str, len, "exists, up");
        else if (state & CEPH_OSD_EXISTS)
                snprintf(str, len, "exists");
        else if (state & CEPH_OSD_UP)
                snprintf(str, len, "up");
        else
                snprintf(str, len, "doesn't exist");

        return str;
}
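/*
 * Usage sketch (illustrative, not part of this file): a small
 * caller-supplied buffer is plenty for the longest state string:
 *
 *	char buf[32];
 *
 *	ceph_osdmap_state_str(buf, sizeof(buf),
 *			      CEPH_OSD_EXISTS | CEPH_OSD_UP);
 *	=> buf contains "exists, up"
 */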
/* maps */

static int calc_bits_of(unsigned int t)
{
        int b = 0;

        while (t) {
                t = t >> 1;
                b++;
        }
        return b;
}

/*
 * the foo_mask is the smallest value 2^n-1 that is >= foo.
 */
static void calc_pg_masks(struct ceph_pg_pool_info *pi)
{
        pi->pg_num_mask = (1 << calc_bits_of(pi->pg_num-1)) - 1;
        pi->pgp_num_mask = (1 << calc_bits_of(pi->pgp_num-1)) - 1;
}
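/*
 * Worked example: for pg_num == 12, calc_bits_of(11) == 4, so
 * pg_num_mask == (1 << 4) - 1 == 15, the smallest 2^n-1 >= 12.
 * ceph_stable_mod() later uses this mask to fold a placement seed
 * onto pg_num buckets.
 */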
/*
 * decode crush map
 */
static int crush_decode_uniform_bucket(void **p, void *end,
                                       struct crush_bucket_uniform *b)
{
        dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
        ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
        b->item_weight = ceph_decode_32(p);
        return 0;
bad:
        return -EINVAL;
}
static int crush_decode_list_bucket(void **p, void *end,
                                    struct crush_bucket_list *b)
{
        int j;

        dout("crush_decode_list_bucket %p to %p\n", *p, end);
        b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
        if (b->item_weights == NULL)
                return -ENOMEM;
        b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
        if (b->sum_weights == NULL)
                return -ENOMEM;
        ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
        for (j = 0; j < b->h.size; j++) {
                b->item_weights[j] = ceph_decode_32(p);
                b->sum_weights[j] = ceph_decode_32(p);
        }
        return 0;
bad:
        return -EINVAL;
}
static int crush_decode_tree_bucket(void **p, void *end,
                                    struct crush_bucket_tree *b)
{
        int j;

        dout("crush_decode_tree_bucket %p to %p\n", *p, end);
        ceph_decode_32_safe(p, end, b->num_nodes, bad);
        b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
        if (b->node_weights == NULL)
                return -ENOMEM;
        ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
        for (j = 0; j < b->num_nodes; j++)
                b->node_weights[j] = ceph_decode_32(p);
        return 0;
bad:
        return -EINVAL;
}
static int crush_decode_straw_bucket(void **p, void *end,
                                     struct crush_bucket_straw *b)
{
        int j;

        dout("crush_decode_straw_bucket %p to %p\n", *p, end);
        b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
        if (b->item_weights == NULL)
                return -ENOMEM;
        b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
        if (b->straws == NULL)
                return -ENOMEM;
        ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
        for (j = 0; j < b->h.size; j++) {
                b->item_weights[j] = ceph_decode_32(p);
                b->straws[j] = ceph_decode_32(p);
        }
        return 0;
bad:
        return -EINVAL;
}
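/*
 * Summary of the per-algorithm payloads decoded above: uniform
 * buckets carry a single item weight, list and straw buckets carry
 * two u32 arrays sized to the bucket, and tree buckets carry a
 * node-weight array preceded by an explicit node count.
 * crush_decode() below dispatches to these helpers after decoding
 * the common crush_bucket header.
 */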
static int skip_name_map(void **p, void *end)
{
        int len;

        ceph_decode_32_safe(p, end, len, bad);
        while (len--) {
                int strlen;

                *p += sizeof(u32); /* key */
                ceph_decode_32_safe(p, end, strlen, bad);
                *p += strlen;
        }
        return 0;
bad:
        return -EINVAL;
}
static struct crush_map *crush_decode(void *pbyval, void *end)
{
        struct crush_map *c;
        int err = -EINVAL;
        int i, j;
        void **p = &pbyval;
        void *start = pbyval;
        u32 magic;
        u32 num_name_maps;

        dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));

        c = kzalloc(sizeof(*c), GFP_NOFS);
        if (c == NULL)
                return ERR_PTR(-ENOMEM);

        /* set tunables to default values */
        c->choose_local_tries = 2;
        c->choose_local_fallback_tries = 5;
        c->choose_total_tries = 19;
        c->chooseleaf_descend_once = 0;

        ceph_decode_need(p, end, 4*sizeof(u32), bad);
        magic = ceph_decode_32(p);
        if (magic != CRUSH_MAGIC) {
                pr_err("crush_decode magic %x != current %x\n",
                       (unsigned int)magic, (unsigned int)CRUSH_MAGIC);
                goto bad;
        }
        c->max_buckets = ceph_decode_32(p);
        c->max_rules = ceph_decode_32(p);
        c->max_devices = ceph_decode_32(p);

        c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
        if (c->buckets == NULL)
                goto badmem;
        c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS);
        if (c->rules == NULL)
                goto badmem;

        /* buckets */
        for (i = 0; i < c->max_buckets; i++) {
                int size = 0;
                u32 alg;
                struct crush_bucket *b;

                ceph_decode_32_safe(p, end, alg, bad);
                if (alg == 0) {
                        c->buckets[i] = NULL;
                        continue;
                }
                dout("crush_decode bucket %d off %x %p to %p\n",
                     i, (int)(*p-start), *p, end);

                switch (alg) {
                case CRUSH_BUCKET_UNIFORM:
                        size = sizeof(struct crush_bucket_uniform);
                        break;
                case CRUSH_BUCKET_LIST:
                        size = sizeof(struct crush_bucket_list);
                        break;
                case CRUSH_BUCKET_TREE:
                        size = sizeof(struct crush_bucket_tree);
                        break;
                case CRUSH_BUCKET_STRAW:
                        size = sizeof(struct crush_bucket_straw);
                        break;
                default:
                        err = -EINVAL;
                        goto bad;
                }
                BUG_ON(size == 0);
                b = c->buckets[i] = kzalloc(size, GFP_NOFS);
                if (b == NULL)
                        goto badmem;

                ceph_decode_need(p, end, 4*sizeof(u32), bad);
                b->id = ceph_decode_32(p);
                b->type = ceph_decode_16(p);
                b->alg = ceph_decode_8(p);
                b->hash = ceph_decode_8(p);
                b->weight = ceph_decode_32(p);
                b->size = ceph_decode_32(p);

                dout("crush_decode bucket size %d off %x %p to %p\n",
                     b->size, (int)(*p-start), *p, end);

                b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS);
                if (b->items == NULL)
                        goto badmem;
                b->perm = kcalloc(b->size, sizeof(u32), GFP_NOFS);
                if (b->perm == NULL)
                        goto badmem;
                b->perm_n = 0;

                ceph_decode_need(p, end, b->size*sizeof(u32), bad);
                for (j = 0; j < b->size; j++)
                        b->items[j] = ceph_decode_32(p);

                switch (b->alg) {
                case CRUSH_BUCKET_UNIFORM:
                        err = crush_decode_uniform_bucket(p, end,
                                  (struct crush_bucket_uniform *)b);
                        if (err < 0)
                                goto bad;
                        break;
                case CRUSH_BUCKET_LIST:
                        err = crush_decode_list_bucket(p, end,
                                  (struct crush_bucket_list *)b);
                        if (err < 0)
                                goto bad;
                        break;
                case CRUSH_BUCKET_TREE:
                        err = crush_decode_tree_bucket(p, end,
                                  (struct crush_bucket_tree *)b);
                        if (err < 0)
                                goto bad;
                        break;
                case CRUSH_BUCKET_STRAW:
                        err = crush_decode_straw_bucket(p, end,
                                  (struct crush_bucket_straw *)b);
                        if (err < 0)
                                goto bad;
                        break;
                }
        }

        /* rules */
        dout("rule vec is %p\n", c->rules);
        for (i = 0; i < c->max_rules; i++) {
                u32 yes;
                struct crush_rule *r;

                ceph_decode_32_safe(p, end, yes, bad);
                if (!yes) {
                        dout("crush_decode NO rule %d off %x %p to %p\n",
                             i, (int)(*p-start), *p, end);
                        c->rules[i] = NULL;
                        continue;
                }

                dout("crush_decode rule %d off %x %p to %p\n",
                     i, (int)(*p-start), *p, end);

                /* len */
                ceph_decode_32_safe(p, end, yes, bad);
#if BITS_PER_LONG == 32
                err = -EINVAL;
                if (yes > (ULONG_MAX - sizeof(*r))
                          / sizeof(struct crush_rule_step))
                        goto bad;
#endif
                r = c->rules[i] = kmalloc(sizeof(*r) +
                                          yes*sizeof(struct crush_rule_step),
                                          GFP_NOFS);
                if (r == NULL)
                        goto badmem;
                dout(" rule %d is at %p\n", i, r);
                r->len = yes;
                ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
                ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
                for (j = 0; j < r->len; j++) {
                        r->steps[j].op = ceph_decode_32(p);
                        r->steps[j].arg1 = ceph_decode_32(p);
                        r->steps[j].arg2 = ceph_decode_32(p);
                }
        }

        /* ignore trailing name maps. */
        for (num_name_maps = 0; num_name_maps < 3; num_name_maps++) {
                err = skip_name_map(p, end);
                if (err < 0)
                        goto done;
        }

        /* tunables */
        ceph_decode_need(p, end, 3*sizeof(u32), done);
        c->choose_local_tries = ceph_decode_32(p);
        c->choose_local_fallback_tries = ceph_decode_32(p);
        c->choose_total_tries = ceph_decode_32(p);
        dout("crush decode tunable choose_local_tries = %d\n",
             c->choose_local_tries);
        dout("crush decode tunable choose_local_fallback_tries = %d\n",
             c->choose_local_fallback_tries);
        dout("crush decode tunable choose_total_tries = %d\n",
             c->choose_total_tries);

        ceph_decode_need(p, end, sizeof(u32), done);
        c->chooseleaf_descend_once = ceph_decode_32(p);
        dout("crush decode tunable chooseleaf_descend_once = %d\n",
             c->chooseleaf_descend_once);

done:
        dout("crush_decode success\n");
        return c;

badmem:
        err = -ENOMEM;
bad:
        dout("crush_decode fail %d\n", err);
        crush_destroy(c);
        return ERR_PTR(err);
}
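/*
 * Note the decode order above: buckets, rules, then optional trailing
 * sections.  The tunables were appended to the encoding later, so the
 * defaults assigned at the top of crush_decode() survive (via the
 * "done" label) when an older map ends before them.
 */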
/*
 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
 * to a set of osds)
 */
static int pgid_cmp(struct ceph_pg l, struct ceph_pg r)
{
        if (l.pool < r.pool)
                return -1;
        if (l.pool > r.pool)
                return 1;
        if (l.seed < r.seed)
                return -1;
        if (l.seed > r.seed)
                return 1;
        return 0;
}
static int __insert_pg_mapping(struct ceph_pg_mapping *new,
                               struct rb_root *root)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct ceph_pg_mapping *pg = NULL;
        int c;

        dout("__insert_pg_mapping %llx %p\n", *(u64 *)&new->pgid, new);
        while (*p) {
                parent = *p;
                pg = rb_entry(parent, struct ceph_pg_mapping, node);
                c = pgid_cmp(new->pgid, pg->pgid);
                if (c < 0)
                        p = &(*p)->rb_left;
                else if (c > 0)
                        p = &(*p)->rb_right;
                else
                        return -EEXIST;
        }

        rb_link_node(&new->node, parent, p);
        rb_insert_color(&new->node, root);
        return 0;
}
static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root,
                                                   struct ceph_pg pgid)
{
        struct rb_node *n = root->rb_node;
        struct ceph_pg_mapping *pg;
        int c;

        while (n) {
                pg = rb_entry(n, struct ceph_pg_mapping, node);
                c = pgid_cmp(pgid, pg->pgid);
                if (c < 0) {
                        n = n->rb_left;
                } else if (c > 0) {
                        n = n->rb_right;
                } else {
                        dout("__lookup_pg_mapping %lld.%x got %p\n",
                             pgid.pool, pgid.seed, pg);
                        return pg;
                }
        }
        return NULL;
}
static int __remove_pg_mapping(struct rb_root *root, struct ceph_pg pgid)
{
        struct ceph_pg_mapping *pg = __lookup_pg_mapping(root, pgid);

        if (pg) {
                dout("__remove_pg_mapping %lld.%x %p\n", pgid.pool, pgid.seed,
                     pg);
                rb_erase(&pg->node, root);
                kfree(pg);
                return 0;
        }
        dout("__remove_pg_mapping %lld.%x dne\n", pgid.pool, pgid.seed);
        return -ENOENT;
}
/*
 * rbtree of pg pool info
 */
static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct ceph_pg_pool_info *pi = NULL;

        while (*p) {
                parent = *p;
                pi = rb_entry(parent, struct ceph_pg_pool_info, node);
                if (new->id < pi->id)
                        p = &(*p)->rb_left;
                else if (new->id > pi->id)
                        p = &(*p)->rb_right;
                else
                        return -EEXIST;
        }

        rb_link_node(&new->node, parent, p);
        rb_insert_color(&new->node, root);
        return 0;
}
static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, u64 id)
{
        struct ceph_pg_pool_info *pi;
        struct rb_node *n = root->rb_node;

        while (n) {
                pi = rb_entry(n, struct ceph_pg_pool_info, node);
                if (id < pi->id)
                        n = n->rb_left;
                else if (id > pi->id)
                        n = n->rb_right;
                else
                        return pi;
        }
        return NULL;
}
struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map, u64 id)
{
        return __lookup_pg_pool(&map->pg_pools, id);
}
const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id)
{
        struct ceph_pg_pool_info *pi;

        if (id == CEPH_NOPOOL)
                return NULL;

        if (WARN_ON_ONCE(id > (u64) INT_MAX))
                return NULL;

        pi = __lookup_pg_pool(&map->pg_pools, (int) id);

        return pi ? pi->name : NULL;
}
EXPORT_SYMBOL(ceph_pg_pool_name_by_id);
int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
{
        struct rb_node *rbp;

        for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) {
                struct ceph_pg_pool_info *pi =
                        rb_entry(rbp, struct ceph_pg_pool_info, node);
                if (pi->name && strcmp(pi->name, name) == 0)
                        return pi->id;
        }
        return -ENOENT;
}
EXPORT_SYMBOL(ceph_pg_poolid_by_name);
static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
{
        rb_erase(&pi->node, root);
        kfree(pi->name);
        kfree(pi);
}
static int __decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
{
        u8 ev, cv;
        unsigned len, num;
        void *pool_end;

        ceph_decode_need(p, end, 2 + 4, bad);
        ev = ceph_decode_8(p);  /* encoding version */
        cv = ceph_decode_8(p);  /* compat version */
        if (ev < 5) {
                pr_warning("got v %d < 5 cv %d of ceph_pg_pool\n", ev, cv);
                return -EINVAL;
        }
        if (cv > 9) {
                pr_warning("got v %d cv %d > 9 of ceph_pg_pool\n", ev, cv);
                return -EINVAL;
        }
        len = ceph_decode_32(p);
        ceph_decode_need(p, end, len, bad);
        pool_end = *p + len;

        pi->type = ceph_decode_8(p);
        pi->size = ceph_decode_8(p);
        pi->crush_ruleset = ceph_decode_8(p);
        pi->object_hash = ceph_decode_8(p);

        pi->pg_num = ceph_decode_32(p);
        pi->pgp_num = ceph_decode_32(p);

        *p += 4 + 4;  /* skip lpg* */
        *p += 4;      /* skip last_change */
        *p += 8 + 4;  /* skip snap_seq, snap_epoch */

        /* skip snaps */
        num = ceph_decode_32(p);
        while (num--) {
                *p += 8;      /* snapid key */
                *p += 1 + 1;  /* versions */
                len = ceph_decode_32(p);
                *p += len;
        }

        /* skip removed_snaps */
        num = ceph_decode_32(p);
        *p += num * (8 + 8);

        *p += 8;  /* skip auid */
        pi->flags = ceph_decode_64(p);
        *p += 4;  /* skip crash_replay_interval */

        if (ev >= 7)
                *p += 1;  /* skip min_size */

        if (ev >= 8)
                *p += 8 + 8;  /* skip quota_max_* */

        if (ev >= 9) {
                /* skip tiers */
                num = ceph_decode_32(p);
                *p += num * 8;

                *p += 8;  /* skip tier_of */
                *p += 1;  /* skip cache_mode */

                pi->read_tier = ceph_decode_64(p);
                pi->write_tier = ceph_decode_64(p);
        } else {
                pi->read_tier = -1;
                pi->write_tier = -1;
        }

        /* ignore the rest */

        *p = pool_end;
        calc_pg_masks(pi);
        return 0;

bad:
        return -EINVAL;
}
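/*
 * __decode_pool() tolerates newer pool encodings by reading only the
 * fields it understands (gated on ev >= 7/8/9) and then jumping to
 * pool_end, which is why the length prefix is captured up front.
 */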
static int __decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
{
        struct ceph_pg_pool_info *pi;
        u32 num, len;
        u64 pool;

        ceph_decode_32_safe(p, end, num, bad);
        dout(" %d pool names\n", num);
        while (num--) {
                ceph_decode_64_safe(p, end, pool, bad);
                ceph_decode_32_safe(p, end, len, bad);
                dout(" pool %llu len %d\n", pool, len);
                ceph_decode_need(p, end, len, bad);
                pi = __lookup_pg_pool(&map->pg_pools, pool);
                if (pi) {
                        char *name = kstrndup(*p, len, GFP_NOFS);

                        if (!name)
                                return -ENOMEM;
                        kfree(pi->name);
                        pi->name = name;
                        dout(" name is %s\n", pi->name);
                }
                *p += len;
        }
        return 0;

bad:
        return -EINVAL;
}
/*
 * osd map
 */
void ceph_osdmap_destroy(struct ceph_osdmap *map)
{
        dout("osdmap_destroy %p\n", map);
        if (map->crush)
                crush_destroy(map->crush);
        while (!RB_EMPTY_ROOT(&map->pg_temp)) {
                struct ceph_pg_mapping *pg =
                        rb_entry(rb_first(&map->pg_temp),
                                 struct ceph_pg_mapping, node);
                rb_erase(&pg->node, &map->pg_temp);
                kfree(pg);
        }
        while (!RB_EMPTY_ROOT(&map->pg_pools)) {
                struct ceph_pg_pool_info *pi =
                        rb_entry(rb_first(&map->pg_pools),
                                 struct ceph_pg_pool_info, node);
                __remove_pg_pool(&map->pg_pools, pi);
        }
        kfree(map->osd_state);
        kfree(map->osd_weight);
        kfree(map->osd_addr);
        kfree(map);
}
/*
 * adjust max osd value.  reallocate arrays.
 */
static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
{
        u8 *state;
        struct ceph_entity_addr *addr;
        u32 *weight;

        state = kcalloc(max, sizeof(*state), GFP_NOFS);
        addr = kcalloc(max, sizeof(*addr), GFP_NOFS);
        weight = kcalloc(max, sizeof(*weight), GFP_NOFS);
        if (state == NULL || addr == NULL || weight == NULL) {
                kfree(state);
                kfree(addr);
                kfree(weight);
                return -ENOMEM;
        }

        /* copy old? */
        if (map->osd_state) {
                memcpy(state, map->osd_state, map->max_osd*sizeof(*state));
                memcpy(addr, map->osd_addr, map->max_osd*sizeof(*addr));
                memcpy(weight, map->osd_weight, map->max_osd*sizeof(*weight));
                kfree(map->osd_state);
                kfree(map->osd_addr);
                kfree(map->osd_weight);
        }

        map->osd_state = state;
        map->osd_weight = weight;
        map->osd_addr = addr;
        map->max_osd = max;
        return 0;
}
/*
 * decode a full map.
 */
static int osdmap_decode(void **p, void *end, struct ceph_osdmap *map)
{
        u16 version;
        u32 epoch = 0;
        void *start = *p;
        u32 max;
        u32 len, i;
        int err;
        struct ceph_pg_pool_info *pi;

        dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));

        ceph_decode_16_safe(p, end, version, e_inval);
        if (version > 6) {
                pr_warning("got unknown v %d > 6 of osdmap\n", version);
                goto e_inval;
        }
        if (version < 6) {
                pr_warning("got old v %d < 6 of osdmap\n", version);
                goto e_inval;
        }

        ceph_decode_need(p, end, 2*sizeof(u64)+6*sizeof(u32), e_inval);
        ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
        epoch = map->epoch = ceph_decode_32(p);
        ceph_decode_copy(p, &map->created, sizeof(map->created));
        ceph_decode_copy(p, &map->modified, sizeof(map->modified));

        /* pools */
        ceph_decode_32_safe(p, end, max, e_inval);
        while (max--) {
                ceph_decode_need(p, end, 8 + 2, e_inval);
                pi = kzalloc(sizeof(*pi), GFP_NOFS);
                if (!pi) {
                        err = -ENOMEM;
                        goto bad;
                }
                pi->id = ceph_decode_64(p);
                err = __decode_pool(p, end, pi);
                if (err < 0) {
                        kfree(pi);
                        goto bad;
                }
                __insert_pg_pool(&map->pg_pools, pi);
        }

        err = __decode_pool_names(p, end, map);
        if (err)
                goto bad;

        ceph_decode_32_safe(p, end, map->pool_max, e_inval);

        ceph_decode_32_safe(p, end, map->flags, e_inval);

        max = ceph_decode_32(p);

        /* (re)alloc osd arrays */
        err = osdmap_set_max_osd(map, max);
        if (err < 0)
                goto bad;

        /* osd state, weight, addr arrays */
        ceph_decode_need(p, end, 3*sizeof(u32) +
                         map->max_osd*(1 + sizeof(*map->osd_weight) +
                                       sizeof(*map->osd_addr)), e_inval);

        *p += 4; /* skip length field (should match max) */
        ceph_decode_copy(p, map->osd_state, map->max_osd);

        *p += 4; /* skip length field (should match max) */
        for (i = 0; i < map->max_osd; i++)
                map->osd_weight[i] = ceph_decode_32(p);

        *p += 4; /* skip length field (should match max) */
        ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr));
        for (i = 0; i < map->max_osd; i++)
                ceph_decode_addr(&map->osd_addr[i]);

        /* pg_temp */
        ceph_decode_32_safe(p, end, len, e_inval);
        for (i = 0; i < len; i++) {
                int n, j;
                struct ceph_pg pgid;
                struct ceph_pg_mapping *pg;

                err = ceph_decode_pgid(p, end, &pgid);
                if (err)
                        goto bad;
                ceph_decode_need(p, end, sizeof(u32), e_inval);
                n = ceph_decode_32(p);
                if (n > (UINT_MAX - sizeof(*pg)) / sizeof(u32))
                        goto e_inval;
                ceph_decode_need(p, end, n * sizeof(u32), e_inval);
                pg = kmalloc(sizeof(*pg) + n*sizeof(u32), GFP_NOFS);
                if (!pg) {
                        err = -ENOMEM;
                        goto bad;
                }
                pg->pgid = pgid;
                pg->len = n;
                for (j = 0; j < n; j++)
                        pg->osds[j] = ceph_decode_32(p);

                err = __insert_pg_mapping(pg, &map->pg_temp);
                if (err)
                        goto bad;
                dout(" added pg_temp %lld.%x len %d\n", pgid.pool, pgid.seed,
                     len);
        }

        /* crush */
        ceph_decode_32_safe(p, end, len, e_inval);
        dout("osdmap_decode crush len %d from off 0x%x\n", len,
             (int)(*p - start));
        ceph_decode_need(p, end, len, e_inval);
        map->crush = crush_decode(*p, end);
        *p += len;
        if (IS_ERR(map->crush)) {
                err = PTR_ERR(map->crush);
                map->crush = NULL;
                goto bad;
        }

        /* ignore the rest */
        *p = end;

        dout("full osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
        return 0;

e_inval:
        err = -EINVAL;
bad:
        pr_err("corrupt full osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
               err, epoch, (int)(*p - start), *p, start, end);
        print_hex_dump(KERN_DEBUG, "osdmap: ",
                       DUMP_PREFIX_OFFSET, 16, 1,
                       start, end - start, true);
        return err;
}
/*
 * Allocate and decode a full map.
 */
struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end)
{
        struct ceph_osdmap *map;
        int ret;

        map = kzalloc(sizeof(*map), GFP_NOFS);
        if (!map)
                return ERR_PTR(-ENOMEM);

        map->pg_temp = RB_ROOT;
        mutex_init(&map->crush_scratch_mutex);

        ret = osdmap_decode(p, end, map);
        if (ret) {
                ceph_osdmap_destroy(map);
                return ERR_PTR(ret);
        }

        return map;
}
/*
 * decode and apply an incremental map update.
 */
struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
                                             struct ceph_osdmap *map,
                                             struct ceph_messenger *msgr)
{
        struct crush_map *newcrush = NULL;
        struct ceph_fsid fsid;
        u32 epoch = 0;
        struct ceph_timespec modified;
        s32 len;
        u64 pool;
        __s64 new_pool_max;
        __s32 new_flags, max;
        void *start = *p;
        int err = -EINVAL;
        u16 version;

        dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));

        ceph_decode_16_safe(p, end, version, bad);
        if (version != 6) {
                pr_warning("got unknown v %d != 6 of inc osdmap\n", version);
                goto bad;
        }

        ceph_decode_need(p, end, sizeof(fsid)+sizeof(modified)+2*sizeof(u32),
                         bad);
        ceph_decode_copy(p, &fsid, sizeof(fsid));
        epoch = ceph_decode_32(p);
        BUG_ON(epoch != map->epoch+1);
        ceph_decode_copy(p, &modified, sizeof(modified));
        new_pool_max = ceph_decode_64(p);
        new_flags = ceph_decode_32(p);

        /* full map? */
        ceph_decode_32_safe(p, end, len, bad);
        if (len > 0) {
                dout("apply_incremental full map len %d, %p to %p\n",
                     len, *p, end);
                return ceph_osdmap_decode(p, min(*p+len, end));
        }

        /* new crush? */
        ceph_decode_32_safe(p, end, len, bad);
        if (len > 0) {
                dout("apply_incremental new crush map len %d, %p to %p\n",
                     len, *p, end);
                newcrush = crush_decode(*p, min(*p+len, end));
                if (IS_ERR(newcrush))
                        return ERR_CAST(newcrush);
                *p += len;
        }

        /* new flags? */
        if (new_flags >= 0)
                map->flags = new_flags;
        if (new_pool_max >= 0)
                map->pool_max = new_pool_max;

        ceph_decode_need(p, end, 5*sizeof(u32), bad);

        /* new max? */
        max = ceph_decode_32(p);
        if (max >= 0) {
                err = osdmap_set_max_osd(map, max);
                if (err < 0)
                        goto bad;
        }

        map->epoch++;
        map->modified = modified;
        if (newcrush) {
                if (map->crush)
                        crush_destroy(map->crush);
                map->crush = newcrush;
                newcrush = NULL;
        }

        /* new_pool */
        ceph_decode_32_safe(p, end, len, bad);
        while (len--) {
                struct ceph_pg_pool_info *pi;

                ceph_decode_64_safe(p, end, pool, bad);
                pi = __lookup_pg_pool(&map->pg_pools, pool);
                if (!pi) {
                        pi = kzalloc(sizeof(*pi), GFP_NOFS);
                        if (!pi) {
                                err = -ENOMEM;
                                goto bad;
                        }
                        pi->id = pool;
                        __insert_pg_pool(&map->pg_pools, pi);
                }
                err = __decode_pool(p, end, pi);
                if (err < 0)
                        goto bad;
        }
        err = __decode_pool_names(p, end, map);
        if (err < 0)
                goto bad;

        /* old_pool */
        ceph_decode_32_safe(p, end, len, bad);
        while (len--) {
                struct ceph_pg_pool_info *pi;

                ceph_decode_64_safe(p, end, pool, bad);
                pi = __lookup_pg_pool(&map->pg_pools, pool);
                if (pi)
                        __remove_pg_pool(&map->pg_pools, pi);
        }

        /* new_up */
        ceph_decode_32_safe(p, end, len, bad);
        while (len--) {
                u32 osd;
                struct ceph_entity_addr addr;
                ceph_decode_32_safe(p, end, osd, bad);
                ceph_decode_copy_safe(p, end, &addr, sizeof(addr), bad);
                ceph_decode_addr(&addr);
                pr_info("osd%d up\n", osd);
                BUG_ON(osd >= map->max_osd);
                map->osd_state[osd] |= CEPH_OSD_UP;
                map->osd_addr[osd] = addr;
        }

        /* new_state */
        ceph_decode_32_safe(p, end, len, bad);
        while (len--) {
                u32 osd;
                u8 xorstate;
                ceph_decode_32_safe(p, end, osd, bad);
                xorstate = **(u8 **)p;
                (*p)++;  /* clean flag */
                if (xorstate == 0)
                        xorstate = CEPH_OSD_UP;
                if (xorstate & CEPH_OSD_UP)
                        pr_info("osd%d down\n", osd);
                if (osd < map->max_osd)
                        map->osd_state[osd] ^= xorstate;
        }

        /* new_weight */
        ceph_decode_32_safe(p, end, len, bad);
        while (len--) {
                u32 osd, off;
                ceph_decode_need(p, end, sizeof(u32)*2, bad);
                osd = ceph_decode_32(p);
                off = ceph_decode_32(p);
                pr_info("osd%d weight 0x%x %s\n", osd, off,
                        off == CEPH_OSD_IN ? "(in)" :
                        (off == CEPH_OSD_OUT ? "(out)" : ""));
                if (osd < map->max_osd)
                        map->osd_weight[osd] = off;
        }

        /* new_pg_temp */
        ceph_decode_32_safe(p, end, len, bad);
        while (len--) {
                struct ceph_pg_mapping *pg;
                int j;
                struct ceph_pg pgid;
                u32 pglen;

                err = ceph_decode_pgid(p, end, &pgid);
                if (err)
                        goto bad;
                ceph_decode_need(p, end, sizeof(u32), bad);
                pglen = ceph_decode_32(p);
                if (pglen) {
                        ceph_decode_need(p, end, pglen*sizeof(u32), bad);

                        /* removing existing (if any) */
                        (void) __remove_pg_mapping(&map->pg_temp, pgid);

                        /* insert */
                        err = -EINVAL;
                        if (pglen > (UINT_MAX - sizeof(*pg)) / sizeof(u32))
                                goto bad;
                        err = -ENOMEM;
                        pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS);
                        if (!pg)
                                goto bad;
                        pg->pgid = pgid;
                        pg->len = pglen;
                        for (j = 0; j < pglen; j++)
                                pg->osds[j] = ceph_decode_32(p);
                        err = __insert_pg_mapping(pg, &map->pg_temp);
                        if (err) {
                                kfree(pg);
                                goto bad;
                        }
                        dout(" added pg_temp %lld.%x len %d\n", pgid.pool,
                             pgid.seed, pglen);
                } else {
                        /* remove */
                        __remove_pg_mapping(&map->pg_temp, pgid);
                }
        }

        /* ignore the rest */
        *p = end;

        dout("inc osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
        return map;

bad:
        pr_err("corrupt inc osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
               err, epoch, (int)(*p - start), *p, start, end);
        print_hex_dump(KERN_DEBUG, "osdmap: ",
                       DUMP_PREFIX_OFFSET, 16, 1,
                       start, end - start, true);
        if (newcrush)
                crush_destroy(newcrush);
        return ERR_PTR(err);
}
/*
 * calculate file layout from given offset, length.
 * fill in correct oid, logical length, and object extent
 *	offset, length
 *
 * for now, we write only a single su, until we can
 * pass a stride back to the caller.
 */
int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
                                  u64 off, u64 len,
                                  u64 *ono,
                                  u64 *oxoff, u64 *oxlen)
{
        u32 osize = le32_to_cpu(layout->fl_object_size);
        u32 su = le32_to_cpu(layout->fl_stripe_unit);
        u32 sc = le32_to_cpu(layout->fl_stripe_count);
        u32 bl, stripeno, stripepos, objsetno;
        u32 su_per_object;
        u64 t, su_offset;

        dout("mapping %llu~%llu osize %u fl_su %u\n", off, len,
             osize, su);
        if (su == 0 || sc == 0)
                goto invalid;
        su_per_object = osize / su;
        if (su_per_object == 0)
                goto invalid;
        dout("osize %u / su %u = su_per_object %u\n", osize, su,
             su_per_object);

        if ((su & ~PAGE_MASK) != 0)
                goto invalid;

        /* bl = *off / su; */
        t = off;
        do_div(t, su);
        bl = t;
        dout("off %llu / su %u = bl %u\n", off, su, bl);

        stripeno = bl / sc;
        stripepos = bl % sc;
        objsetno = stripeno / su_per_object;

        *ono = objsetno * sc + stripepos;
        dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned int)*ono);

        /* *oxoff = *off % layout->fl_stripe_unit;  # offset in su */
        t = off;
        su_offset = do_div(t, su);
        *oxoff = su_offset + (stripeno % su_per_object) * su;

        /*
         * Calculate the length of the extent being written to the selected
         * object. This is the minimum of the full length requested (len) or
         * the remainder of the current stripe being written to.
         */
        *oxlen = min_t(u64, len, su - su_offset);

        dout(" obj extent %llu~%llu\n", *oxoff, *oxlen);
        return 0;

invalid:
        dout(" invalid layout\n");
        *ono = 0;
        *oxoff = 0;
        *oxlen = 0;
        return -EINVAL;
}
EXPORT_SYMBOL(ceph_calc_file_object_mapping);
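/*
 * Worked example (illustrative numbers): su = 64K, sc = 2,
 * osize = 256K, so su_per_object = 4.  A write at off = 320K falls in
 * stripe-unit block bl = 5, stripe 2, stripe position 1, object set 0,
 * hence ono = 0*2+1 = 1, with oxoff = (2 % 4) * 64K = 128K inside
 * that object.
 */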
/*
 * Calculate mapping of a (oloc, oid) pair to a PG.  Should only be
 * called with target's (oloc, oid), since tiering isn't taken into
 * account.
 */
int ceph_oloc_oid_to_pg(struct ceph_osdmap *osdmap,
                        struct ceph_object_locator *oloc,
                        struct ceph_object_id *oid,
                        struct ceph_pg *pg_out)
{
        struct ceph_pg_pool_info *pi;

        pi = __lookup_pg_pool(&osdmap->pg_pools, oloc->pool);
        if (!pi)
                return -ENOENT;

        pg_out->pool = oloc->pool;
        pg_out->seed = ceph_str_hash(pi->object_hash, oid->name,
                                     oid->name_len);

        dout("%s '%.*s' pgid %llu.%x\n", __func__, oid->name_len, oid->name,
             pg_out->pool, pg_out->seed);
        return 0;
}
EXPORT_SYMBOL(ceph_oloc_oid_to_pg);
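/*
 * The raw pgid is thus just (pool id, stable hash of object name);
 * e.g. object "foo" in pool 2 maps to something like pgid 2.7fc1f406
 * (seed value illustrative).  calc_pg_raw() below folds the seed onto
 * the pool's pg_num.
 */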
static int do_crush(struct ceph_osdmap *map, int ruleno, int x,
                    int *result, int result_max,
                    const __u32 *weight, int weight_max)
{
        int r;

        BUG_ON(result_max > CEPH_PG_MAX_SIZE);

        mutex_lock(&map->crush_scratch_mutex);
        r = crush_do_rule(map->crush, ruleno, x, result, result_max,
                          weight, weight_max, map->crush_scratch_ary);
        mutex_unlock(&map->crush_scratch_mutex);

        return r;
}
/*
 * Calculate raw osd vector for the given pgid.  Return pointer to osd
 * array, or NULL on failure.
 */
static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
                        int *osds, int *num)
{
        struct ceph_pg_mapping *pg;
        struct ceph_pg_pool_info *pool;
        int ruleno;
        int r;
        u32 pps;

        pool = __lookup_pg_pool(&osdmap->pg_pools, pgid.pool);
        if (!pool)
                return NULL;

        /* pg_temp? */
        pgid.seed = ceph_stable_mod(pgid.seed, pool->pg_num,
                                    pool->pg_num_mask);
        pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
        if (pg) {
                *num = pg->len;
                return pg->osds;
        }

        /* crush? */
        ruleno = crush_find_rule(osdmap->crush, pool->crush_ruleset,
                                 pool->type, pool->size);
        if (ruleno < 0) {
                pr_err("no crush rule pool %lld ruleset %d type %d size %d\n",
                       pgid.pool, pool->crush_ruleset, pool->type,
                       pool->size);
                return NULL;
        }

        if (pool->flags & CEPH_POOL_FLAG_HASHPSPOOL) {
                /* hash pool id and seed so that pool PGs do not overlap */
                pps = crush_hash32_2(CRUSH_HASH_RJENKINS1,
                                     ceph_stable_mod(pgid.seed, pool->pgp_num,
                                                     pool->pgp_num_mask),
                                     pgid.pool);
        } else {
                /*
                 * legacy behavior: add ps and pool together.  this is
                 * not a great approach because the PGs from each pool
                 * will overlap on top of each other: 0.5 == 1.4 ==
                 * 2.3 == ...
                 */
                pps = ceph_stable_mod(pgid.seed, pool->pgp_num,
                                      pool->pgp_num_mask) +
                        (unsigned)pgid.pool;
        }
        r = do_crush(osdmap, ruleno, pps, osds, min_t(int, pool->size, *num),
                     osdmap->osd_weight, osdmap->max_osd);
        if (r < 0) {
                pr_err("error %d from crush rule: pool %lld ruleset %d type %d"
                       " size %d\n", r, pgid.pool, pool->crush_ruleset,
                       pool->type, pool->size);
                return NULL;
        }
        *num = r;
        return osds;
}
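/*
 * Example of the overlap the HASHPSPOOL flag avoids: under the legacy
 * scheme pps is just ps + pool, so pgs 0.5, 1.4 and 2.3 all feed the
 * same x == 5 into CRUSH and land on the same OSDs.  Mixing the pool
 * id into a Jenkins hash decorrelates the pools.
 */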
/*
 * Return acting set for given pgid.
 */
int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
                        int *acting)
{
        int rawosds[CEPH_PG_MAX_SIZE], *osds;
        int i, o, num = CEPH_PG_MAX_SIZE;

        osds = calc_pg_raw(osdmap, pgid, rawosds, &num);
        if (!osds)
                return -1;

        /* primary is first up osd */
        o = 0;
        for (i = 0; i < num; i++)
                if (ceph_osd_is_up(osdmap, osds[i]))
                        acting[o++] = osds[i];
        return o;
}
/*
 * Return primary osd for given pgid, or -1 if none.
 */
int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, struct ceph_pg pgid)
{
        int rawosds[CEPH_PG_MAX_SIZE], *osds;
        int i, num = CEPH_PG_MAX_SIZE;

        osds = calc_pg_raw(osdmap, pgid, rawosds, &num);
        if (!osds)
                return -1;

        /* primary is first up osd */
        for (i = 0; i < num; i++)
                if (ceph_osd_is_up(osdmap, osds[i]))
                        return osds[i];
        return -1;
}
EXPORT_SYMBOL(ceph_calc_pg_primary);