#include <linux/ceph/ceph_debug.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/osdmap.h>
#include <linux/ceph/decode.h>
#include <linux/crush/hash.h>
#include <linux/crush/mapper.h>
char *ceph_osdmap_state_str(char *str, int len, int state)
if ((state & CEPH_OSD_EXISTS) && (state & CEPH_OSD_UP))
snprintf(str, len, "exists, up");
else if (state & CEPH_OSD_EXISTS)
snprintf(str, len, "exists");
else if (state & CEPH_OSD_UP)
snprintf(str, len, "up");
else
snprintf(str, len, "doesn't exist");
return str;
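/*
* Illustrative use (local buffer name is hypothetical):
*
*	char buf[32];
*	ceph_osdmap_state_str(buf, sizeof(buf), CEPH_OSD_EXISTS | CEPH_OSD_UP);
*
* leaves "exists, up" in buf and returns buf for printf-style use.
*/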
static int calc_bits_of(unsigned int t)
* the foo_mask is the smallest value 2^n-1 that is >= foo-1.
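*
* Example: pg_num = 10 -> calc_bits_of(9) = 4 and
* pg_num_mask = (1 << 4) - 1 = 0xf; ceph_stable_mod() later uses the
* mask to fold a raw hash into the range [0, pg_num).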
static void calc_pg_masks(struct ceph_pg_pool_info *pi)
pi->pg_num_mask = (1 << calc_bits_of(pi->pg_num-1)) - 1;
pi->pgp_num_mask = (1 << calc_bits_of(pi->pgp_num-1)) - 1;
static int crush_decode_uniform_bucket(void **p, void *end,
struct crush_bucket_uniform *b)
dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
b->item_weight = ceph_decode_32(p);
static int crush_decode_list_bucket(void **p, void *end,
struct crush_bucket_list *b)
dout("crush_decode_list_bucket %p to %p\n", *p, end);
b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
if (b->item_weights == NULL)
b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
if (b->sum_weights == NULL)
ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
for (j = 0; j < b->h.size; j++) {
b->item_weights[j] = ceph_decode_32(p);
b->sum_weights[j] = ceph_decode_32(p);
static int crush_decode_tree_bucket(void **p, void *end,
struct crush_bucket_tree *b)
dout("crush_decode_tree_bucket %p to %p\n", *p, end);
ceph_decode_32_safe(p, end, b->num_nodes, bad);
b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
if (b->node_weights == NULL)
ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
for (j = 0; j < b->num_nodes; j++)
b->node_weights[j] = ceph_decode_32(p);
static int crush_decode_straw_bucket(void **p, void *end,
struct crush_bucket_straw *b)
dout("crush_decode_straw_bucket %p to %p\n", *p, end);
b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
if (b->item_weights == NULL)
b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
if (b->straws == NULL)
ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
for (j = 0; j < b->h.size; j++) {
b->item_weights[j] = ceph_decode_32(p);
b->straws[j] = ceph_decode_32(p);
static int skip_name_map(void **p, void *end)
ceph_decode_32_safe(p, end, len, bad);
ceph_decode_32_safe(p, end, strlen, bad);
static struct crush_map *crush_decode(void *pbyval, void *end)
void *start = pbyval;
dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));
c = kzalloc(sizeof(*c), GFP_NOFS);
return ERR_PTR(-ENOMEM);
/* set tunables to default values */
c->choose_local_tries = 2;
c->choose_local_fallback_tries = 5;
c->choose_total_tries = 19;
c->chooseleaf_descend_once = 0;
ceph_decode_need(p, end, 4*sizeof(u32), bad);
magic = ceph_decode_32(p);
if (magic != CRUSH_MAGIC) {
pr_err("crush_decode magic %x != current %x\n",
(unsigned int)magic, (unsigned int)CRUSH_MAGIC);
c->max_buckets = ceph_decode_32(p);
c->max_rules = ceph_decode_32(p);
c->max_devices = ceph_decode_32(p);
c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
if (c->buckets == NULL)
c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS);
if (c->rules == NULL)
for (i = 0; i < c->max_buckets; i++) {
struct crush_bucket *b;
ceph_decode_32_safe(p, end, alg, bad);
c->buckets[i] = NULL;
dout("crush_decode bucket %d off %x %p to %p\n",
i, (int)(*p-start), *p, end);
case CRUSH_BUCKET_UNIFORM:
size = sizeof(struct crush_bucket_uniform);
case CRUSH_BUCKET_LIST:
size = sizeof(struct crush_bucket_list);
case CRUSH_BUCKET_TREE:
size = sizeof(struct crush_bucket_tree);
case CRUSH_BUCKET_STRAW:
size = sizeof(struct crush_bucket_straw);
b = c->buckets[i] = kzalloc(size, GFP_NOFS);
ceph_decode_need(p, end, 4*sizeof(u32), bad);
b->id = ceph_decode_32(p);
b->type = ceph_decode_16(p);
b->alg = ceph_decode_8(p);
b->hash = ceph_decode_8(p);
b->weight = ceph_decode_32(p);
b->size = ceph_decode_32(p);
dout("crush_decode bucket size %d off %x %p to %p\n",
b->size, (int)(*p-start), *p, end);
b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS);
if (b->items == NULL)
b->perm = kcalloc(b->size, sizeof(u32), GFP_NOFS);
ceph_decode_need(p, end, b->size*sizeof(u32), bad);
for (j = 0; j < b->size; j++)
b->items[j] = ceph_decode_32(p);
case CRUSH_BUCKET_UNIFORM:
err = crush_decode_uniform_bucket(p, end,
(struct crush_bucket_uniform *)b);
case CRUSH_BUCKET_LIST:
err = crush_decode_list_bucket(p, end,
(struct crush_bucket_list *)b);
case CRUSH_BUCKET_TREE:
err = crush_decode_tree_bucket(p, end,
(struct crush_bucket_tree *)b);
case CRUSH_BUCKET_STRAW:
err = crush_decode_straw_bucket(p, end,
(struct crush_bucket_straw *)b);
dout("rule vec is %p\n", c->rules);
for (i = 0; i < c->max_rules; i++) {
struct crush_rule *r;
ceph_decode_32_safe(p, end, yes, bad);
dout("crush_decode NO rule %d off %x %p to %p\n",
i, (int)(*p-start), *p, end);
dout("crush_decode rule %d off %x %p to %p\n",
i, (int)(*p-start), *p, end);
ceph_decode_32_safe(p, end, yes, bad);
#if BITS_PER_LONG == 32
if (yes > (ULONG_MAX - sizeof(*r))
/ sizeof(struct crush_rule_step))
r = c->rules[i] = kmalloc(sizeof(*r) +
yes*sizeof(struct crush_rule_step),
dout(" rule %d is at %p\n", i, r);
ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
for (j = 0; j < r->len; j++) {
r->steps[j].op = ceph_decode_32(p);
r->steps[j].arg1 = ceph_decode_32(p);
r->steps[j].arg2 = ceph_decode_32(p);
/* ignore trailing name maps. */
for (num_name_maps = 0; num_name_maps < 3; num_name_maps++) {
err = skip_name_map(p, end);
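/*
* Tunables are optional trailing fields: older maps simply end here,
* in which case the ceph_decode_need() calls below bail out to the
* done label and the defaults assigned above stay in effect.
*/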
ceph_decode_need(p, end, 3*sizeof(u32), done);
c->choose_local_tries = ceph_decode_32(p);
c->choose_local_fallback_tries = ceph_decode_32(p);
c->choose_total_tries = ceph_decode_32(p);
dout("crush decode tunable choose_local_tries = %d\n",
c->choose_local_tries);
dout("crush decode tunable choose_local_fallback_tries = %d\n",
c->choose_local_fallback_tries);
dout("crush decode tunable choose_total_tries = %d\n",
c->choose_total_tries);
ceph_decode_need(p, end, sizeof(u32), done);
c->chooseleaf_descend_once = ceph_decode_32(p);
dout("crush decode tunable chooseleaf_descend_once = %d\n",
c->chooseleaf_descend_once);
dout("crush_decode success\n");
dout("crush_decode fail %d\n", err);
* rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
* to a set of osds) and primary_temp (explicit primary setting)
static int pgid_cmp(struct ceph_pg l, struct ceph_pg r)
static int __insert_pg_mapping(struct ceph_pg_mapping *new,
struct rb_root *root)
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct ceph_pg_mapping *pg = NULL;
dout("__insert_pg_mapping %llx %p\n", *(u64 *)&new->pgid, new);
pg = rb_entry(parent, struct ceph_pg_mapping, node);
c = pgid_cmp(new->pgid, pg->pgid);
rb_link_node(&new->node, parent, p);
rb_insert_color(&new->node, root);
static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root,
struct rb_node *n = root->rb_node;
struct ceph_pg_mapping *pg;
pg = rb_entry(n, struct ceph_pg_mapping, node);
c = pgid_cmp(pgid, pg->pgid);
dout("__lookup_pg_mapping %lld.%x got %p\n",
pgid.pool, pgid.seed, pg);
static int __remove_pg_mapping(struct rb_root *root, struct ceph_pg pgid)
struct ceph_pg_mapping *pg = __lookup_pg_mapping(root, pgid);
dout("__remove_pg_mapping %lld.%x %p\n", pgid.pool, pgid.seed,
rb_erase(&pg->node, root);
dout("__remove_pg_mapping %lld.%x dne\n", pgid.pool, pgid.seed);
* rbtree of pg pool info
static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new)
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct ceph_pg_pool_info *pi = NULL;
pi = rb_entry(parent, struct ceph_pg_pool_info, node);
if (new->id < pi->id)
else if (new->id > pi->id)
rb_link_node(&new->node, parent, p);
rb_insert_color(&new->node, root);
static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, u64 id)
struct ceph_pg_pool_info *pi;
struct rb_node *n = root->rb_node;
pi = rb_entry(n, struct ceph_pg_pool_info, node);
else if (id > pi->id)
struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map, u64 id)
return __lookup_pg_pool(&map->pg_pools, id);
const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id)
struct ceph_pg_pool_info *pi;
if (id == CEPH_NOPOOL)
if (WARN_ON_ONCE(id > (u64) INT_MAX))
pi = __lookup_pg_pool(&map->pg_pools, (int) id);
return pi ? pi->name : NULL;
EXPORT_SYMBOL(ceph_pg_pool_name_by_id);
int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) {
struct ceph_pg_pool_info *pi =
rb_entry(rbp, struct ceph_pg_pool_info, node);
if (pi->name && strcmp(pi->name, name) == 0)
EXPORT_SYMBOL(ceph_pg_poolid_by_name);
static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
rb_erase(&pi->node, root);
static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
ceph_decode_need(p, end, 2 + 4, bad);
ev = ceph_decode_8(p); /* encoding version */
cv = ceph_decode_8(p); /* compat version */
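/* we understand pool encodings v5 and up, compat v9 or older */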
pr_warning("got v %d < 5 cv %d of ceph_pg_pool\n", ev, cv);
pr_warning("got v %d cv %d > 9 of ceph_pg_pool\n", ev, cv);
len = ceph_decode_32(p);
ceph_decode_need(p, end, len, bad);
pi->type = ceph_decode_8(p);
pi->size = ceph_decode_8(p);
pi->crush_ruleset = ceph_decode_8(p);
pi->object_hash = ceph_decode_8(p);
pi->pg_num = ceph_decode_32(p);
pi->pgp_num = ceph_decode_32(p);
*p += 4 + 4; /* skip lpg* */
*p += 4; /* skip last_change */
*p += 8 + 4; /* skip snap_seq, snap_epoch */
num = ceph_decode_32(p);
*p += 8; /* snapid key */
*p += 1 + 1; /* versions */
len = ceph_decode_32(p);
/* skip removed_snaps */
num = ceph_decode_32(p);
*p += 8; /* skip auid */
pi->flags = ceph_decode_64(p);
*p += 4; /* skip crash_replay_interval */
*p += 1; /* skip min_size */
*p += 8 + 8; /* skip quota_max_* */
num = ceph_decode_32(p);
*p += 8; /* skip tier_of */
*p += 1; /* skip cache_mode */
pi->read_tier = ceph_decode_64(p);
pi->write_tier = ceph_decode_64(p);
/* ignore the rest */
static int decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
struct ceph_pg_pool_info *pi;
ceph_decode_32_safe(p, end, num, bad);
dout(" %d pool names\n", num);
ceph_decode_64_safe(p, end, pool, bad);
ceph_decode_32_safe(p, end, len, bad);
dout(" pool %llu len %d\n", pool, len);
ceph_decode_need(p, end, len, bad);
pi = __lookup_pg_pool(&map->pg_pools, pool);
char *name = kstrndup(*p, len, GFP_NOFS);
dout(" name is %s\n", pi->name);
void ceph_osdmap_destroy(struct ceph_osdmap *map)
dout("osdmap_destroy %p\n", map);
crush_destroy(map->crush);
while (!RB_EMPTY_ROOT(&map->pg_temp)) {
struct ceph_pg_mapping *pg =
rb_entry(rb_first(&map->pg_temp),
struct ceph_pg_mapping, node);
rb_erase(&pg->node, &map->pg_temp);
while (!RB_EMPTY_ROOT(&map->primary_temp)) {
struct ceph_pg_mapping *pg =
rb_entry(rb_first(&map->primary_temp),
struct ceph_pg_mapping, node);
rb_erase(&pg->node, &map->primary_temp);
while (!RB_EMPTY_ROOT(&map->pg_pools)) {
struct ceph_pg_pool_info *pi =
rb_entry(rb_first(&map->pg_pools),
struct ceph_pg_pool_info, node);
__remove_pg_pool(&map->pg_pools, pi);
kfree(map->osd_state);
kfree(map->osd_weight);
kfree(map->osd_addr);
kfree(map->osd_primary_affinity);
* Adjust max_osd value, (re)allocate arrays.
* The new elements are properly initialized.
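*
* krealloc() preserves the existing entries; the loop below fills each
* newly added slot (weight CEPH_OSD_OUT, zeroed address) so a grown
* map never exposes stale data.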
static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
struct ceph_entity_addr *addr;
state = krealloc(map->osd_state, max*sizeof(*state), GFP_NOFS);
weight = krealloc(map->osd_weight, max*sizeof(*weight), GFP_NOFS);
addr = krealloc(map->osd_addr, max*sizeof(*addr), GFP_NOFS);
if (!state || !weight || !addr) {
for (i = map->max_osd; i < max; i++) {
weight[i] = CEPH_OSD_OUT;
memset(addr + i, 0, sizeof(*addr));
map->osd_state = state;
map->osd_weight = weight;
map->osd_addr = addr;
if (map->osd_primary_affinity) {
affinity = krealloc(map->osd_primary_affinity,
max*sizeof(*affinity), GFP_NOFS);
for (i = map->max_osd; i < max; i++)
affinity[i] = CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
map->osd_primary_affinity = affinity;
#define OSDMAP_WRAPPER_COMPAT_VER 7
#define OSDMAP_CLIENT_DATA_COMPAT_VER 1
* Return 0 or error. On success, *v is set to 0 for old (v6) osdmaps,
* to struct_v of the client_data section for new (v7 and above)
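*
* New-style (v7+) maps carry two nested sections, each prefixed by
* u8 struct_v, u8 struct_compat and u32 struct_len: an outer wrapper
* and the client-usable data; both prefixes are decoded below.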
static int get_osdmap_client_data_v(void **p, void *end,
const char *prefix, u8 *v)
ceph_decode_8_safe(p, end, struct_v, e_inval);
ceph_decode_8_safe(p, end, struct_compat, e_inval);
if (struct_compat > OSDMAP_WRAPPER_COMPAT_VER) {
pr_warning("got v %d cv %d > %d of %s ceph_osdmap\n",
struct_v, struct_compat,
OSDMAP_WRAPPER_COMPAT_VER, prefix);
*p += 4; /* ignore wrapper struct_len */
ceph_decode_8_safe(p, end, struct_v, e_inval);
ceph_decode_8_safe(p, end, struct_compat, e_inval);
if (struct_compat > OSDMAP_CLIENT_DATA_COMPAT_VER) {
pr_warning("got v %d cv %d > %d of %s ceph_osdmap client data\n",
struct_v, struct_compat,
OSDMAP_CLIENT_DATA_COMPAT_VER, prefix);
*p += 4; /* ignore client data struct_len */
ceph_decode_16_safe(p, end, version, e_inval);
pr_warning("got v %d < 6 of %s ceph_osdmap\n", version,
/* old osdmap encoding */
static int __decode_pools(void **p, void *end, struct ceph_osdmap *map,
ceph_decode_32_safe(p, end, n, e_inval);
struct ceph_pg_pool_info *pi;
ceph_decode_64_safe(p, end, pool, e_inval);
pi = __lookup_pg_pool(&map->pg_pools, pool);
if (!incremental || !pi) {
pi = kzalloc(sizeof(*pi), GFP_NOFS);
ret = __insert_pg_pool(&map->pg_pools, pi);
ret = decode_pool(p, end, pi);
static int decode_pools(void **p, void *end, struct ceph_osdmap *map)
return __decode_pools(p, end, map, false);
static int decode_new_pools(void **p, void *end, struct ceph_osdmap *map)
return __decode_pools(p, end, map, true);
static int __decode_pg_temp(void **p, void *end, struct ceph_osdmap *map,
ceph_decode_32_safe(p, end, n, e_inval);
ret = ceph_decode_pgid(p, end, &pgid);
ceph_decode_32_safe(p, end, len, e_inval);
ret = __remove_pg_mapping(&map->pg_temp, pgid);
BUG_ON(!incremental && ret != -ENOENT);
if (!incremental || len > 0) {
struct ceph_pg_mapping *pg;
ceph_decode_need(p, end, len*sizeof(u32), e_inval);
if (len > (UINT_MAX - sizeof(*pg)) / sizeof(u32))
pg = kzalloc(sizeof(*pg) + len*sizeof(u32), GFP_NOFS);
pg->pg_temp.len = len;
for (i = 0; i < len; i++)
pg->pg_temp.osds[i] = ceph_decode_32(p);
ret = __insert_pg_mapping(pg, &map->pg_temp);
static int decode_pg_temp(void **p, void *end, struct ceph_osdmap *map)
return __decode_pg_temp(p, end, map, false);
static int decode_new_pg_temp(void **p, void *end, struct ceph_osdmap *map)
return __decode_pg_temp(p, end, map, true);
static int __decode_primary_temp(void **p, void *end, struct ceph_osdmap *map,
ceph_decode_32_safe(p, end, n, e_inval);
ret = ceph_decode_pgid(p, end, &pgid);
ceph_decode_32_safe(p, end, osd, e_inval);
ret = __remove_pg_mapping(&map->primary_temp, pgid);
BUG_ON(!incremental && ret != -ENOENT);
if (!incremental || osd != (u32)-1) {
struct ceph_pg_mapping *pg;
pg = kzalloc(sizeof(*pg), GFP_NOFS);
pg->primary_temp.osd = osd;
ret = __insert_pg_mapping(pg, &map->primary_temp);
static int decode_primary_temp(void **p, void *end, struct ceph_osdmap *map)
return __decode_primary_temp(p, end, map, false);
static int decode_new_primary_temp(void **p, void *end,
struct ceph_osdmap *map)
return __decode_primary_temp(p, end, map, true);
u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd)
BUG_ON(osd >= map->max_osd);
if (!map->osd_primary_affinity)
return CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
return map->osd_primary_affinity[osd];
static int set_primary_affinity(struct ceph_osdmap *map, int osd, u32 aff)
BUG_ON(osd >= map->max_osd);
if (!map->osd_primary_affinity) {
map->osd_primary_affinity = kmalloc(map->max_osd*sizeof(u32),
if (!map->osd_primary_affinity)
for (i = 0; i < map->max_osd; i++)
map->osd_primary_affinity[i] =
CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
map->osd_primary_affinity[osd] = aff;
static int decode_primary_affinity(void **p, void *end,
struct ceph_osdmap *map)
ceph_decode_32_safe(p, end, len, e_inval);
kfree(map->osd_primary_affinity);
map->osd_primary_affinity = NULL;
if (len != map->max_osd)
ceph_decode_need(p, end, map->max_osd*sizeof(u32), e_inval);
for (i = 0; i < map->max_osd; i++) {
ret = set_primary_affinity(map, i, ceph_decode_32(p));
static int decode_new_primary_affinity(void **p, void *end,
struct ceph_osdmap *map)
ceph_decode_32_safe(p, end, n, e_inval);
ceph_decode_32_safe(p, end, osd, e_inval);
ceph_decode_32_safe(p, end, aff, e_inval);
ret = set_primary_affinity(map, osd, aff);
* decode a full map.
static int osdmap_decode(void **p, void *end, struct ceph_osdmap *map)
dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));
err = get_osdmap_client_data_v(p, end, "full", &struct_v);
/* fsid, epoch, created, modified */
ceph_decode_need(p, end, sizeof(map->fsid) + sizeof(u32) +
sizeof(map->created) + sizeof(map->modified), e_inval);
ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
epoch = map->epoch = ceph_decode_32(p);
ceph_decode_copy(p, &map->created, sizeof(map->created));
ceph_decode_copy(p, &map->modified, sizeof(map->modified));
err = decode_pools(p, end, map);
err = decode_pool_names(p, end, map);
ceph_decode_32_safe(p, end, map->pool_max, e_inval);
ceph_decode_32_safe(p, end, map->flags, e_inval);
ceph_decode_32_safe(p, end, max, e_inval);
/* (re)alloc osd arrays */
err = osdmap_set_max_osd(map, max);
/* osd_state, osd_weight, osd_addrs->client_addr */
ceph_decode_need(p, end, 3*sizeof(u32) +
map->max_osd*(1 + sizeof(*map->osd_weight) +
sizeof(*map->osd_addr)), e_inval);
if (ceph_decode_32(p) != map->max_osd)
ceph_decode_copy(p, map->osd_state, map->max_osd);
if (ceph_decode_32(p) != map->max_osd)
for (i = 0; i < map->max_osd; i++)
map->osd_weight[i] = ceph_decode_32(p);
if (ceph_decode_32(p) != map->max_osd)
ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr));
for (i = 0; i < map->max_osd; i++)
ceph_decode_addr(&map->osd_addr[i]);
err = decode_pg_temp(p, end, map);
if (struct_v >= 1) {
err = decode_primary_temp(p, end, map);
/* primary_affinity */
if (struct_v >= 2) {
err = decode_primary_affinity(p, end, map);
/* XXX can this happen? */
kfree(map->osd_primary_affinity);
map->osd_primary_affinity = NULL;
ceph_decode_32_safe(p, end, len, e_inval);
map->crush = crush_decode(*p, min(*p + len, end));
if (IS_ERR(map->crush)) {
err = PTR_ERR(map->crush);
/* ignore the rest */
dout("full osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
pr_err("corrupt full osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
err, epoch, (int)(*p - start), *p, start, end);
print_hex_dump(KERN_DEBUG, "osdmap: ",
DUMP_PREFIX_OFFSET, 16, 1,
start, end - start, true);
* Allocate and decode a full map.
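*
* The caller owns the returned map and releases it with
* ceph_osdmap_destroy(); on decode failure the partially built map is
* destroyed here and an ERR_PTR is returned instead.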
struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end)
struct ceph_osdmap *map;
map = kzalloc(sizeof(*map), GFP_NOFS);
return ERR_PTR(-ENOMEM);
map->pg_temp = RB_ROOT;
map->primary_temp = RB_ROOT;
mutex_init(&map->crush_scratch_mutex);
ret = osdmap_decode(p, end, map);
ceph_osdmap_destroy(map);
return ERR_PTR(ret);
* decode and apply an incremental map update.
struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
struct ceph_osdmap *map,
struct ceph_messenger *msgr)
struct crush_map *newcrush = NULL;
struct ceph_fsid fsid;
struct ceph_timespec modified;
__s32 new_flags, max;
dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));
err = get_osdmap_client_data_v(p, end, "inc", &struct_v);
/* fsid, epoch, modified, new_pool_max, new_flags */
ceph_decode_need(p, end, sizeof(fsid) + sizeof(u32) + sizeof(modified) +
sizeof(u64) + sizeof(u32), e_inval);
ceph_decode_copy(p, &fsid, sizeof(fsid));
epoch = ceph_decode_32(p);
BUG_ON(epoch != map->epoch+1);
ceph_decode_copy(p, &modified, sizeof(modified));
new_pool_max = ceph_decode_64(p);
new_flags = ceph_decode_32(p);
ceph_decode_32_safe(p, end, len, e_inval);
dout("apply_incremental full map len %d, %p to %p\n",
return ceph_osdmap_decode(p, min(*p+len, end));
ceph_decode_32_safe(p, end, len, e_inval);
newcrush = crush_decode(*p, min(*p+len, end));
if (IS_ERR(newcrush)) {
err = PTR_ERR(newcrush);
map->flags = new_flags;
if (new_pool_max >= 0)
map->pool_max = new_pool_max;
ceph_decode_32_safe(p, end, max, e_inval);
err = osdmap_set_max_osd(map, max);
map->modified = modified;
crush_destroy(map->crush);
map->crush = newcrush;
err = decode_new_pools(p, end, map);
/* new_pool_names */
err = decode_pool_names(p, end, map);
ceph_decode_32_safe(p, end, len, e_inval);
struct ceph_pg_pool_info *pi;
ceph_decode_64_safe(p, end, pool, e_inval);
pi = __lookup_pg_pool(&map->pg_pools, pool);
__remove_pg_pool(&map->pg_pools, pi);
ceph_decode_32_safe(p, end, len, e_inval);
struct ceph_entity_addr addr;
ceph_decode_32_safe(p, end, osd, e_inval);
ceph_decode_copy_safe(p, end, &addr, sizeof(addr), e_inval);
ceph_decode_addr(&addr);
pr_info("osd%d up\n", osd);
BUG_ON(osd >= map->max_osd);
map->osd_state[osd] |= CEPH_OSD_UP;
map->osd_addr[osd] = addr;
ceph_decode_32_safe(p, end, len, e_inval);
ceph_decode_32_safe(p, end, osd, e_inval);
xorstate = **(u8 **)p;
(*p)++; /* clean flag */
if (xorstate == 0)
xorstate = CEPH_OSD_UP;
if (xorstate & CEPH_OSD_UP)
pr_info("osd%d down\n", osd);
if (osd < map->max_osd)
map->osd_state[osd] ^= xorstate;
ceph_decode_32_safe(p, end, len, e_inval);
ceph_decode_need(p, end, sizeof(u32)*2, e_inval);
osd = ceph_decode_32(p);
off = ceph_decode_32(p);
pr_info("osd%d weight 0x%x %s\n", osd, off,
off == CEPH_OSD_IN ? "(in)" :
(off == CEPH_OSD_OUT ? "(out)" : ""));
if (osd < map->max_osd)
map->osd_weight[osd] = off;
err = decode_new_pg_temp(p, end, map);
/* new_primary_temp */
if (struct_v >= 1) {
err = decode_new_primary_temp(p, end, map);
/* new_primary_affinity */
if (struct_v >= 2) {
err = decode_new_primary_affinity(p, end, map);
/* ignore the rest */
dout("inc osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
pr_err("corrupt inc osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
err, epoch, (int)(*p - start), *p, start, end);
print_hex_dump(KERN_DEBUG, "osdmap: ",
DUMP_PREFIX_OFFSET, 16, 1,
start, end - start, true);
crush_destroy(newcrush);
return ERR_PTR(err);
* calculate file layout from given offset, length.
* fill in correct oid, logical length, and object extent
* for now, we write only a single su, until we can
* pass a stride back to the caller.
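*
* Worked example (illustrative layout values): with object_size 4M,
* stripe_unit 4M and stripe_count 1, a request at off 5M yields
* bl = 1, stripe 1, object number 1, oxoff 1M and
* oxlen = min(len, 3M).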
int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
u64 *oxoff, u64 *oxlen)
u32 osize = le32_to_cpu(layout->fl_object_size);
u32 su = le32_to_cpu(layout->fl_stripe_unit);
u32 sc = le32_to_cpu(layout->fl_stripe_count);
u32 bl, stripeno, stripepos, objsetno;
dout("mapping %llu~%llu osize %u fl_su %u\n", off, len,
if (su == 0 || sc == 0)
su_per_object = osize / su;
if (su_per_object == 0)
dout("osize %u / su %u = su_per_object %u\n", osize, su,
if ((su & ~PAGE_MASK) != 0)
/* bl = *off / su; */
dout("off %llu / su %u = bl %u\n", off, su, bl);
stripepos = bl % sc;
objsetno = stripeno / su_per_object;
*ono = objsetno * sc + stripepos;
dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned int)*ono);
/* *oxoff = *off % layout->fl_stripe_unit; # offset in su */
su_offset = do_div(t, su);
*oxoff = su_offset + (stripeno % su_per_object) * su;
/*
* Calculate the length of the extent being written to the selected
* object. This is the minimum of the full length requested (len) or
* the remainder of the current stripe being written to.
*/
*oxlen = min_t(u64, len, su - su_offset);
dout(" obj extent %llu~%llu\n", *oxoff, *oxlen);
dout(" invalid layout\n");
EXPORT_SYMBOL(ceph_calc_file_object_mapping);
* Calculate mapping of a (oloc, oid) pair to a PG. Should only be
* called with target's (oloc, oid), since tiering isn't taken into
* account.
int ceph_oloc_oid_to_pg(struct ceph_osdmap *osdmap,
struct ceph_object_locator *oloc,
struct ceph_object_id *oid,
struct ceph_pg *pg_out)
struct ceph_pg_pool_info *pi;
pi = __lookup_pg_pool(&osdmap->pg_pools, oloc->pool);
pg_out->pool = oloc->pool;
pg_out->seed = ceph_str_hash(pi->object_hash, oid->name,
dout("%s '%.*s' pgid %llu.%x\n", __func__, oid->name_len, oid->name,
pg_out->pool, pg_out->seed);
EXPORT_SYMBOL(ceph_oloc_oid_to_pg);
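/*
* crush_do_rule() needs per-call scratch space; rather than allocate
* it on every mapping, a single per-map buffer (crush_scratch_ary) is
* reused under crush_scratch_mutex.
*/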
static int do_crush(struct ceph_osdmap *map, int ruleno, int x,
int *result, int result_max,
const __u32 *weight, int weight_max)
BUG_ON(result_max > CEPH_PG_MAX_SIZE);
mutex_lock(&map->crush_scratch_mutex);
r = crush_do_rule(map->crush, ruleno, x, result, result_max,
weight, weight_max, map->crush_scratch_ary);
mutex_unlock(&map->crush_scratch_mutex);
* Calculate raw osd vector for the given pgid. Return pointer to osd
* array, or NULL on failure.
static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
int *osds, int *num)
struct ceph_pg_mapping *pg;
struct ceph_pg_pool_info *pool;
pool = __lookup_pg_pool(&osdmap->pg_pools, pgid.pool);
pgid.seed = ceph_stable_mod(pgid.seed, pool->pg_num,
pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
*num = pg->pg_temp.len;
return pg->pg_temp.osds;
ruleno = crush_find_rule(osdmap->crush, pool->crush_ruleset,
pool->type, pool->size);
pr_err("no crush rule pool %lld ruleset %d type %d size %d\n",
pgid.pool, pool->crush_ruleset, pool->type,
if (pool->flags & CEPH_POOL_FLAG_HASHPSPOOL) {
/* hash pool id and seed so that pool PGs do not overlap */
pps = crush_hash32_2(CRUSH_HASH_RJENKINS1,
ceph_stable_mod(pgid.seed, pool->pgp_num,
pool->pgp_num_mask),
* legacy behavior: add ps and pool together. this is
* not a great approach because the PGs from each pool
* will overlap on top of each other: 0.5 == 1.4 == 2.3
pps = ceph_stable_mod(pgid.seed, pool->pgp_num,
pool->pgp_num_mask) +
(unsigned)pgid.pool;
r = do_crush(osdmap, ruleno, pps, osds, min_t(int, pool->size, *num),
osdmap->osd_weight, osdmap->max_osd);
pr_err("error %d from crush rule: pool %lld ruleset %d type %d"
" size %d\n", r, pgid.pool, pool->crush_ruleset,
pool->type, pool->size);
* Calculate raw (crush) set for given pgid.
* Return raw set length, or error.
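*
* This is the first stage of the pgid -> acting set pipeline used by
* ceph_calc_pg_acting() below:
*
* pg_to_raw_osds() -> raw_to_up_osds() -> apply_temps()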
static int pg_to_raw_osds(struct ceph_osdmap *osdmap,
struct ceph_pg_pool_info *pool,
struct ceph_pg pgid, u32 pps, int *osds)
ruleno = crush_find_rule(osdmap->crush, pool->crush_ruleset,
pool->type, pool->size);
pr_err("no crush rule: pool %lld ruleset %d type %d size %d\n",
pgid.pool, pool->crush_ruleset, pool->type,
len = do_crush(osdmap, ruleno, pps, osds,
min_t(int, pool->size, CEPH_PG_MAX_SIZE),
osdmap->osd_weight, osdmap->max_osd);
pr_err("error %d from crush rule %d: pool %lld ruleset %d type %d size %d\n",
len, ruleno, pgid.pool, pool->crush_ruleset,
pool->type, pool->size);
* Given raw set, calculate up set and up primary.
* Return up set length. *primary is set to up primary osd id, or -1
* if up set is empty.
static int raw_to_up_osds(struct ceph_osdmap *osdmap,
struct ceph_pg_pool_info *pool,
int *osds, int len, int *primary)
int up_primary = -1;
if (ceph_can_shift_osds(pool)) {
for (i = 0; i < len; i++) {
if (ceph_osd_is_down(osdmap, osds[i])) {
osds[i - removed] = osds[i];
up_primary = osds[0];
for (i = len - 1; i >= 0; i--) {
if (ceph_osd_is_down(osdmap, osds[i]))
osds[i] = CRUSH_ITEM_NONE;
up_primary = osds[i];
*primary = up_primary;
* Given up set, apply pg_temp mapping.
* Return acting set length. *primary is set to acting primary osd id,
* or -1 if acting set is empty.
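*
* Down osds listed in pg_temp are dropped entirely for pools that can
* shift osds, and replaced with CRUSH_ITEM_NONE placeholders otherwise,
* so position-sensitive (e.g. erasure-coded) pools keep their slots.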
static int apply_temps(struct ceph_osdmap *osdmap,
struct ceph_pg_pool_info *pool, struct ceph_pg pgid,
int *osds, int len, int *primary)
struct ceph_pg_mapping *pg;
pgid.seed = ceph_stable_mod(pgid.seed, pool->pg_num,
pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
for (i = 0; i < pg->pg_temp.len; i++) {
if (ceph_osd_is_down(osdmap, pg->pg_temp.osds[i])) {
if (ceph_can_shift_osds(pool))
osds[temp_len++] = CRUSH_ITEM_NONE;
osds[temp_len++] = pg->pg_temp.osds[i];
/* apply pg_temp's primary */
for (i = 0; i < temp_len; i++) {
if (osds[i] != CRUSH_ITEM_NONE) {
temp_primary = osds[i];
temp_primary = *primary;
*primary = temp_primary;
* Calculate acting set for given pgid.
* Return acting set length, or error. *primary is set to acting
* primary osd id, or -1 if acting set is empty or on error.
int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
int *osds, int *primary)
struct ceph_pg_pool_info *pool;
pool = __lookup_pg_pool(&osdmap->pg_pools, pgid.pool);
if (pool->flags & CEPH_POOL_FLAG_HASHPSPOOL) {
/* hash pool id and seed so that pool PGs do not overlap */
pps = crush_hash32_2(CRUSH_HASH_RJENKINS1,
ceph_stable_mod(pgid.seed, pool->pgp_num,
pool->pgp_num_mask),
* legacy behavior: add ps and pool together. this is
* not a great approach because the PGs from each pool
* will overlap on top of each other: 0.5 == 1.4 == 2.3
pps = ceph_stable_mod(pgid.seed, pool->pgp_num,
pool->pgp_num_mask) +
(unsigned)pgid.pool;
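/*
* Example: with the legacy scheme pg 0.5 and pg 1.4 both yield
* pps 5 (seed + pool) and thus identical crush input, whereas
* HASHPSPOOL mixes seed and pool into unrelated values.
*/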
len = pg_to_raw_osds(osdmap, pool, pgid, pps, osds);
len = raw_to_up_osds(osdmap, pool, osds, len, primary);
len = apply_temps(osdmap, pool, pgid, osds, len, primary);
* Return primary osd for given pgid, or -1 if none.
int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, struct ceph_pg pgid)
int rawosds[CEPH_PG_MAX_SIZE], *osds;
int i, num = CEPH_PG_MAX_SIZE;
osds = calc_pg_raw(osdmap, pgid, rawosds, &num);
/* primary is first up osd */
for (i = 0; i < num; i++)
if (ceph_osd_is_up(osdmap, osds[i]))
EXPORT_SYMBOL(ceph_calc_pg_primary);