/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2004, 2005 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/file.h>
#include <linux/kthread.h>
#include <linux/configfs.h>
#include <linux/random.h>
#include <linux/crc32.h>
#include <linux/time.h>
#include <linux/debugfs.h>
#include <linux/slab.h>

#include "heartbeat.h"
#include "tcp.h"
#include "nodemanager.h"
#include "quorum.h"

#include "masklog.h"
/*
 * The first heartbeat pass had one global thread that would serialize all hb
 * callback calls.  This global serializing sem should only be removed once
 * we've made sure that all callees can deal with being called concurrently
 * from multiple hb region threads.
 */
static DECLARE_RWSEM(o2hb_callback_sem);

/*
 * multiple hb threads are watching multiple regions.  A node is live
 * whenever any of the threads sees activity from the node in its region.
 */
static DEFINE_SPINLOCK(o2hb_live_lock);
static struct list_head o2hb_live_slots[O2NM_MAX_NODES];
static unsigned long o2hb_live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
static LIST_HEAD(o2hb_node_events);
static DECLARE_WAIT_QUEUE_HEAD(o2hb_steady_queue);

#define O2HB_DEBUG_DIR			"o2hb"
#define O2HB_DEBUG_LIVENODES		"livenodes"
static struct dentry *o2hb_debug_dir;
static struct dentry *o2hb_debug_livenodes;

static LIST_HEAD(o2hb_all_regions);

static struct o2hb_callback {
	struct list_head list;
} o2hb_callbacks[O2HB_NUM_CB];

static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type);
#define O2HB_DEFAULT_BLOCK_BITS		9

enum o2hb_heartbeat_modes {
	O2HB_HEARTBEAT_LOCAL		= 0,
	O2HB_HEARTBEAT_GLOBAL,
	O2HB_HEARTBEAT_NUM_MODES,
};

char *o2hb_heartbeat_mode_desc[O2HB_HEARTBEAT_NUM_MODES] = {
		"local",	/* O2HB_HEARTBEAT_LOCAL */
		"global",	/* O2HB_HEARTBEAT_GLOBAL */
};

unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD;
unsigned int o2hb_heartbeat_mode = O2HB_HEARTBEAT_LOCAL;
/* Only sets a new threshold if there are no active regions.
 *
 * No locking or otherwise interesting code is required for reading
 * o2hb_dead_threshold as it can't change once regions are active and
 * it's not interesting to anyone until then anyway. */
static void o2hb_dead_threshold_set(unsigned int threshold)
{
	if (threshold > O2HB_MIN_DEAD_THRESHOLD) {
		spin_lock(&o2hb_live_lock);
		if (list_empty(&o2hb_all_regions))
			o2hb_dead_threshold = threshold;
		spin_unlock(&o2hb_live_lock);
	}
}

static int o2hb_global_heartbeat_mode_set(unsigned int hb_mode)
{
	int ret = -1;

	if (hb_mode < O2HB_HEARTBEAT_NUM_MODES) {
		spin_lock(&o2hb_live_lock);
		if (list_empty(&o2hb_all_regions)) {
			o2hb_heartbeat_mode = hb_mode;
			ret = 0;
		}
		spin_unlock(&o2hb_live_lock);
	}

	return ret;
}
struct o2hb_node_event {
	struct list_head        hn_item;
	enum o2hb_callback_type hn_event_type;
	struct o2nm_node        *hn_node;
	int                     hn_node_num;
};

struct o2hb_disk_slot {
	struct o2hb_disk_heartbeat_block *ds_raw_block;
	u8			ds_node_num;
	u64			ds_last_time;
	u64			ds_last_generation;
	u16			ds_equal_samples;
	u16			ds_changed_samples;
	struct list_head	ds_live_item;
};
/* each thread owns a region.. when we're asked to tear down the region
 * we ask the thread to stop, who cleans up the region */
struct o2hb_region {
	struct config_item	hr_item;

	struct list_head	hr_all_item;
	unsigned		hr_unclean_stop:1;

	/* protected by the hr_callback_sem */
	struct task_struct	*hr_task;

	unsigned int		hr_blocks;
	unsigned long long	hr_start_block;

	unsigned int		hr_block_bits;
	unsigned int		hr_block_bytes;

	unsigned int		hr_slots_per_page;
	unsigned int		hr_num_pages;

	struct page		**hr_slot_data;
	struct block_device	*hr_bdev;
	struct o2hb_disk_slot	*hr_slots;

	/* let the person setting up hb wait for it to return until it
	 * has reached a 'steady' state. This will be fixed when we have
	 * a more complete api that doesn't lead to this sort of fragility. */
	atomic_t		hr_steady_iterations;

	char			hr_dev_name[BDEVNAME_SIZE];

	unsigned int		hr_timeout_ms;

	/* randomized as the region goes up and down so that a node
	 * recognizes a node going up and down in one iteration */
	u64			hr_generation;

	struct delayed_work	hr_write_timeout_work;
	unsigned long		hr_last_timeout_start;

	/* Used during o2hb_check_slot to hold a copy of the block
	 * being checked because we temporarily have to zero out the
	 * crc field. */
	struct o2hb_disk_heartbeat_block *hr_tmp_block;
};

struct o2hb_bio_wait_ctxt {
	atomic_t          wc_num_reqs;
	struct completion wc_io_complete;
	int               wc_error;
};
static void o2hb_write_timeout(struct work_struct *work)
{
	struct o2hb_region *reg =
		container_of(work, struct o2hb_region,
			     hr_write_timeout_work.work);

	mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u "
	     "milliseconds\n", reg->hr_dev_name,
	     jiffies_to_msecs(jiffies - reg->hr_last_timeout_start));
	o2quo_disk_timeout();
}
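
/* Each successful heartbeat write re-arms this delayed work; if a
 * write then stalls for more than O2HB_MAX_WRITE_TIMEOUT_MS the work
 * fires and escalates to the quorum code via o2hb_write_timeout()
 * above. */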
static void o2hb_arm_write_timeout(struct o2hb_region *reg)
{
	mlog(ML_HEARTBEAT, "Queue write timeout for %u ms\n",
	     O2HB_MAX_WRITE_TIMEOUT_MS);

	cancel_delayed_work(&reg->hr_write_timeout_work);
	reg->hr_last_timeout_start = jiffies;
	schedule_delayed_work(&reg->hr_write_timeout_work,
			      msecs_to_jiffies(O2HB_MAX_WRITE_TIMEOUT_MS));
}

static void o2hb_disarm_write_timeout(struct o2hb_region *reg)
{
	cancel_delayed_work(&reg->hr_write_timeout_work);
	flush_scheduled_work();
}
static inline void o2hb_bio_wait_init(struct o2hb_bio_wait_ctxt *wc)
{
	atomic_set(&wc->wc_num_reqs, 1);
	init_completion(&wc->wc_io_complete);
	wc->wc_error = 0;
}

/* Used in error paths too */
static inline void o2hb_bio_wait_dec(struct o2hb_bio_wait_ctxt *wc,
				     unsigned int num)
{
	/* sadly atomic_sub_and_test() isn't available on all platforms.  The
	 * good news is that the fast path only completes one at a time */
	while(num--) {
		if (atomic_dec_and_test(&wc->wc_num_reqs)) {
			BUG_ON(num > 0);
			complete(&wc->wc_io_complete);
		}
	}
}
static void o2hb_wait_on_io(struct o2hb_region *reg,
			    struct o2hb_bio_wait_ctxt *wc)
{
	struct address_space *mapping = reg->hr_bdev->bd_inode->i_mapping;

	blk_run_address_space(mapping);
	o2hb_bio_wait_dec(wc, 1);

	wait_for_completion(&wc->wc_io_complete);
}

static void o2hb_bio_end_io(struct bio *bio,
			    int error)
{
	struct o2hb_bio_wait_ctxt *wc = bio->bi_private;

	if (error) {
		mlog(ML_ERROR, "IO Error %d\n", error);
		wc->wc_error = error;
	}

	o2hb_bio_wait_dec(wc, 1);
	bio_put(bio);
}
/* Setup a Bio to cover I/O against num_slots slots starting at
 * start_slot. */
static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
				      struct o2hb_bio_wait_ctxt *wc,
				      unsigned int *current_slot,
				      unsigned int max_slots)
{
	int len, current_page;
	unsigned int vec_len, vec_start;
	unsigned int bits = reg->hr_block_bits;
	unsigned int spp = reg->hr_slots_per_page;
	unsigned int cs = *current_slot;
	struct bio *bio;
	struct page *page;

	/* Testing has shown this allocation to take long enough under
	 * GFP_KERNEL that the local node can get fenced. It would be
	 * nicest if we could pre-allocate these bios and avoid this
	 * all together. */
	bio = bio_alloc(GFP_ATOMIC, 16);
	if (!bio) {
		mlog(ML_ERROR, "Could not alloc slots BIO!\n");
		bio = ERR_PTR(-ENOMEM);
		goto bail;
	}

	/* Must put everything in 512 byte sectors for the bio... */
	bio->bi_sector = (reg->hr_start_block + cs) << (bits - 9);
	bio->bi_bdev = reg->hr_bdev;
	bio->bi_private = wc;
	bio->bi_end_io = o2hb_bio_end_io;

	vec_start = (cs << bits) % PAGE_CACHE_SIZE;
	while(cs < max_slots) {
		current_page = cs / spp;
		page = reg->hr_slot_data[current_page];

		vec_len = min(PAGE_CACHE_SIZE - vec_start,
			      (max_slots-cs) * (PAGE_CACHE_SIZE/spp) );

		mlog(ML_HB_BIO, "page %d, vec_len = %u, vec_start = %u\n",
		     current_page, vec_len, vec_start);

		len = bio_add_page(bio, page, vec_len, vec_start);
		if (len != vec_len)
			break;

		cs += vec_len / (PAGE_CACHE_SIZE/spp);
		vec_start = 0;
	}

bail:
	*current_slot = cs;
	return bio;
}
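
/* Read the heartbeat blocks for slots 0 through max_slots - 1,
 * issuing as many bios as it takes to cover them, and wait for all
 * of the I/O to complete before returning. */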
static int o2hb_read_slots(struct o2hb_region *reg,
			   unsigned int max_slots)
{
	unsigned int current_slot = 0;
	int status;
	struct o2hb_bio_wait_ctxt wc;
	struct bio *bio;

	o2hb_bio_wait_init(&wc);

	while(current_slot < max_slots) {
		bio = o2hb_setup_one_bio(reg, &wc, &current_slot, max_slots);
		if (IS_ERR(bio)) {
			status = PTR_ERR(bio);
			mlog_errno(status);
			goto bail_and_wait;
		}

		atomic_inc(&wc.wc_num_reqs);
		submit_bio(READ, bio);
	}

	status = 0;

bail_and_wait:
	o2hb_wait_on_io(reg, &wc);
	if (wc.wc_error && !status)
		status = wc.wc_error;

	return status;
}
static int o2hb_issue_node_write(struct o2hb_region *reg,
				 struct o2hb_bio_wait_ctxt *write_wc)
{
	int status;
	unsigned int slot;
	struct bio *bio;

	o2hb_bio_wait_init(write_wc);

	slot = o2nm_this_node();

	bio = o2hb_setup_one_bio(reg, write_wc, &slot, slot+1);
	if (IS_ERR(bio)) {
		status = PTR_ERR(bio);
		mlog_errno(status);
		goto bail;
	}

	atomic_inc(&write_wc->wc_num_reqs);
	submit_bio(WRITE, bio);

	status = 0;
bail:
	return status;
}
static u32 o2hb_compute_block_crc_le(struct o2hb_region *reg,
				     struct o2hb_disk_heartbeat_block *hb_block)
{
	__le32 old_cksum;
	u32 ret;

	/* We want to compute the block crc with a 0 value in the
	 * hb_cksum field. Save it off here and replace after the
	 * crc. */
	old_cksum = hb_block->hb_cksum;
	hb_block->hb_cksum = 0;

	ret = crc32_le(0, (unsigned char *) hb_block, reg->hr_block_bytes);

	hb_block->hb_cksum = old_cksum;

	return ret;
}

static void o2hb_dump_slot(struct o2hb_disk_heartbeat_block *hb_block)
{
	mlog(ML_ERROR, "Dump slot information: seq = 0x%llx, node = %u, "
	     "cksum = 0x%x, generation 0x%llx\n",
	     (long long)le64_to_cpu(hb_block->hb_seq),
	     hb_block->hb_node, le32_to_cpu(hb_block->hb_cksum),
	     (long long)le64_to_cpu(hb_block->hb_generation));
}

static int o2hb_verify_crc(struct o2hb_region *reg,
			   struct o2hb_disk_heartbeat_block *hb_block)
{
	u32 read, computed;

	read = le32_to_cpu(hb_block->hb_cksum);
	computed = o2hb_compute_block_crc_le(reg, hb_block);

	return read == computed;
}
/* We want to make sure that nobody is heartbeating on top of us --
 * this will help detect an invalid configuration. */
static int o2hb_check_last_timestamp(struct o2hb_region *reg)
{
	int node_num, ret;
	struct o2hb_disk_slot *slot;
	struct o2hb_disk_heartbeat_block *hb_block;

	node_num = o2nm_this_node();

	ret = 1;
	slot = &reg->hr_slots[node_num];
	/* Don't check on our 1st timestamp */
	if (slot->ds_last_time) {
		hb_block = slot->ds_raw_block;

		if (le64_to_cpu(hb_block->hb_seq) != slot->ds_last_time)
			ret = 0;
	}

	return ret;
}
static inline void o2hb_prepare_block(struct o2hb_region *reg,
				      u64 generation)
{
	int node_num;
	u64 cputime;
	struct o2hb_disk_slot *slot;
	struct o2hb_disk_heartbeat_block *hb_block;

	node_num = o2nm_this_node();
	slot = &reg->hr_slots[node_num];

	hb_block = (struct o2hb_disk_heartbeat_block *)slot->ds_raw_block;
	memset(hb_block, 0, reg->hr_block_bytes);
	/* TODO: time stuff */
	cputime = CURRENT_TIME.tv_sec;
	if (!cputime)
		cputime = 1;

	hb_block->hb_seq = cpu_to_le64(cputime);
	hb_block->hb_node = node_num;
	hb_block->hb_generation = cpu_to_le64(generation);
	hb_block->hb_dead_ms = cpu_to_le32(o2hb_dead_threshold * O2HB_REGION_TIMEOUT_MS);

	/* This step must always happen last! */
	hb_block->hb_cksum = cpu_to_le32(o2hb_compute_block_crc_le(reg,
								   hb_block));

	mlog(ML_HB_BIO, "our node generation = 0x%llx, cksum = 0x%x\n",
	     (long long)generation,
	     le32_to_cpu(hb_block->hb_cksum));
}
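
/* Runs with o2hb_callback_sem held for write (see
 * o2hb_run_event_list() below), which keeps the callback list stable
 * while we walk it. */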
static void o2hb_fire_callbacks(struct o2hb_callback *hbcall,
				struct o2nm_node *node,
				int idx)
{
	struct list_head *iter;
	struct o2hb_callback_func *f;

	list_for_each(iter, &hbcall->list) {
		f = list_entry(iter, struct o2hb_callback_func, hc_item);
		mlog(ML_HEARTBEAT, "calling funcs %p\n", f);
		(f->hc_func)(node, idx, f->hc_data);
	}
}

/* Will run the list in order until we process the passed event */
static void o2hb_run_event_list(struct o2hb_node_event *queued_event)
{
	int empty;
	struct o2hb_callback *hbcall;
	struct o2hb_node_event *event;

	spin_lock(&o2hb_live_lock);
	empty = list_empty(&queued_event->hn_item);
	spin_unlock(&o2hb_live_lock);
	if (empty)
		return;

	/* Holding callback sem assures we don't alter the callback
	 * lists when doing this, and serializes ourselves with other
	 * processes wanting callbacks. */
	down_write(&o2hb_callback_sem);

	spin_lock(&o2hb_live_lock);
	while (!list_empty(&o2hb_node_events)
	       && !list_empty(&queued_event->hn_item)) {
		event = list_entry(o2hb_node_events.next,
				   struct o2hb_node_event,
				   hn_item);
		list_del_init(&event->hn_item);
		spin_unlock(&o2hb_live_lock);

		mlog(ML_HEARTBEAT, "Node %s event for %d\n",
		     event->hn_event_type == O2HB_NODE_UP_CB ? "UP" : "DOWN",
		     event->hn_node_num);

		hbcall = hbcall_from_type(event->hn_event_type);

		/* We should *never* have gotten on to the list with a
		 * bad type... This isn't something that we should try
		 * to recover from. */
		BUG_ON(IS_ERR(hbcall));

		o2hb_fire_callbacks(hbcall, event->hn_node, event->hn_node_num);

		spin_lock(&o2hb_live_lock);
	}
	spin_unlock(&o2hb_live_lock);

	up_write(&o2hb_callback_sem);
}
static void o2hb_queue_node_event(struct o2hb_node_event *event,
				  enum o2hb_callback_type type,
				  struct o2nm_node *node,
				  int node_num)
{
	assert_spin_locked(&o2hb_live_lock);

	event->hn_event_type = type;
	event->hn_node = node;
	event->hn_node_num = node_num;

	mlog(ML_HEARTBEAT, "Queue node %s event for node %d\n",
	     type == O2HB_NODE_UP_CB ? "UP" : "DOWN", node_num);

	list_add_tail(&event->hn_item, &o2hb_node_events);
}

static void o2hb_shutdown_slot(struct o2hb_disk_slot *slot)
{
	struct o2hb_node_event event =
		{ .hn_item = LIST_HEAD_INIT(event.hn_item), };
	struct o2nm_node *node;

	node = o2nm_get_node_by_num(slot->ds_node_num);
	if (!node)
		return;

	spin_lock(&o2hb_live_lock);
	if (!list_empty(&slot->ds_live_item)) {
		mlog(ML_HEARTBEAT, "Shutdown, node %d leaves region\n",
		     slot->ds_node_num);

		list_del_init(&slot->ds_live_item);

		if (list_empty(&o2hb_live_slots[slot->ds_node_num])) {
			clear_bit(slot->ds_node_num, o2hb_live_node_bitmap);

			o2hb_queue_node_event(&event, O2HB_NODE_DOWN_CB, node,
					      slot->ds_node_num);
		}
	}
	spin_unlock(&o2hb_live_lock);

	o2hb_run_event_list(&event);

	o2nm_node_put(node);
}
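
/* The per-slot liveness state machine: a dead slot comes to life once
 * ds_changed_samples reaches O2HB_LIVE_THRESHOLD, and a live slot is
 * declared dead once ds_equal_samples (consecutive unchanged reads)
 * reaches o2hb_dead_threshold or its generation changes underneath
 * us.  Returns nonzero when the slot's liveness changed. */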
static int o2hb_check_slot(struct o2hb_region *reg,
			   struct o2hb_disk_slot *slot)
{
	int changed = 0, gen_changed = 0;
	struct o2hb_node_event event =
		{ .hn_item = LIST_HEAD_INIT(event.hn_item), };
	struct o2nm_node *node;
	struct o2hb_disk_heartbeat_block *hb_block = reg->hr_tmp_block;
	u64 cputime;
	unsigned int dead_ms = o2hb_dead_threshold * O2HB_REGION_TIMEOUT_MS;
	unsigned int slot_dead_ms;

	memcpy(hb_block, slot->ds_raw_block, reg->hr_block_bytes);

	/* Is this correct? Do we assume that the node doesn't exist
	 * if we're not configured for him? */
	node = o2nm_get_node_by_num(slot->ds_node_num);
	if (!node)
		return 0;

	if (!o2hb_verify_crc(reg, hb_block)) {
		/* all paths from here will drop o2hb_live_lock for
		 * us. */
		spin_lock(&o2hb_live_lock);

		/* Don't print an error on the console in this case -
		 * a freshly formatted heartbeat area will not have a
		 * crc set on it. */
		if (list_empty(&slot->ds_live_item))
			goto out;

		/* The node is live but pushed out a bad crc. We
		 * consider it a transient miss but don't populate any
		 * other values as they may be junk. */
		mlog(ML_ERROR, "Node %d has written a bad crc to %s\n",
		     slot->ds_node_num, reg->hr_dev_name);
		o2hb_dump_slot(hb_block);

		slot->ds_equal_samples++;
		goto fire_callbacks;
	}

	/* we don't care if these wrap.. the state transitions below
	 * clear at the right places */
	cputime = le64_to_cpu(hb_block->hb_seq);
	if (slot->ds_last_time != cputime)
		slot->ds_changed_samples++;
	else
		slot->ds_equal_samples++;
	slot->ds_last_time = cputime;

	/* The node changed heartbeat generations. We assume this to
	 * mean it dropped off but came back before we timed out. We
	 * want to consider it down for the time being but don't want
	 * to lose any changed_samples state we might build up to
	 * considering it live again. */
	if (slot->ds_last_generation != le64_to_cpu(hb_block->hb_generation)) {
		gen_changed = 1;
		slot->ds_equal_samples = 0;
		mlog(ML_HEARTBEAT, "Node %d changed generation (0x%llx "
		     "to 0x%llx)\n", slot->ds_node_num,
		     (long long)slot->ds_last_generation,
		     (long long)le64_to_cpu(hb_block->hb_generation));
	}

	slot->ds_last_generation = le64_to_cpu(hb_block->hb_generation);

	mlog(ML_HEARTBEAT, "Slot %d gen 0x%llx cksum 0x%x "
	     "seq %llu last %llu changed %u equal %u\n",
	     slot->ds_node_num, (long long)slot->ds_last_generation,
	     le32_to_cpu(hb_block->hb_cksum),
	     (unsigned long long)le64_to_cpu(hb_block->hb_seq),
	     (unsigned long long)slot->ds_last_time, slot->ds_changed_samples,
	     slot->ds_equal_samples);

	spin_lock(&o2hb_live_lock);

fire_callbacks:
	/* dead nodes only come to life after some number of
	 * changes at any time during their dead time */
	if (list_empty(&slot->ds_live_item) &&
	    slot->ds_changed_samples >= O2HB_LIVE_THRESHOLD) {
		mlog(ML_HEARTBEAT, "Node %d (id 0x%llx) joined my region\n",
		     slot->ds_node_num, (long long)slot->ds_last_generation);

		/* first on the list generates a callback */
		if (list_empty(&o2hb_live_slots[slot->ds_node_num])) {
			set_bit(slot->ds_node_num, o2hb_live_node_bitmap);

			o2hb_queue_node_event(&event, O2HB_NODE_UP_CB, node,
					      slot->ds_node_num);

			changed = 1;
		}

		list_add_tail(&slot->ds_live_item,
			      &o2hb_live_slots[slot->ds_node_num]);

		slot->ds_equal_samples = 0;

		/* We want to be sure that all nodes agree on the
		 * number of milliseconds before a node will be
		 * considered dead. The self-fencing timeout is
		 * computed from this value, and a discrepancy might
		 * result in heartbeat calling a node dead when it
		 * hasn't self-fenced yet. */
		slot_dead_ms = le32_to_cpu(hb_block->hb_dead_ms);
		if (slot_dead_ms && slot_dead_ms != dead_ms) {
			/* TODO: Perhaps we can fail the region here. */
			mlog(ML_ERROR, "Node %d on device %s has a dead count "
			     "of %u ms, but our count is %u ms.\n"
			     "Please double check your configuration values "
			     "for 'O2CB_HEARTBEAT_THRESHOLD'\n",
			     slot->ds_node_num, reg->hr_dev_name, slot_dead_ms,
			     dead_ms);
		}
		goto out;
	}

	/* if the list is dead, we're done.. */
	if (list_empty(&slot->ds_live_item))
		goto out;

	/* live nodes only go dead after enough consecutive missed
	 * samples..  reset the missed counter whenever we see
	 * activity */
	if (slot->ds_equal_samples >= o2hb_dead_threshold || gen_changed) {
		mlog(ML_HEARTBEAT, "Node %d left my region\n",
		     slot->ds_node_num);

		/* last off the live_slot generates a callback */
		list_del_init(&slot->ds_live_item);
		if (list_empty(&o2hb_live_slots[slot->ds_node_num])) {
			clear_bit(slot->ds_node_num, o2hb_live_node_bitmap);

			o2hb_queue_node_event(&event, O2HB_NODE_DOWN_CB, node,
					      slot->ds_node_num);

			changed = 1;
		}

		/* We don't clear this because the node is still
		 * actually writing new blocks. */
		if (!gen_changed)
			slot->ds_changed_samples = 0;
		goto out;
	}
	if (slot->ds_changed_samples) {
		slot->ds_changed_samples = 0;
		slot->ds_equal_samples = 0;
	}
out:
	spin_unlock(&o2hb_live_lock);

	o2hb_run_event_list(&event);

	o2nm_node_put(node);
	return changed;
}
/* This could be faster if we just implemented a find_last_bit, but I
 * don't think the circumstances warrant it. */
static int o2hb_highest_node(unsigned long *nodes,
			     int numbits)
{
	int highest, node;

	highest = numbits;
	node = -1;
	while ((node = find_next_bit(nodes, numbits, node + 1)) != -1) {
		if (node >= numbits)
			break;

		highest = node;
	}

	return highest;
}
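
/* One full heartbeat pass: read every configured slot, sanity-check
 * our own timestamp, write our freshly stamped block, run the
 * liveness checks against what we read, and only then wait for our
 * write to complete so that steady state implies we're visible on
 * disk.  Returns 0 on success. */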
static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
{
	int i, ret, highest_node, change = 0;
	unsigned long configured_nodes[BITS_TO_LONGS(O2NM_MAX_NODES)];
	struct o2hb_bio_wait_ctxt write_wc;

	ret = o2nm_configured_node_map(configured_nodes,
				       sizeof(configured_nodes));
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	highest_node = o2hb_highest_node(configured_nodes, O2NM_MAX_NODES);
	if (highest_node >= O2NM_MAX_NODES) {
		mlog(ML_NOTICE, "ocfs2_heartbeat: no configured nodes found!\n");
		return -EINVAL;
	}

	/* No sense in reading the slots of nodes that don't exist
	 * yet. Of course, if the node definitions have holes in them
	 * then we're reading an empty slot anyway... Consider this
	 * best-effort. */
	ret = o2hb_read_slots(reg, highest_node + 1);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}

	/* With an up to date view of the slots, we can check that no
	 * other node has been improperly configured to heartbeat in
	 * our slot. */
	if (!o2hb_check_last_timestamp(reg))
		mlog(ML_ERROR, "Device \"%s\": another node is heartbeating "
		     "in our slot!\n", reg->hr_dev_name);

	/* fill in the proper info for our next heartbeat */
	o2hb_prepare_block(reg, reg->hr_generation);

	/* And fire off the write. Note that we don't wait on this I/O
	 * until later. */
	ret = o2hb_issue_node_write(reg, &write_wc);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}

	i = -1;
	while((i = find_next_bit(configured_nodes, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) {
		change |= o2hb_check_slot(reg, &reg->hr_slots[i]);
	}

	/*
	 * We have to be sure we've advertised ourselves on disk
	 * before we can go to steady state.  This ensures that
	 * people we find in our steady state have seen us.
	 */
	o2hb_wait_on_io(reg, &write_wc);
	if (write_wc.wc_error) {
		/* Do not re-arm the write timeout on I/O error - we
		 * can't be sure that the new block ever made it to
		 * disk */
		mlog(ML_ERROR, "Write error %d on device \"%s\"\n",
		     write_wc.wc_error, reg->hr_dev_name);
		return write_wc.wc_error;
	}

	o2hb_arm_write_timeout(reg);

	/* let the person who launched us know when things are steady */
	if (!change && (atomic_read(&reg->hr_steady_iterations) != 0)) {
		if (atomic_dec_and_test(&reg->hr_steady_iterations))
			wake_up(&o2hb_steady_queue);
	}

	return 0;
}
/* Subtract b from a, storing the result in a. a *must* have a larger
 * value than b. */
static void o2hb_tv_subtract(struct timeval *a,
			     struct timeval *b)
{
	/* just return 0 when b is after a */
	if (a->tv_sec < b->tv_sec ||
	    (a->tv_sec == b->tv_sec && a->tv_usec < b->tv_usec)) {
		a->tv_sec = 0;
		a->tv_usec = 0;
		return;
	}

	a->tv_sec -= b->tv_sec;
	a->tv_usec -= b->tv_usec;
	while ( a->tv_usec < 0 ) {
		a->tv_sec--;
		a->tv_usec += 1000000;
	}
}

static unsigned int o2hb_elapsed_msecs(struct timeval *start,
				       struct timeval *end)
{
	struct timeval res = *end;

	o2hb_tv_subtract(&res, start);

	return res.tv_sec * 1000 + res.tv_usec / 1000;
}
/*
 * we ride the region ref that the region dir holds.  before the region
 * dir is removed and drops it ref it will wait to tear down this
 * thread.
 */
static int o2hb_thread(void *data)
{
	int i, ret;
	struct o2hb_region *reg = data;
	struct o2hb_bio_wait_ctxt write_wc;
	struct timeval before_hb, after_hb;
	unsigned int elapsed_msec;

	mlog(ML_HEARTBEAT|ML_KTHREAD, "hb thread running\n");

	set_user_nice(current, -20);

	while (!kthread_should_stop() && !reg->hr_unclean_stop) {
		/* We track the time spent inside
		 * o2hb_do_disk_heartbeat so that we avoid more than
		 * hr_timeout_ms between disk writes. On busy systems
		 * this should result in a heartbeat which is less
		 * likely to time itself out. */
		do_gettimeofday(&before_hb);

		i = 0;
		do {
			ret = o2hb_do_disk_heartbeat(reg);
		} while (ret && ++i < 2);

		do_gettimeofday(&after_hb);
		elapsed_msec = o2hb_elapsed_msecs(&before_hb, &after_hb);

		mlog(ML_HEARTBEAT,
		     "start = %lu.%lu, end = %lu.%lu, msec = %u\n",
		     before_hb.tv_sec, (unsigned long) before_hb.tv_usec,
		     after_hb.tv_sec, (unsigned long) after_hb.tv_usec,
		     elapsed_msec);

		if (elapsed_msec < reg->hr_timeout_ms) {
			/* the kthread api has blocked signals for us so no
			 * need to record the return value. */
			msleep_interruptible(reg->hr_timeout_ms - elapsed_msec);
		}
	}

	o2hb_disarm_write_timeout(reg);

	/* unclean stop is only used in very bad situations */
	for(i = 0; !reg->hr_unclean_stop && i < reg->hr_blocks; i++)
		o2hb_shutdown_slot(&reg->hr_slots[i]);

	/* Explicit down notification - avoid forcing the other nodes
	 * to timeout on this region when we could just as easily
	 * write a clear generation - thus indicating to them that
	 * this node has left this region.
	 *
	 * XXX: Should we skip this on unclean_stop? */
	o2hb_prepare_block(reg, 0);
	ret = o2hb_issue_node_write(reg, &write_wc);
	if (ret == 0)
		o2hb_wait_on_io(reg, &write_wc);
	else
		mlog_errno(ret);

	mlog(ML_HEARTBEAT|ML_KTHREAD, "hb thread exiting\n");

	return 0;
}
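
/* The "livenodes" debugfs file exposes the live node bitmap as a
 * space-separated list of node numbers, snapshotted at open time. */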
#ifdef CONFIG_DEBUG_FS
static int o2hb_debug_open(struct inode *inode, struct file *file)
{
	unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	char *buf = NULL;
	int i = -1;
	int out = 0;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		goto bail;

	o2hb_fill_node_map(map, sizeof(map));

	while ((i = find_next_bit(map, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES)
		out += snprintf(buf + out, PAGE_SIZE - out, "%d ", i);
	out += snprintf(buf + out, PAGE_SIZE - out, "\n");

	i_size_write(inode, out);

	file->private_data = buf;

	return 0;
bail:
	return -ENOMEM;
}

static int o2hb_debug_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static ssize_t o2hb_debug_read(struct file *file, char __user *buf,
			       size_t nbytes, loff_t *ppos)
{
	return simple_read_from_buffer(buf, nbytes, ppos, file->private_data,
				       i_size_read(file->f_mapping->host));
}
#else
static int o2hb_debug_open(struct inode *inode, struct file *file)
{
	return 0;
}
static int o2hb_debug_release(struct inode *inode, struct file *file)
{
	return 0;
}
static ssize_t o2hb_debug_read(struct file *file, char __user *buf,
			       size_t nbytes, loff_t *ppos)
{
	return 0;
}
#endif  /* CONFIG_DEBUG_FS */
static const struct file_operations o2hb_debug_fops = {
	.open =		o2hb_debug_open,
	.release =	o2hb_debug_release,
	.read =		o2hb_debug_read,
	.llseek =	generic_file_llseek,
};

void o2hb_exit(void)
{
	if (o2hb_debug_livenodes)
		debugfs_remove(o2hb_debug_livenodes);
	if (o2hb_debug_dir)
		debugfs_remove(o2hb_debug_dir);
}

int o2hb_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(o2hb_callbacks); i++)
		INIT_LIST_HEAD(&o2hb_callbacks[i].list);

	for (i = 0; i < ARRAY_SIZE(o2hb_live_slots); i++)
		INIT_LIST_HEAD(&o2hb_live_slots[i]);

	INIT_LIST_HEAD(&o2hb_node_events);

	memset(o2hb_live_node_bitmap, 0, sizeof(o2hb_live_node_bitmap));

	o2hb_debug_dir = debugfs_create_dir(O2HB_DEBUG_DIR, NULL);
	if (!o2hb_debug_dir) {
		mlog_errno(-ENOMEM);
		return -ENOMEM;
	}

	o2hb_debug_livenodes = debugfs_create_file(O2HB_DEBUG_LIVENODES,
						   S_IFREG|S_IRUSR,
						   o2hb_debug_dir, NULL,
						   &o2hb_debug_fops);
	if (!o2hb_debug_livenodes) {
		mlog_errno(-ENOMEM);
		debugfs_remove(o2hb_debug_dir);
		return -ENOMEM;
	}

	return 0;
}
/* if we're already in a callback then we're already serialized by the sem */
static void o2hb_fill_node_map_from_callback(unsigned long *map,
					     unsigned bytes)
{
	BUG_ON(bytes < (BITS_TO_LONGS(O2NM_MAX_NODES) * sizeof(unsigned long)));

	memcpy(map, &o2hb_live_node_bitmap, bytes);
}

/*
 * get a map of all nodes that are heartbeating in any regions
 */
void o2hb_fill_node_map(unsigned long *map, unsigned bytes)
{
	/* callers want to serialize this map and callbacks so that they
	 * can trust that they don't miss nodes coming to the party */
	down_read(&o2hb_callback_sem);
	spin_lock(&o2hb_live_lock);
	o2hb_fill_node_map_from_callback(map, bytes);
	spin_unlock(&o2hb_live_lock);
	up_read(&o2hb_callback_sem);
}
EXPORT_SYMBOL_GPL(o2hb_fill_node_map);
/*
 * heartbeat configfs bits.  The heartbeat set is a default set under
 * the cluster set in nodemanager.c.
 */

static struct o2hb_region *to_o2hb_region(struct config_item *item)
{
	return item ? container_of(item, struct o2hb_region, hr_item) : NULL;
}

/* drop_item only drops its ref after killing the thread, nothing should
 * be using the region anymore.  this has to clean up any state that
 * attributes might have built up. */
static void o2hb_region_release(struct config_item *item)
{
	int i;
	struct page *page;
	struct o2hb_region *reg = to_o2hb_region(item);

	if (reg->hr_tmp_block)
		kfree(reg->hr_tmp_block);

	if (reg->hr_slot_data) {
		for (i = 0; i < reg->hr_num_pages; i++) {
			page = reg->hr_slot_data[i];
			if (page)
				__free_page(page);
		}
		kfree(reg->hr_slot_data);
	}

	if (reg->hr_bdev)
		blkdev_put(reg->hr_bdev, FMODE_READ|FMODE_WRITE);

	if (reg->hr_slots)
		kfree(reg->hr_slots);

	spin_lock(&o2hb_live_lock);
	list_del(&reg->hr_all_item);
	spin_unlock(&o2hb_live_lock);

	kfree(reg);
}
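
/* Parse a block size typed into configfs: it must be a power of two
 * between 512 and 4096 bytes, the same range the fs itself
 * supports. */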
static int o2hb_read_block_input(struct o2hb_region *reg,
				 const char *page,
				 size_t count,
				 unsigned long *ret_bytes,
				 unsigned int *ret_bits)
{
	unsigned long bytes;
	char *p = (char *)page;

	bytes = simple_strtoul(p, &p, 0);
	if (!p || (*p && (*p != '\n')))
		return -EINVAL;

	/* Heartbeat and fs min / max block sizes are the same. */
	if (bytes > 4096 || bytes < 512)
		return -ERANGE;
	if (hweight16(bytes) != 1)
		return -EINVAL;

	if (ret_bytes)
		*ret_bytes = bytes;
	if (ret_bits)
		*ret_bits = ffs(bytes) - 1;

	return 0;
}

static ssize_t o2hb_region_block_bytes_read(struct o2hb_region *reg,
					    char *page)
{
	return sprintf(page, "%u\n", reg->hr_block_bytes);
}

static ssize_t o2hb_region_block_bytes_write(struct o2hb_region *reg,
					     const char *page,
					     size_t count)
{
	int status;
	unsigned long block_bytes;
	unsigned int block_bits;

	if (reg->hr_bdev)
		return -EINVAL;

	status = o2hb_read_block_input(reg, page, count,
				       &block_bytes, &block_bits);
	if (status)
		return status;

	reg->hr_block_bytes = (unsigned int)block_bytes;
	reg->hr_block_bits = block_bits;

	return count;
}
static ssize_t o2hb_region_start_block_read(struct o2hb_region *reg,
					    char *page)
{
	return sprintf(page, "%llu\n", reg->hr_start_block);
}

static ssize_t o2hb_region_start_block_write(struct o2hb_region *reg,
					     const char *page,
					     size_t count)
{
	unsigned long long tmp;
	char *p = (char *)page;

	if (reg->hr_bdev)
		return -EINVAL;

	tmp = simple_strtoull(p, &p, 0);
	if (!p || (*p && (*p != '\n')))
		return -EINVAL;

	reg->hr_start_block = tmp;

	return count;
}

static ssize_t o2hb_region_blocks_read(struct o2hb_region *reg,
				       char *page)
{
	return sprintf(page, "%d\n", reg->hr_blocks);
}

static ssize_t o2hb_region_blocks_write(struct o2hb_region *reg,
					const char *page,
					size_t count)
{
	unsigned long tmp;
	char *p = (char *)page;

	if (reg->hr_bdev)
		return -EINVAL;

	tmp = simple_strtoul(p, &p, 0);
	if (!p || (*p && (*p != '\n')))
		return -EINVAL;

	if (tmp > O2NM_MAX_NODES || tmp == 0)
		return -ERANGE;

	reg->hr_blocks = (unsigned int)tmp;

	return count;
}

static ssize_t o2hb_region_dev_read(struct o2hb_region *reg,
				    char *page)
{
	unsigned int ret = 0;

	if (reg->hr_bdev)
		ret = sprintf(page, "%s\n", reg->hr_dev_name);

	return ret;
}
static void o2hb_init_region_params(struct o2hb_region *reg)
{
	reg->hr_slots_per_page = PAGE_CACHE_SIZE >> reg->hr_block_bits;
	reg->hr_timeout_ms = O2HB_REGION_TIMEOUT_MS;

	mlog(ML_HEARTBEAT, "hr_start_block = %llu, hr_blocks = %u\n",
	     reg->hr_start_block, reg->hr_blocks);
	mlog(ML_HEARTBEAT, "hr_block_bytes = %u, hr_block_bits = %u\n",
	     reg->hr_block_bytes, reg->hr_block_bits);
	mlog(ML_HEARTBEAT, "hr_timeout_ms = %u\n", reg->hr_timeout_ms);
	mlog(ML_HEARTBEAT, "dead threshold = %u\n", o2hb_dead_threshold);
}
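
/* Carve the preallocated pages into heartbeat blocks: each page holds
 * hr_slots_per_page (PAGE_CACHE_SIZE >> hr_block_bits) slots, and
 * each slot's ds_raw_block simply points into its page. */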
static int o2hb_map_slot_data(struct o2hb_region *reg)
{
	int i, j;
	unsigned int last_slot;
	unsigned int spp = reg->hr_slots_per_page;
	struct page *page;
	char *raw;
	struct o2hb_disk_slot *slot;

	reg->hr_tmp_block = kmalloc(reg->hr_block_bytes, GFP_KERNEL);
	if (reg->hr_tmp_block == NULL) {
		mlog_errno(-ENOMEM);
		return -ENOMEM;
	}

	reg->hr_slots = kcalloc(reg->hr_blocks,
				sizeof(struct o2hb_disk_slot), GFP_KERNEL);
	if (reg->hr_slots == NULL) {
		mlog_errno(-ENOMEM);
		return -ENOMEM;
	}

	for(i = 0; i < reg->hr_blocks; i++) {
		slot = &reg->hr_slots[i];
		slot->ds_node_num = i;
		INIT_LIST_HEAD(&slot->ds_live_item);
		slot->ds_raw_block = NULL;
	}

	reg->hr_num_pages = (reg->hr_blocks + spp - 1) / spp;
	mlog(ML_HEARTBEAT, "Going to require %u pages to cover %u blocks "
			   "at %u blocks per page\n",
	     reg->hr_num_pages, reg->hr_blocks, spp);

	reg->hr_slot_data = kcalloc(reg->hr_num_pages, sizeof(struct page *),
				    GFP_KERNEL);
	if (!reg->hr_slot_data) {
		mlog_errno(-ENOMEM);
		return -ENOMEM;
	}

	for(i = 0; i < reg->hr_num_pages; i++) {
		page = alloc_page(GFP_KERNEL);
		if (!page) {
			mlog_errno(-ENOMEM);
			return -ENOMEM;
		}

		reg->hr_slot_data[i] = page;

		last_slot = i * spp;
		raw = page_address(page);
		for (j = 0;
		     (j < spp) && ((j + last_slot) < reg->hr_blocks);
		     j++) {
			BUG_ON((j + last_slot) >= reg->hr_blocks);

			slot = &reg->hr_slots[j + last_slot];
			slot->ds_raw_block =
				(struct o2hb_disk_heartbeat_block *) raw;

			raw += reg->hr_block_bytes;
		}
	}

	return 0;
}
/* Read in all the slots available and populate the tracking
 * structures so that we can start with a baseline idea of what's
 * there. */
static int o2hb_populate_slot_data(struct o2hb_region *reg)
{
	int ret, i;
	struct o2hb_disk_slot *slot;
	struct o2hb_disk_heartbeat_block *hb_block;

	ret = o2hb_read_slots(reg, reg->hr_blocks);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* We only want to get an idea of the values initially in each
	 * slot, so we do no verification - o2hb_check_slot will
	 * actually determine if each configured slot is valid and
	 * whether any values have changed. */
	for(i = 0; i < reg->hr_blocks; i++) {
		slot = &reg->hr_slots[i];
		hb_block = (struct o2hb_disk_heartbeat_block *) slot->ds_raw_block;

		/* Only fill the values that o2hb_check_slot uses to
		 * determine changing slots */
		slot->ds_last_time = le64_to_cpu(hb_block->hb_seq);
		slot->ds_last_generation = le64_to_cpu(hb_block->hb_generation);
	}

out:
	return ret;
}
/* this is acting as commit; we set up all of hr_bdev and hr_task or nothing */
static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
				     const char *page,
				     size_t count)
{
	struct task_struct *hb_task;
	long fd;
	int sectsize;
	char *p = (char *)page;
	struct file *filp = NULL;
	struct inode *inode = NULL;
	ssize_t ret = -EINVAL;

	if (reg->hr_bdev)
		goto out;

	/* We can't heartbeat without having had our node number
	 * configured yet. */
	if (o2nm_this_node() == O2NM_MAX_NODES)
		goto out;

	fd = simple_strtol(p, &p, 0);
	if (!p || (*p && (*p != '\n')))
		goto out;

	if (fd < 0 || fd >= INT_MAX)
		goto out;

	filp = fget(fd);
	if (filp == NULL)
		goto out;

	if (reg->hr_blocks == 0 || reg->hr_start_block == 0 ||
	    reg->hr_block_bytes == 0)
		goto out;

	inode = igrab(filp->f_mapping->host);
	if (inode == NULL)
		goto out;

	if (!S_ISBLK(inode->i_mode))
		goto out;

	reg->hr_bdev = I_BDEV(filp->f_mapping->host);
	ret = blkdev_get(reg->hr_bdev, FMODE_WRITE | FMODE_READ);
	if (ret) {
		reg->hr_bdev = NULL;
		goto out;
	}
	inode = NULL;

	bdevname(reg->hr_bdev, reg->hr_dev_name);

	sectsize = bdev_logical_block_size(reg->hr_bdev);
	if (sectsize != reg->hr_block_bytes) {
		mlog(ML_ERROR,
		     "blocksize %u incorrect for device, expected %d",
		     reg->hr_block_bytes, sectsize);
		ret = -EINVAL;
		goto out;
	}

	o2hb_init_region_params(reg);

	/* Generation of zero is invalid */
	do {
		get_random_bytes(&reg->hr_generation,
				 sizeof(reg->hr_generation));
	} while (reg->hr_generation == 0);

	ret = o2hb_map_slot_data(reg);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = o2hb_populate_slot_data(reg);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	INIT_DELAYED_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout);

	/*
	 * A node is considered live after it has beat LIVE_THRESHOLD
	 * times.  We're not steady until we've given them a chance
	 * _after_ our first read.
	 */
	atomic_set(&reg->hr_steady_iterations, O2HB_LIVE_THRESHOLD + 1);

	hb_task = kthread_run(o2hb_thread, reg, "o2hb-%s",
			      reg->hr_item.ci_name);
	if (IS_ERR(hb_task)) {
		ret = PTR_ERR(hb_task);
		mlog_errno(ret);
		goto out;
	}

	spin_lock(&o2hb_live_lock);
	reg->hr_task = hb_task;
	spin_unlock(&o2hb_live_lock);

	ret = wait_event_interruptible(o2hb_steady_queue,
				atomic_read(&reg->hr_steady_iterations) == 0);
	if (ret) {
		/* We got interrupted (hello ptrace!).  Clean up */
		spin_lock(&o2hb_live_lock);
		hb_task = reg->hr_task;
		reg->hr_task = NULL;
		spin_unlock(&o2hb_live_lock);

		if (hb_task)
			kthread_stop(hb_task);
		goto out;
	}

	/* Ok, we were woken.  Make sure it wasn't by drop_item() */
	spin_lock(&o2hb_live_lock);
	hb_task = reg->hr_task;
	spin_unlock(&o2hb_live_lock);

	if (hb_task)
		ret = count;
	else
		ret = -EIO;

	if (hb_task && o2hb_global_heartbeat_active())
		printk(KERN_NOTICE "o2hb: Heartbeat started on region %s\n",
		       config_item_name(&reg->hr_item));

out:
	if (filp)
		fput(filp);
	if (inode)
		iput(inode);
	if (ret < 0 && reg->hr_bdev) {
		blkdev_put(reg->hr_bdev, FMODE_READ|FMODE_WRITE);
		reg->hr_bdev = NULL;
	}
	return ret;
}
static ssize_t o2hb_region_pid_read(struct o2hb_region *reg,
				    char *page)
{
	pid_t pid = 0;

	spin_lock(&o2hb_live_lock);
	if (reg->hr_task)
		pid = task_pid_nr(reg->hr_task);
	spin_unlock(&o2hb_live_lock);

	if (!pid)
		return 0;

	return sprintf(page, "%u\n", pid);
}

struct o2hb_region_attribute {
	struct configfs_attribute attr;
	ssize_t (*show)(struct o2hb_region *, char *);
	ssize_t (*store)(struct o2hb_region *, const char *, size_t);
};
static struct o2hb_region_attribute o2hb_region_attr_block_bytes = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "block_bytes",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2hb_region_block_bytes_read,
	.store	= o2hb_region_block_bytes_write,
};

static struct o2hb_region_attribute o2hb_region_attr_start_block = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "start_block",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2hb_region_start_block_read,
	.store	= o2hb_region_start_block_write,
};

static struct o2hb_region_attribute o2hb_region_attr_blocks = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "blocks",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2hb_region_blocks_read,
	.store	= o2hb_region_blocks_write,
};

static struct o2hb_region_attribute o2hb_region_attr_dev = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "dev",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2hb_region_dev_read,
	.store	= o2hb_region_dev_write,
};

static struct o2hb_region_attribute o2hb_region_attr_pid = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "pid",
		    .ca_mode = S_IRUGO | S_IRUSR },
	.show	= o2hb_region_pid_read,
};

static struct configfs_attribute *o2hb_region_attrs[] = {
	&o2hb_region_attr_block_bytes.attr,
	&o2hb_region_attr_start_block.attr,
	&o2hb_region_attr_blocks.attr,
	&o2hb_region_attr_dev.attr,
	&o2hb_region_attr_pid.attr,
	NULL,
};
static ssize_t o2hb_region_show(struct config_item *item,
				struct configfs_attribute *attr,
				char *page)
{
	struct o2hb_region *reg = to_o2hb_region(item);
	struct o2hb_region_attribute *o2hb_region_attr =
		container_of(attr, struct o2hb_region_attribute, attr);
	ssize_t ret = 0;

	if (o2hb_region_attr->show)
		ret = o2hb_region_attr->show(reg, page);
	return ret;
}

static ssize_t o2hb_region_store(struct config_item *item,
				 struct configfs_attribute *attr,
				 const char *page, size_t count)
{
	struct o2hb_region *reg = to_o2hb_region(item);
	struct o2hb_region_attribute *o2hb_region_attr =
		container_of(attr, struct o2hb_region_attribute, attr);
	ssize_t ret = -EINVAL;

	if (o2hb_region_attr->store)
		ret = o2hb_region_attr->store(reg, page, count);
	return ret;
}

static struct configfs_item_operations o2hb_region_item_ops = {
	.release		= o2hb_region_release,
	.show_attribute		= o2hb_region_show,
	.store_attribute	= o2hb_region_store,
};

static struct config_item_type o2hb_region_type = {
	.ct_item_ops	= &o2hb_region_item_ops,
	.ct_attrs	= o2hb_region_attrs,
	.ct_owner	= THIS_MODULE,
};
struct o2hb_heartbeat_group {
	struct config_group hs_group;
};

static struct o2hb_heartbeat_group *to_o2hb_heartbeat_group(struct config_group *group)
{
	return group ?
		container_of(group, struct o2hb_heartbeat_group, hs_group)
		: NULL;
}

static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *group,
							  const char *name)
{
	struct o2hb_region *reg = NULL;

	/* reject over-long names before allocating so the early
	 * return can't leak the region */
	if (strlen(name) > O2HB_MAX_REGION_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);

	reg = kzalloc(sizeof(struct o2hb_region), GFP_KERNEL);
	if (reg == NULL)
		return ERR_PTR(-ENOMEM);

	config_item_init_type_name(&reg->hr_item, name, &o2hb_region_type);

	spin_lock(&o2hb_live_lock);
	list_add_tail(&reg->hr_all_item, &o2hb_all_regions);
	spin_unlock(&o2hb_live_lock);

	return &reg->hr_item;
}
static void o2hb_heartbeat_group_drop_item(struct config_group *group,
					   struct config_item *item)
{
	struct task_struct *hb_task;
	struct o2hb_region *reg = to_o2hb_region(item);

	/* stop the thread when the user removes the region dir */
	spin_lock(&o2hb_live_lock);
	hb_task = reg->hr_task;
	reg->hr_task = NULL;
	spin_unlock(&o2hb_live_lock);

	if (hb_task)
		kthread_stop(hb_task);

	/*
	 * If we're racing a dev_write(), we need to wake them.  They will
	 * check reg->hr_task
	 */
	if (atomic_read(&reg->hr_steady_iterations) != 0) {
		atomic_set(&reg->hr_steady_iterations, 0);
		wake_up(&o2hb_steady_queue);
	}

	if (o2hb_global_heartbeat_active())
		printk(KERN_NOTICE "o2hb: Heartbeat stopped on region %s\n",
		       config_item_name(&reg->hr_item));
	config_item_put(item);
}

struct o2hb_heartbeat_group_attribute {
	struct configfs_attribute attr;
	ssize_t (*show)(struct o2hb_heartbeat_group *, char *);
	ssize_t (*store)(struct o2hb_heartbeat_group *, const char *, size_t);
};
static ssize_t o2hb_heartbeat_group_show(struct config_item *item,
					 struct configfs_attribute *attr,
					 char *page)
{
	struct o2hb_heartbeat_group *reg = to_o2hb_heartbeat_group(to_config_group(item));
	struct o2hb_heartbeat_group_attribute *o2hb_heartbeat_group_attr =
		container_of(attr, struct o2hb_heartbeat_group_attribute, attr);
	ssize_t ret = 0;

	if (o2hb_heartbeat_group_attr->show)
		ret = o2hb_heartbeat_group_attr->show(reg, page);
	return ret;
}

static ssize_t o2hb_heartbeat_group_store(struct config_item *item,
					  struct configfs_attribute *attr,
					  const char *page, size_t count)
{
	struct o2hb_heartbeat_group *reg = to_o2hb_heartbeat_group(to_config_group(item));
	struct o2hb_heartbeat_group_attribute *o2hb_heartbeat_group_attr =
		container_of(attr, struct o2hb_heartbeat_group_attribute, attr);
	ssize_t ret = -EINVAL;

	if (o2hb_heartbeat_group_attr->store)
		ret = o2hb_heartbeat_group_attr->store(reg, page, count);
	return ret;
}

static ssize_t o2hb_heartbeat_group_threshold_show(struct o2hb_heartbeat_group *group,
						   char *page)
{
	return sprintf(page, "%u\n", o2hb_dead_threshold);
}

static ssize_t o2hb_heartbeat_group_threshold_store(struct o2hb_heartbeat_group *group,
						    const char *page,
						    size_t count)
{
	unsigned long tmp;
	char *p = (char *)page;

	tmp = simple_strtoul(p, &p, 10);
	if (!p || (*p && (*p != '\n')))
		return -EINVAL;

	/* this will validate ranges for us. */
	o2hb_dead_threshold_set((unsigned int) tmp);

	return count;
}
static
ssize_t o2hb_heartbeat_group_mode_show(struct o2hb_heartbeat_group *group,
				       char *page)
{
	return sprintf(page, "%s\n",
		       o2hb_heartbeat_mode_desc[o2hb_heartbeat_mode]);
}

static
ssize_t o2hb_heartbeat_group_mode_store(struct o2hb_heartbeat_group *group,
					const char *page, size_t count)
{
	unsigned int i;
	int ret;
	size_t len;

	len = (page[count - 1] == '\n') ? count - 1 : count;
	if (!len)
		return -EINVAL;

	for (i = 0; i < O2HB_HEARTBEAT_NUM_MODES; ++i) {
		if (strnicmp(page, o2hb_heartbeat_mode_desc[i], len))
			continue;

		ret = o2hb_global_heartbeat_mode_set(i);
		if (!ret)
			printk(KERN_NOTICE "o2hb: Heartbeat mode set to %s\n",
			       o2hb_heartbeat_mode_desc[i]);
		return count;
	}

	return -EINVAL;
}
static struct o2hb_heartbeat_group_attribute o2hb_heartbeat_group_attr_threshold = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "dead_threshold",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2hb_heartbeat_group_threshold_show,
	.store	= o2hb_heartbeat_group_threshold_store,
};

static struct o2hb_heartbeat_group_attribute o2hb_heartbeat_group_attr_mode = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "mode",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2hb_heartbeat_group_mode_show,
	.store	= o2hb_heartbeat_group_mode_store,
};

static struct configfs_attribute *o2hb_heartbeat_group_attrs[] = {
	&o2hb_heartbeat_group_attr_threshold.attr,
	&o2hb_heartbeat_group_attr_mode.attr,
	NULL,
};

static struct configfs_item_operations o2hb_heartbeat_group_item_ops = {
	.show_attribute		= o2hb_heartbeat_group_show,
	.store_attribute	= o2hb_heartbeat_group_store,
};

static struct configfs_group_operations o2hb_heartbeat_group_group_ops = {
	.make_item	= o2hb_heartbeat_group_make_item,
	.drop_item	= o2hb_heartbeat_group_drop_item,
};

static struct config_item_type o2hb_heartbeat_group_type = {
	.ct_group_ops	= &o2hb_heartbeat_group_group_ops,
	.ct_item_ops	= &o2hb_heartbeat_group_item_ops,
	.ct_attrs	= o2hb_heartbeat_group_attrs,
	.ct_owner	= THIS_MODULE,
};
/* this is just here to avoid touching group in heartbeat.h which the
 * entire damn world #includes */
struct config_group *o2hb_alloc_hb_set(void)
{
	struct o2hb_heartbeat_group *hs = NULL;
	struct config_group *ret = NULL;

	hs = kzalloc(sizeof(struct o2hb_heartbeat_group), GFP_KERNEL);
	if (hs == NULL)
		goto out;

	config_group_init_type_name(&hs->hs_group, "heartbeat",
				    &o2hb_heartbeat_group_type);

	ret = &hs->hs_group;
out:
	if (ret == NULL)
		kfree(hs);

	return ret;
}

void o2hb_free_hb_set(struct config_group *group)
{
	struct o2hb_heartbeat_group *hs = to_o2hb_heartbeat_group(group);
	kfree(hs);
}

/* hb callback registration and issuing */
static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type)
{
	if (type == O2HB_NUM_CB)
		return ERR_PTR(-EINVAL);

	return &o2hb_callbacks[type];
}

void o2hb_setup_callback(struct o2hb_callback_func *hc,
			 enum o2hb_callback_type type,
			 o2hb_cb_func *func,
			 void *data,
			 int priority)
{
	INIT_LIST_HEAD(&hc->hc_item);
	hc->hc_func = func;
	hc->hc_data = data;
	hc->hc_priority = priority;
	hc->hc_type = type;
	hc->hc_magic = O2HB_CB_MAGIC;
}
EXPORT_SYMBOL_GPL(o2hb_setup_callback);
static struct o2hb_region *o2hb_find_region(const char *region_uuid)
{
	struct o2hb_region *p, *reg = NULL;

	assert_spin_locked(&o2hb_live_lock);

	list_for_each_entry(p, &o2hb_all_regions, hr_all_item) {
		if (!strcmp(region_uuid, config_item_name(&p->hr_item))) {
			reg = p;
			break;
		}
	}

	return reg;
}

static int o2hb_region_get(const char *region_uuid)
{
	int ret = 0;
	struct o2hb_region *reg;

	spin_lock(&o2hb_live_lock);

	reg = o2hb_find_region(region_uuid);
	if (!reg)
		ret = -ENOENT;
	spin_unlock(&o2hb_live_lock);

	if (ret)
		goto out;

	ret = o2nm_depend_this_node();
	if (ret)
		goto out;

	ret = o2nm_depend_item(&reg->hr_item);
	if (ret)
		o2nm_undepend_this_node();

out:
	return ret;
}

static void o2hb_region_put(const char *region_uuid)
{
	struct o2hb_region *reg;

	spin_lock(&o2hb_live_lock);

	reg = o2hb_find_region(region_uuid);

	spin_unlock(&o2hb_live_lock);

	if (reg) {
		o2nm_undepend_item(&reg->hr_item);
		o2nm_undepend_this_node();
	}
}
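
/* Callbacks are kept sorted by ascending hc_priority; registration
 * walks the list and inserts in front of the first entry with a
 * higher priority.  Registering against a region also pins it (and
 * this node's configfs dependencies) via o2hb_region_get() for as
 * long as the callback stays registered. */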
int o2hb_register_callback(const char *region_uuid,
			   struct o2hb_callback_func *hc)
{
	struct o2hb_callback_func *tmp;
	struct list_head *iter;
	struct o2hb_callback *hbcall;
	int ret;

	BUG_ON(hc->hc_magic != O2HB_CB_MAGIC);
	BUG_ON(!list_empty(&hc->hc_item));

	hbcall = hbcall_from_type(hc->hc_type);
	if (IS_ERR(hbcall)) {
		ret = PTR_ERR(hbcall);
		goto out;
	}

	if (region_uuid) {
		ret = o2hb_region_get(region_uuid);
		if (ret)
			goto out;
	}

	down_write(&o2hb_callback_sem);

	list_for_each(iter, &hbcall->list) {
		tmp = list_entry(iter, struct o2hb_callback_func, hc_item);
		if (hc->hc_priority < tmp->hc_priority) {
			list_add_tail(&hc->hc_item, iter);
			break;
		}
	}
	if (list_empty(&hc->hc_item))
		list_add_tail(&hc->hc_item, &hbcall->list);

	up_write(&o2hb_callback_sem);
	ret = 0;
out:
	mlog(ML_HEARTBEAT, "returning %d on behalf of %p for funcs %p\n",
	     ret, __builtin_return_address(0), hc);
	return ret;
}
EXPORT_SYMBOL_GPL(o2hb_register_callback);

void o2hb_unregister_callback(const char *region_uuid,
			      struct o2hb_callback_func *hc)
{
	BUG_ON(hc->hc_magic != O2HB_CB_MAGIC);

	mlog(ML_HEARTBEAT, "on behalf of %p for funcs %p\n",
	     __builtin_return_address(0), hc);

	/* XXX Can this happen _with_ a region reference? */
	if (list_empty(&hc->hc_item))
		return;

	if (region_uuid)
		o2hb_region_put(region_uuid);

	down_write(&o2hb_callback_sem);

	list_del_init(&hc->hc_item);

	up_write(&o2hb_callback_sem);
}
EXPORT_SYMBOL_GPL(o2hb_unregister_callback);
int o2hb_check_node_heartbeating(u8 node_num)
{
	unsigned long testing_map[BITS_TO_LONGS(O2NM_MAX_NODES)];

	o2hb_fill_node_map(testing_map, sizeof(testing_map));
	if (!test_bit(node_num, testing_map)) {
		mlog(ML_HEARTBEAT,
		     "node (%u) does not have heartbeating enabled.\n",
		     node_num);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(o2hb_check_node_heartbeating);

int o2hb_check_node_heartbeating_from_callback(u8 node_num)
{
	unsigned long testing_map[BITS_TO_LONGS(O2NM_MAX_NODES)];

	o2hb_fill_node_map_from_callback(testing_map, sizeof(testing_map));
	if (!test_bit(node_num, testing_map)) {
		mlog(ML_HEARTBEAT,
		     "node (%u) does not have heartbeating enabled.\n",
		     node_num);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(o2hb_check_node_heartbeating_from_callback);

/* Makes sure our local node is configured with a node number, and is
 * heartbeating. */
int o2hb_check_local_node_heartbeating(void)
{
	u8 node_num;

	/* if this node was set then we have networking */
	node_num = o2nm_this_node();
	if (node_num == O2NM_MAX_NODES) {
		mlog(ML_HEARTBEAT, "this node has not been configured.\n");
		return 0;
	}

	return o2hb_check_node_heartbeating(node_num);
}
EXPORT_SYMBOL_GPL(o2hb_check_local_node_heartbeating);
/*
 * this is just a hack until we get the plumbing which flips file systems
 * read only and drops the hb ref instead of killing the node dead.
 */
void o2hb_stop_all_regions(void)
{
	struct o2hb_region *reg;

	mlog(ML_ERROR, "stopping heartbeat on all active regions.\n");

	spin_lock(&o2hb_live_lock);

	list_for_each_entry(reg, &o2hb_all_regions, hr_all_item)
		reg->hr_unclean_stop = 1;

	spin_unlock(&o2hb_live_lock);
}
EXPORT_SYMBOL_GPL(o2hb_stop_all_regions);
int o2hb_get_all_regions(char *region_uuids, u8 max_regions)
{
	struct o2hb_region *reg;
	int numregs = 0;
	char *p;

	spin_lock(&o2hb_live_lock);

	p = region_uuids;
	list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) {
		mlog(0, "Region: %s\n", config_item_name(&reg->hr_item));
		if (numregs < max_regions) {
			memcpy(p, config_item_name(&reg->hr_item),
			       O2HB_MAX_REGION_NAME_LEN);
			p += O2HB_MAX_REGION_NAME_LEN;
		}
		numregs++;
	}

	spin_unlock(&o2hb_live_lock);

	return numregs;
}
EXPORT_SYMBOL_GPL(o2hb_get_all_regions);
int o2hb_global_heartbeat_active(void)
{
	return (o2hb_heartbeat_mode == O2HB_HEARTBEAT_GLOBAL);
}
EXPORT_SYMBOL(o2hb_global_heartbeat_active);