#endif
};
+struct rcu_node;
+
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
void *stack;
#ifdef CONFIG_TREE_PREEMPT_RCU
int rcu_read_lock_nesting;
char rcu_read_unlock_special;
- void *rcu_blocked_node;
+ struct rcu_node *rcu_blocked_node;
struct list_head rcu_node_entry;
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
#endif /* #ifdef CONFIG_SMP */
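The forward declaration above lets task_struct carry a properly typed rcu_blocked_node even though sched.h never sees the full rcu_node definition; C allows pointers to incomplete types, which is what lets the (void *) casts disappear in the later hunks. A stand-alone sketch of the same pattern, using hypothetical names rather than the patch's own:

	struct rcu_node;			/* incomplete type: fine, we only hold a pointer */

	struct example_task {
		struct rcu_node *blocked_node;	/* typed pointer, no (void *) round trips */
	};

	static void example_note_blocked(struct example_task *t, struct rcu_node *rnp)
	{
		t->blocked_node = rnp;		/* stored and read back without casts */
	}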
#ifdef CONFIG_NO_HZ
-static DEFINE_RATELIMIT_STATE(rcu_rs, 10 * HZ, 5);
/**
* rcu_enter_nohz - inform RCU that current CPU is entering nohz
rdtp = &__get_cpu_var(rcu_dynticks);
rdtp->dynticks++;
rdtp->dynticks_nesting--;
- WARN_ON_RATELIMIT(rdtp->dynticks & 0x1, &rcu_rs);
+ WARN_ON_ONCE(rdtp->dynticks & 0x1);
local_irq_restore(flags);
}
rdtp = &__get_cpu_var(rcu_dynticks);
rdtp->dynticks++;
rdtp->dynticks_nesting++;
- WARN_ON_RATELIMIT(!(rdtp->dynticks & 0x1), &rcu_rs);
+ WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
local_irq_restore(flags);
smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
}
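Taken together, the reworked warnings in rcu_enter_nohz() and rcu_exit_nohz() assert the dynticks parity convention: the counter is incremented on every transition, so it is even while the CPU is in dynticks-idle mode and odd while RCU must still watch the CPU. A minimal stand-alone sketch of that invariant (the structure and function names here are illustrative, not from the patch):

	#include <linux/bug.h>

	struct dynticks_sketch {
		long dynticks;		/* even: dynticks-idle, odd: CPU visible to RCU */
	};

	static void sketch_enter_idle(struct dynticks_sketch *d)
	{
		d->dynticks++;				/* odd -> even */
		WARN_ON_ONCE(d->dynticks & 0x1);	/* complain (once) if still odd */
	}

	static void sketch_exit_idle(struct dynticks_sketch *d)
	{
		d->dynticks++;				/* even -> odd */
		WARN_ON_ONCE(!(d->dynticks & 0x1));	/* complain (once) if still even */
	}

The NMI and irq counters below get the same WARN_ON_ONCE() treatment.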
if (rdtp->dynticks & 0x1)
return;
rdtp->dynticks_nmi++;
- WARN_ON_RATELIMIT(!(rdtp->dynticks_nmi & 0x1), &rcu_rs);
+ WARN_ON_ONCE(!(rdtp->dynticks_nmi & 0x1));
smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
}
return;
smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
rdtp->dynticks_nmi++;
- WARN_ON_RATELIMIT(rdtp->dynticks_nmi & 0x1, &rcu_rs);
+ WARN_ON_ONCE(rdtp->dynticks_nmi & 0x1);
}
/**
if (rdtp->dynticks_nesting++)
return;
rdtp->dynticks++;
- WARN_ON_RATELIMIT(!(rdtp->dynticks & 0x1), &rcu_rs);
+ WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
}
return;
smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
rdtp->dynticks++;
- WARN_ON_RATELIMIT(rdtp->dynticks & 0x1, &rcu_rs);
+ WARN_ON_ONCE(rdtp->dynticks & 0x1);
/* If the interrupt queued a callback, get out of dyntick mode. */
if (__get_cpu_var(rcu_sched_data).nxtlist ||
struct rcu_node {
spinlock_t lock;
long gpnum; /* Current grace period for this node. */
+ /* This will either be equal to or one */
+ /* behind the root rcu_node's gpnum. */
unsigned long qsmask; /* CPUs or groups that need to switch in */
/* order for current grace period to proceed.*/
unsigned long qsmaskinit;
rnp = rdp->mynode;
spin_lock(&rnp->lock);
t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
- t->rcu_blocked_node = (void *)rnp;
+ t->rcu_blocked_node = rnp;
/*
* If this CPU has already checked in, then this task
* most one time. So at most two passes through loop.
*/
for (;;) {
- rnp = (struct rcu_node *)t->rcu_blocked_node;
+ rnp = t->rcu_blocked_node;
spin_lock(&rnp->lock);
- if (rnp == (struct rcu_node *)t->rcu_blocked_node)
+ if (rnp == t->rcu_blocked_node)
break;
spin_unlock(&rnp->lock);
}
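Removing the casts does not change the loop's logic: it snapshots rcu_blocked_node, takes that node's lock, and then rechecks the pointer, since (per the comment above) the task can migrate at most once while the lock is being acquired. A stand-alone sketch of this lock-and-revalidate pattern, with hypothetical type and function names:

	#include <linux/spinlock.h>

	struct sketch_node {
		spinlock_t lock;
	};

	struct sketch_obj {
		struct sketch_node *node;	/* may change until node->lock is held */
	};

	/* Return the object's current node with that node's lock held. */
	static struct sketch_node *sketch_lock_node(struct sketch_obj *o)
	{
		struct sketch_node *n;

		for (;;) {
			n = o->node;			/* racy snapshot */
			spin_lock(&n->lock);
			if (n == o->node)		/* still the same node? */
				return n;		/* yes: lock held, done */
			spin_unlock(&n->lock);		/* no: it moved, retry */
		}
	}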
struct rcu_node *rnp_root = rcu_get_root(rsp);
struct task_struct *tp;
- if (rnp == rnp_root)
+ if (rnp == rnp_root) {
+ WARN_ONCE(1, "Last CPU thought to be offlined?");
return; /* Shouldn't happen: at least one CPU online. */
+ }
/*
* Move tasks up to root rcu_node. Rely on the fact that the