X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=kernel%2Fsoftirq.c;h=8a1e6e104892daff68dded8f7ff2af9d72f3dd1e;hb=32665f2bbfed2e325d37236d9b0071a11a69124e;hp=9a4500e4c1893ec22e17c4ac98a1d34cc6a0b6cf;hpb=9326657abe1a83ed4b4f396b923ca1217fd50cba;p=firefly-linux-kernel-4.4.55.git

diff --git a/kernel/softirq.c b/kernel/softirq.c
index 9a4500e4c189..8a1e6e104892 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -89,7 +89,7 @@ static void wakeup_softirqd(void)
  * where hardirqs are disabled legitimately:
  */
 #ifdef CONFIG_TRACE_IRQFLAGS
-static void __local_bh_disable(unsigned long ip, unsigned int cnt)
+void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
 {
 	unsigned long flags;
 
@@ -107,33 +107,21 @@ static void __local_bh_disable(unsigned long ip, unsigned int cnt)
 	/*
 	 * Were softirqs turned off above:
 	 */
-	if (softirq_count() == cnt)
+	if (softirq_count() == (cnt & SOFTIRQ_MASK))
 		trace_softirqs_off(ip);
 	raw_local_irq_restore(flags);
 
 	if (preempt_count() == cnt)
 		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
 }
-#else /* !CONFIG_TRACE_IRQFLAGS */
-static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
-{
-	preempt_count_add(cnt);
-	barrier();
-}
+EXPORT_SYMBOL(__local_bh_disable_ip);
 #endif /* CONFIG_TRACE_IRQFLAGS */
 
-void local_bh_disable(void)
-{
-	__local_bh_disable(_RET_IP_, SOFTIRQ_DISABLE_OFFSET);
-}
-
-EXPORT_SYMBOL(local_bh_disable);
-
 static void __local_bh_enable(unsigned int cnt)
 {
 	WARN_ON_ONCE(!irqs_disabled());
 
-	if (softirq_count() == cnt)
+	if (softirq_count() == (cnt & SOFTIRQ_MASK))
 		trace_softirqs_on(_RET_IP_);
 	preempt_count_sub(cnt);
 }
@@ -151,7 +139,7 @@ void _local_bh_enable(void)
 
 EXPORT_SYMBOL(_local_bh_enable);
 
-static inline void _local_bh_enable_ip(unsigned long ip)
+void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
 {
 	WARN_ON_ONCE(in_irq() || irqs_disabled());
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -166,7 +154,7 @@ static inline void _local_bh_enable_ip(unsigned long ip)
 	 * Keep preemption disabled until we are done with
 	 * softirq processing:
 	 */
-	preempt_count_sub(SOFTIRQ_DISABLE_OFFSET - 1);
+	preempt_count_sub(cnt - 1);
 
 	if (unlikely(!in_interrupt() && local_softirq_pending())) {
 		/*
@@ -182,18 +170,7 @@ static inline void _local_bh_enable_ip(unsigned long ip)
 #endif
 	preempt_check_resched();
 }
-
-void local_bh_enable(void)
-{
-	_local_bh_enable_ip(_RET_IP_);
-}
-EXPORT_SYMBOL(local_bh_enable);
-
-void local_bh_enable_ip(unsigned long ip)
-{
-	_local_bh_enable_ip(ip);
-}
-EXPORT_SYMBOL(local_bh_enable_ip);
+EXPORT_SYMBOL(__local_bh_enable_ip);
 
 /*
  * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
@@ -264,7 +241,7 @@ asmlinkage void __do_softirq(void)
 	pending = local_softirq_pending();
 	account_irq_enter_time(current);
 
-	__local_bh_disable(_RET_IP_, SOFTIRQ_OFFSET);
+	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
 	in_hardirq = lockdep_softirq_start();
 
 	cpu = smp_processor_id();
@@ -342,8 +319,6 @@ asmlinkage void do_softirq(void)
  */
 void irq_enter(void)
 {
-	int cpu = smp_processor_id();
-
 	rcu_irq_enter();
 	if (is_idle_task(current) && !in_interrupt()) {
 		/*
@@ -351,7 +326,7 @@ void irq_enter(void)
 		 * here, as softirq will be serviced on return from interrupt.
 		 */
 		local_bh_disable();
-		tick_check_idle(cpu);
+		tick_check_idle();
 		_local_bh_enable();
 	}
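
Note: the header-side counterpart of this rework lives in include/linux/bottom_half.h and is not part of this blobdiff. As a rough sketch only (the exact upstream wrappers may differ), the out-of-line local_bh_disable()/local_bh_enable()/local_bh_enable_ip() exports removed above are expected to turn into static inline wrappers that pass an explicit preempt-count offset to the new __local_bh_disable_ip()/__local_bh_enable_ip() helpers:

	/*
	 * Sketch, not taken from this diff: assumes <linux/preempt.h> for
	 * SOFTIRQ_DISABLE_OFFSET and <linux/kernel.h> for _THIS_IP_.
	 */
	#include <linux/preempt.h>
	#include <linux/kernel.h>

	static inline void local_bh_disable(void)
	{
		/* bump the softirq-disable part of preempt_count by one unit */
		__local_bh_disable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
	}

	static inline void local_bh_enable_ip(unsigned long ip)
	{
		/* re-enable BHs, attributing the event to the caller-supplied ip */
		__local_bh_enable_ip(ip, SOFTIRQ_DISABLE_OFFSET);
	}

	static inline void local_bh_enable(void)
	{
		__local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
	}

Passing the offset explicitly is what lets __do_softirq() reuse the same helper with SOFTIRQ_OFFSET, and the (cnt & SOFTIRQ_MASK) comparisons added above keep the softirq-count checks correct if a caller passes a combined softirq-plus-preempt offset rather than the plain SOFTIRQ_DISABLE_OFFSET.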