From: Roman Zippel
Date: Thu, 1 May 2008 11:34:28 +0000 (-0700)
Subject: rename div64_64 to div64_u64
X-Git-Tag: firefly_0821_release~20565
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=6f6d6a1a6a1336431a6cba60ace9e97c3a496a19;p=firefly-linux-kernel-4.4.55.git

rename div64_64 to div64_u64

Rename div64_64 to div64_u64 to make it consistent with the other divide
functions, so it clearly includes the type of the divide.  Move its
definition to math64.h as currently no architecture overrides the generic
implementation.  They can still override it of course, but the duplicated
declarations are avoided.

Signed-off-by: Roman Zippel
Cc: Avi Kivity
Cc: Russell King
Cc: Geert Uytterhoeven
Cc: Ralf Baechle
Cc: David Howells
Cc: Jeff Dike
Cc: Ingo Molnar
Cc: "David S. Miller"
Cc: Patrick McHardy
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---

diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 6df073240135..318b81100623 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1,4 +1,3 @@
-
 /*
  * kvm_ia64.c: Basic KVM suppport On Itanium series processors
  *
@@ -431,7 +430,7 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 		if (itc_diff < 0)
 			itc_diff = -itc_diff;
 
-		expires = div64_64(itc_diff, cyc_per_usec);
+		expires = div64_u64(itc_diff, cyc_per_usec);
 		kt = ktime_set(0, 1000 * expires);
 		vcpu->arch.ht_active = 1;
 		hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 361e31611276..4c943eabacc3 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -35,7 +35,7 @@
 #include "i8254.h"
 
 #ifndef CONFIG_X86_64
-#define mod_64(x, y) ((x) - (y) * div64_64(x, y))
+#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
 #else
 #define mod_64(x, y) ((x) % (y))
 #endif
@@ -60,8 +60,8 @@ static u64 muldiv64(u64 a, u32 b, u32 c)
 	rl = (u64)u.l.low * (u64)b;
 	rh = (u64)u.l.high * (u64)b;
 	rh += (rl >> 32);
-	res.l.high = div64_64(rh, c);
-	res.l.low = div64_64(((mod_64(rh, c) << 32) + (rl & 0xffffffff)), c);
+	res.l.high = div64_u64(rh, c);
+	res.l.low = div64_u64(((mod_64(rh, c) << 32) + (rl & 0xffffffff)), c);
 	return res.ll;
 }
 
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 57ac4e4c556a..36809d79788b 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -25,13 +25,13 @@
 #include
 #include
 #include
+#include <linux/math64.h>
 #include
 #include
 #include
 #include
 #include
 #include
-#include <asm/div64.h>
 #include "irq.h"
 
 #define PRId64 "d"
@@ -526,8 +526,8 @@ static u32 apic_get_tmcct(struct kvm_lapic *apic)
 	} else
 		passed = ktime_sub(now, apic->timer.last_update);
 
-	counter_passed = div64_64(ktime_to_ns(passed),
-				  (APIC_BUS_CYCLE_NS * apic->timer.divide_count));
+	counter_passed = div64_u64(ktime_to_ns(passed),
+				   (APIC_BUS_CYCLE_NS * apic->timer.divide_count));
 
 	if (counter_passed > tmcct) {
 		if (unlikely(!apic_lvtt_period(apic))) {
diff --git a/include/asm-arm/div64.h b/include/asm-arm/div64.h
index 0b5f881c3d85..5001390be958 100644
--- a/include/asm-arm/div64.h
+++ b/include/asm-arm/div64.h
@@ -224,6 +224,4 @@
 
 #endif
 
-extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
-
 #endif
diff --git a/include/asm-generic/div64.h b/include/asm-generic/div64.h
index a4a49370793c..8f4e3193342e 100644
--- a/include/asm-generic/div64.h
+++ b/include/asm-generic/div64.h
@@ -30,11 +30,6 @@
 	__rem;						\
  })
 
-static inline uint64_t div64_64(uint64_t dividend, uint64_t divisor)
-{
-	return dividend / divisor;
-}
-
 #elif BITS_PER_LONG == 32
 
 extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
@@ -54,8 +49,6 @@ extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
 	__rem;						\
  })
 
-extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
-
 #else /* BITS_PER_LONG == ?? */
 
 # error do_div() does not yet support the C64
diff --git a/include/asm-m68k/div64.h b/include/asm-m68k/div64.h
index 33caad1628d4..8243c931b5c0 100644
--- a/include/asm-m68k/div64.h
+++ b/include/asm-m68k/div64.h
@@ -25,5 +25,4 @@
 	__rem;						\
 })
 
-extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
 #endif /* _M68K_DIV64_H */
diff --git a/include/asm-mips/div64.h b/include/asm-mips/div64.h
index 716371bd0980..d1d699105c11 100644
--- a/include/asm-mips/div64.h
+++ b/include/asm-mips/div64.h
@@ -82,7 +82,6 @@
 	(n) = __quot; \
 	__mod; })
 
-extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
 #endif /* (_MIPS_SZLONG == 32) */
 
 #if (_MIPS_SZLONG == 64)
@@ -106,11 +105,6 @@ extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
 	(n) = __quot; \
 	__mod; })
 
-static inline uint64_t div64_64(uint64_t dividend, uint64_t divisor)
-{
-	return dividend / divisor;
-}
-
 #endif /* (_MIPS_SZLONG == 64) */
 
 #endif /* _ASM_DIV64_H */
diff --git a/include/asm-mn10300/div64.h b/include/asm-mn10300/div64.h
index bf9c515a998c..3a8329b3e869 100644
--- a/include/asm-mn10300/div64.h
+++ b/include/asm-mn10300/div64.h
@@ -97,7 +97,4 @@ signed __muldiv64s(signed val, signed mult, signed div)
 	return result;
 }
 
-extern __attribute__((const))
-uint64_t div64_64(uint64_t dividend, uint64_t divisor);
-
 #endif /* _ASM_DIV64 */
diff --git a/include/asm-um/div64.h b/include/asm-um/div64.h
index 7b73b2cd5b34..1e17f7409cab 100644
--- a/include/asm-um/div64.h
+++ b/include/asm-um/div64.h
@@ -3,5 +3,4 @@
 
 #include "asm/arch/div64.h"
 
-extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
 #endif
diff --git a/include/asm-x86/div64.h b/include/asm-x86/div64.h
index c7892cfe9ce6..32fdbddaae55 100644
--- a/include/asm-x86/div64.h
+++ b/include/asm-x86/div64.h
@@ -71,8 +71,6 @@ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
 }
 #define div_u64_rem div_u64_rem
 
-extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
-
 #else
 # include
 #endif /* CONFIG_X86_32 */
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 6d1716641008..c1a5f81501ff 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -27,6 +27,14 @@ static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
 	return dividend / divisor;
 }
 
+/**
+ * div64_u64 - unsigned 64bit divide with 64bit divisor
+ */
+static inline u64 div64_u64(u64 dividend, u64 divisor)
+{
+	return dividend / divisor;
+}
+
 #elif BITS_PER_LONG == 32
 
 #ifndef div_u64_rem
@@ -41,6 +49,10 @@ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
 extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
 #endif
 
+#ifndef div64_u64
+extern u64 div64_u64(u64 dividend, u64 divisor);
+#endif
+
 #endif /* BITS_PER_LONG */
 
 /**
diff --git a/kernel/sched.c b/kernel/sched.c
index e2f7f5acc807..34bcc5bc120e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8025,7 +8025,7 @@ static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 	se->my_q = cfs_rq;
 	se->load.weight = tg->shares;
-	se->load.inv_weight = div64_64(1ULL<<32, se->load.weight);
+	se->load.inv_weight = div64_u64(1ULL<<32, se->load.weight);
 	se->parent = parent;
 }
 #endif
 
@@ -8692,7 +8692,7 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares)
 		dequeue_entity(cfs_rq, se, 0);
 
 	se->load.weight = shares;
-	se->load.inv_weight = div64_64((1ULL<<32), shares);
+	se->load.inv_weight = div64_u64((1ULL<<32), shares);
 
 	if (on_rq)
 		enqueue_entity(cfs_rq, se, 0);
@@ -8787,7 +8787,7 @@ static unsigned long to_ratio(u64 period, u64 runtime)
 	if (runtime == RUNTIME_INF)
 		return 1ULL << 16;
 
-	return div64_64(runtime << 16, period);
+	return div64_u64(runtime << 16, period);
 }
 
 #ifdef CONFIG_CGROUP_SCHED
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 8a9498e7c831..6b4a12558e88 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -357,8 +357,8 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 		avg_per_cpu = p->se.sum_exec_runtime;
 		if (p->se.nr_migrations) {
-			avg_per_cpu = div64_64(avg_per_cpu,
-					       p->se.nr_migrations);
+			avg_per_cpu = div64_u64(avg_per_cpu,
+						p->se.nr_migrations);
 		} else {
 			avg_per_cpu = -1LL;
 		}
 
diff --git a/lib/div64.c b/lib/div64.c
index 689bd76833fa..bb5bd0c0f030 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -78,9 +78,10 @@ EXPORT_SYMBOL(div_s64_rem);
 #endif
 
 /* 64bit divisor, dividend and result. dynamic precision */
-uint64_t div64_64(uint64_t dividend, uint64_t divisor)
+#ifndef div64_u64
+u64 div64_u64(u64 dividend, u64 divisor)
 {
-	uint32_t high, d;
+	u32 high, d;
 
 	high = divisor >> 32;
 	if (high) {
@@ -91,10 +92,9 @@ uint64_t div64_64(uint64_t dividend, uint64_t divisor)
 	} else
 		d = divisor;
 
-	do_div(dividend, d);
-
-	return dividend;
+	return div_u64(dividend, d);
 }
-EXPORT_SYMBOL(div64_64);
+EXPORT_SYMBOL(div64_u64);
+#endif
 
 #endif /* BITS_PER_LONG == 32 */
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index eb5b9854c8c7..4a1221e5e8ee 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -15,8 +15,8 @@
 
 #include
 #include
+#include <linux/math64.h>
 #include
-#include <asm/div64.h>
 
 #define BICTCP_BETA_SCALE    1024	/* Scale factor beta calculation
 					 * max_cwnd = snd_cwnd * beta
@@ -128,7 +128,7 @@ static u32 cubic_root(u64 a)
 	 * x    = ( 2 * x  +  a / x  ) / 3
 	 *  k+1          k         k
 	 */
-	x = (2 * x + (u32)div64_64(a, (u64)x * (u64)(x - 1)));
+	x = (2 * x + (u32)div64_u64(a, (u64)x * (u64)(x - 1)));
 	x = ((x * 341) >> 10);
 	return x;
 }
diff --git a/net/netfilter/xt_connbytes.c b/net/netfilter/xt_connbytes.c
index b15e7e2fa143..d7e8983cd37f 100644
--- a/net/netfilter/xt_connbytes.c
+++ b/net/netfilter/xt_connbytes.c
@@ -4,12 +4,11 @@
 #include
 #include
 #include
+#include <linux/math64.h>
 #include
 #include
 #include
 
-#include <asm/div64.h>
-
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Harald Welte ");
 MODULE_DESCRIPTION("Xtables: Number of packets/bytes per connection matching");
@@ -82,7 +81,7 @@ connbytes_mt(const struct sk_buff *skb, const struct net_device *in,
 			break;
 		}
 
 		if (pkts != 0)
-			what = div64_64(bytes, pkts);
+			what = div64_u64(bytes, pkts);
 		break;
 	}
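
For readers following the lib/div64.c hunk above: on 32-bit builds the renamed
div64_u64() keeps the old div64_64() strategy of reducing the divisor to 32 bits
before dividing, so the quotient is only an approximation whenever the divisor
does not fit in 32 bits.  The userspace sketch below mirrors that idea under
stated assumptions: sketch_div64_u64() and bits_above_32() are made-up helper
names, plain C division stands in for the kernel's div_u64()/do_div(), and the
shift computation inside the hunk's elided lines is not shown in the diff, so
the loop here is merely one way to get the same effect.  It is an illustration
of the approach, not the kernel implementation.

	/* Userspace sketch (not kernel code) of the precision-reducing divide
	 * used by the 32-bit fallback of div64_u64() in lib/div64.c.
	 */
	#include <stdint.h>
	#include <stdio.h>

	/* How many low-order bits must be dropped so that v fits in 32 bits. */
	static unsigned int bits_above_32(uint64_t v)
	{
		unsigned int shift = 0;

		while (v >> 32) {
			v >>= 1;
			shift++;
		}
		return shift;
	}

	/* Hypothetical stand-in for div64_u64(): shift both operands right until
	 * the divisor fits in 32 bits, then do a 64-by-32 divide.  When bits are
	 * dropped, the result only approximates dividend / divisor.
	 */
	static uint64_t sketch_div64_u64(uint64_t dividend, uint64_t divisor)
	{
		if (divisor >> 32) {
			unsigned int shift = bits_above_32(divisor);

			divisor >>= shift;
			dividend >>= shift;
		}
		/* In the kernel this final step is div_u64()/do_div(). */
		return dividend / (uint32_t)divisor;
	}

	int main(void)
	{
		uint64_t a = 0x0123456789abcdefULL;
		uint64_t b = 0x100000001ULL;	/* divisor wider than 32 bits */

		printf("approx: %llu\n", (unsigned long long)sketch_div64_u64(a, b));
		printf("exact : %llu\n", (unsigned long long)(a / b));
		return 0;
	}

The 64-bit branch of math64.h needs none of this, since native 64-bit division
is exact, and the #ifndef div64_u64 guards added above let an architecture
supply its own implementation, as the commit message notes.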