arch/x86/lib/delay.c
/*
 *	Precise Delay Loops for i386
 *
 *	Copyright (C) 1993 Linus Torvalds
 *	Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *	Copyright (C) 2008 Jiri Hladky <hladky _dot_ jiri _at_ gmail _dot_ com>
 *
 *	The __delay function must _NOT_ be inlined as its execution time
 *	depends wildly on alignment on many x86 processors. The additional
 *	jump magic is needed to get the timing stable on all the CPUs
 *	we have to worry about.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/timex.h>
#include <linux/preempt.h>
#include <linux/delay.h>

#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/timer.h>

#ifdef CONFIG_SMP
# include <asm/smp.h>
#endif

/* simple loop based delay: */
static void delay_loop(unsigned long loops)
{
	asm volatile(
		"	test %0,%0	\n"
		"	jz 3f		\n"
		"	jmp 1f		\n"

		".align 16		\n"
		"1:	jmp 2f		\n"

		".align 16		\n"
		"2:	dec %0		\n"
		"	jnz 2b		\n"
		"3:	dec %0		\n"

		: "+a" (loops)	/* the counter is both read and modified */
	);
}

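/*
 * What the asm above boils down to is a tight countdown that burns
 * 'loops' iterations.  A plain-C sketch of the same idea (the name
 * delay_loop_c is hypothetical and this is NOT a usable replacement:
 * the compiler would pick its own code alignment, which is exactly
 * what the hand-written jumps and .align directives prevent):
 */
static void __maybe_unused delay_loop_c(unsigned long loops)
{
	while (loops--)
		barrier();	/* keep the compiler from deleting the loop */
}
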
/* TSC based delay: */
static void delay_tsc(unsigned long __loops)
{
	u64 bclock, now, loops = __loops;
	int cpu;

	preempt_disable();
	cpu = smp_processor_id();
	bclock = rdtsc_ordered();
	for (;;) {
		now = rdtsc_ordered();
		if ((now - bclock) >= loops)
			break;

		/* Allow RT tasks to run */
		preempt_enable();
		rep_nop();
		preempt_disable();

		/*
		 * It is possible that we moved to another CPU, and
		 * since TSCs are per-CPU we need to account for
		 * that. The delay must guarantee that we wait "at
		 * least" the amount of time. Being moved to another
		 * CPU could make the wait longer but we just need to
		 * make sure we waited long enough. Rebalance the
		 * counter for this CPU.
		 */
		if (unlikely(cpu != smp_processor_id())) {
			loops -= (now - bclock);
			cpu = smp_processor_id();
			bclock = rdtsc_ordered();
		}
	}
	preempt_enable();
}

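/*
 * Stripped of the CPU-migration bookkeeping, the essential pattern of
 * delay_tsc() is "poll an ordered TSC read until enough cycles have
 * elapsed".  A simplified sketch (the name delay_tsc_simple is
 * hypothetical; unlike the real function it must be called with
 * preemption disabled, since it never re-checks which CPU it is on):
 */
static void __maybe_unused delay_tsc_simple(unsigned long loops)
{
	u64 bclock = rdtsc_ordered();

	while (rdtsc_ordered() - bclock < loops)
		rep_nop();	/* PAUSE: be polite to the sibling thread */
}
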
/*
 * Since we calibrate only once at boot, this
 * function should be set once at boot and not changed
 */
static void (*delay_fn)(unsigned long) = delay_loop;

void use_tsc_delay(void)
{
	delay_fn = delay_tsc;
}

int read_current_timer(unsigned long *timer_val)
{
	if (delay_fn == delay_tsc) {
		*timer_val = rdtsc();
		return 0;
	}
	return -1;
}

void __delay(unsigned long loops)
{
	delay_fn(loops);
}
EXPORT_SYMBOL(__delay);

inline void __const_udelay(unsigned long xloops)
{
	int d0;

	xloops *= 4;
	asm("mull %%edx"
		:"=d" (xloops), "=&a" (d0)
		:"1" (xloops), "0"
		(this_cpu_read(cpu_info.loops_per_jiffy) * (HZ/4)));

	__delay(++xloops);
}
EXPORT_SYMBOL(__const_udelay);

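/*
 * Note on the arithmetic above: "mull %%edx" forms the 64-bit product
 * of EAX and EDX, and keeping only EDX (the high 32 bits) is a
 * fixed-point divide by 2^32.  Since xloops arrives pre-scaled by
 * 2^32/10^6 (see __udelay() below), the result is
 * usecs * loops_per_jiffy * HZ / 10^6.  The *4 here and the HZ/4 in
 * the multiplicand cancel; their only purpose is to keep the 32-bit
 * multiplicand from overflowing.  A rough C sketch of the same
 * computation (the name usecs_to_loops is hypothetical, and it
 * ignores that overflow headroom trick):
 */
static unsigned long __maybe_unused usecs_to_loops(unsigned long usecs)
{
	u64 xloops = (u64)usecs * 0x000010c7;	/* 2^32 / 10^6, rounded up */

	return (xloops * this_cpu_read(cpu_info.loops_per_jiffy) * HZ) >> 32;
}
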
void __udelay(unsigned long usecs)
{
	__const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
}
EXPORT_SYMBOL(__udelay);

void __ndelay(unsigned long nsecs)
{
	__const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
}
EXPORT_SYMBOL(__ndelay);
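
/*
 * Worked check of the two magic multipliers used above:
 *
 *	2^32 / 10^6 = 4294.967296...  ->  rounded up to 4295 = 0x10c7
 *	2^32 / 10^9 =    4.294967...  ->  rounded up to    5 = 0x5
 *
 * Rounding up guarantees that __udelay()/__ndelay() never wait less
 * than requested; the cost is a slightly longer delay (well under
 * 0.001% for __udelay(), about 16% for __ndelay()).
 */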