#ifndef _ASM_X86_TIMER_H
#define _ASM_X86_TIMER_H
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/percpu.h>
#include <linux/interrupt.h>

/* Tick period in microseconds (tick_nsec is the tick length in ns). */
#define TICK_SIZE (tick_nsec / 1000)

/* TSC-based scheduler clock: returns the current time in nanoseconds. */
unsigned long long native_sched_clock(void);
/* Re-run the CPU frequency calibration and update cpu_khz. */
extern int recalibrate_cpu_khz(void);

/* Set by the "no_timer_check" boot option to skip the timer IRQ check. */
extern int no_timer_check;

/* Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 *  basic equation:
 *		ns = cycles / (freq / ns_per_sec)
 *		ns = cycles * (ns_per_sec / freq)
 *		ns = cycles * (10^9 / (cpu_khz * 10^3))
 *		ns = cycles * (10^6 / cpu_khz)
 *
 *	Then we use scaling math (suggested by george@mvista.com) to get:
 *		ns = cycles * (10^6 * SC / cpu_khz) / SC
 *		ns = cycles * cyc2ns_scale / SC
 *
 *	And since SC is a constant power of two, we can convert the div
 *  into a shift.
 *
 *  We can use a khz divisor instead of a mhz one to keep better precision,
 *  since cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 *  (mathieu.desnoyers@polymtl.ca)
 *
 *			-johnstul@us.ibm.com "math is hard, lets go shopping!"
 *
 * In:
 *
 * ns = cycles * cyc2ns_scale / SC
 *
 * Although 64 bits may be enough to hold the final value of ns, in some
 * cases there are not enough bits to hold the intermediate product
 * cycles * cyc2ns_scale, leading to an incorrect result.
 *
 * To avoid this, we can decompose 'cycles' into quotient and remainder
 * of division by SC.  Then,
 *
 * ns = (quot * SC + rem) * cyc2ns_scale / SC
 *    = quot * cyc2ns_scale + (rem * cyc2ns_scale) / SC
 *
 *			- sqazi@google.com
 */
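
/*
 * Worked example (illustrative numbers only, not taken from this file):
 * on a 2 GHz CPU, cpu_khz = 2,000,000 and SC = 2^10, so
 *
 *	cyc2ns_scale = 10^6 * 2^10 / 2,000,000 = 512
 *	ns           = cycles * 512 >> 10      = cycles / 2
 *
 * i.e. each cycle accounts for 0.5 ns, as expected for a 2 GHz clock.
 */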

/*
 * Per-CPU conversion state: cyc2ns holds this CPU's cyc2ns_scale
 * (10^6 * SC / cpu_khz) and cyc2ns_offset holds the nanosecond offset
 * added so the clock stays continuous when the scale is updated.
 */
DECLARE_PER_CPU(unsigned long, cyc2ns);
DECLARE_PER_CPU(unsigned long long, cyc2ns_offset);

#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */

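/*
 * Convert a cycle count to nanoseconds using this CPU's scale and offset.
 * mult_frac() performs the quotient/remainder decomposition described
 * above, avoiding overflow of the intermediate product.  This variant
 * assumes the caller has dealt with CPU migration and concurrent updates
 * of the per-CPU data; cycles_2_ns() below is the irq-safe wrapper.
 */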
static inline unsigned long long __cycles_2_ns(unsigned long long cyc)
{
	int cpu = smp_processor_id();
	unsigned long long ns = per_cpu(cyc2ns_offset, cpu);
	ns += mult_frac(cyc, per_cpu(cyc2ns, cpu),
			(1UL << CYC2NS_SCALE_FACTOR));
	return ns;
}

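/*
 * Irq-safe wrapper: disable interrupts around the conversion so the
 * per-CPU scale and offset are read consistently on one CPU.  Callers
 * would typically pass a raw TSC reading, e.g. ns = cycles_2_ns(tsc).
 */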
static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	unsigned long long ns;
	unsigned long flags;

	local_irq_save(flags);
	ns = __cycles_2_ns(cyc);
	local_irq_restore(flags);

	return ns;
}

#endif /* _ASM_X86_TIMER_H */