2008-10-23 01:26:29 -04:00
|
|
|
#ifndef _ASM_X86_TIMER_H
|
|
|
|
#define _ASM_X86_TIMER_H
|
2005-04-16 18:20:36 -04:00
|
|
|
#include <linux/init.h>
|
2005-09-03 18:57:07 -04:00
|
|
|
#include <linux/pm.h>
|
2008-01-30 07:30:06 -05:00
|
|
|
#include <linux/percpu.h>
|
2005-04-16 18:20:36 -04:00
|
|
|
|
|
|
|
#define TICK_SIZE (tick_nsec / 1000)
|
2007-03-05 03:30:35 -05:00
|
|
|
|
|
|
|
unsigned long long native_sched_clock(void);
|
2008-07-01 14:43:36 -04:00
|
|
|
unsigned long native_calibrate_tsc(void);
|
2007-03-05 03:30:35 -05:00
|
|
|
|
2008-07-21 12:22:51 -04:00
|
|
|
#ifdef CONFIG_X86_32
|
2005-04-16 18:20:36 -04:00
|
|
|
extern int timer_ack;
|
2005-05-31 22:03:46 -04:00
|
|
|
extern int recalibrate_cpu_khz(void);
|
2008-07-21 12:22:51 -04:00
|
|
|
#endif /* CONFIG_X86_32 */
|
|
|
|
|
|
|
|
extern int no_timer_check;
|
2005-04-16 18:20:36 -04:00
|
|
|
|
2007-03-05 03:30:35 -05:00
|
|
|
#ifndef CONFIG_PARAVIRT
|
2008-07-01 14:43:36 -04:00
|
|
|
#define calibrate_tsc() native_calibrate_tsc()
|
2007-03-05 03:30:35 -05:00
|
|
|
#endif
|
|
|
|
|
2008-01-30 07:30:06 -05:00
|
|
|
/* Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 *  basic equation:
 *		ns = cycles / (freq / ns_per_sec)
 *		ns = cycles * (ns_per_sec / freq)
 *		ns = cycles * (10^9 / (cpu_khz * 10^3))
 *		ns = cycles * (10^6 / cpu_khz)
 *
 *	Then we use scaling math (suggested by george@mvista.com) to get:
 *		ns = cycles * (10^6 * SC / cpu_khz) / SC
 *		ns = cycles * cyc2ns_scale / SC
 *
 *	And since SC is a constant power of two, we can convert the div
 *  into a shift.
 *
 *  We can use khz divisor instead of mhz to keep a better precision, since
 *  cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 *  (mathieu.desnoyers@polymtl.ca)
 *
 *			-johnstul@us.ibm.com "math is hard, lets go shopping!"
 */
|
2008-01-30 07:30:06 -05:00
|
|
|
|
|
|
|
DECLARE_PER_CPU(unsigned long, cyc2ns);
|
2007-07-17 21:37:04 -04:00
|
|
|
|
|
|
|
#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
|
|
|
|
|
2008-01-30 07:30:06 -05:00
|
|
|
static inline unsigned long long __cycles_2_ns(unsigned long long cyc)
|
2007-07-17 21:37:04 -04:00
|
|
|
{
|
2008-01-30 07:30:06 -05:00
|
|
|
return cyc * per_cpu(cyc2ns, smp_processor_id()) >> CYC2NS_SCALE_FACTOR;
|
2007-07-17 21:37:04 -04:00
|
|
|
}
|
|
|
|
|
2008-01-30 07:30:06 -05:00
|
|
|
/*
 * Interrupt-safe wrapper around __cycles_2_ns(): disable local
 * interrupts so the per-CPU cyc2ns factor read inside cannot be
 * skewed by migration or a concurrent frequency-change update.
 */
static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	unsigned long irq_flags;
	unsigned long long result;

	local_irq_save(irq_flags);
	result = __cycles_2_ns(cyc);
	local_irq_restore(irq_flags);

	return result;
}
|
2007-07-17 21:37:04 -04:00
|
|
|
|
2008-10-23 01:26:29 -04:00
|
|
|
#endif /* _ASM_X86_TIMER_H */
|