powerpc: Implement accurate task and CPU time accounting
This implements accurate task and cpu time accounting for 64-bit
powerpc kernels. Instead of accounting a whole jiffy of time to a
task on a timer interrupt because that task happened to be running at
the time, we now account time in units of timebase ticks according to
the actual time spent by the task in user mode and kernel mode. We
also count the time spent processing hardware and software interrupts
accurately. This is conditional on CONFIG_VIRT_CPU_ACCOUNTING. If
that is not set, we do tick-based approximate accounting as before.
To get this accurate information, we read either the PURR (processor
utilization of resources register) on POWER5 machines, or the timebase
on other machines (see the sketch after the list below), at:
* each entry to the kernel from usermode
* each exit to usermode
* transitions between process context, hard irq context and soft irq
context in kernel mode
* context switches.
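For illustration only, here is a minimal sketch of what such a snapshot
read can look like in a powerpc kernel context. It is not the code from
this patch: the helper name read_accounting_clock() is hypothetical, and
while mfspr()/SPRN_PURR, mftb() and cpu_has_feature() are existing powerpc
kernel primitives, the exact CPU_FTR_PURR feature-bit name is stated here
as an assumption.

#include <linux/types.h>	/* u64 */
#include <asm/cputable.h>	/* cpu_has_feature(), CPU_FTR_PURR */
#include <asm/reg.h>		/* mfspr(), SPRN_PURR */
#include <asm/time.h>		/* mftb() */

/*
 * Hypothetical helper, not the patch code: return the per-thread
 * utilization count.  Kernel entry/exit and context-switch code would
 * store this value and later account the difference as user or system
 * time.
 */
static inline u64 read_accounting_clock(void)
{
	if (cpu_has_feature(CPU_FTR_PURR))
		return mfspr(SPRN_PURR);	/* POWER5: this thread's share only */
	return mftb();				/* other CPUs: raw timebase ticks */
}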
On POWER5 systems with shared-processor logical partitioning we also
read both the PURR and the timebase at each timer interrupt and
context switch in order to determine how much time has been taken by
the hypervisor to run other partitions ("steal" time). Unfortunately,
since we need values of the PURR on both threads at the same time to
accurately calculate the steal time, and since we can only calculate
steal time on a per-core basis, the apportioning of the steal time
between idle time (time which we ceded to the hypervisor in the idle
loop) and actual stolen time is somewhat approximate at the moment.
This is all based quite heavily on what s390 does, and it uses the
generic interfaces that were added by the s390 developers,
i.e. account_system_time(), account_user_time(), etc.
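As a concrete, hedged illustration of how those interfaces get used: a
delta measured in timebase ticks is already in cputime_t units under
CONFIG_VIRT_CPU_ACCOUNTING, so it can be passed straight through. The
wrapper below is illustrative only; the account_user_time() and
account_system_time() signatures shown (task pointer plus cputime_t, with
an extra hardirq-offset argument for system time) reflect the interfaces
of that kernel generation and should be treated as an assumption here, not
a quote from the patch.

#include <linux/sched.h>	/* struct task_struct, account_*_time() */

/*
 * Illustrative wrapper, not part of the patch: charge a delta (in
 * timebase ticks == cputime_t units) to the given task as either user
 * or system time via the generic interfaces added for s390.
 */
static void account_tb_delta(struct task_struct *tsk, cputime_t delta, int user)
{
	if (user)
		account_user_time(tsk, delta);
	else
		account_system_time(tsk, 0, delta);	/* 0: no hardirq offset */
}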
This patch doesn't add any new interfaces between the kernel and
userspace, and doesn't change the units in which time is reported to
userspace by things such as /proc/stat, /proc/<pid>/stat, getrusage(),
times(), etc. Internally the various task and cpu times are stored in
timebase units, but they are converted to USER_HZ units (1/100th of a
second) when reported to userspace. Some precision is therefore lost
but there should not be any accumulating error, since the internal
accumulation is at full precision.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2006-02-23 18:06:59 -05:00
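A worked example of the unit handling described above, using an assumed
timebase frequency of 512000000 ticks/sec (the real value is
machine-specific): a task that has accumulated 5125000000 ticks, about
10.0098 seconds, is reported as 1000 clock_t units (10.00 s at USER_HZ =
100). The truncated fraction is not lost, because the next report is
recomputed from the full-precision tick count. The snippet below is
ordinary userspace C, purely illustrative:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t tb_ticks_per_sec = 512000000;	/* assumed example frequency */
	const uint64_t user_hz = 100;			/* USER_HZ: 1/100 s units */
	const uint64_t ticks = 5125000000ULL;		/* ~10.0098 s of CPU time */

	/* the rounding userspace sees via /proc/<pid>/stat, times(), etc. */
	uint64_t clock_t_units = ticks * user_hz / tb_ticks_per_sec;

	printf("%llu ticks -> %llu clock_t units (%.4f s internally)\n",
	       (unsigned long long)ticks, (unsigned long long)clock_t_units,
	       (double)ticks / tb_ticks_per_sec);
	return 0;
}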

/*
 * Definitions for measuring cputime on powerpc machines.
 *
 * Copyright (C) 2006 Paul Mackerras, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * If we have CONFIG_VIRT_CPU_ACCOUNTING, we measure cpu time in
 * the same units as the timebase.  Otherwise we measure cpu time
 * in jiffies using the generic definitions.
 */

#ifndef __POWERPC_CPUTIME_H
#define __POWERPC_CPUTIME_H

#ifndef CONFIG_VIRT_CPU_ACCOUNTING
#include <asm-generic/cputime.h>
#else

#include <linux/types.h>
#include <linux/time.h>
#include <asm/div64.h>
#include <asm/time.h>
#include <asm/param.h>

typedef u64 cputime_t;
typedef u64 cputime64_t;

#define cputime_zero ((cputime_t)0)
#define cputime_max ((~((cputime_t)0) >> 1) - 1)
#define cputime_add(__a, __b) ((__a) + (__b))
#define cputime_sub(__a, __b) ((__a) - (__b))
#define cputime_div(__a, __n) ((__a) / (__n))
#define cputime_halve(__a) ((__a) >> 1)
#define cputime_eq(__a, __b) ((__a) == (__b))
#define cputime_gt(__a, __b) ((__a) > (__b))
#define cputime_ge(__a, __b) ((__a) >= (__b))
#define cputime_lt(__a, __b) ((__a) < (__b))
#define cputime_le(__a, __b) ((__a) <= (__b))

#define cputime64_zero ((cputime64_t)0)
#define cputime64_add(__a, __b) ((__a) + (__b))
#define cputime64_sub(__a, __b) ((__a) - (__b))
#define cputime_to_cputime64(__ct) (__ct)

#ifdef __KERNEL__

/*
 * Convert cputime <-> jiffies
 */
extern u64 __cputime_jiffies_factor;

static inline unsigned long cputime_to_jiffies(const cputime_t ct)
{
	return mulhdu(ct, __cputime_jiffies_factor);
}

static inline cputime_t jiffies_to_cputime(const unsigned long jif)
{
	cputime_t ct;
	unsigned long sec;

	/* have to be a little careful about overflow */
	ct = jif % HZ;
	sec = jif / HZ;
	if (ct) {
		ct *= tb_ticks_per_sec;
		do_div(ct, HZ);
	}
	if (sec)
		ct += (cputime_t) sec * tb_ticks_per_sec;
	return ct;
}

static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
{
	cputime_t ct;
	u64 sec;

	/* have to be a little careful about overflow */
	ct = jif % HZ;
	sec = jif / HZ;
	if (ct) {
		ct *= tb_ticks_per_sec;
		do_div(ct, HZ);
	}
	if (sec)
		ct += (cputime_t) sec * tb_ticks_per_sec;
	return ct;
}

static inline u64 cputime64_to_jiffies64(const cputime_t ct)
{
	return mulhdu(ct, __cputime_jiffies_factor);
}

/*
 * Convert cputime <-> milliseconds
 */
extern u64 __cputime_msec_factor;

static inline unsigned long cputime_to_msecs(const cputime_t ct)
{
	return mulhdu(ct, __cputime_msec_factor);
}

static inline cputime_t msecs_to_cputime(const unsigned long ms)
{
	cputime_t ct;
	unsigned long sec;

	/* have to be a little careful about overflow */
	ct = ms % 1000;
	sec = ms / 1000;
	if (ct) {
		ct *= tb_ticks_per_sec;
		do_div(ct, 1000);
	}
	if (sec)
		ct += (cputime_t) sec * tb_ticks_per_sec;
	return ct;
}

/*
 * Convert cputime <-> seconds
 */
extern u64 __cputime_sec_factor;

static inline unsigned long cputime_to_secs(const cputime_t ct)
{
	return mulhdu(ct, __cputime_sec_factor);
}

static inline cputime_t secs_to_cputime(const unsigned long sec)
{
	return (cputime_t) sec * tb_ticks_per_sec;
}

/*
 * Convert cputime <-> timespec
 */
static inline void cputime_to_timespec(const cputime_t ct, struct timespec *p)
{
	u64 x = ct;
	unsigned int frac;

	frac = do_div(x, tb_ticks_per_sec);
	p->tv_sec = x;
	x = (u64) frac * 1000000000;
	do_div(x, tb_ticks_per_sec);
	p->tv_nsec = x;
}

static inline cputime_t timespec_to_cputime(const struct timespec *p)
{
	cputime_t ct;

	ct = (u64) p->tv_nsec * tb_ticks_per_sec;
	do_div(ct, 1000000000);
	return ct + (u64) p->tv_sec * tb_ticks_per_sec;
}

/*
 * Convert cputime <-> timeval
 */
static inline void cputime_to_timeval(const cputime_t ct, struct timeval *p)
{
	u64 x = ct;
	unsigned int frac;

	frac = do_div(x, tb_ticks_per_sec);
	p->tv_sec = x;
	x = (u64) frac * 1000000;
	do_div(x, tb_ticks_per_sec);
	p->tv_usec = x;
}

static inline cputime_t timeval_to_cputime(const struct timeval *p)
{
	cputime_t ct;

	ct = (u64) p->tv_usec * tb_ticks_per_sec;
	do_div(ct, 1000000);
	return ct + (u64) p->tv_sec * tb_ticks_per_sec;
}

/*
 * Convert cputime <-> clock_t (units of 1/USER_HZ seconds)
 */
extern u64 __cputime_clockt_factor;

static inline unsigned long cputime_to_clock_t(const cputime_t ct)
{
	return mulhdu(ct, __cputime_clockt_factor);
}

static inline cputime_t clock_t_to_cputime(const unsigned long clk)
{
	cputime_t ct;
	unsigned long sec;

	/* have to be a little careful about overflow */
	ct = clk % USER_HZ;
	sec = clk / USER_HZ;
	if (ct) {
		ct *= tb_ticks_per_sec;
		do_div(ct, USER_HZ);
	}
	if (sec)
		ct += (cputime_t) sec * tb_ticks_per_sec;
	return ct;
}

#define cputime64_to_clock_t(ct) cputime_to_clock_t((cputime_t)(ct))

#endif /* __KERNEL__ */
#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
#endif /* __POWERPC_CPUTIME_H */
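
A note on the mulhdu()-based conversions above, offered as an assumption
about code that lives outside this header: mulhdu(a, b) yields the high
64 bits of the 128-bit product a*b, so each __cputime_*_factor only needs
to be initialized (by the powerpc timekeeping code at boot) to roughly
2^64 * rate / tb_ticks_per_sec for the corresponding rate (HZ, 1000, 1,
USER_HZ), and the multiply-high then performs the division without a
runtime divide. The sketch below shows that arithmetic using a GCC
128-bit integer; the kernel computes the factors with its own 128-bit
division helpers, and the function name here is hypothetical.

#include <stdint.h>

/* Hypothetical illustration of the scaled-multiply trick, not kernel code. */
static uint64_t cputime_scale_factor(uint64_t rate, uint64_t tb_ticks_per_sec)
{
	/*
	 * factor = 2^64 * rate / tb_ticks_per_sec, so that
	 * mulhdu(ct, factor) == (ct * factor) >> 64
	 *                    ~= ct * rate / tb_ticks_per_sec
	 */
	return (uint64_t)(((unsigned __int128)rate << 64) / tb_ticks_per_sec);
}

With rate = USER_HZ this reproduces cputime_to_clock_t(), with rate = HZ
it reproduces cputime_to_jiffies(), and so on.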