/*
 * linux/kernel/time/clockevents.c
 *
 * This file contains functions which manage clock event devices.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */

#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysdev.h>

/* The registered clock event devices */
static LIST_HEAD(clockevent_devices);
static LIST_HEAD(clockevents_released);

/* Notification for clock events */
static RAW_NOTIFIER_HEAD(clockevents_chain);

/* Protection for the above */
static DEFINE_SPINLOCK(clockevents_lock);

/**
 * clockevent_delta2ns - Convert a latch value (device ticks) to nanoseconds
 * @latch:	value to convert
 * @evt:	pointer to clock event device descriptor
 *
 * Math helper, returns latch value converted to nanoseconds (bound checked)
 */
unsigned long clockevent_delta2ns(unsigned long latch,
				  struct clock_event_device *evt)
{
	u64 clc = ((u64) latch << evt->shift);

	do_div(clc, evt->mult);
	if (clc < 1000)
		clc = 1000;
	if (clc > LONG_MAX)
		clc = LONG_MAX;

	return (unsigned long) clc;
}

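/*
 * Illustrative usage (a minimal sketch, not taken from this file; the
 * latch values are hypothetical): a clock event driver typically derives
 * the programmable range of its hardware from the counter width with this
 * helper before registering, e.g. for a device with a 32 bit down counter:
 *
 *	dev->max_delta_ns = clockevent_delta2ns(0x7fffffff, dev);
 *	dev->min_delta_ns = clockevent_delta2ns(0xf, dev);
 */
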
/**
 * clockevents_set_mode - set the operating mode of a clock event device
 * @dev:	device to modify
 * @mode:	new mode
 *
 * Must be called with interrupts disabled !
 */
void clockevents_set_mode(struct clock_event_device *dev,
			  enum clock_event_mode mode)
{
	if (dev->mode != mode) {
		dev->set_mode(mode, dev);
		dev->mode = mode;
	}
}

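/*
 * Note (added for reference, not part of the original comment): the modes
 * passed here are the CLOCK_EVT_MODE_* values - UNUSED, SHUTDOWN, PERIODIC
 * and ONESHOT - and the device's set_mode() callback is expected to
 * reprogram the hardware accordingly.
 */
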
/**
 * clockevents_program_event - Reprogram the clock event device.
 * @dev:	device to program
 * @expires:	absolute expiry time (monotonic clock)
 * @now:	current time
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
			      ktime_t now)
{
	unsigned long long clc;
	int64_t delta;

	delta = ktime_to_ns(ktime_sub(expires, now));

	if (delta <= 0)
		return -ETIME;

	dev->next_event = expires;

	if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
		return 0;

	if (delta > dev->max_delta_ns)
		delta = dev->max_delta_ns;
	if (delta < dev->min_delta_ns)
		delta = dev->min_delta_ns;

	/* Convert the clamped nanosecond delta to device ticks */
	clc = delta * dev->mult;
	clc >>= dev->shift;

	return dev->set_next_event((unsigned long) clc, dev);
}

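/*
 * Worked example for the mult/shift conversion above (the timer frequency
 * and calibration values are hypothetical, not taken from this file): the
 * device's mult and shift are calibrated so that ticks = ns * mult >> shift.
 * A 1 MHz timer calibrated with shift = 32 would use
 * mult ~= (10^6 << 32) / 10^9 ~= 4294967, so a 1 ms delta (1000000 ns)
 * programs roughly 1000000 * 4294967 >> 32 ~= 999 device ticks, i.e. about
 * 1 ms of hardware counting. clockevent_delta2ns() performs the inverse.
 */
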
/**
 * clockevents_register_notifier - register a clock events change listener
 */
int clockevents_register_notifier(struct notifier_block *nb)
{
	int ret;

	spin_lock(&clockevents_lock);
	ret = raw_notifier_chain_register(&clockevents_chain, nb);
	spin_unlock(&clockevents_lock);

	return ret;
}

/**
 * clockevents_unregister_notifier - unregister a clock events change listener
 */
void clockevents_unregister_notifier(struct notifier_block *nb)
{
	spin_lock(&clockevents_lock);
	raw_notifier_chain_unregister(&clockevents_chain, nb);
	spin_unlock(&clockevents_lock);
}

/*
 * Notify about a clock event change. Called with clockevents_lock
 * held.
 */
static void clockevents_do_notify(unsigned long reason, void *dev)
{
	raw_notifier_call_chain(&clockevents_chain, reason, dev);
}

/*
 * Called after a notify add to make devices available which were
 * released from the notifier call.
 */
static void clockevents_notify_released(void)
{
	struct clock_event_device *dev;

	while (!list_empty(&clockevents_released)) {
		dev = list_entry(clockevents_released.next,
				 struct clock_event_device, list);
		list_del(&dev->list);
		list_add(&dev->list, &clockevent_devices);
		clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev);
	}
}

/**
 * clockevents_register_device - register a clock event device
 * @dev:	device to register
 */
void clockevents_register_device(struct clock_event_device *dev)
{
	BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);

	spin_lock(&clockevents_lock);

	list_add(&dev->list, &clockevent_devices);
	clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev);
	clockevents_notify_released();

	spin_unlock(&clockevents_lock);
}

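/*
 * Illustrative registration sequence (a minimal sketch; the device name,
 * callbacks, frequency and latch values are hypothetical, not taken from
 * this file): a driver fills in the descriptor, derives its mult and the
 * min/max_delta_ns bounds, then registers the device:
 *
 *	static struct clock_event_device my_evt = {
 *		.name		= "my-timer",
 *		.features	= CLOCK_EVT_FEAT_ONESHOT,
 *		.shift		= 32,
 *		.rating		= 200,
 *		.set_next_event	= my_timer_set_next_event,
 *		.set_mode	= my_timer_set_mode,
 *	};
 *
 *	my_evt.mult = (unsigned long)(((u64) timer_freq_hz << my_evt.shift)
 *					/ NSEC_PER_SEC);
 *	my_evt.max_delta_ns = clockevent_delta2ns(0x7fffffff, &my_evt);
 *	my_evt.min_delta_ns = clockevent_delta2ns(0xf, &my_evt);
 *	my_evt.cpumask = cpumask_of_cpu(smp_processor_id());
 *	clockevents_register_device(&my_evt);
 */
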
/*
 * Noop handler when we shut down an event device
 */
static void clockevents_handle_noop(struct clock_event_device *dev)
{
}

/**
 * clockevents_exchange_device - release and request clock devices
 * @old:	device to release (can be NULL)
 * @new:	device to request (can be NULL)
 *
 * Called from the notifier chain. clockevents_lock is held already
 */
void clockevents_exchange_device(struct clock_event_device *old,
				 struct clock_event_device *new)
{
	unsigned long flags;

	local_irq_save(flags);
	/*
	 * Caller releases a clock event device. We queue it into the
	 * released list and do a notify add later.
	 */
	if (old) {
		old->event_handler = clockevents_handle_noop;
		clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
		list_del(&old->list);
		list_add(&old->list, &clockevents_released);
	}

	if (new) {
		BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED);
		clockevents_set_mode(new, CLOCK_EVT_MODE_SHUTDOWN);
	}
	local_irq_restore(flags);
}

/**
 * clockevents_request_device - request the best matching clock event device
 * @features:	required device features
 * @cpumask:	cpumask which the device has to serve
 *
 * Returns the highest rated registered device which provides the requested
 * features for the given cpumask, or NULL when no such device is available.
 */
struct clock_event_device *clockevents_request_device(unsigned int features,
						      cpumask_t cpumask)
{
	struct clock_event_device *cur, *dev = NULL;
	struct list_head *tmp;

	spin_lock(&clockevents_lock);

	list_for_each(tmp, &clockevent_devices) {
		cur = list_entry(tmp, struct clock_event_device, list);

		if ((cur->features & features) == features &&
		    cpus_equal(cpumask, cur->cpumask)) {
			if (!dev || dev->rating < cur->rating)
				dev = cur;
		}
	}

	clockevents_exchange_device(NULL, dev);

	spin_unlock(&clockevents_lock);

	return dev;
}

/**
 * clockevents_release_device - release a previously requested device
 * @dev:	device to release
 */
void clockevents_release_device(struct clock_event_device *dev)
{
	spin_lock(&clockevents_lock);

	clockevents_exchange_device(dev, NULL);
	clockevents_notify_released();

	spin_unlock(&clockevents_lock);
}

/**
 * clockevents_notify - notification about relevant events
 * @reason:	notification reason (CLOCK_EVT_NOTIFY_* code)
 * @arg:	reason dependent argument
 */
void clockevents_notify(unsigned long reason, void *arg)
{
	spin_lock(&clockevents_lock);
	clockevents_do_notify(reason, arg);

	switch (reason) {
	case CLOCK_EVT_NOTIFY_CPU_DEAD:
		/*
		 * Unregister the clock event devices which were
		 * released from the users in the notify chain.
		 */
		while (!list_empty(&clockevents_released)) {
			struct clock_event_device *dev;

			dev = list_entry(clockevents_released.next,
					 struct clock_event_device, list);
			list_del(&dev->list);
		}
		break;
	default:
		break;
	}
	spin_unlock(&clockevents_lock);
}
EXPORT_SYMBOL_GPL(clockevents_notify);

#ifdef CONFIG_SYSFS

/**
 * clockevents_show_registered - sysfs interface for listing clockevents
 * @dev:	unused
 * @buf:	char buffer to be filled with clock events list
 *
 * Provides sysfs interface for listing registered clock event devices
 */
static ssize_t clockevents_show_registered(struct sys_device *dev, char *buf)
{
	struct list_head *tmp;
	char *p = buf;
	int cpu;

	spin_lock(&clockevents_lock);

	list_for_each(tmp, &clockevent_devices) {
		struct clock_event_device *ce;

		ce = list_entry(tmp, struct clock_event_device, list);
		p += sprintf(p, "%-20s F:%04x M:%d", ce->name,
			     ce->features, ce->mode);
		p += sprintf(p, " C:");
		if (!cpus_equal(ce->cpumask, cpu_possible_map)) {
			for_each_cpu_mask(cpu, ce->cpumask)
				p += sprintf(p, " %d", cpu);
		} else {
			/*
			 * FIXME: Add the cpu which is handling this sucker
			 */
		}
		p += sprintf(p, "\n");
	}

	spin_unlock(&clockevents_lock);

	return p - buf;
}

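/*
 * With the sysdev class and device below, this attribute should appear as
 * /sys/devices/system/clockevents/clockevents0/registered (path assumed
 * from the standard sysdev layout). Given the format string above, a line
 * of output might look like:
 *
 *	lapic                F:0003 M:3 C: 0
 *
 * where the device name, feature mask, mode and cpu list are examples
 * only, not taken from this file.
 */
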
/*
 * Sysfs setup bits:
 */
static SYSDEV_ATTR(registered, 0600,
		   clockevents_show_registered, NULL);

static struct sysdev_class clockevents_sysclass = {
	set_kset_name("clockevents"),
};

static struct sys_device clockevents_sys_device = {
	.id	= 0,
	.cls	= &clockevents_sysclass,
};

static int __init clockevents_sysfs_init(void)
{
	int error = sysdev_class_register(&clockevents_sysclass);

	if (!error)
		error = sysdev_register(&clockevents_sys_device);
	if (!error)
		error = sysdev_create_file(
				&clockevents_sys_device,
				&attr_registered);
	return error;
}
device_initcall(clockevents_sysfs_init);
#endif