android_kernel_xiaomi_sm8350/drivers/rtc/rtc-dev.c
Marcin Slusarz 2e4a75cdcb rtc: fix kernel panic on second use of SIGIO nofitication
When user space uses SIGIO notification and forgets to disable it before
closing the file descriptor, rtc->async_queue keeps a stale pointer to the
struct file.  When user space enables SIGIO notification again from a
different process, the kernel dereferences this (poisoned) pointer and crashes.

So disable SIGIO notification on close.
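
In this file the fix boils down to dropping the file from the fasync list
in rtc_dev_release() (the hunk is visible in the source below):

	if (file->f_flags & FASYNC)
		rtc_dev_fasync(-1, file, 0);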

Kernel panic:
(second run of qemu (requires echo 1024 > /sys/class/rtc/rtc0/max_user_freq))

general protection fault: 0000 [1] PREEMPT
CPU 0
Modules linked in: af_packet snd_pcm_oss snd_mixer_oss snd_seq_oss snd_seq_midi_event snd_seq usbhid tuner tea5767 tda8290 tuner_xc2028 xc5000 tda9887 tuner_simple tuner_types mt20xx tea5761 tda9875 uhci_hcd ehci_hcd usbcore bttv snd_via82xx snd_ac97_codec ac97_bus snd_pcm snd_timer ir_common compat_ioctl32 snd_page_alloc videodev v4l1_compat snd_mpu401_uart snd_rawmidi v4l2_common videobuf_dma_sg videobuf_core snd_seq_device snd btcx_risc soundcore tveeprom i2c_viapro
Pid: 5781, comm: qemu-system-x86 Not tainted 2.6.27-rc6 #363
RIP: 0010:[<ffffffff8024f891>]  [<ffffffff8024f891>] __lock_acquire+0x3db/0x73f
RSP: 0000:ffffffff80674cb8  EFLAGS: 00010002
RAX: ffff8800224c62f0 RBX: 0000000000000046 RCX: 0000000000000002
RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffff8800224c62f0
RBP: ffffffff80674d08 R08: 0000000000000002 R09: 0000000000000001
R10: ffffffff80238941 R11: 0000000000000001 R12: 0000000000000000
R13: 6b6b6b6b6b6b6b6b R14: ffff88003a450080 R15: 0000000000000000
FS:  00007f98b69516f0(0000) GS:ffffffff80623200(0000) knlGS:00000000f7cc86d0
CS:  0010 DS: 0000 ES: 0000 CR0: 000000008005003b
CR2: 0000000000a87000 CR3: 0000000022598000 CR4: 00000000000006e0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
Process qemu-system-x86 (pid: 5781, threadinfo ffff880028812000, task ffff88003a450080)
Stack:  ffffffff80674cf8 0000000180238440 0000000200000002 0000000000000000
 ffff8800224c62f0 0000000000000046 0000000000000000 0000000000000002
 0000000000000002 0000000000000000 ffffffff80674d68 ffffffff8024fc7a
Call Trace:
 <IRQ>  [<ffffffff8024fc7a>] lock_acquire+0x85/0xa9
 [<ffffffff8029cb62>] ? send_sigio+0x2a/0x184
 [<ffffffff80491d1f>] _read_lock+0x3e/0x4a
 [<ffffffff8029cb62>] ? send_sigio+0x2a/0x184
 [<ffffffff8029cb62>] send_sigio+0x2a/0x184
 [<ffffffff8024fb97>] ? __lock_acquire+0x6e1/0x73f
 [<ffffffff8029cd4d>] ? kill_fasync+0x2c/0x4e
 [<ffffffff8029cd10>] __kill_fasync+0x54/0x65
 [<ffffffff8029cd5b>] kill_fasync+0x3a/0x4e
 [<ffffffff80402896>] rtc_update_irq+0x9c/0xa5
 [<ffffffff80404640>] cmos_interrupt+0xae/0xc0
 [<ffffffff8025d1c1>] handle_IRQ_event+0x25/0x5a
 [<ffffffff8025e5e4>] handle_edge_irq+0xdd/0x123
 [<ffffffff8020da34>] do_IRQ+0xe4/0x144
 [<ffffffff8020bad6>] ret_from_intr+0x0/0xf
 <EOI>  [<ffffffff8026fdc2>] ? __alloc_pages_internal+0xe7/0x3ad
 [<ffffffff8033fe67>] ? clear_page_c+0x7/0x10
 [<ffffffff8026fc10>] ? get_page_from_freelist+0x385/0x450
 [<ffffffff8026fdc2>] ? __alloc_pages_internal+0xe7/0x3ad
 [<ffffffff80280aac>] ? anon_vma_prepare+0x2e/0xf6
 [<ffffffff80279400>] ? handle_mm_fault+0x227/0x6a5
 [<ffffffff80494716>] ? do_page_fault+0x494/0x83f
 [<ffffffff8049251d>] ? error_exit+0x0/0xa9

Code: cc 41 39 45 28 74 24 e8 5e 1d 0f 00 85 c0 0f 84 6a 03 00 00 83 3d 8f a9 aa 00 00 be 47 03 00 00 0f 84 6a 02 00 00 e9 53 03 00 00 <41> ff 85 38 01 00 00 45 8b be 90 06 00 00 41 83 ff 2f 76 24 e8
RIP  [<ffffffff8024f891>] __lock_acquire+0x3db/0x73f
 RSP <ffffffff80674cb8>
---[ end trace 431877d860448760 ]---
Kernel panic - not syncing: Aiee, killing interrupt handler!

Signed-off-by: Marcin Slusarz <marcin.slusarz@gmail.com>
Acked-by: Alessandro Zummo <alessandro.zummo@towertech.it>
Acked-by: David Brownell <dbrownell@users.sourceforge.net>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-10-03 18:22:17 -07:00

/*
 * RTC subsystem, dev interface
 *
 * Copyright (C) 2005 Tower Technologies
 * Author: Alessandro Zummo <a.zummo@towertech.it>
 *
 * based on arch/arm/common/rtctime.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/rtc.h>

#include "rtc-core.h"

static dev_t rtc_devt;

#define RTC_DEV_MAX 16 /* 16 RTCs should be enough for everyone... */

static int rtc_dev_open(struct inode *inode, struct file *file)
{
	int err;
	struct rtc_device *rtc = container_of(inode->i_cdev,
					struct rtc_device, char_dev);
	const struct rtc_class_ops *ops = rtc->ops;

	if (test_and_set_bit_lock(RTC_DEV_BUSY, &rtc->flags))
		return -EBUSY;

	file->private_data = rtc;

	err = ops->open ? ops->open(rtc->dev.parent) : 0;
	if (err == 0) {
		spin_lock_irq(&rtc->irq_lock);
		rtc->irq_data = 0;
		spin_unlock_irq(&rtc->irq_lock);

		return 0;
	}

	/* something has gone wrong */
	clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags);
	return err;
}

#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
/*
 * Routine to poll RTC seconds field for change as often as possible,
 * after first RTC_UIE use timer to reduce polling
 */
static void rtc_uie_task(struct work_struct *work)
{
	struct rtc_device *rtc =
		container_of(work, struct rtc_device, uie_task);
	struct rtc_time tm;
	int num = 0;
	int err;

	err = rtc_read_time(rtc, &tm);

	local_irq_disable();
	spin_lock(&rtc->irq_lock);
	if (rtc->stop_uie_polling || err) {
		rtc->uie_task_active = 0;
	} else if (rtc->oldsecs != tm.tm_sec) {
		num = (tm.tm_sec + 60 - rtc->oldsecs) % 60;
		rtc->oldsecs = tm.tm_sec;
		rtc->uie_timer.expires = jiffies + HZ - (HZ/10);
		rtc->uie_timer_active = 1;
		rtc->uie_task_active = 0;
		add_timer(&rtc->uie_timer);
	} else if (schedule_work(&rtc->uie_task) == 0) {
		rtc->uie_task_active = 0;
	}
	spin_unlock(&rtc->irq_lock);
	if (num)
		rtc_update_irq(rtc, num, RTC_UF | RTC_IRQF);
	local_irq_enable();
}

static void rtc_uie_timer(unsigned long data)
{
	struct rtc_device *rtc = (struct rtc_device *)data;
	unsigned long flags;

	spin_lock_irqsave(&rtc->irq_lock, flags);
	rtc->uie_timer_active = 0;
	rtc->uie_task_active = 1;
	if ((schedule_work(&rtc->uie_task) == 0))
		rtc->uie_task_active = 0;
	spin_unlock_irqrestore(&rtc->irq_lock, flags);
}

static void clear_uie(struct rtc_device *rtc)
{
	spin_lock_irq(&rtc->irq_lock);
	if (rtc->irq_active) {
		rtc->stop_uie_polling = 1;
		if (rtc->uie_timer_active) {
			spin_unlock_irq(&rtc->irq_lock);
			del_timer_sync(&rtc->uie_timer);
			spin_lock_irq(&rtc->irq_lock);
			rtc->uie_timer_active = 0;
		}
		if (rtc->uie_task_active) {
			spin_unlock_irq(&rtc->irq_lock);
			flush_scheduled_work();
			spin_lock_irq(&rtc->irq_lock);
		}
		rtc->irq_active = 0;
	}
	spin_unlock_irq(&rtc->irq_lock);
}

static int set_uie(struct rtc_device *rtc)
{
	struct rtc_time tm;
	int err;

	err = rtc_read_time(rtc, &tm);
	if (err)
		return err;
	spin_lock_irq(&rtc->irq_lock);
	if (!rtc->irq_active) {
		rtc->irq_active = 1;
		rtc->stop_uie_polling = 0;
		rtc->oldsecs = tm.tm_sec;
		rtc->uie_task_active = 1;
		if (schedule_work(&rtc->uie_task) == 0)
			rtc->uie_task_active = 0;
	}
	rtc->irq_data = 0;
	spin_unlock_irq(&rtc->irq_lock);
	return 0;
}
#endif /* CONFIG_RTC_INTF_DEV_UIE_EMUL */

static ssize_t
rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct rtc_device *rtc = file->private_data;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long data;
	ssize_t ret;

	if (count != sizeof(unsigned int) && count < sizeof(unsigned long))
		return -EINVAL;

	add_wait_queue(&rtc->irq_queue, &wait);
	do {
		__set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irq(&rtc->irq_lock);
		data = rtc->irq_data;
		rtc->irq_data = 0;
		spin_unlock_irq(&rtc->irq_lock);

		if (data != 0) {
			ret = 0;
			break;
		}
		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		schedule();
	} while (1);
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&rtc->irq_queue, &wait);

	if (ret == 0) {
		/* Check for any data updates */
		if (rtc->ops->read_callback)
			data = rtc->ops->read_callback(rtc->dev.parent,
						       data);

		if (sizeof(int) != sizeof(long) &&
		    count == sizeof(unsigned int))
			ret = put_user(data, (unsigned int __user *)buf) ?:
				sizeof(unsigned int);
		else
			ret = put_user(data, (unsigned long __user *)buf) ?:
				sizeof(unsigned long);
	}
	return ret;
}
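
/*
 * Illustration (not part of the original file): the value handed to user
 * space by rtc_dev_read() packs the event flags in the low byte and the
 * number of events in the higher bits, as filled in by rtc_update_irq().
 * A user-space reader can unpack it like this; "fd" is assumed to be an
 * open descriptor on the RTC device node:
 *
 *	unsigned long data;
 *	read(fd, &data, sizeof(data));
 *	unsigned long nevents = data >> 8;
 *	unsigned long flags   = data & 0xff;	// RTC_UF/RTC_AF/RTC_PF | RTC_IRQF
 *
 * e.g. data == 0x190 means one update event: (1 << 8) | RTC_UF | RTC_IRQF.
 */
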
static unsigned int rtc_dev_poll(struct file *file, poll_table *wait)
{
	struct rtc_device *rtc = file->private_data;
	unsigned long data;

	poll_wait(file, &rtc->irq_queue, wait);

	data = rtc->irq_data;

	return (data != 0) ? (POLLIN | POLLRDNORM) : 0;
}

static long rtc_dev_ioctl(struct file *file,
		unsigned int cmd, unsigned long arg)
{
	int err = 0;
	struct rtc_device *rtc = file->private_data;
	const struct rtc_class_ops *ops = rtc->ops;
	struct rtc_time tm;
	struct rtc_wkalrm alarm;
	void __user *uarg = (void __user *) arg;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;

	/* check that the calling task has appropriate permissions
	 * for certain ioctls. doing this check here is useful
	 * to avoid duplicate code in each driver.
	 */
	switch (cmd) {
	case RTC_EPOCH_SET:
	case RTC_SET_TIME:
		if (!capable(CAP_SYS_TIME))
			err = -EACCES;
		break;

	case RTC_IRQP_SET:
		if (arg > rtc->max_user_freq && !capable(CAP_SYS_RESOURCE))
			err = -EACCES;
		break;

	case RTC_PIE_ON:
		if (rtc->irq_freq > rtc->max_user_freq &&
				!capable(CAP_SYS_RESOURCE))
			err = -EACCES;
		break;
	}

	if (err)
		goto done;

	/* try the driver's ioctl interface */
	if (ops->ioctl) {
		err = ops->ioctl(rtc->dev.parent, cmd, arg);
		if (err != -ENOIOCTLCMD) {
			mutex_unlock(&rtc->ops_lock);
			return err;
		}
	}

	/* if the driver does not provide the ioctl interface
	 * or if that particular ioctl was not implemented
	 * (-ENOIOCTLCMD), we will try to emulate here.
	 *
	 * Drivers *SHOULD NOT* provide ioctl implementations
	 * for these requests.  Instead, provide methods to
	 * support the following code, so that the RTC's main
	 * features are accessible without using ioctls.
	 *
	 * RTC and alarm times will be in UTC, by preference,
	 * but dual-booting with MS-Windows implies RTCs must
	 * use the local wall clock time.
	 */

	switch (cmd) {
	case RTC_ALM_READ:
		mutex_unlock(&rtc->ops_lock);

		err = rtc_read_alarm(rtc, &alarm);
		if (err < 0)
			return err;

		if (copy_to_user(uarg, &alarm.time, sizeof(tm)))
			err = -EFAULT;
		return err;

	case RTC_ALM_SET:
		mutex_unlock(&rtc->ops_lock);

		if (copy_from_user(&alarm.time, uarg, sizeof(tm)))
			return -EFAULT;

		alarm.enabled = 0;
		alarm.pending = 0;
		alarm.time.tm_wday = -1;
		alarm.time.tm_yday = -1;
		alarm.time.tm_isdst = -1;

		/* RTC_ALM_SET alarms may be up to 24 hours in the future.
		 * Rather than expecting every RTC to implement "don't care"
		 * for day/month/year fields, just force the alarm to have
		 * the right values for those fields.
		 *
		 * RTC_WKALM_SET should be used instead.  Not only does it
		 * eliminate the need for a separate RTC_AIE_ON call, it
		 * doesn't have the "alarm 23:59:59 in the future" race.
		 *
		 * NOTE:  some legacy code may have used invalid fields as
		 * wildcards, exposing hardware "periodic alarm" capabilities.
		 * Not supported here.
		 */
		{
			unsigned long now, then;

			err = rtc_read_time(rtc, &tm);
			if (err < 0)
				return err;
			rtc_tm_to_time(&tm, &now);

			alarm.time.tm_mday = tm.tm_mday;
			alarm.time.tm_mon = tm.tm_mon;
			alarm.time.tm_year = tm.tm_year;
			err = rtc_valid_tm(&alarm.time);
			if (err < 0)
				return err;
			rtc_tm_to_time(&alarm.time, &then);

			/* alarm may need to wrap into tomorrow */
			if (then < now) {
				rtc_time_to_tm(now + 24 * 60 * 60, &tm);
				alarm.time.tm_mday = tm.tm_mday;
				alarm.time.tm_mon = tm.tm_mon;
				alarm.time.tm_year = tm.tm_year;
			}
		}

		return rtc_set_alarm(rtc, &alarm);

	case RTC_RD_TIME:
		mutex_unlock(&rtc->ops_lock);

		err = rtc_read_time(rtc, &tm);
		if (err < 0)
			return err;

		if (copy_to_user(uarg, &tm, sizeof(tm)))
			err = -EFAULT;
		return err;

	case RTC_SET_TIME:
		mutex_unlock(&rtc->ops_lock);

		if (copy_from_user(&tm, uarg, sizeof(tm)))
			return -EFAULT;

		return rtc_set_time(rtc, &tm);

	case RTC_PIE_ON:
		err = rtc_irq_set_state(rtc, NULL, 1);
		break;

	case RTC_PIE_OFF:
		err = rtc_irq_set_state(rtc, NULL, 0);
		break;

	case RTC_IRQP_SET:
		err = rtc_irq_set_freq(rtc, NULL, arg);
		break;

	case RTC_IRQP_READ:
		err = put_user(rtc->irq_freq, (unsigned long __user *)uarg);
		break;

#if 0
	case RTC_EPOCH_SET:
#ifndef rtc_epoch
		/*
		 * There were no RTC clocks before 1900.
		 */
		if (arg < 1900) {
			err = -EINVAL;
			break;
		}
		rtc_epoch = arg;
		err = 0;
#endif
		break;

	case RTC_EPOCH_READ:
		err = put_user(rtc_epoch, (unsigned long __user *)uarg);
		break;
#endif
	case RTC_WKALM_SET:
		mutex_unlock(&rtc->ops_lock);
		if (copy_from_user(&alarm, uarg, sizeof(alarm)))
			return -EFAULT;

		return rtc_set_alarm(rtc, &alarm);

	case RTC_WKALM_RD:
		mutex_unlock(&rtc->ops_lock);
		err = rtc_read_alarm(rtc, &alarm);
		if (err < 0)
			return err;

		if (copy_to_user(uarg, &alarm, sizeof(alarm)))
			err = -EFAULT;
		return err;

#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
	case RTC_UIE_OFF:
		mutex_unlock(&rtc->ops_lock);
		clear_uie(rtc);
		return 0;

	case RTC_UIE_ON:
		mutex_unlock(&rtc->ops_lock);
		err = set_uie(rtc);
		return err;
#endif
	default:
		err = -ENOTTY;
		break;
	}

done:
	mutex_unlock(&rtc->ops_lock);
	return err;
}

static int rtc_dev_fasync(int fd, struct file *file, int on)
{
	struct rtc_device *rtc = file->private_data;
	return fasync_helper(fd, file, on, &rtc->async_queue);
}

static int rtc_dev_release(struct inode *inode, struct file *file)
{
	struct rtc_device *rtc = file->private_data;

#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
	clear_uie(rtc);
#endif
	rtc_irq_set_state(rtc, NULL, 0);

	if (rtc->ops->release)
		rtc->ops->release(rtc->dev.parent);

	/*
	 * Disable any pending SIGIO notification: otherwise rtc->async_queue
	 * would keep a stale pointer to this struct file after close (see
	 * the commit log above).
	 */
	if (file->f_flags & FASYNC)
		rtc_dev_fasync(-1, file, 0);

	clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags);
	return 0;
}

static const struct file_operations rtc_dev_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= rtc_dev_read,
	.poll		= rtc_dev_poll,
	.unlocked_ioctl	= rtc_dev_ioctl,
	.open		= rtc_dev_open,
	.release	= rtc_dev_release,
	.fasync		= rtc_dev_fasync,
};

/* insertion/removal hooks */

void rtc_dev_prepare(struct rtc_device *rtc)
{
	if (!rtc_devt)
		return;

	if (rtc->id >= RTC_DEV_MAX) {
		pr_debug("%s: too many RTC devices\n", rtc->name);
		return;
	}

	rtc->dev.devt = MKDEV(MAJOR(rtc_devt), rtc->id);

#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
	INIT_WORK(&rtc->uie_task, rtc_uie_task);
	setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc);
#endif

	cdev_init(&rtc->char_dev, &rtc_dev_fops);
	rtc->char_dev.owner = rtc->owner;
}

void rtc_dev_add_device(struct rtc_device *rtc)
{
	if (cdev_add(&rtc->char_dev, rtc->dev.devt, 1))
		printk(KERN_WARNING "%s: failed to add char device %d:%d\n",
			rtc->name, MAJOR(rtc_devt), rtc->id);
	else
		pr_debug("%s: dev (%d:%d)\n", rtc->name,
			MAJOR(rtc_devt), rtc->id);
}

void rtc_dev_del_device(struct rtc_device *rtc)
{
	if (rtc->dev.devt)
		cdev_del(&rtc->char_dev);
}

void __init rtc_dev_init(void)
{
	int err;

	err = alloc_chrdev_region(&rtc_devt, 0, RTC_DEV_MAX, "rtc");
	if (err < 0)
		printk(KERN_ERR "%s: failed to allocate char dev region\n",
			__FILE__);
}

void __exit rtc_dev_exit(void)
{
	if (rtc_devt)
		unregister_chrdev_region(rtc_devt, RTC_DEV_MAX);
}
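
For reference, a minimal user-space sketch of how this character device is
typically driven. It assumes the device node is /dev/rtc0 and that update
interrupts are available (natively or via the UIE emulation above); error
handling is mostly omitted:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	struct rtc_time tm;
	unsigned long data;
	int fd = open("/dev/rtc0", O_RDONLY);

	if (fd < 0)
		return 1;

	if (ioctl(fd, RTC_RD_TIME, &tm) == 0)	/* handled by RTC_RD_TIME above */
		printf("%04d-%02d-%02d %02d:%02d:%02d UTC\n",
		       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
		       tm.tm_hour, tm.tm_min, tm.tm_sec);

	ioctl(fd, RTC_UIE_ON, 0);		/* enable update interrupts */
	read(fd, &data, sizeof(data));		/* blocks in rtc_dev_read() */
	ioctl(fd, RTC_UIE_OFF, 0);		/* also cleared on release */

	close(fd);
	return 0;
}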