android_kernel_xiaomi_sm8350/fs/jffs2/background.c
Rafael J. Wysocki 8314418629 Freezer: make kernel threads nonfreezable by default
Currently, the freezer treats all tasks as freezable, except for the kernel
threads that explicitly set the PF_NOFREEZE flag for themselves.  This
approach is problematic, since it requires every kernel thread to either
set PF_NOFREEZE explicitly, or call try_to_freeze(), even if it doesn't
care for the freezing of tasks at all.

It seems better to only require the kernel threads that want to or need to
be frozen to use some freezer-related code and to remove any
freezer-related code from the other (nonfreezable) kernel threads, which is
done in this patch.

The patch makes all kernel threads nonfreezable by default (i.e. they have
PF_NOFREEZE set by default) and introduces the set_freezable() function,
which freezable kernel threads should call in order to clear PF_NOFREEZE.
It also makes all of the currently freezable kernel threads call
set_freezable(), so it shouldn't cause any intentional change of behaviour.
Additionally, it updates the documentation to describe the freezing of
tasks more accurately.

[akpm@linux-foundation.org: build fixes]
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Acked-by: Nigel Cunningham <nigel@nigel.suspend2.net>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Gautham R Shenoy <ego@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-07-17 10:23:02 -07:00

150 lines · 4.0 KiB · C
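
For context, the opt-in pattern this commit describes looks roughly like the
sketch below. It is a minimal illustration, not code from this file: the
thread function, its work loop and the HZ-based sleep are hypothetical, and
only set_freezable(), try_to_freeze() and the kthread helpers are the actual
kernel APIs involved.

#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/sched.h>

/* Hypothetical freezable kernel thread: after this patch, kernel threads
 * are nonfreezable unless they opt in with set_freezable(). */
static int example_freezable_thread(void *data)
{
	set_freezable();	/* clear PF_NOFREEZE: opt in to the freezer */

	while (!kthread_should_stop()) {
		try_to_freeze();	/* block here while tasks are being frozen */

		/* ... do the thread's periodic work ... */

		schedule_timeout_interruptible(HZ);
	}
	return 0;
}

JFFS2's own GC thread (below) predates the kthread API: it uses
kernel_thread() and daemonize(), and is woken by signals rather than
kthread_stop(), but the set_freezable()/try_to_freeze() pairing is the same.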

/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */
#include <linux/kernel.h>
#include <linux/jffs2.h>
#include <linux/mtd/mtd.h>
#include <linux/completion.h>
#include <linux/sched.h>
#include <linux/freezer.h>
#include "nodelist.h"
static int jffs2_garbage_collect_thread(void *);
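
/* Wake the GC thread (via SIGHUP) if it is running and there is work for it to do. */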
void jffs2_garbage_collect_trigger(struct jffs2_sb_info *c)
{
	spin_lock(&c->erase_completion_lock);
	if (c->gc_task && jffs2_thread_should_wake(c))
		send_sig(SIGHUP, c->gc_task, 1);
	spin_unlock(&c->erase_completion_lock);
}

/* This must only ever be called when no GC thread is currently running */
int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c)
{
	pid_t pid;
	int ret = 0;

	BUG_ON(c->gc_task);

	init_completion(&c->gc_thread_start);
	init_completion(&c->gc_thread_exit);

	pid = kernel_thread(jffs2_garbage_collect_thread, c, CLONE_FS|CLONE_FILES);
	if (pid < 0) {
		printk(KERN_WARNING "fork failed for JFFS2 garbage collect thread: %d\n", -pid);
		complete(&c->gc_thread_exit);
		ret = pid;
	} else {
		/* Wait for it... */
		D1(printk(KERN_DEBUG "JFFS2: Garbage collect thread is pid %d\n", pid));
		wait_for_completion(&c->gc_thread_start);
	}

	return ret;
}

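/* Ask the GC thread to exit (via SIGKILL) and wait until it has done so. */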
void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c)
{
	int wait = 0;

	spin_lock(&c->erase_completion_lock);
	if (c->gc_task) {
		D1(printk(KERN_DEBUG "jffs2: Killing GC task %d\n", c->gc_task->pid));
		send_sig(SIGKILL, c->gc_task, 1);
		wait = 1;
	}
	spin_unlock(&c->erase_completion_lock);

	if (wait)
		wait_for_completion(&c->gc_thread_exit);
}

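/* Main loop of the garbage-collect thread: sleep until woken by
 * jffs2_garbage_collect_trigger(), run one GC pass at a time, and handle
 * the freezer and the signals (SIGHUP/SIGSTOP/SIGKILL) used to control it. */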
static int jffs2_garbage_collect_thread(void *_c)
{
	struct jffs2_sb_info *c = _c;

	daemonize("jffs2_gcd_mtd%d", c->mtd->index);
	allow_signal(SIGKILL);
	allow_signal(SIGSTOP);
	allow_signal(SIGCONT);

	c->gc_task = current;
	complete(&c->gc_thread_start);

	set_user_nice(current, 10);

	set_freezable();
	for (;;) {
		allow_signal(SIGHUP);

		if (!jffs2_thread_should_wake(c)) {
			set_current_state(TASK_INTERRUPTIBLE);
			D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread sleeping...\n"));
			/* Yes, there's a race here; we checked jffs2_thread_should_wake()
			   before setting current->state to TASK_INTERRUPTIBLE. But it doesn't
			   matter - We don't care if we miss a wakeup, because the GC thread
			   is only an optimisation anyway. */
			schedule();
		}

		if (try_to_freeze())
			continue;

		/* This thread is purely an optimisation. But if it runs when
		   other things could be running, it actually makes things a
		   lot worse. Use yield() and put it at the back of the runqueue
		   every time. Especially during boot, pulling an inode in
		   with read_inode() is much preferable to having the GC thread
		   get there first. */
		yield();

		/* Put_super will send a SIGKILL and then wait on the sem.
		 */
		while (signal_pending(current)) {
			siginfo_t info;
			unsigned long signr;

			signr = dequeue_signal_lock(current, &current->blocked, &info);

			switch (signr) {
			case SIGSTOP:
				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGSTOP received.\n"));
				set_current_state(TASK_STOPPED);
				schedule();
				break;

			case SIGKILL:
				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGKILL received.\n"));
				goto die;

			case SIGHUP:
				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGHUP received.\n"));
				break;
			default:
				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): signal %ld received\n", signr));
			}
		}
		/* We don't want SIGHUP to interrupt us. STOP and KILL are OK though. */
		disallow_signal(SIGHUP);

		D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): pass\n"));
		if (jffs2_garbage_collect_pass(c) == -ENOSPC) {
			printk(KERN_NOTICE "No space for garbage collection. Aborting GC thread\n");
			goto die;
		}
	}

 die:
	spin_lock(&c->erase_completion_lock);
	c->gc_task = NULL;
	spin_unlock(&c->erase_completion_lock);
	complete_and_exit(&c->gc_thread_exit, 0);
}