15316ba81a
Swap migration's isolate_lru_page() currently uses an IPI to notify other processors that the LRU caches need to be drained if the page cannot be found on the LRU. The IPI may interrupt a processor that is in the middle of processing LRU requests and cause a race condition.

This patch introduces a new function, schedule_on_each_cpu(), which uses keventd to run the LRU draining on each processor. Processors disable preemption when dealing with the LRU caches (these are per-processor), so executing the LRU draining from another process is safe.

Thanks to Lee Schermerhorn <lee.schermerhorn@hp.com> for finding this race condition.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
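For context, here is a minimal sketch of how the IPI-based drain could be replaced by the new helper. The mm/ side names (lru_add_drain_per_cpu, lru_add_drain_all) are illustrative assumptions, not necessarily the exact code of this patch:

/* Illustrative sketch only: drain the per-CPU LRU pagevecs from keventd
 * on every processor instead of sending an IPI. */
#include <linux/workqueue.h>
#include <linux/swap.h>

static void lru_add_drain_per_cpu(void *dummy)
{
	/* Runs in keventd's process context on one particular CPU;
	 * lru_add_drain() only touches that CPU's pagevecs. */
	lru_add_drain();
}

int lru_add_drain_all(void)
{
	/* Queue the callback on every CPU's keventd and wait for
	 * all of them to finish before returning. */
	return schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
}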
94 lines
2.6 KiB
C
/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>

struct workqueue_struct;

struct work_struct {
	unsigned long pending;
	struct list_head entry;
	void (*func)(void *);
	void *data;
	void *wq_data;
	struct timer_list timer;
};

#define __WORK_INITIALIZER(n, f, d) {				\
	.entry	= { &(n).entry, &(n).entry },			\
	.func = (f),						\
	.data = (d),						\
	.timer = TIMER_INITIALIZER(NULL, 0, 0),			\
	}

#define DECLARE_WORK(n, f, d)					\
	struct work_struct n = __WORK_INITIALIZER(n, f, d)

/*
 * initialize a work-struct's func and data pointers:
 */
#define PREPARE_WORK(_work, _func, _data)			\
	do {							\
		(_work)->func = _func;				\
		(_work)->data = _data;				\
	} while (0)

/*
 * initialize all of a work-struct:
 */
#define INIT_WORK(_work, _func, _data)				\
	do {							\
		INIT_LIST_HEAD(&(_work)->entry);		\
		(_work)->pending = 0;				\
		PREPARE_WORK((_work), (_func), (_data));	\
		init_timer(&(_work)->timer);			\
	} while (0)

extern struct workqueue_struct *__create_workqueue(const char *name,
						    int singlethread);
#define create_workqueue(name) __create_workqueue((name), 0)
#define create_singlethread_workqueue(name) __create_workqueue((name), 1)

extern void destroy_workqueue(struct workqueue_struct *wq);

extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work));
extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct work_struct *work, unsigned long delay));
extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq));

extern int FASTCALL(schedule_work(struct work_struct *work));
extern int FASTCALL(schedule_delayed_work(struct work_struct *work, unsigned long delay));

extern int schedule_delayed_work_on(int cpu, struct work_struct *work, unsigned long delay);
extern int schedule_on_each_cpu(void (*func)(void *info), void *info);
extern void flush_scheduled_work(void);
extern int current_is_keventd(void);
extern int keventd_up(void);

extern void init_workqueues(void);
void cancel_rearming_delayed_work(struct work_struct *work);
void cancel_rearming_delayed_workqueue(struct workqueue_struct *,
				       struct work_struct *);

/*
 * Kill off a pending schedule_delayed_work().  Note that the work callback
 * function may still be running on return from cancel_delayed_work().  Run
 * flush_scheduled_work() to wait on it.
 */
static inline int cancel_delayed_work(struct work_struct *work)
{
	int ret;

	ret = del_timer_sync(&work->timer);
	if (ret)
		clear_bit(0, &work->pending);
	return ret;
}

#endif
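For orientation, a hedged usage sketch of the interface declared above; the handler, work item, and example() wrapper are made up for illustration:

/* Illustrative only: typical use of the work queue interface above. */
#include <linux/workqueue.h>
#include <linux/jiffies.h>	/* HZ */

static void my_handler(void *data)
{
	/* Runs later in process context from keventd. */
}

/* Statically define and initialize a work item bound to my_handler. */
static DECLARE_WORK(my_work, my_handler, NULL);

static void example(void)
{
	/* Run the handler soon on keventd. */
	schedule_work(&my_work);
	flush_scheduled_work();		/* wait until it has completed */

	/* Or run it roughly one second from now... */
	schedule_delayed_work(&my_work, HZ);
	/* ...and possibly cancel before the timer fires; per the comment
	 * above, the callback may already be running, so flush as well. */
	if (!cancel_delayed_work(&my_work))
		flush_scheduled_work();
}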