2005-04-16 18:20:36 -04:00
|
|
|
#ifndef _LINUX_FUTEX_H
|
|
|
|
#define _LINUX_FUTEX_H
|
|
|
|
|
2006-03-27 04:16:22 -05:00
|
|
|
#include <linux/sched.h>
|
|
|
|
|
2007-05-09 05:35:02 -04:00
|
|
|
union ktime;
|
|
|
|
|
2005-04-16 18:20:36 -04:00
|
|
|
/* Second argument to futex syscall */
|
|
|
|
|
|
|
|
|
[PATCH] FUTEX_WAKE_OP: pthread_cond_signal() speedup
At the moment pthread_cond_signal is unnecessarily slow, because it wakes
one waiter (which at least on UP usually means an immediate context switch
to one of the waiter threads). This waiter wakes up, and after a few
instructions it attempts to acquire the cv internal lock, but that lock is
still held by the thread calling pthread_cond_signal. So it goes to sleep,
and eventually the signalling thread is scheduled in, unlocks the internal
lock and wakes the waiter again.
Now, before 2003-09-21 NPTL was using FUTEX_REQUEUE in pthread_cond_signal
to avoid this performance issue, but it was removed when locks were
redesigned to the 3 state scheme (unlocked, locked uncontended, locked
contended).
The following scenario shows why simply using FUTEX_REQUEUE in
pthread_cond_signal together with using lll_mutex_unlock_force in place of
lll_mutex_unlock is not enough, and probably why it was disabled at
the time. The number in the left column is the value of cv->__data.__lock.
thr1 thr2 thr3
0 pthread_cond_wait
1 lll_mutex_lock (cv->__data.__lock)
0 lll_mutex_unlock (cv->__data.__lock)
0 lll_futex_wait (&cv->__data.__futex, futexval)
0 pthread_cond_signal
1 lll_mutex_lock (cv->__data.__lock)
1 pthread_cond_signal
2 lll_mutex_lock (cv->__data.__lock)
2 lll_futex_wait (&cv->__data.__lock, 2)
2 lll_futex_requeue (&cv->__data.__futex, 0, 1, &cv->__data.__lock)
# FUTEX_REQUEUE, not FUTEX_CMP_REQUEUE
2 lll_mutex_unlock_force (cv->__data.__lock)
0 cv->__data.__lock = 0
0 lll_futex_wake (&cv->__data.__lock, 1)
1 lll_mutex_lock (cv->__data.__lock)
0 lll_mutex_unlock (cv->__data.__lock)
# Here, lll_mutex_unlock doesn't know there are threads waiting
# on the internal cv's lock
Now, I believe it is possible to use FUTEX_REQUEUE in pthread_cond_signal,
but it would cost us not one but two extra syscalls and, what's worse, one
of these extra syscalls would be done for every single waiting loop in
pthread_cond_*wait.
We would need to use lll_mutex_unlock_force in pthread_cond_signal after
requeue and lll_mutex_cond_lock in pthread_cond_*wait after lll_futex_wait.
Another alternative is to do the unlocking pthread_cond_signal needs to do
(the lock can't be unlocked before lll_futex_wake, as that is racy) in the
kernel.
I have implemented both variants, futex-requeue-glibc.patch is the first
one and futex-wake_op{,-glibc}.patch is the unlocking inside of the kernel.
The kernel interface allows userland to specify what exactly an unlocking
operation should look like (some atomic arithmetic operation with an
optional constant argument, plus a comparison of the previous futex value
with another constant).
It has been implemented just for ppc*, x86_64 and i?86; for other
architectures I'm including just a stub header which can be used as a
starting point by maintainers to write support for their arches, and which
for now just returns -ENOSYS for FUTEX_WAKE_OP. The requeue patch has been
(lightly) tested just on x86_64; the wake_op patch on a ppc64 kernel running
32-bit and 64-bit NPTL and an x86_64 kernel running 32-bit and 64-bit NPTL.
With the following benchmark on UP x86-64 I get:
for i in nptl-orig nptl-requeue nptl-wake_op; do echo time elf/ld.so --library-path .:$i /tmp/bench; \
for j in 1 2; do ( time elf/ld.so --library-path .:$i /tmp/bench ) 2>&1; done; done
time elf/ld.so --library-path .:nptl-orig /tmp/bench
real 0m0.655s user 0m0.253s sys 0m0.403s
real 0m0.657s user 0m0.269s sys 0m0.388s
time elf/ld.so --library-path .:nptl-requeue /tmp/bench
real 0m0.496s user 0m0.225s sys 0m0.271s
real 0m0.531s user 0m0.242s sys 0m0.288s
time elf/ld.so --library-path .:nptl-wake_op /tmp/bench
real 0m0.380s user 0m0.176s sys 0m0.204s
real 0m0.382s user 0m0.175s sys 0m0.207s
The benchmark is at:
http://sourceware.org/ml/libc-alpha/2005-03/txt00001.txt
Older futex-requeue-glibc.patch version is at:
http://sourceware.org/ml/libc-alpha/2005-03/txt00002.txt
Older futex-wake_op-glibc.patch version is at:
http://sourceware.org/ml/libc-alpha/2005-03/txt00003.txt
I will post a new version (just x86-64 fixes so that the patch
applies against pthread_cond_signal.S) to the libc-hacker ml soon.
Attached is the kernel FUTEX_WAKE_OP patch as well as a simple-minded
testcase that will not test the atomicity of the operation, but will at
least check whether the threads that should have been woken up are woken up
and whether the arithmetic operation in the kernel gave the expected results.
Acked-by: Ingo Molnar <mingo@redhat.com>
Cc: Ulrich Drepper <drepper@redhat.com>
Cc: Jamie Lokier <jamie@shareable.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Jakub Jelinek <jakub@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-09-06 18:16:25 -04:00
|
|
|
#define FUTEX_WAIT 0
|
|
|
|
#define FUTEX_WAKE 1
|
|
|
|
#define FUTEX_FD 2
|
|
|
|
#define FUTEX_REQUEUE 3
|
|
|
|
#define FUTEX_CMP_REQUEUE 4
|
|
|
|
#define FUTEX_WAKE_OP 5
|
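As a quick orientation for the opcode list above, here is a minimal
user-space sketch (hedged: not part of this header, and the helper names
are illustrative; it assumes only SYS_futex and the defines above) of the
two basic commands:

/* Minimal sketch: FUTEX_WAIT blocks only while *addr still holds the
 * expected value; FUTEX_WAKE wakes up to `n` waiters queued on addr. */
#include <linux/futex.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

static void futex_wait(uint32_t *addr, uint32_t expected)
{
	/* Returns immediately with EWOULDBLOCK if *addr != expected. */
	syscall(SYS_futex, addr, FUTEX_WAIT, expected, NULL, NULL, 0);
}

static void futex_wake(uint32_t *addr, int n)
{
	syscall(SYS_futex, addr, FUTEX_WAKE, n, NULL, NULL, 0);
}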
2006-06-27 05:54:58 -04:00
|
|
|
#define FUTEX_LOCK_PI 6
|
|
|
|
#define FUTEX_UNLOCK_PI 7
|
|
|
|
#define FUTEX_TRYLOCK_PI 8
|
2005-04-16 18:20:36 -04:00
|
|
|
|
FUTEX: new PRIVATE futexes
Analysis of the current Linux futex code:
--------------------------------------
A central hash table futex_queues[] holds all contexts (futex_q) of waiting
threads.
Each futex_wait()/futex_wake() has to obtain a spinlock on a hash slot to
perform lookups or insertion/deletion of a futex_q.
When a futex_wait() is done, the calling thread has to:
1) Obtain a read lock on mmap_sem to be able to validate the user pointer
   (calling find_vma()). This validation tells us whether the futex uses
   an inode based store (mapped file) or an mm based store (anonymous mem).
2) Compute a hash key.
3) Atomically increment a reference counter on the inode or mm_struct.
4) Lock part of the futex_queues[] hash table.
5) Perform the test on the value of the futex
   (roll back if value != expected_value, returning EWOULDBLOCK;
   various loops if the test triggers mm faults).
6) Queue the context into the hash table, releasing the lock taken in 4).
7) Release the read lock on mmap_sem.
<block>
8) Eventually unqueue the context (but rarely, as this part may be done
   by futex_wake()).
Futexes were designed to improve scalability, but the current implementation
has various problems:
- Central hashtable:
  This means scalability problems if many processes/threads want to use
  futexes at the same time, and a NUMA imbalance because the hashtable is
  located on one node.
- Using mmap_sem on every futex() syscall:
  Even though mmap_sem is a rw_semaphore, up_read()/down_read() perform
  atomic ops on mmap_sem, dirtying its cache line:
  - lots of cache line ping-pongs on SMP configurations.
  mmap_sem is also extensively used by mm code (page faults, mmap()/munmap()),
  so highly threaded processes might suffer from mmap_sem contention.
  mmap_sem is also used by oprofile code; enabling oprofile hurts threaded
  programs because of contention on the mmap_sem cache line.
- Using atomic_inc()/atomic_dec() on the inode ref counter or mm ref counter:
  This is also a cache line ping-pong on SMP. It also increases mmap_sem hold
  time because of cache misses.
Most of these scalability problems come from the fact that futexes live in
one global namespace. As we use a central hash table, we must make sure
they all use the same reference (given by the mm subsystem). We chose to
force all futexes to be 'shared'. This has a cost. But the fact is that
POSIX defined PRIVATE and SHARED, allowing a clear separation and optimal
performance if carefully implemented. The time has come for Linux to have
better threading performance.
The goal is to permit new futex commands to avoid:
- Taking the mmap_sem semaphore, conflicting with other subsystems.
- Modifying a ref_count on an mm or an inode, still conflicting with mm or fs.
This is possible because, for a process using PTHREAD_PROCESS_PRIVATE
futexes, we only need to distinguish futexes by their virtual address, no
matter what the underlying mm storage is.
If glibc wants to exploit this new infrastructure, it should use the new
_PRIVATE futex subcommands for PTHREAD_PROCESS_PRIVATE futexes, and be
prepared to fall back to the old subcommands on old kernels. Using one
global variable holding either FUTEX_PRIVATE_FLAG or 0 should be OK, as in
the sketch after this paragraph.
PTHREAD_PROCESS_SHARED futexes should still use the old subcommands.
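A minimal sketch of that fallback, assuming a one-time probe at startup
(hedged: the names futex_private_flag, sys_futex and futex_wait_simple are
illustrative, not glibc's actual ones):

/* Probe once whether the kernel understands the _PRIVATE subcommands,
 * then OR the cached flag into every subsequent futex op. */
#include <errno.h>
#include <linux/futex.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

static int futex_private_flag = FUTEX_PRIVATE_FLAG;

static long sys_futex(uint32_t *uaddr, int op, uint32_t val)
{
	return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

static void futex_detect_private(void)
{
	uint32_t dummy = 1;

	/* FUTEX_WAIT with val != *uaddr fails fast with EWOULDBLOCK on
	 * kernels that know the op; old kernels return ENOSYS instead. */
	if (sys_futex(&dummy, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, 0) == -1 &&
	    errno == ENOSYS)
		futex_private_flag = 0;		/* old kernel: fall back */
}

static void futex_wait_simple(uint32_t *uaddr, uint32_t val)
{
	sys_futex(uaddr, FUTEX_WAIT | futex_private_flag, val);
}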
Compatibility with old applications is preserved; they still hit the
scalability problems, but new applications can fly :)
Note: the same SHARED futex (mapped on a file) can be used by old binaries
*and* new binaries, because both will use the old subcommands.
Note: the vast majority of futexes should use PROCESS_PRIVATE semantics,
as this is the default. Almost all applications should benefit from these
changes (new kernel and updated libc).
Some bench results on a Pentium M 1.6 GHz (SMP kernel on a UP machine):
/* calling futex_wait(addr, value) with value != *addr */
433 cycles per futex(FUTEX_WAIT) call (mixing 2 futexes)
424 cycles per futex(FUTEX_WAIT) call (using one futex)
334 cycles per futex(FUTEX_WAIT_PRIVATE) call (mixing 2 futexes)
334 cycles per futex(FUTEX_WAIT_PRIVATE) call (using one futex)
For reference :
187 cycles per getppid() call
188 cycles per umask() call
181 cycles per ni_syscall() call
Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Cc: Pierre Peiffer <pierre.peiffer@bull.net>
Cc: "Ulrich Drepper" <drepper@gmail.com>
Cc: "Nick Piggin" <nickpiggin@yahoo.com.au>
Cc: "Ingo Molnar" <mingo@elte.hu>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-05-09 05:35:04 -04:00
|
|
|
#define FUTEX_PRIVATE_FLAG 128
|
|
|
|
#define FUTEX_CMD_MASK ~FUTEX_PRIVATE_FLAG
|
|
|
|
|
|
|
|
#define FUTEX_WAIT_PRIVATE (FUTEX_WAIT | FUTEX_PRIVATE_FLAG)
|
|
|
|
#define FUTEX_WAKE_PRIVATE (FUTEX_WAKE | FUTEX_PRIVATE_FLAG)
|
|
|
|
#define FUTEX_REQUEUE_PRIVATE (FUTEX_REQUEUE | FUTEX_PRIVATE_FLAG)
|
|
|
|
#define FUTEX_CMP_REQUEUE_PRIVATE (FUTEX_CMP_REQUEUE | FUTEX_PRIVATE_FLAG)
|
|
|
|
#define FUTEX_WAKE_OP_PRIVATE (FUTEX_WAKE_OP | FUTEX_PRIVATE_FLAG)
|
|
|
|
#define FUTEX_LOCK_PI_PRIVATE (FUTEX_LOCK_PI | FUTEX_PRIVATE_FLAG)
|
|
|
|
#define FUTEX_UNLOCK_PI_PRIVATE (FUTEX_UNLOCK_PI | FUTEX_PRIVATE_FLAG)
|
|
|
|
#define FUTEX_TRYLOCK_PI_PRIVATE (FUTEX_TRYLOCK_PI | FUTEX_PRIVATE_FLAG)
|
|
|
|
|
2006-03-27 04:16:22 -05:00
|
|
|
/*
|
|
|
|
* Support for robust futexes: the kernel cleans up held futexes at
|
|
|
|
* thread exit time.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Per-lock list entry - embedded in user-space locks, somewhere close
|
|
|
|
 * to the futex field. (Note: user-space uses a doubly-linked list to
|
|
|
|
* achieve O(1) list add and remove, but the kernel only needs to know
|
|
|
|
* about the forward link)
|
|
|
|
*
|
|
|
|
* NOTE: this structure is part of the syscall ABI, and must not be
|
|
|
|
* changed.
|
|
|
|
*/
|
|
|
|
struct robust_list {
|
|
|
|
struct robust_list __user *next;
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Per-thread list head:
|
|
|
|
*
|
|
|
|
* NOTE: this structure is part of the syscall ABI, and must only be
|
|
|
|
* changed if the change is first communicated with the glibc folks.
|
|
|
|
* (When an incompatible change is done, we'll increase the structure
|
|
|
|
* size, which glibc will detect)
|
|
|
|
*/
|
|
|
|
struct robust_list_head {
|
|
|
|
/*
|
|
|
|
* The head of the list. Points back to itself if empty:
|
|
|
|
*/
|
|
|
|
struct robust_list list;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This relative offset is set by user-space, it gives the kernel
|
|
|
|
* the relative position of the futex field to examine. This way
|
|
|
|
* we keep userspace flexible, to freely shape its data-structure,
|
|
|
|
* without hardcoding any particular offset into the kernel:
|
|
|
|
*/
|
|
|
|
long futex_offset;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The death of the thread may race with userspace setting
|
|
|
|
* up a lock's links. So to handle this race, userspace first
|
|
|
|
* sets this field to the address of the to-be-taken lock,
|
|
|
|
* then does the lock acquire, and then adds itself to the
|
|
|
|
* list, and then clears this field. Hence the kernel will
|
|
|
|
* always have full knowledge of all locks that the thread
|
|
|
|
* _might_ have taken. We check the owner TID in any case,
|
|
|
|
* so only truly owned locks will be handled.
|
|
|
|
*/
|
|
|
|
struct robust_list __user *list_op_pending;
|
|
|
|
};
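A sketch of how a thread library might use this ABI, following the protocol
the comment above describes (hedged: glibc carries its own userspace copies
of these structures; the lock layout and helper names are illustrative):

#include <stddef.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Userspace mirrors of the two ABI structures defined above. */
struct robust_list { struct robust_list *next; };
struct robust_list_head {
	struct robust_list list;
	long futex_offset;
	struct robust_list *list_op_pending;
};

struct my_lock {				/* illustrative lock layout */
	struct robust_list list;		/* linkage, near the futex */
	uint32_t futex;				/* 0 free, else owner TID */
};

static __thread struct robust_list_head head;

static void robust_init(void)			/* once per thread */
{
	head.list.next = &head.list;		/* empty: points back to itself */
	head.futex_offset = offsetof(struct my_lock, futex)
			  - offsetof(struct my_lock, list);
	head.list_op_pending = NULL;
	syscall(SYS_set_robust_list, &head, sizeof(head));
}

static void robust_lock(struct my_lock *l, uint32_t tid)
{
	head.list_op_pending = &l->list;	/* 1: announce the attempt */
	while (!__sync_bool_compare_and_swap(&l->futex, 0, tid))
		;				/* 2: take the lock (spin for brevity) */
	l->list.next = head.list.next;		/* 3: link it into the list */
	head.list.next = &l->list;
	head.list_op_pending = NULL;		/* 4: attempt complete */
}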
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Are there any waiters for this robust futex:
|
|
|
|
*/
|
|
|
|
#define FUTEX_WAITERS 0x80000000
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The kernel signals via this bit that a thread holding a futex
|
|
|
|
* has exited without unlocking the futex. The kernel also does
|
|
|
|
* a FUTEX_WAKE on such futexes, after setting the bit, to wake
|
|
|
|
* up any possible waiters:
|
|
|
|
*/
|
|
|
|
#define FUTEX_OWNER_DIED 0x40000000
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The rest of the robust-futex field is for the TID:
|
|
|
|
*/
|
2007-06-17 15:11:10 -04:00
|
|
|
#define FUTEX_TID_MASK 0x3fffffff
|
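Taken together, the three definitions above partition a robust/PI futex
word as in this small sketch (hedged: illustrative helpers, relying only on
the defines above):

/* Decode the ABI fields of a robust/PI futex value. */
#include <stdint.h>

static inline uint32_t futex_owner_tid(uint32_t val)
{
	return val & FUTEX_TID_MASK;	/* owner TID, 0 if unlocked */
}

static inline int futex_has_waiters(uint32_t val)
{
	return (val & FUTEX_WAITERS) != 0;
}

static inline int futex_owner_died(uint32_t val)
{
	return (val & FUTEX_OWNER_DIED) != 0;
}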
2006-03-27 04:16:22 -05:00
|
|
|
|
|
|
|
/*
|
2006-03-27 04:16:28 -05:00
|
|
|
* This limit protects against a deliberately circular list.
|
|
|
|
* (Not worth introducing an rlimit for it)
|
2006-03-27 04:16:22 -05:00
|
|
|
*/
|
2006-03-27 04:16:28 -05:00
|
|
|
#define ROBUST_LIST_LIMIT 2048
|
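A deliberately simplified sketch of the bounded walk this limit enables
(hedged: the real kernel exit path fetches each entry with copy_from_user
and handles faults, pending ops and pi futexes as well):

/* Stop after ROBUST_LIST_LIMIT hops so a deliberately circular
 * user-supplied list cannot keep the exiting task in the kernel forever. */
static void walk_robust_list(struct robust_list_head *head)
{
	struct robust_list *entry = head->list.next;
	int limit = ROBUST_LIST_LIMIT;

	while (entry != &head->list && limit-- > 0) {
		/* the lock word lives at (char *)entry + head->futex_offset */
		entry = entry->next;
	}
}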
2006-03-27 04:16:22 -05:00
|
|
|
|
2006-12-10 05:19:11 -05:00
|
|
|
#ifdef __KERNEL__
|
2007-05-09 05:35:02 -04:00
|
|
|
long do_futex(u32 __user *uaddr, int op, u32 val, union ktime *timeout,
|
[PATCH] pi-futex: futex code cleanups
We are pleased to announce "lightweight userspace priority inheritance" (PI)
support for futexes. The following patchset and glibc patch implement it,
on top of the robust-futexes patchset which is included in 2.6.16-mm1.
We are calling it lightweight for three reasons:
- in the user-space fastpath a PI-enabled futex involves no kernel work
(or any other PI complexity) at all. No registration, no extra kernel
calls - just pure fast atomic ops in userspace.
- in the slowpath (in the lock-contention case), the system call and
scheduling pattern is in fact better than that of normal futexes, due to
the 'integrated' nature of FUTEX_LOCK_PI. [more about that further down]
- the in-kernel PI implementation is streamlined around the mutex
abstraction, with strict rules that keep the implementation relatively
simple: only a single owner may own a lock (i.e. no read-write lock
support), only the owner may unlock a lock, no recursive locking, etc.
Priority Inheritance - why, oh why???
-------------------------------------
Many of you have heard the horror stories about the evil PI code circling
Linux for years, which makes no real sense at all, is only used by buggy
applications, and has horrible overhead. Some of you have dreaded this very
moment, when someone actually submits working PI code ;-)
So why would we like to see PI support for futexes?
We'd like to see it done purely for technological reasons. We don't think
it's a buggy concept; we think it's useful functionality to offer to
applications, functionality which cannot be achieved in other ways. We also
think it's the right thing to do, and we think we've got the right arguments
and the right numbers to prove that. We also believe that we can address
all the counter-arguments. For these reasons (and the reasons outlined
below) we are submitting this patch-set for upstream kernel inclusion.
What are the benefits of PI?
The short reply:
----------------
User-space PI helps achieve/improve determinism for user-space
applications. In the best case, it can help achieve determinism and
well-bound latencies. Even in the worst case, PI will improve the
statistical distribution of locking-related application delays.
The longer reply:
-----------------
Firstly, sharing locks between multiple tasks is a common programming
technique that often cannot be replaced with lockless algorithms. As we can
see in the kernel [which is quite a complex program in itself], lockless
structures are the exception rather than the norm - the current ratio of
lockless vs. locked code for shared data structures is somewhere between
1:10 and 1:100. Lockless is hard, and the complexity of lockless algorithms
often endangers the ability to do robust reviews of said code. I.e.
critical RT apps often choose lock structures to protect critical data
structures instead of lockless algorithms. Furthermore, there are cases
(like shared hardware, or other resource limits) where lockless access is
mathematically impossible.
Media players (such as Jack) are an example of reasonable application design
with multiple tasks (with multiple priority levels) sharing short-held locks:
for example, a highprio audio playback thread is combined with medium-prio
construct-audio-data threads and low-prio display-colory-stuff threads. Add
video and decoding to the mix and we've got even more priority levels.
So once we accept that synchronization objects (locks) are an unavoidable fact
of life, and once we accept that multi-task userspace apps have a very fair
expectation of being able to use locks, we've got to think about how to offer
the option of a deterministic locking implementation to user-space.
Most of the technical counter-arguments against doing priority inheritance
only apply to kernel-space locks. But user-space locks are different: there
we cannot disable interrupts or make the task non-preemptible in a critical
section, so the 'use spinlocks' argument does not apply (user-space
spinlocks have the same priority inversion problems as other user-space
locking constructs). The fact is, pretty much the only technique that
currently enables good determinism for userspace locks (such as futex-based
pthread mutexes) is priority inheritance:
Currently (without PI), if a high-prio and a low-prio task share a lock
[a quite common scenario for most non-trivial RT applications], then even
if all critical sections are coded carefully to be deterministic (i.e. all
critical sections are short in duration and only execute a limited number
of instructions), the kernel cannot guarantee any deterministic execution
of the high-prio task: any medium-priority task could preempt the low-prio
task while it holds the shared lock and executes the critical section, and
could delay it indefinitely.
Implementation:
---------------
As mentioned before, the userspace fastpath of PI-enabled pthread mutexes
involves no kernel work at all - they behave quite similarly to normal
futex-based locks: a 0 value means unlocked, and a value==TID means locked.
(This is the same method as used by list-based robust futexes.) Userspace uses
atomic ops to lock/unlock these mutexes without entering the kernel.
To handle the slowpath, we have added two new futex ops:
FUTEX_LOCK_PI
FUTEX_UNLOCK_PI
If the lock-acquire fastpath fails, [i.e. an atomic transition from 0 to TID
fails], then FUTEX_LOCK_PI is called. The kernel does all the remaining work:
if there is no futex-queue attached to the futex address yet then the code
looks up the task that owns the futex [it has put its own TID into the futex
value], and attaches a 'PI state' structure to the futex-queue. The pi_state
includes an rt-mutex, which is a PI-aware, kernel-based synchronization
object. The 'other' task is made the owner of the rt-mutex, and the
FUTEX_WAITERS bit is atomically set in the futex value. Then this task tries
to lock the rt-mutex, on which it blocks. Once it returns, it has the mutex
acquired, and it sets the futex value to its own TID and returns. Userspace
has no other work to perform - it now owns the lock, and futex value contains
FUTEX_WAITERS|TID.
If the unlock side fastpath succeeds, [i.e. userspace manages to do a TID ->
0 atomic transition of the futex value], then no kernel work is triggered.
If the unlock fastpath fails (because the FUTEX_WAITERS bit is set), then
FUTEX_UNLOCK_PI is called, and the kernel unlocks the futex on the behalf of
userspace - and it also unlocks the attached pi_state->rt_mutex and thus wakes
up any potential waiters.
Note that under this approach, contrary to other PI-futex approaches, there is
no prior 'registration' of a PI-futex. [which is not quite possible anyway,
due to existing ABI properties of pthread mutexes.]
Also, under this scheme, 'robustness' and 'PI' are two orthogonal properties
of futexes, and all four combinations are possible: futex, robust-futex,
PI-futex, robust+PI-futex.
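A user-space sketch of the fastpath/slowpath split described above (hedged:
the helper names are illustrative, real NPTL mutexes carry more state, and
gettid() is spelled as a raw syscall):

/* PI mutex fastpath is one cmpxchg 0 -> TID; only on contention do we
 * enter the kernel via FUTEX_LOCK_PI / FUTEX_UNLOCK_PI. */
#include <linux/futex.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

static void pi_lock(uint32_t *futex)
{
	uint32_t tid = (uint32_t)syscall(SYS_gettid);

	/* Fastpath: atomically transition unlocked (0) to owned (TID). */
	if (__sync_bool_compare_and_swap(futex, 0, tid))
		return;

	/* Slowpath: the kernel sets FUTEX_WAITERS, boosts the owner via the
	 * attached rt-mutex, and returns once we own the lock. */
	syscall(SYS_futex, futex, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
}

static void pi_unlock(uint32_t *futex)
{
	uint32_t tid = (uint32_t)syscall(SYS_gettid);

	/* Fastpath: TID -> 0 succeeds only if nobody is waiting. */
	if (__sync_bool_compare_and_swap(futex, tid, 0))
		return;

	/* Slowpath: FUTEX_WAITERS is set; let the kernel hand the lock over. */
	syscall(SYS_futex, futex, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
}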
glibc support:
--------------
Ulrich Drepper and Jakub Jelinek have written glibc support for PI-futexes
(and robust futexes), enabling robust and PI (PTHREAD_PRIO_INHERIT) POSIX
mutexes. (PTHREAD_PRIO_PROTECT support will be added later on too, no
additional kernel changes are needed for that). [NOTE: The glibc patch is
obviously unofficial and unsupported without matching upstream kernel
functionality.]
The patch-queue and the glibc patch can also be downloaded from:
http://redhat.com/~mingo/PI-futex-patches/
Many thanks go to the people who helped us create this kernel feature: Steven
Rostedt, Esben Nielsen, Benedikt Spranger, Daniel Walker, John Cooper, Arjan
van de Ven, Oleg Nesterov and others. Credits for related prior projects goes
to Dirk Grambow, Inaky Perez-Gonzalez, Bill Huey and many others.
Clean up the futex code, before adding more features to it:
- use u32 as the futex field type - that's the ABI
- use __user and pointers to u32 instead of unsigned long
- code style / comment style cleanups
- rename hash-bucket name from 'bh' to 'hb'.
I checked the pre and post futex.o object files to make sure this
patch has no code effects.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Cc: Ulrich Drepper <drepper@redhat.com>
Cc: Jakub Jelinek <jakub@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-06-27 05:54:47 -04:00
|
|
|
u32 __user *uaddr2, u32 val2, u32 val3);
|
2005-04-16 18:20:36 -04:00
|
|
|
|
2006-07-28 23:17:57 -04:00
|
|
|
extern int
|
|
|
|
handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi);
|
2006-03-27 04:16:22 -05:00
|
|
|
|
2007-05-08 03:26:42 -04:00
|
|
|
/*
|
|
|
|
* Futexes are matched on equal values of this key.
|
|
|
|
* The key type depends on whether it's a shared or private mapping.
|
|
|
|
* Don't rearrange members without looking at hash_futex().
|
|
|
|
*
|
|
|
|
* offset is aligned to a multiple of sizeof(u32) (== 4) by definition.
|
2007-05-09 05:35:04 -04:00
|
|
|
 * We use the two low-order bits of offset to tell what kind of key it is:
|
|
|
|
* 00 : Private process futex (PTHREAD_PROCESS_PRIVATE)
|
|
|
|
* (no reference on an inode or mm)
|
|
|
|
* 01 : Shared futex (PTHREAD_PROCESS_SHARED)
|
|
|
|
* mapped on a file (reference on the underlying inode)
|
|
|
|
* 10 : Shared futex (PTHREAD_PROCESS_SHARED)
|
|
|
|
* (but private mapping on an mm, and reference taken on it)
|
|
|
|
*/
|
|
|
|
|
|
|
|
#define FUT_OFF_INODE 1 /* We set bit 0 if key has a reference on inode */
|
|
|
|
#define FUT_OFF_MMSHARED 2 /* We set bit 1 if key has a reference on mm */
|
|
|
|
|
2007-05-08 03:26:42 -04:00
|
|
|
union futex_key {
|
|
|
|
struct {
|
|
|
|
unsigned long pgoff;
|
|
|
|
struct inode *inode;
|
|
|
|
int offset;
|
|
|
|
} shared;
|
|
|
|
struct {
|
|
|
|
unsigned long address;
|
|
|
|
struct mm_struct *mm;
|
|
|
|
int offset;
|
|
|
|
} private;
|
|
|
|
struct {
|
|
|
|
unsigned long word;
|
|
|
|
void *ptr;
|
|
|
|
int offset;
|
|
|
|
} both;
|
|
|
|
};
|
2007-05-09 05:35:04 -04:00
|
|
|
int get_futex_key(u32 __user *uaddr, struct rw_semaphore *shared,
|
|
|
|
union futex_key *key);
|
2007-05-08 03:26:42 -04:00
|
|
|
void get_futex_key_refs(union futex_key *key);
|
|
|
|
void drop_futex_key_refs(union futex_key *key);
|
|
|
|
|
2006-03-27 04:16:22 -05:00
|
|
|
#ifdef CONFIG_FUTEX
|
|
|
|
extern void exit_robust_list(struct task_struct *curr);
|
2006-06-27 05:54:58 -04:00
|
|
|
extern void exit_pi_state_list(struct task_struct *curr);
|
2006-03-27 04:16:22 -05:00
|
|
|
#else
|
|
|
|
static inline void exit_robust_list(struct task_struct *curr)
|
|
|
|
{
|
|
|
|
}
|
2006-06-27 05:54:58 -04:00
|
|
|
static inline void exit_pi_state_list(struct task_struct *curr)
|
|
|
|
{
|
|
|
|
}
|
2006-03-27 04:16:22 -05:00
|
|
|
#endif
|
2006-12-10 05:19:11 -05:00
|
|
|
#endif /* __KERNEL__ */
|
2006-03-27 04:16:22 -05:00
|
|
|
|
2005-09-06 18:16:25 -04:00
|
|
|
#define FUTEX_OP_SET 0 /* *(int *)UADDR2 = OPARG; */
|
|
|
|
#define FUTEX_OP_ADD 1 /* *(int *)UADDR2 += OPARG; */
|
|
|
|
#define FUTEX_OP_OR 2 /* *(int *)UADDR2 |= OPARG; */
|
|
|
|
#define FUTEX_OP_ANDN 3 /* *(int *)UADDR2 &= ~OPARG; */
|
|
|
|
#define FUTEX_OP_XOR 4 /* *(int *)UADDR2 ^= OPARG; */
|
|
|
|
|
|
|
|
#define FUTEX_OP_OPARG_SHIFT 8 /* Use (1 << OPARG) instead of OPARG. */
|
|
|
|
|
|
|
|
#define FUTEX_OP_CMP_EQ 0 /* if (oldval == CMPARG) wake */
|
|
|
|
#define FUTEX_OP_CMP_NE 1 /* if (oldval != CMPARG) wake */
|
|
|
|
#define FUTEX_OP_CMP_LT 2 /* if (oldval < CMPARG) wake */
|
|
|
|
#define FUTEX_OP_CMP_LE 3 /* if (oldval <= CMPARG) wake */
|
|
|
|
#define FUTEX_OP_CMP_GT 4 /* if (oldval > CMPARG) wake */
|
|
|
|
#define FUTEX_OP_CMP_GE 5 /* if (oldval >= CMPARG) wake */
|
|
|
|
|
|
|
|
/* FUTEX_WAKE_OP will perform atomically
|
|
|
|
int oldval = *(int *)UADDR2;
|
|
|
|
*(int *)UADDR2 = oldval OP OPARG;
|
|
|
|
if (oldval CMP CMPARG)
|
|
|
|
wake UADDR2; */
|
|
|
|
|
|
|
|
#define FUTEX_OP(op, oparg, cmp, cmparg) \
|
|
|
|
(((op & 0xf) << 28) | ((cmp & 0xf) << 24) \
|
|
|
|
| ((oparg & 0xfff) << 12) | (cmparg & 0xfff))
|
|
|
|
|
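For instance, a pthread_cond_signal-style wake-and-unlock could be encoded
as in this hedged sketch (illustrative names; it mirrors the use case
described in the FUTEX_WAKE_OP commit message earlier):

/* Atomically do *cond_lock = 0, wake one waiter on cond_futex, and wake
 * one waiter on cond_lock too if the old lock value was > 1 (i.e. the
 * condvar's internal lock was contended). */
#include <linux/futex.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

static void cond_signal_wake_op(uint32_t *cond_futex, uint32_t *cond_lock)
{
	syscall(SYS_futex, cond_futex, FUTEX_WAKE_OP,
		1,				/* wake one waiter on cond_futex */
		(void *)1,			/* nr_wake2: one waiter on cond_lock */
		cond_lock,
		FUTEX_OP(FUTEX_OP_SET, 0,	/* *cond_lock = 0 */
			 FUTEX_OP_CMP_GT, 1));	/* wake if oldval > 1 */
}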
2005-04-16 18:20:36 -04:00
|
|
|
#endif
|