6dbde35308
It is an optimization and a cleanup, and adds the following new generic percpu methods:

  percpu_read()
  percpu_write()
  percpu_add()
  percpu_sub()
  percpu_and()
  percpu_or()
  percpu_xor()

and implements support for them on x86. (other architectures will fall back to a default implementation)

The advantage is that for example to read a local percpu variable, instead of this sequence:

  return __get_cpu_var(var);

  ffffffff8102ca2b: 48 8b 14 fd 80 09 74  mov    -0x7e8bf680(,%rdi,8),%rdx
  ffffffff8102ca32: 81
  ffffffff8102ca33: 48 c7 c0 d8 59 00 00  mov    $0x59d8,%rax
  ffffffff8102ca3a: 48 8b 04 10           mov    (%rax,%rdx,1),%rax

We can get a single instruction by using the optimized variants:

  return percpu_read(var);

  ffffffff8102ca3f: 65 48 8b 05 91 8f fd  mov    %gs:0x7efd8f91(%rip),%rax

I also cleaned up the x86-specific APIs and made the x86 code use these new generic percpu primitives.

tj: * fixed generic percpu_sub() definition as Roel Kluin pointed out
    * added percpu_and() for completeness's sake
    * made generic percpu ops atomic against preemption

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Tejun Heo <tj@kernel.org>
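To make the "atomic against preemption" note concrete, here is a minimal sketch of what a generic fallback for these accessors could look like, built on the get_cpu_var()/put_cpu_var() helpers (which disable and re-enable preemption around the access). The per_cpu_var() spelling and the exact macro shapes are assumptions for illustration; the real definitions live in include/linux/percpu.h and may differ:

  /*
   * Sketch of preempt-safe generic fallbacks (illustrative only).
   * get_cpu_var() disables preemption and yields this CPU's copy of
   * the variable; put_cpu_var() re-enables preemption.
   */
  #ifndef percpu_read
  # define percpu_read(var)                                \
    ({                                                     \
          typeof(per_cpu_var(var)) __tmp_var__;            \
          __tmp_var__ = get_cpu_var(var);                  \
          put_cpu_var(var);                                \
          __tmp_var__;                                     \
    })
  #endif

  /* Apply a read-modify-write operator to this CPU's copy. */
  #define __percpu_generic_to_op(var, val, op)             \
  do {                                                     \
          get_cpu_var(var) op val;                         \
          put_cpu_var(var);                                \
  } while (0)

  #ifndef percpu_add
  # define percpu_add(var, val)  __percpu_generic_to_op(var, (val), +=)
  #endif

  #ifndef percpu_sub
  # define percpu_sub(var, val)  __percpu_generic_to_op(var, (val), -=)
  #endif

percpu_write(), percpu_and(), percpu_or() and percpu_xor() would follow the same __percpu_generic_to_op() pattern with =, &=, |= and ^=.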
61 lines · 1.6 KiB · C
#ifndef _XEN_MULTICALLS_H
#define _XEN_MULTICALLS_H

#include "xen-ops.h"

/* Multicalls */
struct multicall_space
{
        struct multicall_entry *mc;
        void *args;
};

/* Allocate room for a multicall and its args */
struct multicall_space __xen_mc_entry(size_t args);

DECLARE_PER_CPU(unsigned long, xen_mc_irq_flags);

/* Call to start a batch of multiple __xen_mc_entry()s.  Must be
   paired with xen_mc_issue() */
static inline void xen_mc_batch(void)
{
        /* need to disable interrupts until this entry is complete */
        local_irq_save(__get_cpu_var(xen_mc_irq_flags));
}

static inline struct multicall_space xen_mc_entry(size_t args)
{
        xen_mc_batch();
        return __xen_mc_entry(args);
}

/* Flush all pending multicalls */
void xen_mc_flush(void);

/* Issue a multicall if we're not in a lazy mode */
static inline void xen_mc_issue(unsigned mode)
{
        if ((paravirt_get_lazy_mode() & mode) == 0)
                xen_mc_flush();

        /* restore flags saved in xen_mc_batch */
        local_irq_restore(percpu_read(xen_mc_irq_flags));
}

/* Set up a callback to be called when the current batch is flushed */
void xen_mc_callback(void (*fn)(void *), void *data);

/*
 * Try to extend the arguments of the previous multicall command.  The
 * previous command's op must match.  If it does, then it attempts to
 * extend the argument space allocated to the multicall entry by
 * arg_size bytes.
 *
 * On success, the returned multicall_space has mc pointing to the
 * command and args pointing to the newly allocated space; on failure,
 * mc is NULL.
 */
struct multicall_space xen_mc_extend_args(unsigned long op, size_t arg_size);

#endif /* _XEN_MULTICALLS_H */
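As a usage note on the header above: a caller brackets entry allocation with xen_mc_batch()/xen_mc_issue(), or uses xen_mc_entry(), which batches for you. The sketch below is hypothetical and not part of the file; the hypercall op constant and the lazy mode chosen are assumptions for illustration:

  /* Hypothetical caller (illustrative only, not part of multicalls.h):
   * queue a single hypercall as a multicall entry and issue the batch
   * unless we are currently in the matching lazy mode. */
  static void example_queue_one_hypercall(void)
  {
          struct multicall_space mcs;

          mcs = xen_mc_entry(0);             /* starts the batch, allocates an entry */
          mcs.mc->op = __HYPERVISOR_fpu_taskswitch;  /* assumed example op */
          mcs.mc->args[0] = 0;

          xen_mc_issue(PARAVIRT_LAZY_CPU);   /* flushes now unless batching lazily */
  }

Since xen_mc_batch() only disables interrupts from the point it is called, real callers additionally wrap this pattern in preempt_disable()/preempt_enable() when the surrounding context can be preempted.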