#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/cmpxchg.h>

#ifdef __KERNEL__

/* entries in ARCH_DLINFO: */
#ifdef CONFIG_IA32_EMULATION
# define AT_VECTOR_SIZE_ARCH 2
#else
# define AT_VECTOR_SIZE_ARCH 1
#endif
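
/*
 * Presumably these counts cover the extra auxiliary-vector entries the
 * architecture contributes via ARCH_DLINFO (one more when the 32-bit
 * vDSO is exposed under IA32 emulation).
 */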

#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"

#define __EXTRA_CLOBBER \
	,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"

/* Save and restore flags to keep a leaking NT flag from crossing tasks */
#define switch_to(prev,next,last) \
	asm volatile(SAVE_CONTEXT					\
		     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	\
		     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	\
		     "call __switch_to\n\t"				\
		     ".globl thread_return\n"				\
		     "thread_return:\n\t"				\
		     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"		\
		     "movq %P[thread_info](%%rsi),%%r8\n\t"		\
		     LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
		     "movq %%rax,%%rdi\n\t"				\
		     "jc ret_from_fork\n\t"				\
		     RESTORE_CONTEXT					\
		     : "=a" (last)					\
		     : [next] "S" (next), [prev] "D" (prev),		\
		       [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
		       [ti_flags] "i" (offsetof(struct thread_info, flags)), \
		       [tif_fork] "i" (TIF_FORK),			\
		       [thread_info] "i" (offsetof(struct task_struct, stack)), \
		       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
		     : "memory", "cc" __EXTRA_CLOBBER)

extern void load_gs_index(unsigned);

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong.
 */
#define loadsegment(seg,value)	\
	asm volatile("\n"			\
		"1:\t"				\
		"movl %k0,%%" #seg "\n"		\
		"2:\n"				\
		".section .fixup,\"ax\"\n"	\
		"3:\t"				\
		"movl %1,%%" #seg "\n\t"	\
		"jmp 2b\n"			\
		".previous\n"			\
		".section __ex_table,\"a\"\n\t"	\
		".align 8\n\t"			\
		".quad 1b,3b\n"			\
		".previous"			\
		: :"r" (value), "r" (0))

/*
 * clts() clears and stts() (below) sets the TS (task switched) bit in CR0.
 */
#define clts() __asm__ __volatile__ ("clts")

static inline unsigned long read_cr0(void)
{
	unsigned long cr0;
	asm volatile("movq %%cr0,%0" : "=r" (cr0));
	return cr0;
}

static inline void write_cr0(unsigned long val)
{
	asm volatile("movq %0,%%cr0" :: "r" (val));
}

static inline unsigned long read_cr2(void)
{
	unsigned long cr2;
	asm volatile("movq %%cr2,%0" : "=r" (cr2));
	return cr2;
}

static inline void write_cr2(unsigned long val)
{
	asm volatile("movq %0,%%cr2" :: "r" (val));
}

static inline unsigned long read_cr3(void)
{
	unsigned long cr3;
	asm volatile("movq %%cr3,%0" : "=r" (cr3));
	return cr3;
}

static inline void write_cr3(unsigned long val)
{
	asm volatile("movq %0,%%cr3" :: "r" (val) : "memory");
}
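
/*
 * Note: the CR3 write above reloads the page-table base and flushes
 * non-global TLB entries; the "memory" clobber keeps the compiler from
 * reordering memory accesses across it.
 */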

static inline unsigned long read_cr4(void)
{
	unsigned long cr4;
	asm volatile("movq %%cr4,%0" : "=r" (cr4));
	return cr4;
}

static inline void write_cr4(unsigned long val)
{
asm volatile("movq %0,%%cr4" :: "r" (val) : "memory");
|
2007-07-22 05:12:29 -04:00
|
|
|
}

static inline unsigned long read_cr8(void)
{
	unsigned long cr8;
	asm volatile("movq %%cr8,%0" : "=r" (cr8));
	return cr8;
}

static inline void write_cr8(unsigned long val)
{
	asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
}
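
/* On x86-64, CR8 mirrors the local APIC task-priority register (TPR). */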

#define stts() write_cr0(8 | read_cr0())
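/* The literal 8 is the CR0.TS mask (bit 3): stts() sets TS, clts() clears it. */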

#define wbinvd() \
	__asm__ __volatile__ ("wbinvd": : :"memory")

#endif	/* __KERNEL__ */

static inline void clflush(volatile void *__p)
{
	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
}
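
/*
 * Note: clflush is only guaranteed to be ordered by mfence, so callers
 * flushing a range typically fence before and after the flush loop.
 */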

#define nop() __asm__ __volatile__ ("nop")

#ifdef CONFIG_SMP
#define smp_mb()	mb()
/*
 * Per Intel and AMD memory-ordering documentation, loads from cacheable
 * memory are performed in order and ordinary stores become visible in
 * program order, so smp_rmb() and smp_wmb() can be plain compiler
 * barriers; non-temporal stores still need explicit fencing.
 */
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do {} while(0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do {} while(0)
#endif
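
/*
 * Illustrative pairing (not part of this header): a producer publishing
 * data for a consumer on another CPU.
 *
 *	CPU 0				CPU 1
 *	data = 42;			if (ready) {
 *	smp_wmb();				smp_rmb();
 *	ready = 1;				use(data);
 *					}
 */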

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence" ::: "memory")

#define read_barrier_depends()	do {} while(0)
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
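/* set_mb() relies on the implicit full barrier (LOCK semantics) of xchg. */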

#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)

#include <linux/irqflags.h>

void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

#endif