android_kernel_xiaomi_sm8350/arch/i386/kernel/cpu/mtrr/mtrr.h
Shaohua Li 3b520b238e [PATCH] MTRR suspend/resume cleanup
There has been some discussion about solving the SMP MTRR suspend/resume
breakage, but I didn't find a patch for it, so this is an attempt at one.  The
basic idea is to move MTRR initialization into cpu_identify for all APs (so it
also works with CPU hotplug).  For the BP, restore_processor_state is
responsible for restoring the MTRRs; a rough sketch of the intended flow
follows below.

Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Acked-by: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-07-07 18:23:42 -07:00
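
The flow described above can be pictured roughly as in the sketch below.  This
is illustrative only: the wrapper names and the hook names mtrr_ap_init() /
mtrr_bp_restore() are assumptions made for the example, not quotations from
the patch.

/*
 * Illustrative sketch, not part of the patch or of mtrr.h.
 * Assumed hooks: mtrr_ap_init() lets an AP pick up the MTRR state saved by
 * the boot processor; mtrr_bp_restore() rewrites the boot processor's own
 * MTRRs on resume.
 */
void example_ap_bringup(void)           /* runs on the cpu_identify path */
{
        /* APs and hotplugged CPUs copy the saved MTRR state here */
        mtrr_ap_init();
}

void example_bp_resume(void)            /* runs from restore_processor_state */
{
        /* the BP restores its own MTRRs when resuming from suspend */
        mtrr_bp_restore();
}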

98 lines, 2.5 KiB, C

/*
 * local mtrr defines.
 */
#ifndef TRUE
#define TRUE 1
#define FALSE 0
#endif
#define MTRRcap_MSR 0x0fe
#define MTRRdefType_MSR 0x2ff
#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
#define NUM_FIXED_RANGES 88
#define MTRRfix64K_00000_MSR 0x250
#define MTRRfix16K_80000_MSR 0x258
#define MTRRfix16K_A0000_MSR 0x259
#define MTRRfix4K_C0000_MSR 0x268
#define MTRRfix4K_C8000_MSR 0x269
#define MTRRfix4K_D0000_MSR 0x26a
#define MTRRfix4K_D8000_MSR 0x26b
#define MTRRfix4K_E0000_MSR 0x26c
#define MTRRfix4K_E8000_MSR 0x26d
#define MTRRfix4K_F0000_MSR 0x26e
#define MTRRfix4K_F8000_MSR 0x26f
#define MTRR_CHANGE_MASK_FIXED 0x01
#define MTRR_CHANGE_MASK_VARIABLE 0x02
#define MTRR_CHANGE_MASK_DEFTYPE 0x04
/* In the Intel processor's MTRR interface, the MTRR type is always held in
   an 8 bit field: */
typedef u8 mtrr_type;
struct mtrr_ops {
        u32 vendor;
        u32 use_intel_if;
//      void (*init)(void);
        /* program one variable-range MTRR */
        void (*set)(unsigned int reg, unsigned long base,
                    unsigned long size, mtrr_type type);
        /* rewrite the complete MTRR state (fixed + variable + default type) */
        void (*set_all)(void);

        /* read one variable-range MTRR back */
        void (*get)(unsigned int reg, unsigned long *base,
                    unsigned int *size, mtrr_type * type);
        /* index of an unused variable-range register, or a negative errno */
        int (*get_free_region) (unsigned long base, unsigned long size);

        /* check that this CPU can handle the requested base/size/type */
        int (*validate_add_page)(unsigned long base, unsigned long size,
                                 unsigned int type);
        /* non-zero if write-combining is usable on this CPU */
        int (*have_wrcomb)(void);
};

extern int generic_get_free_region(unsigned long base, unsigned long size);
extern int generic_validate_add_page(unsigned long base, unsigned long size,
                                     unsigned int type);
extern struct mtrr_ops generic_mtrr_ops;
extern int positive_have_wrcomb(void);
/* library functions for processor-specific routines */
struct set_mtrr_context {
        unsigned long flags;
        unsigned long deftype_lo;
        unsigned long deftype_hi;
        unsigned long cr4val;
        unsigned long ccr3;
};

struct mtrr_var_range {
        unsigned long base_lo;
        unsigned long base_hi;
        unsigned long mask_lo;
        unsigned long mask_hi;
};

void set_mtrr_done(struct set_mtrr_context *ctxt);
void set_mtrr_cache_disable(struct set_mtrr_context *ctxt);
void set_mtrr_prepare_save(struct set_mtrr_context *ctxt);
void get_mtrr_state(void);
extern void set_mtrr_ops(struct mtrr_ops * ops);
extern u32 size_or_mask, size_and_mask;
extern struct mtrr_ops * mtrr_if;
#define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
#define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
extern unsigned int num_var_ranges;
void mtrr_state_warn(void);
char *mtrr_attrib_to_str(int x);
void mtrr_wrmsr(unsigned, unsigned, unsigned);
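
For context, the mtrr_ops interface declared above is meant to be filled in by
a per-vendor back-end and handed to set_mtrr_ops() so the core MTRR code can
select it (and mtrr_if can end up pointing at it).  A minimal sketch of such a
back-end follows; the example_* names are hypothetical, and the generic_* /
positive_have_wrcomb helpers are simply reused as declared above.

/* Illustrative sketch only -- not part of mtrr.h. */
static void example_set(unsigned int reg, unsigned long base,
                        unsigned long size, mtrr_type type)
{
        /* program one variable-range MTRR using this vendor's MSRs */
}

static void example_get(unsigned int reg, unsigned long *base,
                        unsigned int *size, mtrr_type *type)
{
        /* read one variable-range MTRR back */
}

static struct mtrr_ops example_mtrr_ops = {
        .vendor            = X86_VENDOR_UNKNOWN,   /* placeholder vendor id */
        .set               = example_set,
        .get               = example_get,
        .get_free_region   = generic_get_free_region,
        .validate_add_page = generic_validate_add_page,
        .have_wrcomb       = positive_have_wrcomb,
};

void example_init_mtrr(void)
{
        /* register this back-end with the core MTRR code */
        set_mtrr_ops(&example_mtrr_ops);
}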