android_kernel_xiaomi_sm7250/include/asm-x86/system_64.h
Glauber de Oliveira Costa a6b4655258 x86: unify load_segment macro
This patch unifies the load_segment() macro, making it identical on the
x86_64 and i386 architectures. The common version goes to system.h,
and the old per-architecture versions are deleted.

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
2008-01-30 13:31:08 +01:00
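
For reference, the unified macro that this patch moves into system.h looks roughly like the following. This is a sketch reconstructed from memory, not the verbatim committed text; it assumes the _ASM_ALIGN/_ASM_PTR helpers from asm/asm.h, which expand to .align 4/.long on 32-bit and .align 8/.quad on 64-bit and are what let one definition serve both architectures:

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg, value)			\
	asm volatile("\n"			\
		"1:\t"				\
		"movl %k0,%%" #seg "\n"		\
		"2:\n"				\
		".section .fixup,\"ax\"\n"	\
		"3:\t"				\
		"movl %k1, %%" #seg "\n\t"	\
		"jmp 2b\n"			\
		".previous\n"			\
		".section __ex_table,\"a\"\n\t"	\
		_ASM_ALIGN "\n\t"		\
		_ASM_PTR " 1b,3b\n"		\
		".previous"			\
		: : "r" (value), "r" (0))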


#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/cmpxchg.h>

#ifdef __KERNEL__

/* entries in ARCH_DLINFO: */
#ifdef CONFIG_IA32_EMULATION
# define AT_VECTOR_SIZE_ARCH 2
#else
# define AT_VECTOR_SIZE_ARCH 1
#endif

#define __SAVE(reg, offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg, offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
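/* Note (added; not in the original header): __SAVE/__RESTORE spill and
 * reload one register relative to a 14-slot save area on the stack, e.g.
 * __SAVE(rbx, 3) expands to "movq %%rbx,(14-3)*8(%%rsp)\n\t", i.e. a
 * store 88 bytes above %rsp. */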
/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
#define __EXTRA_CLOBBER \
	, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"

/* Save and restore flags, so that a leaking NT flag is cleared across the context switch */
#define switch_to(prev, next, last) \
	asm volatile(SAVE_CONTEXT \
	     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
	     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
	     "call __switch_to\n\t" \
	     ".globl thread_return\n" \
	     "thread_return:\n\t" \
	     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \
	     "movq %P[thread_info](%%rsi),%%r8\n\t" \
	     LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
	     "movq %%rax,%%rdi\n\t" \
	     "jc ret_from_fork\n\t" \
	     RESTORE_CONTEXT \
	     : "=a" (last) \
	     : [next] "S" (next), [prev] "D" (prev), \
	       [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
	       [ti_flags] "i" (offsetof(struct thread_info, flags)), \
	       [tif_fork] "i" (TIF_FORK), \
	       [thread_info] "i" (offsetof(struct task_struct, stack)), \
	       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
	     : "memory", "cc" __EXTRA_CLOBBER)
extern void load_gs_index(unsigned);
/*
* Clear and set 'TS' bit respectively
*/
#define clts() __asm__ __volatile__ ("clts")
static inline unsigned long read_cr0(void)
{
	unsigned long cr0;
	asm volatile("movq %%cr0,%0" : "=r" (cr0));
	return cr0;
}

static inline void write_cr0(unsigned long val)
{
	asm volatile("movq %0,%%cr0" :: "r" (val));
}

static inline unsigned long read_cr2(void)
{
	unsigned long cr2;
	asm volatile("movq %%cr2,%0" : "=r" (cr2));
	return cr2;
}

static inline void write_cr2(unsigned long val)
{
	asm volatile("movq %0,%%cr2" :: "r" (val));
}

static inline unsigned long read_cr3(void)
{
	unsigned long cr3;
	asm volatile("movq %%cr3,%0" : "=r" (cr3));
	return cr3;
}

static inline void write_cr3(unsigned long val)
{
	asm volatile("movq %0,%%cr3" :: "r" (val) : "memory");
}

static inline unsigned long read_cr4(void)
{
	unsigned long cr4;
	asm volatile("movq %%cr4,%0" : "=r" (cr4));
	return cr4;
}

static inline void write_cr4(unsigned long val)
{
	asm volatile("movq %0,%%cr4" :: "r" (val) : "memory");
}

static inline unsigned long read_cr8(void)
{
	unsigned long cr8;
	asm volatile("movq %%cr8,%0" : "=r" (cr8));
	return cr8;
}

static inline void write_cr8(unsigned long val)
{
	asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
}
#define stts() write_cr0(8 | read_cr0())
#define wbinvd() \
__asm__ __volatile__ ("wbinvd": : :"memory")
#endif /* __KERNEL__ */
#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do {} while(0)
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do {} while(0)
#endif
/*
* Force strict CPU ordering.
* And yes, this is required on UP too when we're talking
* to devices.
*/
#define mb() asm volatile("mfence":::"memory")
#define rmb() asm volatile("lfence":::"memory")
#define wmb() asm volatile("sfence" ::: "memory")
#define read_barrier_depends() do {} while(0)
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
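
/* Hedged sketch (added; not in the original header): the canonical pairing
 * for smp_wmb()/smp_rmb() is publish/consume. All names here are
 * hypothetical; barrier() in the spin loop merely stops the compiler from
 * caching the flag load across iterations. */
#if 0	/* illustration only, never compiled */
static int example_data, example_flag;

static void example_producer(void)
{
	example_data = 42;
	smp_wmb();		/* order the data store before the flag store */
	example_flag = 1;	/* publish */
}

static int example_consumer(void)
{
	while (!example_flag)
		barrier();	/* reread the flag each iteration */
	smp_rmb();		/* order the flag load before the data load */
	return example_data;
}
#endif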
#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)
#include <linux/irqflags.h>
#endif