Directory    : /lib/modules/3.10.0-1160.81.1.el7.centos.plus.x86_64/source/arch/x86/include/asm/
Current File : /lib/modules/3.10.0-1160.81.1.el7.centos.plus.x86_64/source/arch/x86/include/asm/barrier.h
#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_X86_32
#define mb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "mfence", \
				      X86_FEATURE_XMM2) ::: "memory", "cc")
#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "lfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "sfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#else
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif

#define gmb()	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC)
#define barrier_nospec() gmb()

/**
 * array_index_mask_nospec() - generate a mask that is ~0UL when the
 *	bounds check succeeds and 0 otherwise
 * @index: array element index
 * @size: number of elements in array
 *
 * Returns:
 *     0 - (index < size)
 */
static inline unsigned long array_index_mask_nospec(unsigned long index,
						    unsigned long size)
{
	unsigned long mask;

	asm volatile ("cmp %1,%2; sbb %0,%0;"
			:"=r" (mask)
			:"r"(size),"r" (index)
			:"cc");
	return mask;
}

/* Override the default implementation from linux/nospec.h. */
#define array_index_mask_nospec array_index_mask_nospec

#ifdef CONFIG_X86_PPRO_FENCE
#define dma_rmb()	rmb()
#else
#define dma_rmb()	barrier()
#endif
#define dma_wmb()	barrier()

#ifdef CONFIG_SMP
#ifdef CONFIG_X86_32
#define smp_mb()	asm volatile("lock; addl $0,-4(%%esp)" ::: "memory", "cc")
#else
#define smp_mb()	asm volatile("lock; addl $0,-4(%%rsp)" ::: "memory", "cc")
#endif
#define smp_rmb()	dma_rmb()
#define smp_wmb()	barrier()
#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else /* !SMP */
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif /* SMP */

#if defined(CONFIG_X86_PPRO_FENCE)

/*
 * For either of these options x86 doesn't have a strong TSO memory
 * model and we should fall back to full barriers.
 */

#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	smp_mb();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	smp_mb();							\
	___p1;								\
})

#else /* regular x86 TSO memory ordering */

#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})

#endif

/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic()	do { } while (0)
#define smp_mb__after_atomic()	do { } while (0)

/*
 * Stop RDTSC speculation. This is needed when you need to use RDTSC
 * (or get_cycles or vread that possibly accesses the TSC) in a defined
 * code region.
 *
 * (Could use an alternative three way for this if there was one.)
 */
static __always_inline void rdtsc_barrier(void)
{
	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
}

#include <asm-generic/barrier.h>

#endif /* _ASM_X86_BARRIER_H */
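
For context, array_index_mask_nospec() above is normally reached through the array_index_nospec() wrapper in linux/nospec.h, which this header's definition overrides. A minimal sketch of the usual clamping pattern follows; the table name, its size, and the calling function are hypothetical and exist only to show how the mask is applied after a bounds check.

/*
 * Hypothetical sketch: clamp a possibly attacker-controlled index so a
 * speculatively executed out-of-bounds load cannot leak data (Spectre v1).
 * my_table and MY_TABLE_SIZE are illustrative names, not kernel symbols.
 */
static unsigned long my_table[MY_TABLE_SIZE];

static unsigned long read_entry(unsigned long index)
{
	unsigned long mask;

	if (index >= MY_TABLE_SIZE)
		return 0;

	/* mask is ~0UL when index < MY_TABLE_SIZE, 0 otherwise */
	mask = array_index_mask_nospec(index, MY_TABLE_SIZE);

	/* an out-of-bounds index collapses to 0 even under misspeculation */
	index &= mask;

	return my_table[index];
}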
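
The smp_store_release()/smp_load_acquire() pair defined in this header relies on x86's TSO ordering, so on the non-PPRO path a compiler barrier is enough. A rough producer/consumer sketch is given below; the 'data' and 'ready' variables and the two functions are illustrative, not part of the header.

/*
 * Hypothetical producer/consumer sketch using the acquire/release macros
 * defined above. Once the consumer observes ready == 1 via the acquire
 * load, the preceding store to data published by the release is visible.
 */
static int data;
static int ready;

static void producer(void)
{
	data = 42;			/* plain store, published below */
	smp_store_release(&ready, 1);	/* data becomes visible before ready */
}

static int consumer(void)
{
	if (smp_load_acquire(&ready))	/* ready is read before data */
		return data;		/* sees 42 once ready == 1 */
	return -1;
}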
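
The rdtsc_barrier() comment describes fencing the time-stamp counter read so it cannot drift out of the measured region under speculation. The sketch below assumes get_cycles() from asm/tsc.h as the TSC read and a caller-supplied function to time; both the helper name and its shape are illustrative only.

/*
 * Minimal timing sketch (assumption: get_cycles() reads the TSC).
 * The barriers keep both reads pinned to the measured region.
 */
static inline unsigned long long time_region(void (*fn)(void))
{
	unsigned long long start, end;

	rdtsc_barrier();
	start = get_cycles();
	rdtsc_barrier();

	fn();

	rdtsc_barrier();
	end = get_cycles();
	rdtsc_barrier();

	return end - start;
}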