From 5ef3f0602977d79c9c7d19748711e42f6dce2558 Mon Sep 17 00:00:00 2001
From: "marja@chromium.org"
Date: Mon, 23 Jun 2014 10:31:12 +0000
Subject: [PATCH] Revert "Partial revert of r21901"

This reverts r21927.

Reason: broke the build.

BUG=
TBR=jochen@chromium.org, bmeurer@chromium.org

Review URL: https://codereview.chromium.org/347423002

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@21928 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---
 src/base/atomicops_internals_x86_gcc.cc |  6 -----
 src/base/atomicops_internals_x86_gcc.h  | 30 +-------------------------
 2 files changed, 1 insertion(+), 35 deletions(-)

diff --git a/src/base/atomicops_internals_x86_gcc.cc b/src/base/atomicops_internals_x86_gcc.cc
index f2cddbe9f9..ffc8bce0b0 100644
--- a/src/base/atomicops_internals_x86_gcc.cc
+++ b/src/base/atomicops_internals_x86_gcc.cc
@@ -42,7 +42,6 @@ namespace base {
 // default values should hopefully be pretty safe.
 struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = {
   false,  // bug can't exist before process spawns multiple threads
-  false,  // no SSE2
 };
 
 } }  // namespace v8::base
@@ -88,11 +87,6 @@ void AtomicOps_Internalx86CPUFeaturesInit() {
   } else {
     AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = false;
   }
-
-#if !defined(__SSE2__)
-  // edx bit 26 is SSE2 which we use to tell use whether we can use mfence
-  AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1);
-#endif
 }
 
 class AtomicOpsx86Initializer {
diff --git a/src/base/atomicops_internals_x86_gcc.h b/src/base/atomicops_internals_x86_gcc.h
index ec87c42121..90e1acf1ee 100644
--- a/src/base/atomicops_internals_x86_gcc.h
+++ b/src/base/atomicops_internals_x86_gcc.h
@@ -17,9 +17,6 @@ namespace base {
 struct AtomicOps_x86CPUFeatureStruct {
   bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
                              // after acquire compare-and-swap.
-#if !defined(__SSE2__)
-  bool has_sse2;             // Processor has SSE2.
-#endif
 };
 
 extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;
@@ -94,10 +91,7 @@ inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
 }
 
-#if defined(__x86_64__) || defined(__SSE2__)
-
-// 64-bit implementations of memory barrier can be simpler, because it
-// "mfence" is guaranteed to exist.
+// We require SSE2, so mfence is guaranteed to exist.
 inline void MemoryBarrier() {
   __asm__ __volatile__("mfence" : : : "memory");
 }
@@ -107,28 +101,6 @@ inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
   MemoryBarrier();
 }
 
-#else
-
-inline void MemoryBarrier() {
-  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
-    __asm__ __volatile__("mfence" : : : "memory");
-  } else {  // mfence is faster but not present on PIII
-    Atomic32 x = 0;
-    NoBarrier_AtomicExchange(&x, 0);  // acts as a barrier on PIII
-  }
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
-  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
-    *ptr = value;
-    __asm__ __volatile__("mfence" : : : "memory");
-  } else {
-    NoBarrier_AtomicExchange(ptr, value);
-    // acts as a barrier on PIII
-  }
-}
-#endif
-
 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
   ATOMICOPS_COMPILER_BARRIER();
   *ptr = value;  // An x86 store acts as a release barrier.
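
For illustration, the net effect of the header change is that MemoryBarrier() and Acquire_Store() no longer branch on a runtime has_sse2 flag: with SSE2 as a compile-time baseline, mfence is always available. The standalone sketch below is not taken from the patch (the Atomic32 typedef and main() are illustrative scaffolding); it shows that simplified path, assuming an x86/x86-64 target and GCC-style inline assembly.

// Minimal sketch of the SSE2-only barrier path kept by the revert.
// Not V8 code; compile on x86/x86-64 with GCC or Clang.
#include <cstdint>

typedef int32_t Atomic32;

inline void MemoryBarrier() {
  // With SSE2 guaranteed, mfence is always present, so no runtime
  // CPU-feature check or xchg-based fallback is needed.
  __asm__ __volatile__("mfence" : : : "memory");
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;     // plain store...
  MemoryBarrier();  // ...followed by a full fence
}

int main() {
  volatile Atomic32 flag = 0;
  Acquire_Store(&flag, 1);  // store becomes visible before any later access
  return 0;
}

The pre-SSE2 fallback that the patch deletes reached the same ordering guarantee by routing the store through NoBarrier_AtomicExchange(), since, per the deleted comments, a locked exchange also acts as a full barrier on PIII-class CPUs that lack mfence.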