From a58d0ff4935ef14f32f01d4de362bba242f07e0c Mon Sep 17 00:00:00 2001
From: Larry Gritz <lg@larrygritz.com>
Date: Sat, 4 May 2013 10:22:12 -0700
Subject: [PATCH] spinlock tweaks that finally make it as good or better than
 TBB.

---
 src/include/thread.h                 | 89 ++++++++++++++++--------------------
 src/libOpenImageIO/atomic_test.cpp   |  9 ++--
 src/libOpenImageIO/spinlock_test.cpp | 22 +++++++--
 src/libtexture/imagecache_pvt.h      |  2 +-
 4 files changed, 62 insertions(+), 60 deletions(-)

diff --git a/src/include/thread.h b/src/include/thread.h
index 28645fc..2cd03c1 100644
--- a/src/include/thread.h
+++ b/src/include/thread.h
@@ -78,16 +78,22 @@
 // Some day, we hope this is all replaced by use of std::atomic<>.
 #if USE_TBB
 #  include <tbb/atomic.h>
-   using tbb::atomic;
 #  include <tbb/spin_mutex.h>
+#  define USE_TBB_ATOMIC 1
+#  define USE_TBB_SPINLOCK 1
+#else
+#  define USE_TBB_ATOMIC 0
+#  define USE_TBB_SPINLOCK 0
 #endif
 
+
 #if defined(_MSC_VER) && !USE_TBB
 #  include <windows.h>
 #  include <winbase.h>
 #  pragma intrinsic (_InterlockedExchangeAdd)
 #  pragma intrinsic (_InterlockedCompareExchange)
 #  pragma intrinsic (_InterlockedCompareExchange64)
+#  pragma intrinsic (_ReadWriteBarrier)
 #  if defined(_WIN64)
 #    pragma intrinsic(_InterlockedExchangeAdd64)
 #  endif
@@ -105,10 +111,6 @@
 #  endif
 #endif
 
-#ifdef __APPLE__
-#  include <libkern/OSAtomic.h>
-#endif
-
 #if defined(__GNUC__) && (defined(_GLIBCXX_ATOMIC_BUILTINS) || (__GNUC__ * 100 + __GNUC_MINOR__ >= 401))
 #if !defined(__FreeBSD__) || defined(__x86_64__)
 #define USE_GCC_ATOMICS
@@ -230,9 +232,6 @@ class thread_specific_ptr {
 #elif USE_TBB
     atomic<int> *a = (atomic<int> *)at;
     return a->fetch_and_add (x);
-#elif defined(no__APPLE__)
-    // Apple, not inline for Intel (only PPC?)
-    return OSAtomicAdd32Barrier (x, at) - x;
 #elif defined(_MSC_VER)
     // Windows
     return _InterlockedExchangeAdd ((volatile LONG *)at, x);
@@ -251,9 +250,6 @@ class thread_specific_ptr {
 #elif USE_TBB
     atomic<long long> *a = (atomic<long long> *)at;
     return a->fetch_and_add (x);
-#elif defined(no__APPLE__)
-    // Apple, not inline for Intel (only PPC?)
-    return OSAtomicAdd64Barrier (x, at) - x;
 #elif defined(_MSC_VER)
     // Windows
 #  if defined(_WIN64)
@@ -282,8 +278,6 @@ class thread_specific_ptr {
 #elif USE_TBB
     atomic<int> *a = (atomic<int> *)at;
     return a->compare_and_swap (newval, compareval) == newval;
-#elif defined(no__APPLE__)
-    return OSAtomicCompareAndSwap32Barrier (compareval, newval, at);
 #elif defined(_MSC_VER)
     return (_InterlockedCompareExchange ((volatile LONG *)at, newval, compareval) == compareval);
 #else
@@ -301,8 +295,6 @@ class thread_specific_ptr {
 #elif USE_TBB
     atomic<long long> *a = (atomic<long long> *)at;
     return a->compare_and_swap (newval, compareval) == newval;
-#elif defined(no__APPLE__)
-    return OSAtomicCompareAndSwap64Barrier (compareval, newval, at);
 #elif defined(_MSC_VER)
     return (_InterlockedCompareExchange64 ((volatile LONGLONG *)at, newval, compareval) == compareval);
 #else
@@ -317,9 +309,7 @@ class thread_specific_ptr {
 inline void
 yield ()
 {
-#if USE_TBB
-    __TBB_Yield ();
-#elif defined(__GNUC__)
+#if defined(__GNUC__)
     sched_yield ();
 #elif defined(_MSC_VER)
     SwitchToThread ();
@@ -334,12 +324,12 @@ class thread_specific_ptr {
 inline void
 pause (int delay)
 {
-#if USE_TBB
-    __TBB_Pause(delay);
-#elif defined(__GNUC__)
+#if defined(__GNUC__)
     for (int i = 0; i < delay; ++i) {
         __asm__ __volatile__("pause;");
     }
+#elif USE_TBB
+    __TBB_Pause(delay);
 #elif defined(_MSC_VER)
     for (int i = 0; i < delay; ++i) {
 #if defined (_WIN64)
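
Note on the hunk above: the patch reorders the #if chain so the GCC branch
wins even when TBB is present, issuing the x86 "pause" instruction directly
in inline assembly. The same instruction is reachable through a compiler
intrinsic; a minimal sketch (illustrative, not part of the patch -- OIIO's
pause() additionally takes a delay count):

    #if defined(__x86_64__) || defined(__i386__) || defined(_M_X64) || defined(_M_IX86)
    #  include <immintrin.h>   // _mm_pause
    #endif

    // One spin-wait pause: the same "pause" instruction the patch emits
    // with inline asm, but via the intrinsic, which MSVC also accepts.
    inline void cpu_relax ()
    {
    #if defined(__x86_64__) || defined(__i386__) || defined(_M_X64) || defined(_M_IX86)
        _mm_pause ();
    #endif
    }

On x86, "pause" hints to the CPU that this is a spin-wait loop, avoiding
memory-order mis-speculation penalties when the loop exits and reducing
power while spinning.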
@@ -369,14 +359,17 @@ class atomic_backoff {
             yield();
         }
     }
+
 private:
     int m_count;
 };
 
 
 
-#if (! USE_TBB)
-// If we're not using TBB, we need to define our own atomic<>.
+#if USE_TBB_ATOMIC
+using tbb::atomic;
+#else
+// If we're not using TBB's atomic, we need to define our own atomic<>.
 
 
 /// Atomic integer.  Increment, decrement, add, and subtract in a
@@ -456,7 +449,7 @@ class atomic {
 };
 
 
-#endif /* ! USE_TBB */
+#endif /* ! USE_TBB_ATOMIC */
 
 
 #ifdef NOTHREADS
@@ -478,7 +471,7 @@ class atomic {
 typedef null_mutex spin_mutex;
 typedef null_lock<spin_mutex> spin_lock;
 
-#elif USE_TBB
+#elif USE_TBB_SPINLOCK
 
 // Use TBB's spin locks
 typedef tbb::spin_mutex spin_mutex;
@@ -529,63 +522,61 @@ class spin_mutex {
     /// Acquire the lock, spin until we have it.
     ///
    void lock () {
-#if defined(no__APPLE__)
-        // OS X has dedicated spin lock routines, may as well use them.
-        OSSpinLockLock ((OSSpinLock *)&m_locked);
-#else
         // To avoid spinning too tightly, we use the atomic_backoff to
         // provide increasingly longer pauses, and if the lock is under
         // lots of contention, eventually yield the timeslice.
         atomic_backoff backoff;
+
         // Try to get ownership of the lock. Through experimentation, we
         // found that OIIO_UNLIKELY makes this just a bit faster on 
         // gcc x86/x86_64 systems.
         while (! OIIO_UNLIKELY(try_lock())) {
             do {
                 backoff();
-            } while (*(volatile int *)&m_locked);
+            } while (m_locked);
+
             // The full try_lock() involves a compare_and_swap, which
             // writes memory, and that will lock the bus.  But a normal
             // read of m_locked will let us spin until the value
             // changes, without locking the bus. So it's faster to
             // check in this manner until the mutex appears to be free.
         }
-#endif
     }
 
     /// Release the lock that we hold.
     ///
     void unlock () {
-#if defined(no__APPLE__)
-        OSSpinLockUnlock ((OSSpinLock *)&m_locked);
-#elif defined(__GNUC__)
-        // GCC gives us an intrinsic that is even better, an atomic
-        // assignment of 0 with "release" barrier semantics.
-        __sync_lock_release ((volatile int *)&m_locked);
+#if defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
+        // Fastest way to do it is with a store with "release" semantics
+        __asm__ __volatile__("": : :"memory");
+        m_locked = 0;
+        // N.B. GCC gives us an intrinsic that is even better, an atomic
+        // assignment of 0 with "release" barrier semantics:
+        //  __sync_lock_release (&m_locked);
+        // But empirically we found it not as performant as the above.
+#elif defined(_MSC_VER)
+        _ReadWriteBarrier();
+        m_locked = 0;
 #else
         // Otherwise, just assign zero to the atomic (but that's a full 
         // memory barrier).
-        m_locked = 0;
+        *(atomic_int *)&m_locked = 0;
 #endif
     }
 
     /// Try to acquire the lock.  Return true if we have it, false if
     /// somebody else is holding the lock.
     bool try_lock () {
-#if defined(no__APPLE__)
-        return OSSpinLockTry ((OSSpinLock *)&m_locked);
-#else
-#  if USE_TBB
+#if USE_TBB_ATOMIC
         // TBB's compare_and_swap returns the original value
-        return m_locked.compare_and_swap (0, 1) == 0;
-#  elif defined(__GNUC__)
+        return (*(atomic_int *)&m_locked).compare_and_swap (0, 1) == 0;
+#elif defined(__GNUC__)
         // GCC gives us an intrinsic that is even better -- an atomic
         // exchange with "acquire" barrier semantics.
-        return __sync_lock_test_and_set ((volatile int *)&m_locked, 1) == 0;
-#  else
+        return __sync_lock_test_and_set (&m_locked, 1) == 0;
+#else
         // Our compare_and_swap returns true if it swapped
-        return m_locked.bool_compare_and_swap (0, 1);
-#  endif
+        return atomic_compare_and_exchange (&m_locked, 0, 1);
 #endif
     }
 
@@ -603,7 +594,7 @@ class spin_mutex {
     };
 
 private:
-    atomic_int m_locked;  ///< Atomic counter is zero if nobody holds the lock
+    volatile int m_locked;  ///< Atomic counter is zero if nobody holds the lock
 };
 
 
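
Taken together, the new spin_mutex is a classic test-and-test-and-set lock:
try_lock() does the bus-locking atomic exchange, lock() spins on plain reads
of m_locked (with backoff) until the lock looks free before retrying, and
unlock() is a compiler barrier plus an ordinary store, which on x86 already
has release semantics. A minimal standalone sketch of the same pattern in
C++11 std::atomic terms (illustrative names, not OIIO's):

    #include <atomic>
    #include <thread>

    class tts_spinlock {
    public:
        bool try_lock () {
            // The bus-locking test-and-set, with acquire semantics.
            return m_locked.exchange (1, std::memory_order_acquire) == 0;
        }
        void lock () {
            int spins = 0;
            while (! try_lock ()) {
                // Spin on plain (relaxed) loads until the lock looks free;
                // unlike the exchange, these reads don't lock the bus.
                do {
                    if (++spins > 1024)
                        std::this_thread::yield ();  // heavy contention
                } while (m_locked.load (std::memory_order_relaxed) != 0);
            }
        }
        void unlock () {
            // Release store: on x86 this compiles to the same compiler
            // barrier plus plain mov that the patch writes by hand.
            m_locked.store (0, std::memory_order_release);
        }
    private:
        std::atomic<int> m_locked { 0 };
    };

The patch's volatile int plus explicit barriers leans on x86's strong memory
model; std::atomic states the same intent portably, which is the migration
the header's "some day" comment anticipates.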
diff --git a/src/libOpenImageIO/atomic_test.cpp b/src/libOpenImageIO/atomic_test.cpp
index 2c1e807..42d469a 100644
--- a/src/libOpenImageIO/atomic_test.cpp
+++ b/src/libOpenImageIO/atomic_test.cpp
@@ -49,7 +49,7 @@
 // and decrementing the crap out of it, and make sure it has the right
 // value at the end.
 
-static int iterations = 160000000;
+static int iterations = 40000000;
 static int numthreads = 16;
 static int ntrials = 1;
 static bool verbose = false;
@@ -184,16 +184,15 @@ int main (int argc, char *argv[])
 
     static int threadcounts[] = { 1, 2, 4, 8, 12, 16, 20, 24, 28, 32, 64, 128, 1024, 1<<30 };
     for (int i = 0; threadcounts[i] <= numthreads; ++i) {
-        int nt = threadcounts[i];
+        int nt = wedge ? threadcounts[i] : numthreads;
         int its = iterations/nt;
 
         double range;
         double t = time_trial (boost::bind(test_atomics,nt,its),
                                ntrials, &range);
 
-        std::cout << Strutil::format ("%2d\t%s\t%5.1fs, range %.1f\t(%d iters/thread)\n",
-                                      nt, Strutil::timeintervalformat(t),
-                                      t, range, its);
+        std::cout << Strutil::format ("%2d\t%5.1f   range %.2f\t(%d iters/thread)\n",
+                                      nt, t, range, its);
         if (! wedge)
             break;    // don't loop if we're not wedging
     }
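
Both test programs now follow the same measurement scheme: in wedge mode the
thread count sweeps the threadcounts table while iterations are divided by
the thread count, so total work stays constant and flat times across the
wedge mean perfect scaling; in non-wedge mode the fix above makes the run
honor numthreads instead of always using threadcounts[0]. A standalone
sketch of that constant-total-work pattern, using std::thread and <chrono>
in place of the boost and Strutil machinery the real tests use:

    #include <atomic>
    #include <chrono>
    #include <iostream>
    #include <thread>
    #include <vector>

    static std::atomic<long long> accum (0);

    static void work (int iters) {
        for (int i = 0; i < iters; ++i)
            accum.fetch_add (1, std::memory_order_relaxed);
    }

    int main () {
        const int iterations = 40000000;
        for (int nt : { 1, 2, 4, 8, 16 }) {
            int its = iterations / nt;   // constant total work
            accum = 0;
            auto start = std::chrono::steady_clock::now ();
            std::vector<std::thread> pool;
            for (int t = 0; t < nt; ++t)
                pool.emplace_back (work, its);
            for (auto &th : pool)
                th.join ();
            std::chrono::duration<double> secs =
                std::chrono::steady_clock::now () - start;
            std::cout << nt << " threads\t" << secs.count ()
                      << " s\t(" << its << " iters/thread)\n";
        }
    }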
diff --git a/src/libOpenImageIO/spinlock_test.cpp b/src/libOpenImageIO/spinlock_test.cpp
index 60c192b..64adbce 100644
--- a/src/libOpenImageIO/spinlock_test.cpp
+++ b/src/libOpenImageIO/spinlock_test.cpp
@@ -50,7 +50,7 @@
 // accumulated value is equal to iterations*threads, then the spin locks
 // worked.
 
-static int iterations = 160000000;
+static int iterations = 40000000;
 static int numthreads = 16;
 static int ntrials = 1;
 static bool verbose = false;
@@ -58,6 +58,7 @@
 
 static spin_mutex print_mutex;  // make the prints not clobber each other
 volatile long long accum = 0;
+float faccum = 0;
 spin_mutex mymutex;
 
 
@@ -71,10 +72,22 @@
         std::cout << "thread " << boost::this_thread::get_id() 
                   << ", accum = " << accum << "\n";
     }
+#if 1
     for (int i = 0;  i < iterations;  ++i) {
         spin_lock lock (mymutex);
         accum += 1;
     }
+#else
+    // Alternate one that mixes in some math to make longer lock hold time,
+    // and also more to do between locks.  Interesting contrast in timings.
+    float last = 0.0f;
+    for (int i = 0;  i < iterations;  ++i) {
+        last = fmodf (sinf(last), 1.0f);
+        spin_lock lock (mymutex);
+        accum += 1;
+        faccum = fmod (sinf(faccum+last), 1.0f);
+    }
+#endif
 }
 
 
@@ -134,16 +147,15 @@ int main (int argc, char *argv[])
 
     static int threadcounts[] = { 1, 2, 4, 8, 12, 16, 20, 24, 28, 32, 64, 128, 1024, 1<<30 };
     for (int i = 0; threadcounts[i] <= numthreads; ++i) {
-        int nt = threadcounts[i];
+        int nt = wedge ? threadcounts[i] : numthreads;
         int its = iterations/nt;
 
         double range;
         double t = time_trial (boost::bind(test_spinlock,nt,its),
                                ntrials, &range);
 
-        std::cout << Strutil::format ("%2d\t%s\t%5.1fs, range %.1f\t(%d iters/thread)\n",
-                                      nt, Strutil::timeintervalformat(t),
-                                      t, range, its);
+        std::cout << Strutil::format ("%2d\t%5.1f   range %.2f\t(%d iters/thread)\n",
+                                      nt, t, range, its);
         if (! wedge)
             break;    // don't loop if we're not wedging
     }
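
The disabled #else branch above is a deliberate contrast experiment: the
faccum update lengthens the time the lock is held, while the last update
between acquisitions spaces the threads out and lowers contention. A sketch
that separates those two knobs (hypothetical helper; std::mutex stands in
for spin_lock so it is self-contained):

    #include <cmath>
    #include <mutex>

    static std::mutex mtx;                    // stand-in for spin_mutex
    static volatile long long accum2 = 0;     // volatile so the work isn't
    static volatile float faccum2 = 0;        // optimized away

    static void contended_loop (int iterations, bool longer_hold,
                                bool work_between)
    {
        float last = 0.0f;
        for (int i = 0; i < iterations; ++i) {
            if (work_between)          // outside the lock: less contention
                last = fmodf (sinf (last), 1.0f);
            std::lock_guard<std::mutex> lock (mtx);
            accum2 = accum2 + 1;
            if (longer_hold)           // inside the lock: longer hold time
                faccum2 = fmodf (sinf (faccum2 + last), 1.0f);
        }
    }

Spin locks shine when critical sections are tiny and contention is modest;
as hold time grows, waiters burn CPU for longer, which is the "interesting
contrast in timings" the comment anticipates.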
diff --git a/src/libtexture/imagecache_pvt.h b/src/libtexture/imagecache_pvt.h
index 5d29782..3a49616 100644
--- a/src/libtexture/imagecache_pvt.h
+++ b/src/libtexture/imagecache_pvt.h
@@ -1003,7 +1003,7 @@ class ImageCacheImpl : public ImageCache {
             newval = oldval + incr;
             // Now try to atomically swap it, and repeat until we've
             // done it with nobody else interfering.
-#  if USE_TBB
+#  if USE_TBB_ATOMIC
         } while (llstat->compare_and_swap (*llnewval,*lloldval) != *lloldval);
 #  else
         } while (llstat->bool_compare_and_swap (*llnewval,*lloldval));
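
The loop in this last hunk is the standard lock-free accumulate for a type
with no native atomic add: recompute newval from the freshest oldval, then
retry the compare-and-swap until it succeeds with nobody else interfering.
Here the double statistic is viewed through long long pointers (llstat,
llnewval, lloldval) because these atomics are integer-only; with C++11
std::atomic the same loop can swap the double directly -- a minimal sketch:

    #include <atomic>

    // Lock-free "fetch-and-add" for double via a CAS retry loop;
    // std::atomic<double> has no fetch_add until C++20.
    inline void atomic_add (std::atomic<double> &stat, double incr)
    {
        double oldval = stat.load (std::memory_order_relaxed);
        double newval;
        do {
            newval = oldval + incr;
            // On failure, compare_exchange_weak reloads oldval with the
            // value another thread just swapped in, and we recompute.
        } while (! stat.compare_exchange_weak (oldval, newval));
    }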
-- 
1.8.1.6