[131750] trunk/dports/lang/libcxx
jeremyhu at macports.org
jeremyhu at macports.org
Fri Jan 16 22:29:28 PST 2015
Revision: 131750
https://trac.macports.org/changeset/131750
Author: jeremyhu at macports.org
Date: 2015-01-16 22:29:27 -0800 (Fri, 16 Jan 2015)
Log Message:
-----------
libcxx: Pull in David Fang's patch to add (very slow) 64bit atomics to ppc libc++
Modified Paths:
--------------
trunk/dports/lang/libcxx/Portfile
trunk/dports/lang/libcxx/files/0001-buildit-build-fix-for-Leopard.patch
trunk/dports/lang/libcxx/files/0002-buildit-Set-compatibility-version-to-RC_ProjectSourc.patch
Added Paths:
-----------
trunk/dports/lang/libcxx/files/0003-Fix-local-and-iterator-when-building-with-Lion-and-n.patch
trunk/dports/lang/libcxx/files/0004-implement-atomic-using-mutex-lock_guard-for-64b-ops-.patch
Modified: trunk/dports/lang/libcxx/Portfile
===================================================================
--- trunk/dports/lang/libcxx/Portfile 2015-01-17 05:49:11 UTC (rev 131749)
+++ trunk/dports/lang/libcxx/Portfile 2015-01-17 06:29:27 UTC (rev 131750)
@@ -5,6 +5,7 @@
name libcxx
version 3.5.1
+revision 1
epoch 1
categories lang
platforms darwin
@@ -68,7 +69,9 @@
patch.pre_args -p1
patchfiles-append \
0001-buildit-build-fix-for-Leopard.patch \
- 0002-buildit-Set-compatibility-version-to-RC_ProjectSourc.patch
+ 0002-buildit-Set-compatibility-version-to-RC_ProjectSourc.patch \
+ 0003-Fix-local-and-iterator-when-building-with-Lion-and-n.patch \
+ 0004-implement-atomic-using-mutex-lock_guard-for-64b-ops-.patch
build.dir ${worksrcpath}/lib
build.cmd ./buildit
Modified: trunk/dports/lang/libcxx/files/0001-buildit-build-fix-for-Leopard.patch
===================================================================
--- trunk/dports/lang/libcxx/files/0001-buildit-build-fix-for-Leopard.patch 2015-01-17 05:49:11 UTC (rev 131749)
+++ trunk/dports/lang/libcxx/files/0001-buildit-build-fix-for-Leopard.patch 2015-01-17 06:29:27 UTC (rev 131750)
@@ -1,9 +1,10 @@
-From 357f51a5693be595909be627d11bfba20e3944cd Mon Sep 17 00:00:00 2001
+From bb8bfbe6e5de7d6cebe38a3af0b9a492336714a7 Mon Sep 17 00:00:00 2001
From: Jeremy Huddleston Sequoia <jeremyhu at apple.com>
Date: Fri, 16 Jan 2015 21:10:35 -0800
-Subject: [PATCH 1/2] buildit build fix for Leopard
+Subject: [PATCH 1/4] buildit build fix for Leopard
Signed-off-by: Jeremy Huddleston Sequoia <jeremyhu at apple.com>
+(cherry picked from commit 357f51a5693be595909be627d11bfba20e3944cd)
---
lib/buildit | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
Modified: trunk/dports/lang/libcxx/files/0002-buildit-Set-compatibility-version-to-RC_ProjectSourc.patch
===================================================================
--- trunk/dports/lang/libcxx/files/0002-buildit-Set-compatibility-version-to-RC_ProjectSourc.patch 2015-01-17 05:49:11 UTC (rev 131749)
+++ trunk/dports/lang/libcxx/files/0002-buildit-Set-compatibility-version-to-RC_ProjectSourc.patch 2015-01-17 06:29:27 UTC (rev 131750)
@@ -1,10 +1,11 @@
-From f76bf5631a732baf296f331f1c6b6d6fb12b481a Mon Sep 17 00:00:00 2001
+From 6d6c5b2be37820441b003596800add52ad08b9bb Mon Sep 17 00:00:00 2001
From: Jeremy Huddleston Sequoia <jeremyhu at apple.com>
Date: Fri, 16 Jan 2015 21:11:37 -0800
-Subject: [PATCH 2/2] buildit: Set compatibility version to
+Subject: [PATCH 2/4] buildit: Set compatibility version to
RC_ProjectSourceVersion
Signed-off-by: Jeremy Huddleston Sequoia <jeremyhu at apple.com>
+(cherry picked from commit f76bf5631a732baf296f331f1c6b6d6fb12b481a)
---
lib/buildit | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
Added: trunk/dports/lang/libcxx/files/0003-Fix-local-and-iterator-when-building-with-Lion-and-n.patch
===================================================================
--- trunk/dports/lang/libcxx/files/0003-Fix-local-and-iterator-when-building-with-Lion-and-n.patch (rev 0)
+++ trunk/dports/lang/libcxx/files/0003-Fix-local-and-iterator-when-building-with-Lion-and-n.patch 2015-01-17 06:29:27 UTC (rev 131750)
@@ -0,0 +1,46 @@
+From e34bdf68de77066864ff3238d89fd19e19b3b508 Mon Sep 17 00:00:00 2001
+From: Jeremy Huddleston Sequoia <jeremyhu at apple.com>
+Date: Fri, 16 Jan 2015 21:34:36 -0800
+Subject: [PATCH 3/4] Fix <local> and <iterator> when building with Lion and
+ newer Availability.h
+
+Signed-off-by: Jeremy Huddleston Sequoia <jeremyhu at apple.com>
+(cherry picked from commit e8d7616436baea0e0ff1dd78bd8d8dfc5d367e44)
+---
+ include/iterator | 4 ++--
+ include/locale | 4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/include/iterator b/include/iterator
+index f338e01..64b52e7 100644
+--- a/include/iterator
++++ b/include/iterator
+@@ -913,8 +913,8 @@ public:
+ _LIBCPP_INLINE_VISIBILITY bool failed() const _NOEXCEPT {return __sbuf_ == 0;}
+
+ #if !defined(__APPLE__) || \
+- (defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && __MAC_OS_X_VERSION_MIN_REQUIRED > __MAC_10_8) || \
+- (defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && __IPHONE_OS_VERSION_MIN_REQUIRED > __IPHONE_6_0)
++ (defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && defined(__MAC_10_8) && __MAC_OS_X_VERSION_MIN_REQUIRED > __MAC_10_8) || \
++ (defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && defined(__IPHONE_6_0) && __IPHONE_OS_VERSION_MIN_REQUIRED > __IPHONE_6_0)
+
+ template <class _Ch, class _Tr>
+ friend
+diff --git a/include/locale b/include/locale
+index fcff402..10d8ca9 100644
+--- a/include/locale
++++ b/include/locale
+@@ -1467,8 +1467,8 @@ __pad_and_output(_OutputIterator __s,
+ }
+
+ #if !defined(__APPLE__) || \
+- (defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && __MAC_OS_X_VERSION_MIN_REQUIRED > __MAC_10_8) || \
+- (defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && __IPHONE_OS_VERSION_MIN_REQUIRED > __IPHONE_6_0)
++ (defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && defined(__MAC_10_8) && __MAC_OS_X_VERSION_MIN_REQUIRED > __MAC_10_8) || \
++ (defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && defined(__IPHONE_6_0) && __IPHONE_OS_VERSION_MIN_REQUIRED > __IPHONE_6_0)
+
+ template <class _CharT, class _Traits>
+ _LIBCPP_HIDDEN
+--
+2.2.2
+
Added: trunk/dports/lang/libcxx/files/0004-implement-atomic-using-mutex-lock_guard-for-64b-ops-.patch
===================================================================
--- trunk/dports/lang/libcxx/files/0004-implement-atomic-using-mutex-lock_guard-for-64b-ops-.patch (rev 0)
+++ trunk/dports/lang/libcxx/files/0004-implement-atomic-using-mutex-lock_guard-for-64b-ops-.patch 2015-01-17 06:29:27 UTC (rev 131750)
@@ -0,0 +1,320 @@
+From bab1ec954fe3ca9d6733da96c2d1bf46bd9345e7 Mon Sep 17 00:00:00 2001
+From: David Fang <fang at csl.cornell.edu>
+Date: Wed, 15 Jan 2014 21:27:34 -0800
+Subject: [PATCH 4/4] implement atomic<> using mutex/lock_guard for 64b ops on
+ 32b PPC not pretty, not fast, but passes atomic tests
+
+(cherry picked from commit 8ec078a3bf34383cf8a0cbc6294ddf0da6bc3bed)
+
+Conflicts:
+ include/atomic
+---
+ include/__atomic_locked | 240 ++++++++++++++++++++++++++++++++++++++++++++++++
+ include/atomic | 46 ++++++++++
+ 2 files changed, 286 insertions(+)
+ create mode 100644 include/__atomic_locked
+
+diff --git a/include/__atomic_locked b/include/__atomic_locked
+new file mode 100644
+index 0000000..f10dd74
+--- /dev/null
++++ b/include/__atomic_locked
+@@ -0,0 +1,240 @@
++// -*- C++ -*-
++//===--------------------------- __atomic_locked --------------------------===//
++//
++// The LLVM Compiler Infrastructure
++//
++// This file is distributed under the University of Illinois Open Source
++// License. See LICENSE.TXT for details.
++//
++//===----------------------------------------------------------------------===//
++
++#ifndef _LIBCPP_ATOMIC_LOCKED
++#define _LIBCPP_ATOMIC_LOCKED
++
++#include <__mutex_base> // for mutex and lock_guard
++
++/**
++ This provides slow-but-usable lock-based atomic access to
++ structures for which atomic lock-free functions are missing.
++ This is motivated by the desire for 64b atomic operations
++ on 32b PowerPC architectures.
++**/
++
++#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
++#pragma GCC system_header
++#endif
++
++_LIBCPP_BEGIN_NAMESPACE_STD
++
++template <class _Tp, bool = is_integral<_Tp>::value && !is_same<_Tp, bool>::value>
++struct __atomic_mutex_locked // false
++{
++ mutable _Atomic(_Tp) __a_;
++ mutable mutex __lock_;
++ typedef lock_guard<mutex> lock_type;
++
++ _Tp& na(void) const { return reinterpret_cast<_Tp&>(__a_); }
++ volatile _Tp& na(void) const volatile { return reinterpret_cast<volatile _Tp&>(__a_); }
++
++ _LIBCPP_INLINE_VISIBILITY
++ bool is_lock_free() const volatile _NOEXCEPT
++ {return false;}
++ _LIBCPP_INLINE_VISIBILITY
++ bool is_lock_free() const _NOEXCEPT
++ {return false;}
++ _LIBCPP_INLINE_VISIBILITY
++ void store(_Tp __d, memory_order = memory_order_seq_cst) volatile _NOEXCEPT
++ { const lock_type g(const_cast<mutex&>(__lock_)); na() = __d; }
++ _LIBCPP_INLINE_VISIBILITY
++ void store(_Tp __d, memory_order = memory_order_seq_cst) _NOEXCEPT
++ { const lock_type g(__lock_); na() = __d; }
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp load(memory_order = memory_order_seq_cst) const volatile _NOEXCEPT
++ { const lock_type g(const_cast<mutex&>(__lock_)); return na(); }
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp load(memory_order = memory_order_seq_cst) const _NOEXCEPT
++ { const lock_type g(__lock_); return na(); }
++ _LIBCPP_INLINE_VISIBILITY
++ operator _Tp() const volatile _NOEXCEPT {return load();}
++ _LIBCPP_INLINE_VISIBILITY
++ operator _Tp() const _NOEXCEPT {return load();}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp exchange(_Tp __d, memory_order = memory_order_seq_cst) volatile _NOEXCEPT
++ { const lock_type g(const_cast<mutex&>(__lock_));
++ // or use std::swap
++ const _Tp ret = na(); na() = __d; return ret; }
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp exchange(_Tp __d, memory_order = memory_order_seq_cst) _NOEXCEPT
++ { const lock_type g(__lock_);
++ // or use std::swap
++ const _Tp ret = na(); na() = __d; return ret; }
++ _LIBCPP_INLINE_VISIBILITY
++ bool compare_exchange_weak(_Tp& __e, _Tp __d,
++ memory_order __s, memory_order __f) volatile _NOEXCEPT
++ { const lock_type g(const_cast<mutex&>(__lock_));
++ if (na() == __e) { na() = __d; return true; }
++ else { __e = na(); return false; }
++ }
++ _LIBCPP_INLINE_VISIBILITY
++ bool compare_exchange_weak(_Tp& __e, _Tp __d,
++ memory_order __s, memory_order __f) _NOEXCEPT
++ { const lock_type g(__lock_);
++ if (na() == __e) { na() = __d; return true; }
++ else { __e = na(); return false; }
++ }
++
++ // for now, _weak indistinguishable from _strong
++ _LIBCPP_INLINE_VISIBILITY
++ bool compare_exchange_strong(_Tp& __e, _Tp __d,
++ memory_order __s, memory_order __f) volatile _NOEXCEPT
++ {return compare_exchange_weak(__e, __d, __s, __f);}
++ _LIBCPP_INLINE_VISIBILITY
++ bool compare_exchange_strong(_Tp& __e, _Tp __d,
++ memory_order __s, memory_order __f) _NOEXCEPT
++ {return compare_exchange_weak(__e, __d, __s, __f);}
++ _LIBCPP_INLINE_VISIBILITY
++ bool compare_exchange_weak(_Tp& __e, _Tp __d,
++ memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
++ {return compare_exchange_weak(__e, __d, __m, __m);}
++ _LIBCPP_INLINE_VISIBILITY
++ bool compare_exchange_weak(_Tp& __e, _Tp __d,
++ memory_order __m = memory_order_seq_cst) _NOEXCEPT
++ {return compare_exchange_weak(__e, __d, __m, __m);}
++ _LIBCPP_INLINE_VISIBILITY
++ bool compare_exchange_strong(_Tp& __e, _Tp __d,
++ memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
++ {return compare_exchange_strong(__e, __d, __m, __m);}
++ _LIBCPP_INLINE_VISIBILITY
++ bool compare_exchange_strong(_Tp& __e, _Tp __d,
++ memory_order __m = memory_order_seq_cst) _NOEXCEPT
++ {return compare_exchange_strong(__e, __d, __m, __m);}
++
++ _LIBCPP_INLINE_VISIBILITY
++#ifndef _LIBCPP_HAS_NO_DEFAULTED_FUNCTIONS
++ __atomic_mutex_locked() _NOEXCEPT = default;
++#else
++ __atomic_mutex_locked() _NOEXCEPT : __a_() {}
++#endif // _LIBCPP_HAS_NO_DEFAULTED_FUNCTIONS
++
++ _LIBCPP_INLINE_VISIBILITY
++ _LIBCPP_CONSTEXPR __atomic_mutex_locked(_Tp __d) _NOEXCEPT : __a_(__d) {}
++#ifndef _LIBCPP_HAS_NO_DELETED_FUNCTIONS
++ __atomic_mutex_locked(const __atomic_mutex_locked&) = delete;
++ __atomic_mutex_locked& operator=(const __atomic_mutex_locked&) = delete;
++ __atomic_mutex_locked& operator=(const __atomic_mutex_locked&) volatile = delete;
++#else // _LIBCPP_HAS_NO_DELETED_FUNCTIONS
++private:
++ __atomic_mutex_locked(const __atomic_mutex_locked&);
++ __atomic_mutex_locked& operator=(const __atomic_mutex_locked&);
++ __atomic_mutex_locked& operator=(const __atomic_mutex_locked&) volatile;
++#endif // _LIBCPP_HAS_NO_DELETED_FUNCTIONS
++}; // end struct __atomic_mutex_locked
++
++// atomic<Integral>
++
++template <class _Tp>
++struct __atomic_mutex_locked<_Tp, true>
++ : public __atomic_mutex_locked<_Tp, false>
++{
++ typedef __atomic_mutex_locked<_Tp, false> __base;
++ typedef typename __base::lock_type lock_type;
++ using __base::__lock_;
++ using __base::na;
++
++ _LIBCPP_INLINE_VISIBILITY
++ __atomic_mutex_locked() _NOEXCEPT _LIBCPP_DEFAULT
++ _LIBCPP_INLINE_VISIBILITY
++ _LIBCPP_CONSTEXPR __atomic_mutex_locked(_Tp __d) _NOEXCEPT : __base(__d) {}
++
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp fetch_add(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
++ { const lock_type g(const_cast<mutex&>(__lock_));
++ const _Tp ret = na(); na() += __op; return ret;
++ }
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp fetch_add(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT
++ { const lock_type g(__lock_);
++ const _Tp ret = na(); na() += __op; return ret;
++ }
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp fetch_sub(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
++ { const lock_type g(const_cast<mutex&>(__lock_));
++ const _Tp ret = na(); na() -= __op; return ret;
++ }
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp fetch_sub(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT
++ { const lock_type g(__lock_);
++ const _Tp ret = na(); na() -= __op; return ret;
++ }
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp fetch_and(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
++ { const lock_type g(const_cast<mutex&>(__lock_));
++ const _Tp ret = na(); na() &= __op; return ret;
++ }
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp fetch_and(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT
++ { const lock_type g(__lock_);
++ const _Tp ret = na(); na() &= __op; return ret;
++ }
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp fetch_or(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
++ { const lock_type g(const_cast<mutex&>(__lock_));
++ const _Tp ret = na(); na() |= __op; return ret;
++ }
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp fetch_or(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT
++ { const lock_type g(__lock_);
++ const _Tp ret = na(); na() |= __op; return ret;
++ }
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp fetch_xor(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
++ { const lock_type g(const_cast<mutex&>(__lock_));
++ const _Tp ret = na(); na() ^= __op; return ret;
++ }
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp fetch_xor(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT
++ { const lock_type g(__lock_);
++ const _Tp ret = na(); na() ^= __op; return ret;
++ }
++
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator++(int) volatile _NOEXCEPT {return fetch_add(_Tp(1));}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator++(int) _NOEXCEPT {return fetch_add(_Tp(1));}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator--(int) volatile _NOEXCEPT {return fetch_sub(_Tp(1));}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator--(int) _NOEXCEPT {return fetch_sub(_Tp(1));}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator++() volatile _NOEXCEPT {return fetch_add(_Tp(1)) + _Tp(1);}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator++() _NOEXCEPT {return fetch_add(_Tp(1)) + _Tp(1);}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator--() volatile _NOEXCEPT {return fetch_sub(_Tp(1)) - _Tp(1);}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator--() _NOEXCEPT {return fetch_sub(_Tp(1)) - _Tp(1);}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator+=(_Tp __op) volatile _NOEXCEPT {return fetch_add(__op) + __op;}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator+=(_Tp __op) _NOEXCEPT {return fetch_add(__op) + __op;}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator-=(_Tp __op) volatile _NOEXCEPT {return fetch_sub(__op) - __op;}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator-=(_Tp __op) _NOEXCEPT {return fetch_sub(__op) - __op;}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator&=(_Tp __op) volatile _NOEXCEPT {return fetch_and(__op) & __op;}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator&=(_Tp __op) _NOEXCEPT {return fetch_and(__op) & __op;}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator|=(_Tp __op) volatile _NOEXCEPT {return fetch_or(__op) | __op;}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator|=(_Tp __op) _NOEXCEPT {return fetch_or(__op) | __op;}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator^=(_Tp __op) volatile _NOEXCEPT {return fetch_xor(__op) ^ __op;}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator^=(_Tp __op) _NOEXCEPT {return fetch_xor(__op) ^ __op;}
++};
++
++_LIBCPP_END_NAMESPACE_STD
++
++#endif // _LIBCPP_ATOMIC_LOCKED
+diff --git a/include/atomic b/include/atomic
+index f6ab1cb..e580315 100644
+--- a/include/atomic
++++ b/include/atomic
+@@ -1528,4 +1528,50 @@ typedef atomic<uintmax_t> atomic_uintmax_t;
+
+ _LIBCPP_END_NAMESPACE_STD
+
++#if defined(__ppc__) && !defined(__ppc64__)
++// specialize fallback implementation where 64b atomics are missing
++#include <__atomic_locked>
++
++_LIBCPP_BEGIN_NAMESPACE_STD
++
++template <>
++struct atomic<long long> : public __atomic_mutex_locked<long long>
++{
++ typedef long long _Tp;
++ typedef __atomic_mutex_locked<_Tp> __base;
++ _LIBCPP_INLINE_VISIBILITY
++ atomic() _NOEXCEPT _LIBCPP_DEFAULT
++ _LIBCPP_INLINE_VISIBILITY
++ _LIBCPP_CONSTEXPR atomic(_Tp __d) _NOEXCEPT : __base(__d) {}
++
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator=(_Tp __d) volatile _NOEXCEPT
++ {__base::store(__d); return __d;}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator=(_Tp __d) _NOEXCEPT
++ {__base::store(__d); return __d;}
++};
++
++template <>
++struct atomic<unsigned long long> :
++ public __atomic_mutex_locked<unsigned long long>
++{
++ typedef unsigned long long _Tp;
++ typedef __atomic_mutex_locked<_Tp> __base;
++ _LIBCPP_INLINE_VISIBILITY
++ atomic() _NOEXCEPT _LIBCPP_DEFAULT
++ _LIBCPP_INLINE_VISIBILITY
++ _LIBCPP_CONSTEXPR atomic(_Tp __d) _NOEXCEPT : __base(__d) {}
++
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator=(_Tp __d) volatile _NOEXCEPT
++ {__base::store(__d); return __d;}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator=(_Tp __d) _NOEXCEPT
++ {__base::store(__d); return __d;}
++};
++
++_LIBCPP_END_NAMESPACE_STD
++#endif // defined(__ppc__) && !defined(__ppc64__)
++
+ #endif // _LIBCPP_ATOMIC
+--
+2.2.2
+
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <https://lists.macosforge.org/pipermail/macports-changes/attachments/20150116/cf4c6092/attachment-0001.html>
More information about the macports-changes
mailing list