[131761] trunk/dports/lang/llvm-3.7
jeremyhu at macports.org
Sat Jan 17 15:47:53 PST 2015
Revision: 131761
https://trac.macports.org/changeset/131761
Author: jeremyhu at macports.org
Date: 2015-01-17 15:47:53 -0800 (Sat, 17 Jan 2015)
Log Message:
-----------
llvm-3.7: Commit missing patches
Modified Paths:
--------------
trunk/dports/lang/llvm-3.7/Portfile
Added Paths:
-----------
trunk/dports/lang/llvm-3.7/files/0003-Fix-local-and-iterator-when-building-with-Lion-and-n.patch
trunk/dports/lang/llvm-3.7/files/0004-implement-atomic-using-mutex-lock_guard-for-64b-ops-.patch
Modified: trunk/dports/lang/llvm-3.7/Portfile
===================================================================
--- trunk/dports/lang/llvm-3.7/Portfile 2015-01-17 20:10:21 UTC (rev 131760)
+++ trunk/dports/lang/llvm-3.7/Portfile 2015-01-17 23:47:53 UTC (rev 131761)
@@ -69,6 +69,7 @@
set libcxx_rev ${svn.revision}
set clang-modernize_rev ${svn.revision}
version ${llvm_version}-r${svn.revision}
+revision 0
worksrcdir trunk
svn.url https://llvm.org/svn/llvm-project/llvm/trunk
#worksrcdir release_${llvm_version_no_dot}
Added: trunk/dports/lang/llvm-3.7/files/0003-Fix-local-and-iterator-when-building-with-Lion-and-n.patch
===================================================================
--- trunk/dports/lang/llvm-3.7/files/0003-Fix-local-and-iterator-when-building-with-Lion-and-n.patch (rev 0)
+++ trunk/dports/lang/llvm-3.7/files/0003-Fix-local-and-iterator-when-building-with-Lion-and-n.patch 2015-01-17 23:47:53 UTC (rev 131761)
@@ -0,0 +1,46 @@
+From 5d209f3a5e92a27b5334b77ace93d7bb66aaef80 Mon Sep 17 00:00:00 2001
+From: Jeremy Huddleston Sequoia <jeremyhu at apple.com>
+Date: Fri, 16 Jan 2015 21:34:36 -0800
+Subject: [PATCH 3/4] Fix <locale> and <iterator> when building with Lion and
+ newer Availability.h
+
+Signed-off-by: Jeremy Huddleston Sequoia <jeremyhu at apple.com>
+(cherry picked from commit e8d7616436baea0e0ff1dd78bd8d8dfc5d367e44)
+---
+ include/iterator | 4 ++--
+ include/locale | 4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/include/iterator b/include/iterator
+index bcf142a..0b90d96 100644
+--- a/projects/libcxx/include/iterator
++++ b/projects/libcxx/include/iterator
+@@ -920,8 +920,8 @@ public:
+ _LIBCPP_INLINE_VISIBILITY bool failed() const _NOEXCEPT {return __sbuf_ == 0;}
+
+ #if !defined(__APPLE__) || \
+- (defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && __MAC_OS_X_VERSION_MIN_REQUIRED > __MAC_10_8) || \
+- (defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && __IPHONE_OS_VERSION_MIN_REQUIRED > __IPHONE_6_0)
++ (defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && defined(__MAC_10_8) && __MAC_OS_X_VERSION_MIN_REQUIRED > __MAC_10_8) || \
++ (defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && defined(__IPHONE_6_0) && __IPHONE_OS_VERSION_MIN_REQUIRED > __IPHONE_6_0)
+
+ template <class _Ch, class _Tr>
+ friend
+diff --git a/include/locale b/include/locale
+index 0d01002..789b083 100644
+--- a/projects/libcxx/include/locale
++++ b/projects/libcxx/include/locale
+@@ -1473,8 +1473,8 @@ __pad_and_output(_OutputIterator __s,
+ }
+
+ #if !defined(__APPLE__) || \
+- (defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && __MAC_OS_X_VERSION_MIN_REQUIRED > __MAC_10_8) || \
+- (defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && __IPHONE_OS_VERSION_MIN_REQUIRED > __IPHONE_6_0)
++ (defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && defined(__MAC_10_8) && __MAC_OS_X_VERSION_MIN_REQUIRED > __MAC_10_8) || \
++ (defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && defined(__IPHONE_6_0) && __IPHONE_OS_VERSION_MIN_REQUIRED > __IPHONE_6_0)
+
+ template <class _CharT, class _Traits>
+ _LIBCPP_HIDDEN
+--
+2.2.2
+
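A note on the guard change in the patch above: inside an #if, an identifier that is not defined as a macro evaluates to 0, so on a system whose Availability.h does not define __MAC_10_8 (or __IPHONE_6_0) the comparison __MAC_OS_X_VERSION_MIN_REQUIRED > __MAC_10_8 is true for any nonzero deployment target, and the wrong branch gets compiled. Adding the defined(__MAC_10_8) / defined(__IPHONE_6_0) checks makes the comparison apply only when the macros actually exist. A minimal standalone sketch of the pitfall follows; MIN_REQUIRED and OS_VERSION_10_8 are made-up names standing in for the Availability.h macros, not Apple's:

// Hypothetical illustration of the #if guard pattern fixed above.
#include <cstdio>

#define MIN_REQUIRED 1070   // pretend the deployment target is 10.7 (Lion)
// OS_VERSION_10_8 is deliberately left undefined, like __MAC_10_8 in a
// header that predates 10.8.

#if MIN_REQUIRED > OS_VERSION_10_8          // undefined macro reads as 0
#define UNGUARDED_BRANCH 1                  // taken, wrongly
#else
#define UNGUARDED_BRANCH 0
#endif

#if defined(OS_VERSION_10_8) && MIN_REQUIRED > OS_VERSION_10_8
#define GUARDED_BRANCH 1
#else
#define GUARDED_BRANCH 0                    // taken: defined() short-circuits
#endif

int main() {
    std::printf("unguarded: %d, guarded: %d\n", UNGUARDED_BRANCH, GUARDED_BRANCH);
    return 0;  // prints "unguarded: 1, guarded: 0"
}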
Added: trunk/dports/lang/llvm-3.7/files/0004-implement-atomic-using-mutex-lock_guard-for-64b-ops-.patch
===================================================================
--- trunk/dports/lang/llvm-3.7/files/0004-implement-atomic-using-mutex-lock_guard-for-64b-ops-.patch (rev 0)
+++ trunk/dports/lang/llvm-3.7/files/0004-implement-atomic-using-mutex-lock_guard-for-64b-ops-.patch 2015-01-17 23:47:53 UTC (rev 131761)
@@ -0,0 +1,318 @@
+From 9f0fbe0ed8c421311713b2df58a6b6a121fa88b7 Mon Sep 17 00:00:00 2001
+From: David Fang <fang at csl.cornell.edu>
+Date: Wed, 15 Jan 2014 21:27:34 -0800
+Subject: [PATCH 4/4] implement atomic<> using mutex/lock_guard for 64b ops on
+ 32b PPC; not pretty, not fast, but passes atomic tests
+
+(cherry picked from commit 8ec078a3bf34383cf8a0cbc6294ddf0da6bc3bed)
+---
+ include/__atomic_locked | 240 ++++++++++++++++++++++++++++++++++++++++++++++++
+ include/atomic | 45 +++++++++
+ 2 files changed, 285 insertions(+)
+ create mode 100644 include/__atomic_locked
+
+diff --git a/include/__atomic_locked b/include/__atomic_locked
+new file mode 100644
+index 0000000..f10dd74
+--- /dev/null
++++ b/projects/libcxx/include/__atomic_locked
+@@ -0,0 +1,240 @@
++// -*- C++ -*-
++//===--------------------------- __atomic_locked --------------------------===//
++//
++// The LLVM Compiler Infrastructure
++//
++// This file is distributed under the University of Illinois Open Source
++// License. See LICENSE.TXT for details.
++//
++//===----------------------------------------------------------------------===//
++
++#ifndef _LIBCPP_ATOMIC_LOCKED
++#define _LIBCPP_ATOMIC_LOCKED
++
++#include <__mutex_base> // for mutex and lock_guard
++
++/**
++ This provides slow-but-usable lock-based atomic access to
++ structures for which atomic lock-free functions are missing.
++ This is motivated by the desire for 64b atomic operations
++ on 32b PowerPC architectures.
++**/
++
++#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
++#pragma GCC system_header
++#endif
++
++_LIBCPP_BEGIN_NAMESPACE_STD
++
++template <class _Tp, bool = is_integral<_Tp>::value && !is_same<_Tp, bool>::value>
++struct __atomic_mutex_locked // false
++{
++ mutable _Atomic(_Tp) __a_;
++ mutable mutex __lock_;
++ typedef lock_guard<mutex> lock_type;
++
++ _Tp& na(void) const { return reinterpret_cast<_Tp&>(__a_); }
++ volatile _Tp& na(void) const volatile { return reinterpret_cast<volatile _Tp&>(__a_); }
++
++ _LIBCPP_INLINE_VISIBILITY
++ bool is_lock_free() const volatile _NOEXCEPT
++ {return false;}
++ _LIBCPP_INLINE_VISIBILITY
++ bool is_lock_free() const _NOEXCEPT
++ {return false;}
++ _LIBCPP_INLINE_VISIBILITY
++ void store(_Tp __d, memory_order = memory_order_seq_cst) volatile _NOEXCEPT
++ { const lock_type g(const_cast<mutex&>(__lock_)); na() = __d; }
++ _LIBCPP_INLINE_VISIBILITY
++ void store(_Tp __d, memory_order = memory_order_seq_cst) _NOEXCEPT
++ { const lock_type g(__lock_); na() = __d; }
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp load(memory_order = memory_order_seq_cst) const volatile _NOEXCEPT
++ { const lock_type g(const_cast<mutex&>(__lock_)); return na(); }
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp load(memory_order = memory_order_seq_cst) const _NOEXCEPT
++ { const lock_type g(__lock_); return na(); }
++ _LIBCPP_INLINE_VISIBILITY
++ operator _Tp() const volatile _NOEXCEPT {return load();}
++ _LIBCPP_INLINE_VISIBILITY
++ operator _Tp() const _NOEXCEPT {return load();}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp exchange(_Tp __d, memory_order = memory_order_seq_cst) volatile _NOEXCEPT
++ { const lock_type g(const_cast<mutex&>(__lock_));
++ // or use std::swap
++ const _Tp ret = na(); na() = __d; return ret; }
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp exchange(_Tp __d, memory_order = memory_order_seq_cst) _NOEXCEPT
++ { const lock_type g(__lock_);
++ // or use std::swap
++ const _Tp ret = na(); na() = __d; return ret; }
++ _LIBCPP_INLINE_VISIBILITY
++ bool compare_exchange_weak(_Tp& __e, _Tp __d,
++ memory_order __s, memory_order __f) volatile _NOEXCEPT
++ { const lock_type g(const_cast<mutex&>(__lock_));
++ if (na() == __e) { na() = __d; return true; }
++ else { __e = na(); return false; }
++ }
++ _LIBCPP_INLINE_VISIBILITY
++ bool compare_exchange_weak(_Tp& __e, _Tp __d,
++ memory_order __s, memory_order __f) _NOEXCEPT
++ { const lock_type g(__lock_);
++ if (na() == __e) { na() = __d; return true; }
++ else { __e = na(); return false; }
++ }
++
++ // for now, _weak is indistinguishable from _strong
++ _LIBCPP_INLINE_VISIBILITY
++ bool compare_exchange_strong(_Tp& __e, _Tp __d,
++ memory_order __s, memory_order __f) volatile _NOEXCEPT
++ {return compare_exchange_weak(__e, __d, __s, __f);}
++ _LIBCPP_INLINE_VISIBILITY
++ bool compare_exchange_strong(_Tp& __e, _Tp __d,
++ memory_order __s, memory_order __f) _NOEXCEPT
++ {return compare_exchange_weak(__e, __d, __s, __f);}
++ _LIBCPP_INLINE_VISIBILITY
++ bool compare_exchange_weak(_Tp& __e, _Tp __d,
++ memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
++ {return compare_exchange_weak(__e, __d, __m, __m);}
++ _LIBCPP_INLINE_VISIBILITY
++ bool compare_exchange_weak(_Tp& __e, _Tp __d,
++ memory_order __m = memory_order_seq_cst) _NOEXCEPT
++ {return compare_exchange_weak(__e, __d, __m, __m);}
++ _LIBCPP_INLINE_VISIBILITY
++ bool compare_exchange_strong(_Tp& __e, _Tp __d,
++ memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
++ {return compare_exchange_strong(__e, __d, __m, __m);}
++ _LIBCPP_INLINE_VISIBILITY
++ bool compare_exchange_strong(_Tp& __e, _Tp __d,
++ memory_order __m = memory_order_seq_cst) _NOEXCEPT
++ {return compare_exchange_strong(__e, __d, __m, __m);}
++
++ _LIBCPP_INLINE_VISIBILITY
++#ifndef _LIBCPP_HAS_NO_DEFAULTED_FUNCTIONS
++ __atomic_mutex_locked() _NOEXCEPT = default;
++#else
++ __atomic_mutex_locked() _NOEXCEPT : __a_() {}
++#endif // _LIBCPP_HAS_NO_DEFAULTED_FUNCTIONS
++
++ _LIBCPP_INLINE_VISIBILITY
++ _LIBCPP_CONSTEXPR __atomic_mutex_locked(_Tp __d) _NOEXCEPT : __a_(__d) {}
++#ifndef _LIBCPP_HAS_NO_DELETED_FUNCTIONS
++ __atomic_mutex_locked(const __atomic_mutex_locked&) = delete;
++ __atomic_mutex_locked& operator=(const __atomic_mutex_locked&) = delete;
++ __atomic_mutex_locked& operator=(const __atomic_mutex_locked&) volatile = delete;
++#else // _LIBCPP_HAS_NO_DELETED_FUNCTIONS
++private:
++ __atomic_mutex_locked(const __atomic_mutex_locked&);
++ __atomic_mutex_locked& operator=(const __atomic_mutex_locked&);
++ __atomic_mutex_locked& operator=(const __atomic_mutex_locked&) volatile;
++#endif // _LIBCPP_HAS_NO_DELETED_FUNCTIONS
++}; // end struct __atomic_mutex_locked
++
++// atomic<Integral>
++
++template <class _Tp>
++struct __atomic_mutex_locked<_Tp, true>
++ : public __atomic_mutex_locked<_Tp, false>
++{
++ typedef __atomic_mutex_locked<_Tp, false> __base;
++ typedef typename __base::lock_type lock_type;
++ using __base::__lock_;
++ using __base::na;
++
++ _LIBCPP_INLINE_VISIBILITY
++ __atomic_mutex_locked() _NOEXCEPT _LIBCPP_DEFAULT
++ _LIBCPP_INLINE_VISIBILITY
++ _LIBCPP_CONSTEXPR __atomic_mutex_locked(_Tp __d) _NOEXCEPT : __base(__d) {}
++
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp fetch_add(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
++ { const lock_type g(const_cast<mutex&>(__lock_));
++ const _Tp ret = na(); na() += __op; return ret;
++ }
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp fetch_add(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT
++ { const lock_type g(__lock_);
++ const _Tp ret = na(); na() += __op; return ret;
++ }
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp fetch_sub(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
++ { const lock_type g(const_cast<mutex&>(__lock_));
++ const _Tp ret = na(); na() -= __op; return ret;
++ }
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp fetch_sub(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT
++ { const lock_type g(__lock_);
++ const _Tp ret = na(); na() -= __op; return ret;
++ }
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp fetch_and(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
++ { const lock_type g(const_cast<mutex&>(__lock_));
++ const _Tp ret = na(); na() &= __op; return ret;
++ }
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp fetch_and(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT
++ { const lock_type g(__lock_);
++ const _Tp ret = na(); na() &= __op; return ret;
++ }
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp fetch_or(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
++ { const lock_type g(const_cast<mutex&>(__lock_));
++ const _Tp ret = na(); na() |= __op; return ret;
++ }
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp fetch_or(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT
++ { const lock_type g(__lock_);
++ const _Tp ret = na(); na() |= __op; return ret;
++ }
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp fetch_xor(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
++ { const lock_type g(const_cast<mutex&>(__lock_));
++ const _Tp ret = na(); na() ^= __op; return ret;
++ }
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp fetch_xor(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT
++ { const lock_type g(__lock_);
++ const _Tp ret = na(); na() ^= __op; return ret;
++ }
++
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator++(int) volatile _NOEXCEPT {return fetch_add(_Tp(1));}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator++(int) _NOEXCEPT {return fetch_add(_Tp(1));}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator--(int) volatile _NOEXCEPT {return fetch_sub(_Tp(1));}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator--(int) _NOEXCEPT {return fetch_sub(_Tp(1));}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator++() volatile _NOEXCEPT {return fetch_add(_Tp(1)) + _Tp(1);}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator++() _NOEXCEPT {return fetch_add(_Tp(1)) + _Tp(1);}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator--() volatile _NOEXCEPT {return fetch_sub(_Tp(1)) - _Tp(1);}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator--() _NOEXCEPT {return fetch_sub(_Tp(1)) - _Tp(1);}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator+=(_Tp __op) volatile _NOEXCEPT {return fetch_add(__op) + __op;}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator+=(_Tp __op) _NOEXCEPT {return fetch_add(__op) + __op;}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator-=(_Tp __op) volatile _NOEXCEPT {return fetch_sub(__op) - __op;}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator-=(_Tp __op) _NOEXCEPT {return fetch_sub(__op) - __op;}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator&=(_Tp __op) volatile _NOEXCEPT {return fetch_and(__op) & __op;}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator&=(_Tp __op) _NOEXCEPT {return fetch_and(__op) & __op;}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator|=(_Tp __op) volatile _NOEXCEPT {return fetch_or(__op) | __op;}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator|=(_Tp __op) _NOEXCEPT {return fetch_or(__op) | __op;}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator^=(_Tp __op) volatile _NOEXCEPT {return fetch_xor(__op) ^ __op;}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator^=(_Tp __op) _NOEXCEPT {return fetch_xor(__op) ^ __op;}
++};
++
++_LIBCPP_END_NAMESPACE_STD
++
++#endif // _LIBCPP_ATOMIC_LOCKED
+diff --git a/include/atomic b/include/atomic
+index 0427a91..7710cd1 100644
+--- a/projects/libcxx/include/atomic
++++ b/projects/libcxx/include/atomic
+@@ -1793,6 +1793,51 @@ typedef atomic<uintmax_t> atomic_uintmax_t;
+
+ _LIBCPP_END_NAMESPACE_STD
+
++#if defined(__ppc__) && !defined(__ppc64__)
++// specialize fallback implementation where 64b atomics are missing
++#include <__atomic_locked>
++
++_LIBCPP_BEGIN_NAMESPACE_STD
++
++template <>
++struct atomic<long long> : public __atomic_mutex_locked<long long>
++{
++ typedef long long _Tp;
++ typedef __atomic_mutex_locked<_Tp> __base;
++ _LIBCPP_INLINE_VISIBILITY
++ atomic() _NOEXCEPT _LIBCPP_DEFAULT
++ _LIBCPP_INLINE_VISIBILITY
++ _LIBCPP_CONSTEXPR atomic(_Tp __d) _NOEXCEPT : __base(__d) {}
++
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator=(_Tp __d) volatile _NOEXCEPT
++ {__base::store(__d); return __d;}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator=(_Tp __d) _NOEXCEPT
++ {__base::store(__d); return __d;}
++};
++
++template <>
++struct atomic<unsigned long long> :
++ public __atomic_mutex_locked<unsigned long long>
++{
++ typedef unsigned long long _Tp;
++ typedef __atomic_mutex_locked<_Tp> __base;
++ _LIBCPP_INLINE_VISIBILITY
++ atomic() _NOEXCEPT _LIBCPP_DEFAULT
++ _LIBCPP_INLINE_VISIBILITY
++ _LIBCPP_CONSTEXPR atomic(_Tp __d) _NOEXCEPT : __base(__d) {}
++
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator=(_Tp __d) volatile _NOEXCEPT
++ {__base::store(__d); return __d;}
++ _LIBCPP_INLINE_VISIBILITY
++ _Tp operator=(_Tp __d) _NOEXCEPT
++ {__base::store(__d); return __d;}
++};
++
++_LIBCPP_END_NAMESPACE_STD
++#endif // defined(__ppc__) && !defined(__ppc64__)
+ #endif // !_LIBCPP_HAS_NO_THREADS
+
+ #endif // _LIBCPP_ATOMIC
+--
+2.2.2
+
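For reference, the heart of the patch above with the libc++ plumbing stripped away: every load, store, and read-modify-write on the 64-bit value takes the same mutex, so operations are serialized rather than lock-free (hence "not pretty, not fast" in the subject, and is_lock_free() returning false). Below is a minimal standalone sketch of the pattern; LockedAtomic64 is an illustrative name, not part of libc++:

// Hypothetical mutex-backed "atomic" 64-bit counter, mirroring the
// mutex/lock_guard pattern of __atomic_mutex_locked above.
#include <cstdint>
#include <cstdio>
#include <mutex>

class LockedAtomic64 {
    mutable std::mutex mtx_;   // mutable so load() can lock on a const object
    std::int64_t value_;
public:
    explicit LockedAtomic64(std::int64_t v = 0) : value_(v) {}

    std::int64_t load() const {
        std::lock_guard<std::mutex> g(mtx_);          // serialize every access
        return value_;
    }
    void store(std::int64_t d) {
        std::lock_guard<std::mutex> g(mtx_);
        value_ = d;
    }
    std::int64_t fetch_add(std::int64_t op) {
        std::lock_guard<std::mutex> g(mtx_);          // RMW under a single lock
        std::int64_t old = value_;
        value_ += op;
        return old;
    }
    bool compare_exchange(std::int64_t& expected, std::int64_t desired) {
        std::lock_guard<std::mutex> g(mtx_);
        if (value_ == expected) { value_ = desired; return true; }
        expected = value_;                            // report the current value
        return false;
    }
};

int main() {
    LockedAtomic64 a(41);
    a.fetch_add(1);
    std::printf("%lld\n", static_cast<long long>(a.load()));  // 42
    return 0;
}

As in the patch, the memory_order arguments of the real class are accepted but ignored; the mutex's acquire/release semantics do the ordering work.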