author    | Steve Arnold <stephen.arnold42@gmail.com> | 2016-04-20 20:16:29 -0700
committer | Steve Arnold <stephen.arnold42@gmail.com> | 2016-04-20 20:17:38 -0700
commit    | 4d5f7bbc3bdb0337d8e2ab6d8fb214def3b6651d (patch)
tree      | ab506ece65cd07ec9a67f740351657c07d56aa1d /sys-devel
parent    | x11-libs/libcryptui: remove stale temp fixes (diff)
sys-devel/gcc: add some recent fixes for arm/arm64 and sse
Diffstat (limited to 'sys-devel')
sys-devel/gcc/files/5.3.0/gcc-5.3.0-local_atomic-dont-redefine-inline.patch | 1062
sys-devel/gcc/files/5.3.0/gcc-5.3.0-save_require_frame-pointer_when_stack_misaligned.patch | 22
sys-devel/gcc/gcc-5.3.0.ebuild | 13
3 files changed, 1094 insertions, 3 deletions
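A note on the first patch before the diff itself: its core change is visible in the opening hunk, which stops libitm's local_atomic header from redefining `__always_inline` (a name glibc's `<sys/cdefs.h>` also defines) and instead introduces a library-private `__libitm_always_inline` macro that every declaration in the header then uses. The snippet below is a minimal standalone sketch of that pattern, not code from the commit; `add_one` is a made-up placeholder function.

```cpp
// Sketch only (assumed example, not part of the patch): a library-private
// always-inline macro avoids clobbering glibc's __always_inline, which
// <sys/cdefs.h> defines in terms of __inline and the always_inline attribute.
#define __libitm_always_inline __attribute__((always_inline))

// Used the same way the patched header uses it: alongside the inline keyword.
__libitm_always_inline inline int
add_one(int x) noexcept
{
    return x + 1;
}

int main()
{
    return add_one(41) == 42 ? 0 : 1;
}
```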
diff --git a/sys-devel/gcc/files/5.3.0/gcc-5.3.0-local_atomic-dont-redefine-inline.patch b/sys-devel/gcc/files/5.3.0/gcc-5.3.0-local_atomic-dont-redefine-inline.patch new file mode 100644 index 0000000..ddefa4b --- /dev/null +++ b/sys-devel/gcc/files/5.3.0/gcc-5.3.0-local_atomic-dont-redefine-inline.patch @@ -0,0 +1,1062 @@ +--- trunk/libitm/local_atomic 2015/01/05 12:33:28 219188 ++++ trunk/libitm/local_atomic 2015/08/20 17:55:24 227040 +@@ -41,8 +41,7 @@ + #ifndef _GLIBCXX_ATOMIC + #define _GLIBCXX_ATOMIC 1 + +-#undef __always_inline +-#define __always_inline __attribute__((always_inline)) ++#define __libitm_always_inline __attribute__((always_inline)) + + // #pragma GCC system_header + +@@ -74,7 +73,7 @@ + memory_order_seq_cst + } memory_order; + +- inline __always_inline memory_order ++ inline __libitm_always_inline memory_order + __calculate_memory_order(memory_order __m) noexcept + { + const bool __cond1 = __m == memory_order_release; +@@ -84,13 +83,13 @@ + return __mo2; + } + +- inline __always_inline void ++ inline __libitm_always_inline void + atomic_thread_fence(memory_order __m) noexcept + { + __atomic_thread_fence (__m); + } + +- inline __always_inline void ++ inline __libitm_always_inline void + atomic_signal_fence(memory_order __m) noexcept + { + __atomic_thread_fence (__m); +@@ -280,19 +279,19 @@ + // Conversion to ATOMIC_FLAG_INIT. + atomic_flag(bool __i) noexcept : __atomic_flag_base({ __i }) { } + +- __always_inline bool ++ __libitm_always_inline bool + test_and_set(memory_order __m = memory_order_seq_cst) noexcept + { + return __atomic_test_and_set (&_M_i, __m); + } + +- __always_inline bool ++ __libitm_always_inline bool + test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept + { + return __atomic_test_and_set (&_M_i, __m); + } + +- __always_inline void ++ __libitm_always_inline void + clear(memory_order __m = memory_order_seq_cst) noexcept + { + // __glibcxx_assert(__m != memory_order_consume); +@@ -302,7 +301,7 @@ + __atomic_clear (&_M_i, __m); + } + +- __always_inline void ++ __libitm_always_inline void + clear(memory_order __m = memory_order_seq_cst) volatile noexcept + { + // __glibcxx_assert(__m != memory_order_consume); +@@ -455,7 +454,7 @@ + is_lock_free() const volatile noexcept + { return __atomic_is_lock_free (sizeof (_M_i), &_M_i); } + +- __always_inline void ++ __libitm_always_inline void + store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept + { + // __glibcxx_assert(__m != memory_order_acquire); +@@ -465,7 +464,7 @@ + __atomic_store_n(&_M_i, __i, __m); + } + +- __always_inline void ++ __libitm_always_inline void + store(__int_type __i, + memory_order __m = memory_order_seq_cst) volatile noexcept + { +@@ -476,7 +475,7 @@ + __atomic_store_n(&_M_i, __i, __m); + } + +- __always_inline __int_type ++ __libitm_always_inline __int_type + load(memory_order __m = memory_order_seq_cst) const noexcept + { + // __glibcxx_assert(__m != memory_order_release); +@@ -485,7 +484,7 @@ + return __atomic_load_n(&_M_i, __m); + } + +- __always_inline __int_type ++ __libitm_always_inline __int_type + load(memory_order __m = memory_order_seq_cst) const volatile noexcept + { + // __glibcxx_assert(__m != memory_order_release); +@@ -494,21 +493,21 @@ + return __atomic_load_n(&_M_i, __m); + } + +- __always_inline __int_type ++ __libitm_always_inline __int_type + exchange(__int_type __i, + memory_order __m = memory_order_seq_cst) noexcept + { + return __atomic_exchange_n(&_M_i, __i, __m); + } + +- __always_inline __int_type ++ __libitm_always_inline 
__int_type + exchange(__int_type __i, + memory_order __m = memory_order_seq_cst) volatile noexcept + { + return __atomic_exchange_n(&_M_i, __i, __m); + } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_weak(__int_type& __i1, __int_type __i2, + memory_order __m1, memory_order __m2) noexcept + { +@@ -519,7 +518,7 @@ + return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1, __m1, __m2); + } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_weak(__int_type& __i1, __int_type __i2, + memory_order __m1, + memory_order __m2) volatile noexcept +@@ -531,7 +530,7 @@ + return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1, __m1, __m2); + } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_weak(__int_type& __i1, __int_type __i2, + memory_order __m = memory_order_seq_cst) noexcept + { +@@ -539,7 +538,7 @@ + __calculate_memory_order(__m)); + } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_weak(__int_type& __i1, __int_type __i2, + memory_order __m = memory_order_seq_cst) volatile noexcept + { +@@ -547,7 +546,7 @@ + __calculate_memory_order(__m)); + } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_strong(__int_type& __i1, __int_type __i2, + memory_order __m1, memory_order __m2) noexcept + { +@@ -558,7 +557,7 @@ + return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0, __m1, __m2); + } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_strong(__int_type& __i1, __int_type __i2, + memory_order __m1, + memory_order __m2) volatile noexcept +@@ -570,7 +569,7 @@ + return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0, __m1, __m2); + } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_strong(__int_type& __i1, __int_type __i2, + memory_order __m = memory_order_seq_cst) noexcept + { +@@ -578,7 +577,7 @@ + __calculate_memory_order(__m)); + } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_strong(__int_type& __i1, __int_type __i2, + memory_order __m = memory_order_seq_cst) volatile noexcept + { +@@ -586,52 +585,52 @@ + __calculate_memory_order(__m)); + } + +- __always_inline __int_type ++ __libitm_always_inline __int_type + fetch_add(__int_type __i, + memory_order __m = memory_order_seq_cst) noexcept + { return __atomic_fetch_add(&_M_i, __i, __m); } + +- __always_inline __int_type ++ __libitm_always_inline __int_type + fetch_add(__int_type __i, + memory_order __m = memory_order_seq_cst) volatile noexcept + { return __atomic_fetch_add(&_M_i, __i, __m); } + +- __always_inline __int_type ++ __libitm_always_inline __int_type + fetch_sub(__int_type __i, + memory_order __m = memory_order_seq_cst) noexcept + { return __atomic_fetch_sub(&_M_i, __i, __m); } + +- __always_inline __int_type ++ __libitm_always_inline __int_type + fetch_sub(__int_type __i, + memory_order __m = memory_order_seq_cst) volatile noexcept + { return __atomic_fetch_sub(&_M_i, __i, __m); } + +- __always_inline __int_type ++ __libitm_always_inline __int_type + fetch_and(__int_type __i, + memory_order __m = memory_order_seq_cst) noexcept + { return __atomic_fetch_and(&_M_i, __i, __m); } + +- __always_inline __int_type ++ __libitm_always_inline __int_type + fetch_and(__int_type __i, + memory_order __m = memory_order_seq_cst) volatile noexcept + { return __atomic_fetch_and(&_M_i, __i, __m); } + +- __always_inline __int_type ++ __libitm_always_inline __int_type + fetch_or(__int_type __i, + memory_order __m = memory_order_seq_cst) 
noexcept + { return __atomic_fetch_or(&_M_i, __i, __m); } + +- __always_inline __int_type ++ __libitm_always_inline __int_type + fetch_or(__int_type __i, + memory_order __m = memory_order_seq_cst) volatile noexcept + { return __atomic_fetch_or(&_M_i, __i, __m); } + +- __always_inline __int_type ++ __libitm_always_inline __int_type + fetch_xor(__int_type __i, + memory_order __m = memory_order_seq_cst) noexcept + { return __atomic_fetch_xor(&_M_i, __i, __m); } + +- __always_inline __int_type ++ __libitm_always_inline __int_type + fetch_xor(__int_type __i, + memory_order __m = memory_order_seq_cst) volatile noexcept + { return __atomic_fetch_xor(&_M_i, __i, __m); } +@@ -733,7 +732,7 @@ + is_lock_free() const volatile noexcept + { return __atomic_is_lock_free (sizeof (_M_p), &_M_p); } + +- __always_inline void ++ __libitm_always_inline void + store(__pointer_type __p, + memory_order __m = memory_order_seq_cst) noexcept + { +@@ -744,7 +743,7 @@ + __atomic_store_n(&_M_p, __p, __m); + } + +- __always_inline void ++ __libitm_always_inline void + store(__pointer_type __p, + memory_order __m = memory_order_seq_cst) volatile noexcept + { +@@ -755,7 +754,7 @@ + __atomic_store_n(&_M_p, __p, __m); + } + +- __always_inline __pointer_type ++ __libitm_always_inline __pointer_type + load(memory_order __m = memory_order_seq_cst) const noexcept + { + // __glibcxx_assert(__m != memory_order_release); +@@ -764,7 +763,7 @@ + return __atomic_load_n(&_M_p, __m); + } + +- __always_inline __pointer_type ++ __libitm_always_inline __pointer_type + load(memory_order __m = memory_order_seq_cst) const volatile noexcept + { + // __glibcxx_assert(__m != memory_order_release); +@@ -773,21 +772,21 @@ + return __atomic_load_n(&_M_p, __m); + } + +- __always_inline __pointer_type ++ __libitm_always_inline __pointer_type + exchange(__pointer_type __p, + memory_order __m = memory_order_seq_cst) noexcept + { + return __atomic_exchange_n(&_M_p, __p, __m); + } + +- __always_inline __pointer_type ++ __libitm_always_inline __pointer_type + exchange(__pointer_type __p, + memory_order __m = memory_order_seq_cst) volatile noexcept + { + return __atomic_exchange_n(&_M_p, __p, __m); + } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2, + memory_order __m1, + memory_order __m2) noexcept +@@ -799,7 +798,7 @@ + return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0, __m1, __m2); + } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2, + memory_order __m1, + memory_order __m2) volatile noexcept +@@ -811,22 +810,22 @@ + return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0, __m1, __m2); + } + +- __always_inline __pointer_type ++ __libitm_always_inline __pointer_type + fetch_add(ptrdiff_t __d, + memory_order __m = memory_order_seq_cst) noexcept + { return __atomic_fetch_add(&_M_p, __d, __m); } + +- __always_inline __pointer_type ++ __libitm_always_inline __pointer_type + fetch_add(ptrdiff_t __d, + memory_order __m = memory_order_seq_cst) volatile noexcept + { return __atomic_fetch_add(&_M_p, __d, __m); } + +- __always_inline __pointer_type ++ __libitm_always_inline __pointer_type + fetch_sub(ptrdiff_t __d, + memory_order __m = memory_order_seq_cst) noexcept + { return __atomic_fetch_sub(&_M_p, __d, __m); } + +- __always_inline __pointer_type ++ __libitm_always_inline __pointer_type + fetch_sub(ptrdiff_t __d, + memory_order __m = memory_order_seq_cst) volatile noexcept + { return 
__atomic_fetch_sub(&_M_p, __d, __m); } +@@ -870,67 +869,67 @@ + bool + is_lock_free() const volatile noexcept { return _M_base.is_lock_free(); } + +- __always_inline void ++ __libitm_always_inline void + store(bool __i, memory_order __m = memory_order_seq_cst) noexcept + { _M_base.store(__i, __m); } + +- __always_inline void ++ __libitm_always_inline void + store(bool __i, memory_order __m = memory_order_seq_cst) volatile noexcept + { _M_base.store(__i, __m); } + +- __always_inline bool ++ __libitm_always_inline bool + load(memory_order __m = memory_order_seq_cst) const noexcept + { return _M_base.load(__m); } + +- __always_inline bool ++ __libitm_always_inline bool + load(memory_order __m = memory_order_seq_cst) const volatile noexcept + { return _M_base.load(__m); } + +- __always_inline bool ++ __libitm_always_inline bool + exchange(bool __i, memory_order __m = memory_order_seq_cst) noexcept + { return _M_base.exchange(__i, __m); } + +- __always_inline bool ++ __libitm_always_inline bool + exchange(bool __i, + memory_order __m = memory_order_seq_cst) volatile noexcept + { return _M_base.exchange(__i, __m); } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_weak(bool& __i1, bool __i2, memory_order __m1, + memory_order __m2) noexcept + { return _M_base.compare_exchange_weak(__i1, __i2, __m1, __m2); } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_weak(bool& __i1, bool __i2, memory_order __m1, + memory_order __m2) volatile noexcept + { return _M_base.compare_exchange_weak(__i1, __i2, __m1, __m2); } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_weak(bool& __i1, bool __i2, + memory_order __m = memory_order_seq_cst) noexcept + { return _M_base.compare_exchange_weak(__i1, __i2, __m); } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_weak(bool& __i1, bool __i2, + memory_order __m = memory_order_seq_cst) volatile noexcept + { return _M_base.compare_exchange_weak(__i1, __i2, __m); } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_strong(bool& __i1, bool __i2, memory_order __m1, + memory_order __m2) noexcept + { return _M_base.compare_exchange_strong(__i1, __i2, __m1, __m2); } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_strong(bool& __i1, bool __i2, memory_order __m1, + memory_order __m2) volatile noexcept + { return _M_base.compare_exchange_strong(__i1, __i2, __m1, __m2); } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_strong(bool& __i1, bool __i2, + memory_order __m = memory_order_seq_cst) noexcept + { return _M_base.compare_exchange_strong(__i1, __i2, __m); } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_strong(bool& __i1, bool __i2, + memory_order __m = memory_order_seq_cst) volatile noexcept + { return _M_base.compare_exchange_strong(__i1, __i2, __m); } +@@ -980,11 +979,11 @@ + store(_Tp __i, memory_order _m = memory_order_seq_cst) noexcept + { __atomic_store(&_M_i, &__i, _m); } + +- __always_inline void ++ __libitm_always_inline void + store(_Tp __i, memory_order _m = memory_order_seq_cst) volatile noexcept + { __atomic_store(&_M_i, &__i, _m); } + +- __always_inline _Tp ++ __libitm_always_inline _Tp + load(memory_order _m = memory_order_seq_cst) const noexcept + { + _Tp tmp; +@@ -992,7 +991,7 @@ + return tmp; + } + +- __always_inline _Tp ++ __libitm_always_inline _Tp + load(memory_order _m = memory_order_seq_cst) const volatile noexcept + { + _Tp tmp; +@@ -1000,7 
+999,7 @@ + return tmp; + } + +- __always_inline _Tp ++ __libitm_always_inline _Tp + exchange(_Tp __i, memory_order _m = memory_order_seq_cst) noexcept + { + _Tp tmp; +@@ -1008,7 +1007,7 @@ + return tmp; + } + +- __always_inline _Tp ++ __libitm_always_inline _Tp + exchange(_Tp __i, + memory_order _m = memory_order_seq_cst) volatile noexcept + { +@@ -1017,50 +1016,50 @@ + return tmp; + } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_weak(_Tp& __e, _Tp __i, memory_order __s, + memory_order __f) noexcept + { + return __atomic_compare_exchange(&_M_i, &__e, &__i, true, __s, __f); + } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_weak(_Tp& __e, _Tp __i, memory_order __s, + memory_order __f) volatile noexcept + { + return __atomic_compare_exchange(&_M_i, &__e, &__i, true, __s, __f); + } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_weak(_Tp& __e, _Tp __i, + memory_order __m = memory_order_seq_cst) noexcept + { return compare_exchange_weak(__e, __i, __m, __m); } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_weak(_Tp& __e, _Tp __i, + memory_order __m = memory_order_seq_cst) volatile noexcept + { return compare_exchange_weak(__e, __i, __m, __m); } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_strong(_Tp& __e, _Tp __i, memory_order __s, + memory_order __f) noexcept + { + return __atomic_compare_exchange(&_M_i, &__e, &__i, false, __s, __f); + } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_strong(_Tp& __e, _Tp __i, memory_order __s, + memory_order __f) volatile noexcept + { + return __atomic_compare_exchange(&_M_i, &__e, &__i, false, __s, __f); + } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_strong(_Tp& __e, _Tp __i, + memory_order __m = memory_order_seq_cst) noexcept + { return compare_exchange_strong(__e, __i, __m, __m); } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_strong(_Tp& __e, _Tp __i, + memory_order __m = memory_order_seq_cst) volatile noexcept + { return compare_exchange_strong(__e, __i, __m, __m); } +@@ -1153,46 +1152,46 @@ + is_lock_free() const volatile noexcept + { return _M_b.is_lock_free(); } + +- __always_inline void ++ __libitm_always_inline void + store(__pointer_type __p, + memory_order __m = memory_order_seq_cst) noexcept + { return _M_b.store(__p, __m); } + +- __always_inline void ++ __libitm_always_inline void + store(__pointer_type __p, + memory_order __m = memory_order_seq_cst) volatile noexcept + { return _M_b.store(__p, __m); } + +- __always_inline __pointer_type ++ __libitm_always_inline __pointer_type + load(memory_order __m = memory_order_seq_cst) const noexcept + { return _M_b.load(__m); } + +- __always_inline __pointer_type ++ __libitm_always_inline __pointer_type + load(memory_order __m = memory_order_seq_cst) const volatile noexcept + { return _M_b.load(__m); } + +- __always_inline __pointer_type ++ __libitm_always_inline __pointer_type + exchange(__pointer_type __p, + memory_order __m = memory_order_seq_cst) noexcept + { return _M_b.exchange(__p, __m); } + +- __always_inline __pointer_type ++ __libitm_always_inline __pointer_type + exchange(__pointer_type __p, + memory_order __m = memory_order_seq_cst) volatile noexcept + { return _M_b.exchange(__p, __m); } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2, + memory_order __m1, memory_order __m2) noexcept 
+ { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2, + memory_order __m1, + memory_order __m2) volatile noexcept + { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2, + memory_order __m = memory_order_seq_cst) noexcept + { +@@ -1200,7 +1199,7 @@ + __calculate_memory_order(__m)); + } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2, + memory_order __m = memory_order_seq_cst) volatile noexcept + { +@@ -1208,18 +1207,18 @@ + __calculate_memory_order(__m)); + } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2, + memory_order __m1, memory_order __m2) noexcept + { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2, + memory_order __m1, + memory_order __m2) volatile noexcept + { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2, + memory_order __m = memory_order_seq_cst) noexcept + { +@@ -1227,7 +1226,7 @@ + __calculate_memory_order(__m)); + } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2, + memory_order __m = memory_order_seq_cst) volatile noexcept + { +@@ -1235,22 +1234,22 @@ + __calculate_memory_order(__m)); + } + +- __always_inline __pointer_type ++ __libitm_always_inline __pointer_type + fetch_add(ptrdiff_t __d, + memory_order __m = memory_order_seq_cst) noexcept + { return _M_b.fetch_add(__d, __m); } + +- __always_inline __pointer_type ++ __libitm_always_inline __pointer_type + fetch_add(ptrdiff_t __d, + memory_order __m = memory_order_seq_cst) volatile noexcept + { return _M_b.fetch_add(__d, __m); } + +- __always_inline __pointer_type ++ __libitm_always_inline __pointer_type + fetch_sub(ptrdiff_t __d, + memory_order __m = memory_order_seq_cst) noexcept + { return _M_b.fetch_sub(__d, __m); } + +- __always_inline __pointer_type ++ __libitm_always_inline __pointer_type + fetch_sub(ptrdiff_t __d, + memory_order __m = memory_order_seq_cst) volatile noexcept + { return _M_b.fetch_sub(__d, __m); } +@@ -1544,98 +1543,98 @@ + + + // Function definitions, atomic_flag operations. 
+- inline __always_inline bool ++ inline __libitm_always_inline bool + atomic_flag_test_and_set_explicit(atomic_flag* __a, + memory_order __m) noexcept + { return __a->test_and_set(__m); } + +- inline __always_inline bool ++ inline __libitm_always_inline bool + atomic_flag_test_and_set_explicit(volatile atomic_flag* __a, + memory_order __m) noexcept + { return __a->test_and_set(__m); } + +- inline __always_inline void ++ inline __libitm_always_inline void + atomic_flag_clear_explicit(atomic_flag* __a, memory_order __m) noexcept + { __a->clear(__m); } + +- inline __always_inline void ++ inline __libitm_always_inline void + atomic_flag_clear_explicit(volatile atomic_flag* __a, + memory_order __m) noexcept + { __a->clear(__m); } + +- inline __always_inline bool ++ inline __libitm_always_inline bool + atomic_flag_test_and_set(atomic_flag* __a) noexcept + { return atomic_flag_test_and_set_explicit(__a, memory_order_seq_cst); } + +- inline __always_inline bool ++ inline __libitm_always_inline bool + atomic_flag_test_and_set(volatile atomic_flag* __a) noexcept + { return atomic_flag_test_and_set_explicit(__a, memory_order_seq_cst); } + +- inline __always_inline void ++ inline __libitm_always_inline void + atomic_flag_clear(atomic_flag* __a) noexcept + { atomic_flag_clear_explicit(__a, memory_order_seq_cst); } + +- inline __always_inline void ++ inline __libitm_always_inline void + atomic_flag_clear(volatile atomic_flag* __a) noexcept + { atomic_flag_clear_explicit(__a, memory_order_seq_cst); } + + + // Function templates generally applicable to atomic types. + template<typename _ITp> +- __always_inline bool ++ __libitm_always_inline bool + atomic_is_lock_free(const atomic<_ITp>* __a) noexcept + { return __a->is_lock_free(); } + + template<typename _ITp> +- __always_inline bool ++ __libitm_always_inline bool + atomic_is_lock_free(const volatile atomic<_ITp>* __a) noexcept + { return __a->is_lock_free(); } + + template<typename _ITp> +- __always_inline void ++ __libitm_always_inline void + atomic_init(atomic<_ITp>* __a, _ITp __i) noexcept; + + template<typename _ITp> +- __always_inline void ++ __libitm_always_inline void + atomic_init(volatile atomic<_ITp>* __a, _ITp __i) noexcept; + + template<typename _ITp> +- __always_inline void ++ __libitm_always_inline void + atomic_store_explicit(atomic<_ITp>* __a, _ITp __i, + memory_order __m) noexcept + { __a->store(__i, __m); } + + template<typename _ITp> +- __always_inline void ++ __libitm_always_inline void + atomic_store_explicit(volatile atomic<_ITp>* __a, _ITp __i, + memory_order __m) noexcept + { __a->store(__i, __m); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_load_explicit(const atomic<_ITp>* __a, memory_order __m) noexcept + { return __a->load(__m); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_load_explicit(const volatile atomic<_ITp>* __a, + memory_order __m) noexcept + { return __a->load(__m); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_exchange_explicit(atomic<_ITp>* __a, _ITp __i, + memory_order __m) noexcept + { return __a->exchange(__i, __m); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_exchange_explicit(volatile atomic<_ITp>* __a, _ITp __i, + memory_order __m) noexcept + { return __a->exchange(__i, __m); } + + template<typename _ITp> +- __always_inline bool ++ __libitm_always_inline bool + atomic_compare_exchange_weak_explicit(atomic<_ITp>* __a, 
+ _ITp* __i1, _ITp __i2, + memory_order __m1, +@@ -1643,7 +1642,7 @@ + { return __a->compare_exchange_weak(*__i1, __i2, __m1, __m2); } + + template<typename _ITp> +- __always_inline bool ++ __libitm_always_inline bool + atomic_compare_exchange_weak_explicit(volatile atomic<_ITp>* __a, + _ITp* __i1, _ITp __i2, + memory_order __m1, +@@ -1651,7 +1650,7 @@ + { return __a->compare_exchange_weak(*__i1, __i2, __m1, __m2); } + + template<typename _ITp> +- __always_inline bool ++ __libitm_always_inline bool + atomic_compare_exchange_strong_explicit(atomic<_ITp>* __a, + _ITp* __i1, _ITp __i2, + memory_order __m1, +@@ -1659,7 +1658,7 @@ + { return __a->compare_exchange_strong(*__i1, __i2, __m1, __m2); } + + template<typename _ITp> +- __always_inline bool ++ __libitm_always_inline bool + atomic_compare_exchange_strong_explicit(volatile atomic<_ITp>* __a, + _ITp* __i1, _ITp __i2, + memory_order __m1, +@@ -1668,37 +1667,37 @@ + + + template<typename _ITp> +- __always_inline void ++ __libitm_always_inline void + atomic_store(atomic<_ITp>* __a, _ITp __i) noexcept + { atomic_store_explicit(__a, __i, memory_order_seq_cst); } + + template<typename _ITp> +- __always_inline void ++ __libitm_always_inline void + atomic_store(volatile atomic<_ITp>* __a, _ITp __i) noexcept + { atomic_store_explicit(__a, __i, memory_order_seq_cst); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_load(const atomic<_ITp>* __a) noexcept + { return atomic_load_explicit(__a, memory_order_seq_cst); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_load(const volatile atomic<_ITp>* __a) noexcept + { return atomic_load_explicit(__a, memory_order_seq_cst); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_exchange(atomic<_ITp>* __a, _ITp __i) noexcept + { return atomic_exchange_explicit(__a, __i, memory_order_seq_cst); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_exchange(volatile atomic<_ITp>* __a, _ITp __i) noexcept + { return atomic_exchange_explicit(__a, __i, memory_order_seq_cst); } + + template<typename _ITp> +- __always_inline bool ++ __libitm_always_inline bool + atomic_compare_exchange_weak(atomic<_ITp>* __a, + _ITp* __i1, _ITp __i2) noexcept + { +@@ -1708,7 +1707,7 @@ + } + + template<typename _ITp> +- __always_inline bool ++ __libitm_always_inline bool + atomic_compare_exchange_weak(volatile atomic<_ITp>* __a, + _ITp* __i1, _ITp __i2) noexcept + { +@@ -1718,7 +1717,7 @@ + } + + template<typename _ITp> +- __always_inline bool ++ __libitm_always_inline bool + atomic_compare_exchange_strong(atomic<_ITp>* __a, + _ITp* __i1, _ITp __i2) noexcept + { +@@ -1728,7 +1727,7 @@ + } + + template<typename _ITp> +- __always_inline bool ++ __libitm_always_inline bool + atomic_compare_exchange_strong(volatile atomic<_ITp>* __a, + _ITp* __i1, _ITp __i2) noexcept + { +@@ -1742,158 +1741,158 @@ + // intergral types as specified in the standard, excluding address + // types. 
+ template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_add_explicit(__atomic_base<_ITp>* __a, _ITp __i, + memory_order __m) noexcept + { return __a->fetch_add(__i, __m); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_add_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i, + memory_order __m) noexcept + { return __a->fetch_add(__i, __m); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_sub_explicit(__atomic_base<_ITp>* __a, _ITp __i, + memory_order __m) noexcept + { return __a->fetch_sub(__i, __m); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_sub_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i, + memory_order __m) noexcept + { return __a->fetch_sub(__i, __m); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_and_explicit(__atomic_base<_ITp>* __a, _ITp __i, + memory_order __m) noexcept + { return __a->fetch_and(__i, __m); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_and_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i, + memory_order __m) noexcept + { return __a->fetch_and(__i, __m); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_or_explicit(__atomic_base<_ITp>* __a, _ITp __i, + memory_order __m) noexcept + { return __a->fetch_or(__i, __m); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_or_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i, + memory_order __m) noexcept + { return __a->fetch_or(__i, __m); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_xor_explicit(__atomic_base<_ITp>* __a, _ITp __i, + memory_order __m) noexcept + { return __a->fetch_xor(__i, __m); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_xor_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i, + memory_order __m) noexcept + { return __a->fetch_xor(__i, __m); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_add(__atomic_base<_ITp>* __a, _ITp __i) noexcept + { return atomic_fetch_add_explicit(__a, __i, memory_order_seq_cst); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_add(volatile __atomic_base<_ITp>* __a, _ITp __i) noexcept + { return atomic_fetch_add_explicit(__a, __i, memory_order_seq_cst); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_sub(__atomic_base<_ITp>* __a, _ITp __i) noexcept + { return atomic_fetch_sub_explicit(__a, __i, memory_order_seq_cst); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_sub(volatile __atomic_base<_ITp>* __a, _ITp __i) noexcept + { return atomic_fetch_sub_explicit(__a, __i, memory_order_seq_cst); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_and(__atomic_base<_ITp>* __a, _ITp __i) noexcept + { return atomic_fetch_and_explicit(__a, __i, memory_order_seq_cst); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_and(volatile __atomic_base<_ITp>* __a, _ITp __i) noexcept + { return atomic_fetch_and_explicit(__a, __i, memory_order_seq_cst); } + + template<typename 
_ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_or(__atomic_base<_ITp>* __a, _ITp __i) noexcept + { return atomic_fetch_or_explicit(__a, __i, memory_order_seq_cst); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_or(volatile __atomic_base<_ITp>* __a, _ITp __i) noexcept + { return atomic_fetch_or_explicit(__a, __i, memory_order_seq_cst); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_xor(__atomic_base<_ITp>* __a, _ITp __i) noexcept + { return atomic_fetch_xor_explicit(__a, __i, memory_order_seq_cst); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_xor(volatile __atomic_base<_ITp>* __a, _ITp __i) noexcept + { return atomic_fetch_xor_explicit(__a, __i, memory_order_seq_cst); } + + + // Partial specializations for pointers. + template<typename _ITp> +- __always_inline _ITp* ++ __libitm_always_inline _ITp* + atomic_fetch_add_explicit(atomic<_ITp*>* __a, ptrdiff_t __d, + memory_order __m) noexcept + { return __a->fetch_add(__d, __m); } + + template<typename _ITp> +- __always_inline _ITp* ++ __libitm_always_inline _ITp* + atomic_fetch_add_explicit(volatile atomic<_ITp*>* __a, ptrdiff_t __d, + memory_order __m) noexcept + { return __a->fetch_add(__d, __m); } + + template<typename _ITp> +- __always_inline _ITp* ++ __libitm_always_inline _ITp* + atomic_fetch_add(volatile atomic<_ITp*>* __a, ptrdiff_t __d) noexcept + { return __a->fetch_add(__d); } + + template<typename _ITp> +- __always_inline _ITp* ++ __libitm_always_inline _ITp* + atomic_fetch_add(atomic<_ITp*>* __a, ptrdiff_t __d) noexcept + { return __a->fetch_add(__d); } + + template<typename _ITp> +- __always_inline _ITp* ++ __libitm_always_inline _ITp* + atomic_fetch_sub_explicit(volatile atomic<_ITp*>* __a, + ptrdiff_t __d, memory_order __m) noexcept + { return __a->fetch_sub(__d, __m); } + + template<typename _ITp> +- __always_inline _ITp* ++ __libitm_always_inline _ITp* + atomic_fetch_sub_explicit(atomic<_ITp*>* __a, ptrdiff_t __d, + memory_order __m) noexcept + { return __a->fetch_sub(__d, __m); } + + template<typename _ITp> +- __always_inline _ITp* ++ __libitm_always_inline _ITp* + atomic_fetch_sub(volatile atomic<_ITp*>* __a, ptrdiff_t __d) noexcept + { return __a->fetch_sub(__d); } + + template<typename _ITp> +- __always_inline _ITp* ++ __libitm_always_inline _ITp* + atomic_fetch_sub(atomic<_ITp*>* __a, ptrdiff_t __d) noexcept + { return __a->fetch_sub(__d); } + // @} group atomics diff --git a/sys-devel/gcc/files/5.3.0/gcc-5.3.0-save_require_frame-pointer_when_stack_misaligned.patch b/sys-devel/gcc/files/5.3.0/gcc-5.3.0-save_require_frame-pointer_when_stack_misaligned.patch new file mode 100644 index 0000000..3343d8c --- /dev/null +++ b/sys-devel/gcc/files/5.3.0/gcc-5.3.0-save_require_frame-pointer_when_stack_misaligned.patch @@ -0,0 +1,22 @@ +--- a/gcc/config/i386/i386.c 2016/01/18 16:18:49 232527 ++++ b/gcc/config/i386/i386.c 2016/01/18 16:19:53 232528 +@@ -9690,6 +9690,10 @@ + if (TARGET_64BIT_MS_ABI && get_frame_size () > SEH_MAX_FRAME_SIZE) + return true; + ++ /* SSE saves require frame-pointer when stack is misaligned. */ ++ if (TARGET_64BIT_MS_ABI && ix86_incoming_stack_boundary < 128) ++ return true; ++ + /* In ix86_option_override_internal, TARGET_OMIT_LEAF_FRAME_POINTER + turns off the frame pointer by default. Turn it back on now if + we've not got a leaf function. 
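Aside from that rename, the 1,062-line local_atomic patch leaves the underlying GCC `__atomic_*` builtins untouched; they are what every wrapper in the hunks above forwards to. For readers who have not used them directly, here is a small self-contained sketch (not from the commit; the variable names and values are arbitrary) of the builtins that appear most often in the patch:

```cpp
#include <cstdio>

int main()
{
    int counter = 0;

    // __atomic_fetch_add returns the value held before the addition,
    // like std::atomic<int>::fetch_add with memory_order_seq_cst.
    int old = __atomic_fetch_add(&counter, 5, __ATOMIC_SEQ_CST);

    // __atomic_compare_exchange_n succeeds only if *ptr still equals
    // *expected; on failure it writes the current value back to expected.
    int expected = 5;
    bool ok = __atomic_compare_exchange_n(&counter, &expected, 7,
                                          /*weak=*/false,
                                          __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);

    std::printf("old=%d ok=%d counter=%d\n",
                old, (int)ok, __atomic_load_n(&counter, __ATOMIC_SEQ_CST));
    return 0;  // prints: old=0 ok=1 counter=7
}
```

These builtins require GCC or a GCC-compatible compiler such as Clang.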
*/ +@@ -13065,6 +13065,8 @@ + m->fs.fp_valid = true; + } + ++ m->fs.sp_valid = !frame_pointer_needed; ++ + if (!int_registers_saved) + ix86_emit_save_regs_using_mov (frame.reg_save_offset); + if (!sse_registers_saved) diff --git a/sys-devel/gcc/gcc-5.3.0.ebuild b/sys-devel/gcc/gcc-5.3.0.ebuild index 3225928..e8fd2d4 100644 --- a/sys-devel/gcc/gcc-5.3.0.ebuild +++ b/sys-devel/gcc/gcc-5.3.0.ebuild @@ -1,4 +1,4 @@ -# Copyright 1999-2015 Gentoo Foundation +# Copyright 1999-2016 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 # $Id$ @@ -40,9 +40,16 @@ src_prepare() { ewarn "Please rebuild gcc after upgrading to >=glibc-2.12 #362315" EPATCH_EXCLUDE+=" 10_all_default-fortify-source.patch" fi - is_crosscompile && EPATCH_EXCLUDE+=" 05_all_gcc-spec-env.patch" + if is_crosscompile ; then + EPATCH_EXCLUDE+=" 05_all_gcc-spec-env.patch" + fi + + # add some arm/arm64 goodness + epatch "${FILESDIR}"/${PV}/${P}-arm-neon-array-size.patch + epatch "${FILESDIR}"/${PV}/${P}-local_atomic-dont-redefine-inline.patch - [[ ${ARCH} == arm* ]] && epatch "${FILESDIR}"/${PV}/${P}-arm-neon-array-size.patch + # add upstream alignment patch + epatch "${FILESDIR}"/${PV}/${P}-save_require_frame-pointer_when_stack_misaligned.patch toolchain_src_prepare } |