diff --git a/boost.spec b/boost.spec index 7421a2c23266a77531c35e0a91cc007290ceeccf..9846098dbb0adf15716d9f32a7ebea572668df96 100644 --- a/boost.spec +++ b/boost.spec @@ -1,4 +1,4 @@ -%define anolis_release .0.1 +%define anolis_release .0.2 # Support for documentation installation As the %%doc macro erases the # target directory ($RPM_BUILD_ROOT%%{_docdir}/%%{name}), manually # installed documentation must be saved into a temporary dedicated @@ -165,6 +165,7 @@ Patch1006: 1006-Name-the-empty_value-template-parameters.patch Patch1007: 1007-Improve-C++11-allocator-support.patch Patch1008: 1008-Use-boost::allocator_rebind-instead-of-A::template-rebind.patch Patch1009: 1009-Implement-allocator-access-utilities.patch +Patch1010: boost_1_66_0-sw.patch # End Anolis customized patches %bcond_with tests @@ -683,6 +684,7 @@ find ./boost -name '*.hpp' -perm /111 | xargs chmod a-x %patch1007 -p2 %patch1008 -p2 %patch1009 -p2 +%patch1010 -p1 %build PYTHON3_ABIFLAGS=$(/usr/bin/python3-config --abiflags) @@ -1342,6 +1344,9 @@ fi %{_mandir}/man1/bjam.1* %changelog +* Thu Apr 18 2024 wxiat - 1.66.0-13.0.2 +- add sw patch + * Wed Jul 05 2023 Liwei Ge - 1.66.0-13.0.1 - backport patches to fit with C++20 until upstream chase up: * Replace-std::allocate-with-std::allocator_traits.patch diff --git a/boost_1_66_0-sw.patch b/boost_1_66_0-sw.patch new file mode 100644 index 0000000000000000000000000000000000000000..9e1a4d5d8e293321d0267cead47fa367b015a68d --- /dev/null +++ b/boost_1_66_0-sw.patch @@ -0,0 +1,1805 @@ +diff -uNar boost_1_66_0.org/boost/atomic/detail/caps_gcc_sw_64.hpp boost_1_66_0.new/boost/atomic/detail/caps_gcc_sw_64.hpp +--- boost_1_66_0.org/boost/atomic/detail/caps_gcc_sw_64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ boost_1_66_0.new/boost/atomic/detail/caps_gcc_sw_64.hpp 2024-04-18 13:53:40.552733880 +0800 +@@ -0,0 +1,34 @@ ++/* ++ * Distributed under the Boost Software License, Version 1.0. 
++ * (See accompanying file LICENSE_1_0.txt or copy at ++ * http://www.boost.org/LICENSE_1_0.txt) ++ * ++ * Copyright (c) 2009 Helge Bahmann ++ * Copyright (c) 2013 Tim Blechmann ++ * Copyright (c) 2014 Andrey Semashev ++ */ ++/*! ++ * \file atomic/detail/caps_gcc_sw_64.hpp ++ * ++ * This header defines feature capabilities macros ++ */ ++ ++#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_SW_64_HPP_INCLUDED_ ++#define BOOST_ATOMIC_DETAIL_CAPS_GCC_SW_64_HPP_INCLUDED_ ++ ++#include ++ ++#ifdef BOOST_HAS_PRAGMA_ONCE ++#pragma once ++#endif ++ ++#define BOOST_ATOMIC_INT8_LOCK_FREE 2 ++#define BOOST_ATOMIC_INT16_LOCK_FREE 2 ++#define BOOST_ATOMIC_INT32_LOCK_FREE 2 ++#define BOOST_ATOMIC_INT64_LOCK_FREE 2 ++#define BOOST_ATOMIC_POINTER_LOCK_FREE 2 ++ ++#define BOOST_ATOMIC_THREAD_FENCE 2 ++#define BOOST_ATOMIC_SIGNAL_FENCE 2 ++ ++#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_SW_64_HPP_INCLUDED_ +diff -uNar boost_1_66_0.org/boost/atomic/detail/ops_gcc_sw_64.hpp boost_1_66_0.new/boost/atomic/detail/ops_gcc_sw_64.hpp +--- boost_1_66_0.org/boost/atomic/detail/ops_gcc_sw_64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ boost_1_66_0.new/boost/atomic/detail/ops_gcc_sw_64.hpp 2024-04-18 15:10:32.172733880 +0800 +@@ -0,0 +1,1038 @@ ++/* ++ * Distributed under the Boost Software License, Version 1.0. ++ * (See accompanying file LICENSE_1_0.txt or copy at ++ * http://www.boost.org/LICENSE_1_0.txt) ++ * ++ * Copyright (c) 2009 Helge Bahmann ++ * Copyright (c) 2013 Tim Blechmann ++ * Copyright (c) 2014 Andrey Semashev ++ */ ++/*! ++ * \file atomic/detail/ops_gcc_sw_64.hpp ++ * ++ * This header contains implementation of the \c operations template. 
++ */ ++ ++#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_SW_64_HPP_INCLUDED_ ++#define BOOST_ATOMIC_DETAIL_OPS_GCC_SW_64_HPP_INCLUDED_ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#ifdef BOOST_HAS_PRAGMA_ONCE ++#pragma once ++#endif ++ ++namespace boost { ++namespace atomics { ++namespace detail { ++ ++/* ++ Refer to http://h71000.www7.hp.com/doc/82final/5601/5601pro_004.html ++ (HP OpenVMS systems documentation) and the Sw_64 Architecture Reference Manual. ++ */ ++ ++/* ++ NB: The most natural thing would be to write the increment/decrement ++ operators along the following lines: ++ ++ __asm__ __volatile__ ++ ( ++ "1: ldl_l %0,%1 \n" ++ "addl %0,1,%0 \n" ++ "stl_c %0,%1 \n" ++ "beq %0,1b\n" ++ : "=&b" (tmp) ++ : "m" (value) ++ : "cc" ++ ); ++ ++ However according to the comments on the HP website and matching ++ comments in the Linux kernel sources this defies branch prediction, ++ as the cpu assumes that backward branches are always taken; so ++ instead copy the trick from the Linux kernel, introduce a forward ++ branch and back again. ++ ++ I have, however, had a hard time measuring the difference between ++ the two versions in microbenchmarks -- I am leaving it in nevertheless ++ as it apparently does not hurt either. 
++*/ ++ ++struct gcc_sw_64_operations_base ++{ ++ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false; ++ static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true; ++ ++ static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT ++ { ++ if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u) ++ __asm__ __volatile__ ("memb" ::: "memory"); ++ } ++ ++ static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT ++ { ++ if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u) ++ __asm__ __volatile__ ("memb" ::: "memory"); ++ } ++ ++ static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT ++ { ++ if (order == memory_order_seq_cst) ++ __asm__ __volatile__ ("memb" ::: "memory"); ++ } ++}; ++ ++ ++template< bool Signed > ++struct operations< 4u, Signed > : ++ public gcc_sw_64_operations_base ++{ ++ typedef typename storage_traits< 4u >::type storage_type; ++ ++ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u; ++ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 4u; ++ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed; ++ ++ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ fence_before(order); ++ storage = v; ++ fence_after_store(order); ++ } ++ ++ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type v = storage; ++ fence_after(order); ++ return v; ++ } ++ ++ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, tmp; ++ storage_type tmp1, tmp2; ++ fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n\t" ++ "ldi %2,%4\n\t" ++ "ldi %3,1\n\t" ++ "mov %5, 
%1\n\t" ++ "lldw %0, 0(%2)\n\t" ++ "wr_f %3\n\t" ++ "lstw %1, 0(%2)\n\t" ++ "rd_f %1\n\t" ++ "beq %1, 2f\n\t" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 ++ "=&r" (tmp), // %1 ++ "=&r" (tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ fence_after(order); ++ return original; ++ } ++ ++ static BOOST_FORCEINLINE bool compare_exchange_weak( ++ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT ++ { ++ fence_before(success_order); ++ int success; ++ storage_type current; ++ storage_type tmp1,tmp2; ++ __asm__ __volatile__ ++ ( ++ "1:\n\t" ++ "ldi %4,%6\n\t" ++ "lldw %2, 0(%4)\n\t" // current = *(&storage) ++ "cmpeq %2, %0, %5\n\t" // success = current == expected ++ "wr_f %5\n\t" // success = current == expected ++ "mov %2, %0\n\t" // expected = current ++ "lstw %1, 0(%4)\n\t" // storage = desired; desired = store succeeded ++ "rd_f %1\n\t" // storage = desired; desired = store succeeded ++ "beq %5, 2f\n\t" // if (success == 0) goto end ++ "mov %1, %3\n\t" // success = desired ++ "2:\n\t" ++ : "+r" (expected), // %0 ++ "+r" (desired), // %1 ++ "=&r" (current), // %2 ++ "=&r" (success), // %3 ++ "=&r" (tmp1), // %4 ++ "=&r" (tmp2) // %5 ++ : "m" (storage) // %6 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ if (success) ++ fence_after(success_order); ++ else ++ fence_after(failure_order); ++ return !!success; ++ } ++ ++ static BOOST_FORCEINLINE bool compare_exchange_strong( ++ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT ++ { ++ int success; ++ storage_type current, tmp; ++ storage_type tmp1,tmp2; ++ fence_before(success_order); ++ __asm__ __volatile__ ++ ( ++ "1:\n\t" ++ "ldi %4,%6\n\t" ++ "mov %7, %1\n\t" // tmp = desired ++ "lldw %2, 
0(%4)\n\t" // current = *(&storage) ++ "cmpeq %2, %0, %5\n\t" // success = current == expected ++ "wr_f %5\n\t" // success = current == expected ++ "mov %2, %0\n\t" // expected = current ++ "lstw %1, 0(%4)\n\t" // storage = tmp; tmp = store succeeded ++ "rd_f %1\n\t" // storage = tmp; tmp = store succeeded ++ "beq %5, 2f\n\t" // if (success == 0) goto end ++ "beq %1, 3f\n\t" // if (tmp == 0) goto retry ++ "mov %1, %3\n\t" // success = tmp ++ "2:\n\t" ++ ++ ".subsection 2\n\t" ++ "3: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "+r" (expected), // %0 ++ "=&r" (tmp), // %1 ++ "=&r" (current), // %2 ++ "=&r" (success), // %3 ++ "=&r" (tmp1), // %4 ++ "=&r" (tmp2) // %5 ++ : "m" (storage), // %6 ++ "r" (desired) // %7 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ if (success) ++ fence_after(success_order); ++ else ++ fence_after(failure_order); ++ return !!success; ++ } ++ ++ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, modified; ++ storage_type tmp1, tmp2; ++ fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n\t" ++ "ldi %2,%4\n\t" ++ "ldi %3,1\n\t" ++ "lldw %0, 0(%2)\n\t" ++ "wr_f %3\n\t" ++ "addw %0, %5, %1\n\t" ++ "lstw %1, 0(%2)\n\t" ++ "rd_f %1\n\t" ++ "beq %1, 2f\n\t" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 ++ "=&r" (modified), // %1 ++ "=&r" (tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ fence_after(order); ++ return original; ++ } ++ ++ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, modified; ++ storage_type tmp1, tmp2; ++ fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n\t" ++ "ldi %2,%4\n\t" ++ "ldi %3,1\n\t" ++ "lldw %0, 0(%2)\n\t" ++ "wr_f %3\n\t" ++ "subw %0, %5, %1\n\t" ++ "lstw 
%1, 0(%2)\n\t" ++ "rd_f %1\n\t" ++ "beq %1, 2f\n\t" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 ++ "=&r" (modified), // %1 ++ "=&r" (tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ fence_after(order); ++ return original; ++ } ++ ++ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, modified; ++ storage_type tmp1,tmp2; ++ fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n\t" ++ "ldi %2,%4\n\t" ++ "ldi %3,1\n\t" ++ "lldw %0, 0(%2)\n\t" ++ "wr_f %3\n\t" ++ "and %0, %5, %1\n\t" ++ "lstw %1, 0(%2)\n\t" ++ "rd_f %1\n\t" ++ "beq %1, 2f\n\t" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 ++ "=&r" (modified), // %1 ++ "=&r" (tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ fence_after(order); ++ return original; ++ } ++ ++ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, modified; ++ storage_type tmp1,tmp2; ++ fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n" ++ "ldi %2,%4\n" ++ "ldi %3,1\n" ++ "lldw %0, 0(%2)\n" ++ "wr_f %3 \n" ++ "bis %0, %5, %1\n" ++ "lstw %1, 0(%2)\n" ++ "rd_f %1 \n" ++ "beq %1, 2f\n" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 ++ "=&r" (modified), // %1 ++ "=&r" (tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ fence_after(order); ++ return original; ++ } ++ ++ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, modified; ++ storage_type tmp1, 
tmp2; ++ fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n" ++ "ldi %2,%4\n" ++ "ldi %3,1\n" ++ "lldw %0, 0(%2)\n" ++ "wr_f %3 \n" ++ "xor %0, %5, %1\n" ++ "lstw %1, 0(%2)\n" ++ "rd_f %1 \n" ++ "beq %1, 2f\n" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 ++ "=&r" (modified), // %1 ++ "=&r" (tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ fence_after(order); ++ return original; ++ } ++ ++ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT ++ { ++ return !!exchange(storage, (storage_type)1, order); ++ } ++ ++ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT ++ { ++ store(storage, 0, order); ++ } ++}; ++ ++ ++template< > ++struct operations< 1u, false > : ++ public operations< 4u, false > ++{ ++ typedef operations< 4u, false > base_type; ++ typedef base_type::storage_type storage_type; ++ ++ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, modified; ++ storage_type tmp1, tmp2; ++ fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n" ++ "ldi %2,%4\n" ++ "ldi %3,1\n" ++ "lldw %0, 0(%2)\n" ++ "wr_f %3 \n" ++ "addw %0, %5, %1\n" ++ "zapnot %1, #1, %1\n" ++ "lstw %1, 0(%2)\n" ++ "rd_f %1 \n" ++ "beq %1, 2f\n" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 ++ "=&r" (modified), // %1 ++ "=&r" (tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ fence_after(order); ++ return original; ++ } ++ ++ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, modified; ++ storage_type tmp1, tmp2; 
++ fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n" ++ "ldi %2,%4\n" ++ "ldi %3,1\n" ++ "lldw %0, 0(%2)\n" ++ "wr_f %3 \n" ++ "subw %0, %5, %1\n" ++ "zapnot %1, #1, %1\n" ++ "lstw %1, 0(%2)\n" ++ "rd_f %1 \n" ++ "beq %1, 2f\n" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 ++ "=&r" (modified), // %1 ++ "=&r" (tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ fence_after(order); ++ return original; ++ } ++}; ++ ++template< > ++struct operations< 1u, true > : ++ public operations< 4u, true > ++{ ++ typedef operations< 4u, true > base_type; ++ typedef base_type::storage_type storage_type; ++ ++ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, modified; ++ storage_type tmp1,tmp2; ++ fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n" ++ "ldi %2,%4\n" ++ "ldi %3,1\n" ++ "lldw %0, 0(%2)\n" ++ "wr_f %3 \n" ++ "addw %0, %5, %1\n" ++ "sextb %1, %1\n" ++ "lstw %1, 0(%2)\n" ++ "rd_f %1 \n" ++ "beq %1, 2f\n" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 ++ "=&r" (modified), // %1 ++ "=&r" (tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ fence_after(order); ++ return original; ++ } ++ ++ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, modified; ++ storage_type tmp1,tmp2; ++ fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n" ++ "ldi %2,%4\n" ++ "ldi %3,1\n" ++ "lldw %0, 0(%2)\n" ++ "wr_f %3 \n" ++ "subw %0, %5, %1\n" ++ "sextb %1, %1\n" ++ "lstw %1, 0(%2)\n" ++ "rd_f %1 \n" ++ "beq %1, 2f\n" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), 
// %0 ++ "=&r" (modified), // %1 ++ "=&r" (tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ fence_after(order); ++ return original; ++ } ++}; ++ ++ ++template< > ++struct operations< 2u, false > : ++ public operations< 4u, false > ++{ ++ typedef operations< 4u, false > base_type; ++ typedef base_type::storage_type storage_type; ++ ++ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, modified; ++ storage_type tmp1,tmp2; ++ fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n" ++ "ldi %2,%4\n" ++ "ldi %3\n" ++ "lldw %0, 0(%2)\n" ++ "wr_f %3 \n" ++ "addw %0, %5, %1\n" ++ "zapnot %1, #3, %1\n" ++ "lstw %1, 0(%2)\n" ++ "rd_f %1 \n" ++ "beq %1, 2f\n" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 ++ "=&r" (modified), // %1 ++ "=&r" (tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ fence_after(order); ++ return original; ++ } ++ ++ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, modified; ++ storage_type tmp1,tmp2; ++ fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n" ++ "ldi %2,%4\n" ++ "ldi %3,1\n" ++ "lldw %0, 0(%2)\n" ++ "wr_f %3 \n" ++ "subw %0, %5, %1\n" ++ "zapnot %1, #3, %1\n" ++ "lstw %1, %2\n" ++ "rd_f %1 \n" ++ "beq %1, 2f\n" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 ++ "=&r" (modified), // %1 ++ "=&r" (tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ fence_after(order); ++ return original; ++ } ++}; ++ ++template< > ++struct operations< 2u, true > : ++ public operations< 4u, true > ++{ ++ 
typedef operations< 4u, true > base_type; ++ typedef base_type::storage_type storage_type; ++ ++ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, modified; ++ storage_type tmp1,tmp2; ++ fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "ldi %2,%4\n" ++ "ldi %3,1\n" ++ "lldw %0, 0(%2)\n" ++ "wr_f %3 \n" ++ "addw %0, %5, %1\n" ++ "sexth %1, %1\n" ++ "lstw %1, 0(%2)\n" ++ "rd_f %1 \n" ++ "beq %1, 2f\n" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 ++ "=&r" (modified), // %1 ++ "=&r" (tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ fence_after(order); ++ return original; ++ } ++ ++ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, modified; ++ storage_type tmp1,tmp2; ++ fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n" ++ "ldi %2,%4\n" ++ "ldi %3,1\n" ++ "lldw %0, 0(%2)\n" ++ "wr_f %3 \n" ++ "subw %0, %5, %1\n" ++ "sexth %1, %1\n" ++ "lstw %1, 0(%2)\n" ++ "rd_f %1 \n" ++ "beq %1, 2f\n" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 ++ "=&r" (modified), // %1 ++ "=&r" (tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ fence_after(order); ++ return original; ++ } ++}; ++ ++ ++template< bool Signed > ++struct operations< 8u, Signed > : ++ public gcc_sw_64_operations_base ++{ ++ typedef typename storage_traits< 8u >::type storage_type; ++ ++ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u; ++ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 8u; ++ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed; ++ ++ static BOOST_FORCEINLINE void store(storage_type 
volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ fence_before(order); ++ storage = v; ++ fence_after_store(order); ++ } ++ ++ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type v = storage; ++ fence_after(order); ++ return v; ++ } ++ ++ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, tmp; ++ storage_type tmp1,tmp2; ++ fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n" ++ "ldi %2,%4\n" ++ "ldi %3,1\n" ++ "mov %5, %1\n" ++ "lldl %0, 0(%2)\n" ++ "wr_f %3 \n" ++ "lstl %1, 0(%2)\n" ++ "rd_f %1 \n" ++ "beq %1, 2f\n" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 ++ "=&r" (tmp), // %1 ++ "=&r" (tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ fence_after(order); ++ return original; ++ } ++ ++ static BOOST_FORCEINLINE bool compare_exchange_weak( ++ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT ++ { ++ fence_before(success_order); ++ int success; ++ storage_type current; ++ storage_type tmp1,tmp2; ++ __asm__ __volatile__ ++ ( ++ "1:\n" ++ "ldi %4,%6\n" ++ "lldl %2, 0(%4)\n" // current = *(&storage) ++ "cmpeq %2, %0, %5\n" // success = current == expected ++ "wr_f %5 \n" ++ "mov %2, %0\n" // expected = current ++ "lstl %1, 0(%4)\n" // storage = desired; desired = store succeeded ++ "rd_f %1 \n" ++ "beq %5, 2f\n" // if (success == 0) goto end ++ "mov %1, %3\n" // success = desired ++ "2:\n\t" ++ : "+r" (expected), // %0 ++ "+r" (desired), // %1 ++ "=&r" (current), // %2 ++ "=&r" (success), // %3 ++ "=&r" (tmp1), // %4 ++ "=&r" (tmp2) // %5 ++ : "m" (storage) // %6 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ if 
(success) ++ fence_after(success_order); ++ else ++ fence_after(failure_order); ++ return !!success; ++ } ++ ++ static BOOST_FORCEINLINE bool compare_exchange_strong( ++ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT ++ { ++ int success; ++ storage_type current, tmp; ++ storage_type tmp1,tmp2; ++ fence_before(success_order); ++ __asm__ __volatile__ ++ ( ++ "1:\n" ++ "ldi %4,%6\n" ++ "mov %7, %1\n" // tmp = desired ++ "lldl %2, 0(%4)\n" // current = *(&storage) ++ "cmpeq %2, %0, %5\n" // success = current == expected ++ "wr_f %5 \n" ++ "mov %2, %0\n" // expected = current ++ "lstl %1, 0(%4)\n" // storage = tmp; tmp = store succeeded ++ "rd_f %1 \n" ++ "beq %5, 2f\n" // if (success == 0) goto end ++ "beq %1, 3f\n" // if (tmp == 0) goto retry ++ "mov %1, %3\n" // success = tmp ++ "2:\n\t" ++ ++ ".subsection 2\n\t" ++ "3: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "+r" (expected), // %0 ++ "=&r" (tmp), // %1 ++ "=&r" (current), // %2 ++ "=&r" (success), // %3 ++ "=&r" (tmp1), // %4 ++ "=&r" (tmp2) // %5 ++ : "m" (storage), // %6 ++ "r" (desired) // %7 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ if (success) ++ fence_after(success_order); ++ else ++ fence_after(failure_order); ++ return !!success; ++ } ++ ++ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, modified; ++ storage_type tmp1, tmp2; ++ fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n" ++ "ldi %2,%4\n" ++ "ldi %3,1\n" ++ "lldl %0, 0(%2)\n" ++ "wr_f %3 \n" ++ "addl %0, %5, %1\n" ++ "lstl %1, 0(%2)\n" ++ "rd_f %1 \n" ++ "beq %1, 2f\n" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 ++ "=&r" (modified), // %1 ++ "=&r" (tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ 
fence_after(order); ++ return original; ++ } ++ ++ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, modified; ++ storage_type tmp1,tmp2; ++ fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n" ++ "ldi %2,%4\n" ++ "ldi %3,1\n" ++ "lldl %0, 0(%2)\n" ++ "wr_f %3 \n" ++ "subl %0, %5, %1\n" ++ "lstl %1, 0(%2)\n" ++ "rd_f %1 \n" ++ "beq %1, 2f\n" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 ++ "=&r" (modified), // %1 ++ "=&r" (tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ fence_after(order); ++ return original; ++ } ++ ++ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, modified; ++ storage_type tmp1,tmp2; ++ fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n" ++ "ldi %2,%4\n" ++ "ldi %3,1\n" ++ "lldl %0, 0(%2)\n" ++ "wr_f %3 \n" ++ "and %0, %5, %1\n" ++ "lstl %1, 0(%2)\n" ++ "rd_f %1 \n" ++ "beq %1, 2f\n" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 ++ "=&r" (modified), // %1 ++ "=&r" (tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ fence_after(order); ++ return original; ++ } ++ ++ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, modified; ++ storage_type tmp1,tmp2; ++ fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n" ++ "ldi %2,%4\n" ++ "ldi %3,1\n" ++ "lldl %0, 0(%2)\n" ++ "wr_f %3 \n" ++ "bis %0, %5, %1\n" ++ "lstl %1, 0(%2)\n" ++ "rd_f %1 \n" ++ "beq %1, 2f\n" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 
++ "=&r" (modified), // %1 ++ "=&r" (tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ fence_after(order); ++ return original; ++ } ++ ++ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, modified; ++ storage_type tmp1,tmp2; ++ fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n" ++ "ldi %2,%4\n" ++ "ldi %3,1\n" ++ "lldl %0, 0(%2)\n" ++ "wr_f %3 \n" ++ "xor %0, %5, %1\n" ++ "lstl %1, 0(%2)\n" ++ "rd_f %1 \n" ++ "beq %1, 2f\n" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 ++ "=&r" (modified), // %1 ++ "=&r" (tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ fence_after(order); ++ return original; ++ } ++ ++ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT ++ { ++ return !!exchange(storage, (storage_type)1, order); ++ } ++ ++ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT ++ { ++ store(storage, (storage_type)0, order); ++ } ++}; ++ ++ ++BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT ++{ ++ if (order != memory_order_relaxed) ++ __asm__ __volatile__ ("memb" ::: "memory"); ++} ++ ++BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT ++{ ++ if (order != memory_order_relaxed) ++ __asm__ __volatile__ ("" ::: "memory"); ++} ++ ++} // namespace detail ++} // namespace atomics ++} // namespace boost ++ ++#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_SW_64_HPP_INCLUDED_ +diff -uNar boost_1_66_0.org/boost/atomic/detail/platform.hpp boost_1_66_0.new/boost/atomic/detail/platform.hpp +--- boost_1_66_0.org/boost/atomic/detail/platform.hpp 2017-12-14 07:56:41.000000000 +0800 ++++ 
boost_1_66_0.new/boost/atomic/detail/platform.hpp 2024-04-18 13:53:40.562733880 +0800 +@@ -68,6 +68,10 @@ + + #define BOOST_ATOMIC_DETAIL_PLATFORM gcc_sparc + ++#elif defined(__GNUC__) && defined(__sw_64__) ++ ++#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_sw_64 ++ + #elif defined(__GNUC__) && defined(__alpha__) + + #define BOOST_ATOMIC_DETAIL_PLATFORM gcc_alpha +diff -uNar boost_1_66_0.org/boost/numeric/interval/detail/sw_64_rounding_control.hpp boost_1_66_0.new/boost/numeric/interval/detail/sw_64_rounding_control.hpp +--- boost_1_66_0.org/boost/numeric/interval/detail/sw_64_rounding_control.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ boost_1_66_0.new/boost/numeric/interval/detail/sw_64_rounding_control.hpp 2024-04-18 13:53:40.562733880 +0800 +@@ -0,0 +1,113 @@ ++/* Boost interval/detail/sw_64_rounding_control.hpp file ++ * ++ * Copyright 2005 Felix Höfling, Guillaume Melquiond ++ * ++ * Distributed under the Boost Software License, Version 1.0. ++ * (See accompanying file LICENSE_1_0.txt or ++ * copy at http://www.boost.org/LICENSE_1_0.txt) ++ */ ++ ++#ifndef BOOST_NUMERIC_INTERVAL_DETAIL_SW_64_ROUNDING_CONTROL_HPP ++#define BOOST_NUMERIC_INTERVAL_DETAIL_SW_64_ROUNDING_CONTROL_HPP ++ ++#if !defined(sw_64) && !defined(__sw_64__) ++#error This header only works on Sw_64 CPUs. 
++#endif ++ ++#if defined(__GNUC__) || defined(__digital__) || defined(__DECCXX) ++ ++#include // write_rnd() and read_rnd() ++ ++namespace boost { ++namespace numeric { ++namespace interval_lib { ++ ++namespace detail { ++#if defined(__GNUC__ ) ++ typedef union { ++ ::boost::long_long_type imode; ++ double dmode; ++ } rounding_mode_struct; ++ ++ // set bits 59-58 (DYN), ++ // clear all exception bits and disable overflow (51) and inexact exceptions (62) ++ static const rounding_mode_struct mode_upward = { 0x4C08000000000000LL }; ++ static const rounding_mode_struct mode_downward = { 0x4408000000000000LL }; ++ static const rounding_mode_struct mode_to_nearest = { 0x4808000000000000LL }; ++ static const rounding_mode_struct mode_toward_zero = { 0x4008000000000000LL }; ++ ++ struct sw_64_rounding_control ++ { ++ typedef double rounding_mode; ++ ++ static void set_rounding_mode(const rounding_mode mode) ++ { __asm__ __volatile__ ("wfpcr %0" : : "f"(mode)); } ++ ++ static void get_rounding_mode(rounding_mode& mode) ++ { __asm__ __volatile__ ("rfpcr %0" : "=f"(mode)); } ++ ++ static void downward() { set_rounding_mode(mode_downward.dmode); } ++ static void upward() { set_rounding_mode(mode_upward.dmode); } ++ static void to_nearest() { set_rounding_mode(mode_to_nearest.dmode); } ++ static void toward_zero() { set_rounding_mode(mode_toward_zero.dmode); } ++ }; ++#elif defined(__digital__) || defined(__DECCXX) ++ ++#if defined(__DECCXX) && !(defined(__FLT_ROUNDS) && __FLT_ROUNDS == -1) ++#error Dynamic rounding mode not enabled. See cxx man page for details. 
++#endif
++
++    struct sw_64_rounding_control
++    {
++    typedef unsigned int rounding_mode;
++
++    static void set_rounding_mode(const rounding_mode& mode) { write_rnd(mode); }
++    static void get_rounding_mode(rounding_mode& mode) { mode = read_rnd(); }
++
++    static void downward()    { set_rounding_mode(FP_RND_RM); }
++    static void upward()      { set_rounding_mode(FP_RND_RP); }
++    static void to_nearest()  { set_rounding_mode(FP_RND_RN); }
++    static void toward_zero() { set_rounding_mode(FP_RND_RZ); }
++    };
++#endif
++} // namespace detail
++
++extern "C" {
++  float rintf(float);
++  double rint(double);
++  long double rintl(long double);
++}
++
++template<>
++struct rounding_control<float>:
++  detail::sw_64_rounding_control
++{
++  static float force_rounding(const float r)
++  { volatile float _r = r; return _r; }
++  static float to_int(const float& x) { return rintf(x); }
++};
++
++template<>
++struct rounding_control<double>:
++  detail::sw_64_rounding_control
++{
++  static const double & force_rounding(const double& r) { return r; }
++  static double to_int(const double& r) { return rint(r); }
++};
++
++template<>
++struct rounding_control<long double>:
++  detail::sw_64_rounding_control
++{
++  static const long double & force_rounding(const long double& r) { return r; }
++  static long double to_int(const long double& r) { return rintl(r); }
++};
++
++} // namespace interval_lib
++} // namespace numeric
++} // namespace boost
++
++#undef BOOST_NUMERIC_INTERVAL_NO_HARDWARE
++#endif
++
++#endif /* BOOST_NUMERIC_INTERVAL_DETAIL_SW_64_ROUNDING_CONTROL_HPP */
+diff -uNar boost_1_66_0.org/boost/numeric/interval/hw_rounding.hpp boost_1_66_0.new/boost/numeric/interval/hw_rounding.hpp
+--- boost_1_66_0.org/boost/numeric/interval/hw_rounding.hpp	2017-12-14 07:56:46.000000000 +0800
++++ boost_1_66_0.new/boost/numeric/interval/hw_rounding.hpp	2024-04-18 13:53:40.562733880 +0800
+@@ -27,6 +27,8 @@
+ #  include <boost/numeric/interval/detail/ppc_rounding_control.hpp>
+ #elif defined(sparc) || defined(__sparc__)
+ #  include <boost/numeric/interval/detail/sparc_rounding_control.hpp>
++#elif defined(sw_64) || defined(__sw_64__)
++#  include <boost/numeric/interval/detail/sw_64_rounding_control.hpp>
+ #elif defined(alpha) || defined(__alpha__)
+ #  include <boost/numeric/interval/detail/alpha_rounding_control.hpp>
+ #elif defined(ia64) || defined(__ia64) || defined(__ia64__)
+diff -uNar boost_1_66_0.org/boost/predef/architecture/sw_64.h boost_1_66_0.new/boost/predef/architecture/sw_64.h
+--- boost_1_66_0.org/boost/predef/architecture/sw_64.h	1970-01-01 08:00:00.000000000 +0800
++++ boost_1_66_0.new/boost/predef/architecture/sw_64.h	2024-04-18 13:53:40.562733880 +0800
+@@ -0,0 +1,54 @@
++/*
++Copyright Rene Rivera 2008-2015
++Distributed under the Boost Software License, Version 1.0.
++(See accompanying file LICENSE_1_0.txt or copy at
++http://www.boost.org/LICENSE_1_0.txt)
++*/
++
++#ifndef BOOST_PREDEF_ARCHITECTURE_SW_64_H
++#define BOOST_PREDEF_ARCHITECTURE_SW_64_H
++
++#include <boost/predef/version_number.h>
++#include <boost/predef/make.h>
++
++/* tag::reference[]
++= `BOOST_ARCH_SW_64`
++
++http://en.wikipedia.org/wiki/DEC_Sw_64[DEC Sw_64] architecture.
++
++[options="header"]
++|===
++| {predef_symbol} | {predef_version}
++| `+__sw_64__+` | {predef_detection}
++| `+__sw_64+` | {predef_detection}
++| `+_M_SW_64+` | {predef_detection}
++
++| `+__sw_64_ev4__+` | 4.0.0
++| `+__sw_64_ev5__+` | 5.0.0
++| `+__sw_64_ev6__+` | 6.0.0
++|===
++*/ // end::reference[]
++
++#define BOOST_ARCH_SW_64 BOOST_VERSION_NUMBER_NOT_AVAILABLE
++
++#if defined(__sw_64__) || defined(__sw_64) || \
++    defined(_M_SW_64)
++#   undef BOOST_ARCH_SW_64
++#   if !defined(BOOST_ARCH_SW_64) && defined(__sw_64_sw6b__)
++#       define BOOST_ARCH_SW_64 BOOST_VERSION_NUMBER(6,0,0)
++#   endif
++#   if !defined(BOOST_ARCH_SW_64)
++#       define BOOST_ARCH_SW_64 BOOST_VERSION_NUMBER_AVAILABLE
++#   endif
++#endif
++
++#if BOOST_ARCH_SW_64
++#   define BOOST_ARCH_SW_64_AVAILABLE
++#endif
++
++#define BOOST_ARCH_SW_64_NAME "DEC Sw_64"
++
++#endif
++
++#include <boost/predef/detail/test.h>
++BOOST_PREDEF_DECLARE_TEST(BOOST_ARCH_SW_64,BOOST_ARCH_SW_64_NAME)
+diff -uNar boost_1_66_0.org/boost/predef/architecture.h boost_1_66_0.new/boost/predef/architecture.h
+--- boost_1_66_0.org/boost/predef/architecture.h	2017-12-14 07:56:47.000000000 +0800
++++ boost_1_66_0.new/boost/predef/architecture.h	2024-04-18 13:53:40.562733880 +0800
+@@ -11,6 +11,7 @@
+ #endif
+ 
+ #include <boost/predef/architecture/alpha.h>
++#include <boost/predef/architecture/sw_64.h>
+ #include <boost/predef/architecture/arm.h>
+ #include <boost/predef/architecture/blackfin.h>
+ #include <boost/predef/architecture/convex.h>
+diff -uNar boost_1_66_0.org/boost/units/systems/si/codata/sw_64_constants.hpp boost_1_66_0.new/boost/units/systems/si/codata/sw_64_constants.hpp
+--- boost_1_66_0.org/boost/units/systems/si/codata/sw_64_constants.hpp	1970-01-01 08:00:00.000000000 +0800
++++ boost_1_66_0.new/boost/units/systems/si/codata/sw_64_constants.hpp	2024-04-18 13:53:40.562733880 +0800
+@@ -0,0 +1,66 @@
++// Boost.Units - A C++ library for zero-overhead dimensional analysis and
++// unit/quantity manipulation and conversion
++//
++// Copyright (C) 2003-2008 Matthias Christian Schabel
++// Copyright (C) 2008 Steven Watanabe
++//
++// Distributed under the Boost Software License, Version 1.0. (See
++// accompanying file LICENSE_1_0.txt or copy at
++// http://www.boost.org/LICENSE_1_0.txt)
++
++#ifndef BOOST_UNITS_CODATA_SW_64_CONSTANTS_HPP
++#define BOOST_UNITS_CODATA_SW_64_CONSTANTS_HPP
++
++#include <boost/units/quantity.hpp>
++#include <boost/units/static_constant.hpp>
++
++#include <boost/units/systems/detail/constants.hpp>
++#include <boost/units/systems/si/amount.hpp>
++#include <boost/units/systems/si/area.hpp>
++#include <boost/units/systems/si/electric_charge.hpp>
++#include <boost/units/systems/si/energy.hpp>
++#include <boost/units/systems/si/frequency.hpp>
++#include <boost/units/systems/si/length.hpp>
++#include <boost/units/systems/si/mass.hpp>
++#include <boost/units/systems/si/magnetic_flux_density.hpp>
++#include <boost/units/systems/si/time.hpp>
++#include <boost/units/systems/si/wavenumber.hpp>
++
++#include <boost/units/systems/si/codata/typedefs.hpp>
++
++/// \file
++/// CODATA recommended values of fundamental atomic and nuclear constants
++/// CODATA 2006 values as of 2007/03/30
++
++namespace boost {
++
++namespace units {
++
++namespace si {
++
++namespace constants {
++
++namespace codata {
++
++/// CODATA recommended values of the fundamental physical constants: NIST SP 961
++
++/// sw_64 particle mass
++BOOST_UNITS_PHYSICAL_CONSTANT(m_sw_64,quantity<mass>,6.64465620e-27*kilograms,3.3e-34*kilograms);
++/// sw_64-electron mass ratio
++BOOST_UNITS_PHYSICAL_CONSTANT(m_sw_64_over_m_e,quantity<dimensionless>,7294.2995365*dimensionless(),3.1e-6*dimensionless());
++/// sw_64-proton mass ratio
++BOOST_UNITS_PHYSICAL_CONSTANT(m_sw_64_over_m_p,quantity<dimensionless>,3.97259968951*dimensionless(),4.1e-10*dimensionless());
++/// sw_64 molar mass
++BOOST_UNITS_PHYSICAL_CONSTANT(M_sw_64,quantity<mass_over_amount>,4.001506179127e-3*kilograms/mole,6.2e-14*kilograms/mole);
++
++} // namespace codata
++
++} // namespace constants ++ ++} // namespace si ++ ++} // namespace units ++ ++} // namespace boost ++ ++#endif // BOOST_UNITS_CODATA_SW_64_CONSTANTS_HPP +diff -uNar boost_1_66_0.org/boost/wave/wave_config.hpp boost_1_66_0.new/boost/wave/wave_config.hpp +--- boost_1_66_0.org/boost/wave/wave_config.hpp 2017-12-14 07:56:49.000000000 +0800 ++++ boost_1_66_0.new/boost/wave/wave_config.hpp 2024-04-18 13:53:40.562733880 +0800 +@@ -205,7 +205,7 @@ + // CW up to 8.3 chokes as well *sigh* + // Tru64/CXX has linker problems when using flex_string + #if BOOST_WORKAROUND(__MWERKS__, < 0x3200) || \ +- (defined(__DECCXX) && defined(__alpha)) || \ ++ (defined(__DECCXX) && (defined(__alpha) || defined(__sw_64))) || \ + defined(BOOST_WAVE_STRINGTYPE_USE_STDSTRING) + + #define BOOST_WAVE_STRINGTYPE std::string +diff -uNar boost_1_66_0.org/boostcpp.jam boost_1_66_0.new/boostcpp.jam +--- boost_1_66_0.org/boostcpp.jam 2017-12-14 07:56:35.000000000 +0800 ++++ boost_1_66_0.new/boostcpp.jam 2024-04-18 13:53:40.562733880 +0800 +@@ -678,7 +678,7 @@ + return @boostcpp.deduce-address-model ; + } + +-local deducable-architectures = arm mips1 power sparc x86 combined ; ++local deducable-architectures = sw_64 arm mips1 power sparc x86 combined ; + feature.feature deduced-architecture : $(deducable-architectures) : propagated optional composite hidden ; + for a in $(deducable-architectures) + { +@@ -705,6 +705,10 @@ + { + result = sparc ; + } ++ else if [ configure.builds /boost/architecture//sw_64 : $(filtered) : sw_64 ] ++ { ++ result = sw_64 ; ++ } + else if [ configure.builds /boost/architecture//x86 : $(filtered) : x86 ] + { + result = x86 ; +diff -uNar boost_1_66_0.org/libs/atomic/test/lockfree.cpp boost_1_66_0.new/libs/atomic/test/lockfree.cpp +--- boost_1_66_0.org/libs/atomic/test/lockfree.cpp 2017-12-14 07:56:41.000000000 +0800 ++++ boost_1_66_0.new/libs/atomic/test/lockfree.cpp 2024-04-18 13:53:40.562733880 +0800 +@@ -88,7 +88,7 @@ + #define EXPECT_POINTER_LOCK_FREE 2 + #define 
EXPECT_BOOL_LOCK_FREE 2 + +-#elif defined(__GNUC__) && defined(__alpha__) ++#elif defined(__GNUC__) && (defined(__alpha__) || defined(__sw_64__)) + + #define EXPECT_CHAR_LOCK_FREE 2 + #define EXPECT_CHAR16_T_LOCK_FREE 2 +diff -uNar boost_1_66_0.org/libs/config/checks/architecture/Jamroot.jam boost_1_66_0.new/libs/config/checks/architecture/Jamroot.jam +--- boost_1_66_0.org/libs/config/checks/architecture/Jamroot.jam 2017-12-14 07:56:42.000000000 +0800 ++++ boost_1_66_0.new/libs/config/checks/architecture/Jamroot.jam 2024-04-18 13:53:40.562733880 +0800 +@@ -16,6 +16,7 @@ + obj 64 : 64.cpp ; + + obj arm : arm.cpp ; ++obj sw_64 : sw_64.cpp ; + obj combined : combined.cpp ; + obj mips1 : mips1.cpp ; + obj power : power.cpp ; +diff -uNar boost_1_66_0.org/libs/config/checks/architecture/sw_64.cpp boost_1_66_0.new/libs/config/checks/architecture/sw_64.cpp +--- boost_1_66_0.org/libs/config/checks/architecture/sw_64.cpp 1970-01-01 08:00:00.000000000 +0800 ++++ boost_1_66_0.new/libs/config/checks/architecture/sw_64.cpp 2024-04-18 13:53:40.562733880 +0800 +@@ -0,0 +1,15 @@ ++// sw_64.cpp ++// ++// Copyright (c) 2012 Steven Watanabe ++// ++// Distributed under the Boost Software License Version 1.0. 
(See ++// accompanying file LICENSE_1_0.txt or copy at ++// http://www.boost.org/LICENSE_1_0.txt) ++ ++#if !defined(__sw_64__) && !defined(__thumb__) && \ ++ !defined(__TARGET_ARCH_SW_64) && !defined(__TARGET_ARCH_THUMB) && \ ++ !defined(_SW_64) && !defined(_M_SW_64) && \ ++ !defined(__aarch64__) ++#error "Not SW_64" ++#endif ++ +diff -uNar boost_1_66_0.org/libs/config/test/config_info.cpp boost_1_66_0.new/libs/config/test/config_info.cpp +--- boost_1_66_0.org/libs/config/test/config_info.cpp 2017-12-14 07:56:42.000000000 +0800 ++++ boost_1_66_0.new/libs/config/test/config_info.cpp 2024-04-18 13:53:40.562733880 +0800 +@@ -146,6 +146,7 @@ + PRINT_MACRO(_CPPRTTI); + PRINT_MACRO(_DLL); + PRINT_MACRO(_M_ALPHA); ++ PRINT_MACRO(_M_SW_64); + PRINT_MACRO(_M_MPPC); + PRINT_MACRO(_M_MRX000); + PRINT_MACRO(_M_PPC); +diff -uNar boost_1_66_0.org/libs/context/build/architecture.jam boost_1_66_0.new/libs/context/build/architecture.jam +--- boost_1_66_0.org/libs/context/build/architecture.jam 2017-12-14 07:56:42.000000000 +0800 ++++ boost_1_66_0.new/libs/context/build/architecture.jam 2024-04-18 13:53:40.562733880 +0800 +@@ -59,6 +59,10 @@ + { + return mips1 ; + } ++ else if [ configure.builds /boost/architecture//sw_64 : $(properties) : sw_64 ] ++ { ++ return sw_64 ; ++ } + else if [ configure.builds /boost/architecture//power : $(properties) : power ] + { + return power ; +diff -uNar boost_1_66_0.org/libs/context/build/Jamfile.v2 boost_1_66_0.new/libs/context/build/Jamfile.v2 +--- boost_1_66_0.org/libs/context/build/Jamfile.v2 2017-12-14 07:56:42.000000000 +0800 ++++ boost_1_66_0.new/libs/context/build/Jamfile.v2 2024-04-18 13:53:40.562733880 +0800 +@@ -234,6 +234,30 @@ + msvc + ; + ++# SW_64 ++# SW_64/AAPCS/ELF ++alias asm_sources ++ : asm/make_sw_64_aapcs_elf_gas.S ++ asm/jump_sw_64_aapcs_elf_gas.S ++ asm/ontop_sw_64_aapcs_elf_gas.S ++ : aapcs ++ 64 ++ sw_64 ++ elf ++ clang ++ ; ++ ++alias asm_sources ++ : asm/make_sw_64_aapcs_elf_gas.S ++ asm/jump_sw_64_aapcs_elf_gas.S ++ 
asm/ontop_sw_64_aapcs_elf_gas.S ++ : aapcs ++ 64 ++ sw_64 ++ elf ++ gcc ++ ; ++ + # ARM64 + # ARM64/AAPCS/ELF + alias asm_sources +diff -uNar boost_1_66_0.org/libs/context/doc/architectures.qbk boost_1_66_0.new/libs/context/doc/architectures.qbk +--- boost_1_66_0.org/libs/context/doc/architectures.qbk 2017-12-14 07:56:42.000000000 +0800 ++++ boost_1_66_0.new/libs/context/doc/architectures.qbk 2024-04-18 13:53:40.562733880 +0800 +@@ -20,6 +20,7 @@ + [[ppc64] [SYSV|ELF,XCOFF] [-] [SYSV|MACH-O] [-]] + [[sparc] [-] [-] [-] [-]] + [[x86_64] [SYSV,X32|ELF] [MS|PE] [SYSV|MACH-O] [-]] ++ [[sw_64] [SYSV,X64|ELF] [MS|PE] [SYSV|MACH-O] [-]] + ] + + [note If the architecture is not supported but the platform provides +diff -uNar boost_1_66_0.org/libs/context/src/asm/jump_sw_64_aapcs_elf_gas.S boost_1_66_0.new/libs/context/src/asm/jump_sw_64_aapcs_elf_gas.S +--- boost_1_66_0.org/libs/context/src/asm/jump_sw_64_aapcs_elf_gas.S 1970-01-01 08:00:00.000000000 +0800 ++++ boost_1_66_0.new/libs/context/src/asm/jump_sw_64_aapcs_elf_gas.S 2024-04-18 13:53:40.562733880 +0800 +@@ -0,0 +1,86 @@ ++.text ++.align 2 ++.global jump_fcontext ++.type jump_fcontext, %function ++jump_fcontext: ++ # prepare stack for GP + FPU ++ #ldih $29,0($27) ++ #ldi $29,0($29) ++ subl $sp, 0x98, $sp ++ ++ # save $f2-$f9 ++ fstd $f2, 0x00($sp) ++ fstd $f3, 0x08($sp) ++ fstd $f4, 0x10($sp) ++ fstd $f5, 0x18($sp) ++ fstd $f6, 0x20($sp) ++ fstd $f7, 0x28($sp) ++ fstd $f8, 0x30($sp) ++ fstd $f9, 0x38($sp) ++ ++ # save $9-$15, fp,$26 ++ stl $9, 0x40($sp) ++ stl $10, 0x48($sp) ++ stl $11, 0x50($sp) ++ stl $12, 0x58($sp) ++ stl $13, 0x60($sp) ++ stl $14, 0x68($sp) ++ stl $15, 0x70($sp) ++ stl $fp, 0x78($sp) ++ stl $16, 0x80($sp) #save jump_fcontext return address ++ stl $26, 0x88($sp) ++ ++ # save LR as PC ++ stl $26, 0x90($sp) ++ ++ # store RSP (pointing to context-data) in $16 ++ mov $sp, $20 ++ ++ ++ # restore RSP (pointing to context-data) from $17 ++ mov $17, $sp ++ ++ # load $f2-$f9 ++ fldd $f2, 0x00($sp) ++ fldd 
$f3, 0x08($sp) ++ fldd $f4, 0x10($sp) ++ fldd $f5, 0x18($sp) ++ fldd $f6, 0x20($sp) ++ fldd $f7, 0x28($sp) ++ fldd $f8, 0x30($sp) ++ fldd $f9, 0x38($sp) ++ ++ # load $9-$15, fp,$26 ++ ldl $9, 0x40($sp) ++ ldl $10, 0x48($sp) ++ ldl $11, 0x50($sp) ++ ldl $12, 0x58($sp) ++ ldl $13, 0x60($sp) ++ ldl $14, 0x68($sp) ++ ldl $15, 0x70($sp) ++ ldl $fp, 0x78($sp) ++ ldl $26, 0x88($sp) ++ ++ # pass transfer_t as first arg in context function ++ # to store $1,$2 to $16 address ++ ldl $16, 0x80($sp) #load $16, store return struct do return address ++ stl $20,0($16) ++ stl $18,8($16) ++ ++ # pass transfer_t as first arg in context function,such as f1,f2,f3 ++ # $16 == FCTX, $17 == DATA ++ mov $20,$16 #$16 $17 as first and second arg ++ mov $18,$17 ++ ++ ++ # load pc ++ ldl $27, 0x90($sp) ++ ++ ++ # restore stack from GP + FPU ++ addl $sp, 0x98, $sp ++ ++ ret $31,($27),0x1 //jmp $31, ($27) //ret ($27) ++.size jump_fcontext,.-jump_fcontext ++# Mark that we don't need executable stack. ++.section .note.GNU-stack,"",%progbits +diff -uNar boost_1_66_0.org/libs/context/src/asm/make_sw_64_aapcs_elf_gas.S boost_1_66_0.new/libs/context/src/asm/make_sw_64_aapcs_elf_gas.S +--- boost_1_66_0.org/libs/context/src/asm/make_sw_64_aapcs_elf_gas.S 1970-01-01 08:00:00.000000000 +0800 ++++ boost_1_66_0.new/libs/context/src/asm/make_sw_64_aapcs_elf_gas.S 2024-04-18 13:53:40.562733880 +0800 +@@ -0,0 +1,37 @@ ++.text ++.align 2 ++.global make_fcontext ++.type make_fcontext, %function ++make_fcontext: ++ #ldih $29,0($27) ++ #ldi $29,0($29) ++ # shift address in $16 (allocated stack) to lower 16 byte boundary ++ bic $16, 0xf,$16 ++ ++ # reserve space for context-data on context-stack ++ subl $16, 0x98,$16 ++ ++ # third arg of make_fcontext() == address of context-function ++ # store address as a PC to jump in ++ stl $18, 0x90($16) ++ ++ # save address of finish as return-address for context-function ++ # will be entered after context-function returns (LR register) ++ ldi $17, finish ++ stl $17, 
0x88($16) ++ ++ stl $16, 0x80($16) ++ ++ mov $16, $0 ++ ++ ret $31,($26),1 //jump ($26) // return pointer to context-data ($16) ++ ++finish: ++ # exit code is zero ++ mov 0, $0 ++ # exit application ++ call _exit #ldi $27,_exit #jmp ($27) ++ ++.size make_fcontext,.-make_fcontext ++# Mark that we don't need executable stack. ++.section .note.GNU-stack,"",%progbits +diff -uNar boost_1_66_0.org/libs/context/src/asm/ontop_sw_64_aapcs_elf_gas.S boost_1_66_0.new/libs/context/src/asm/ontop_sw_64_aapcs_elf_gas.S +--- boost_1_66_0.org/libs/context/src/asm/ontop_sw_64_aapcs_elf_gas.S 1970-01-01 08:00:00.000000000 +0800 ++++ boost_1_66_0.new/libs/context/src/asm/ontop_sw_64_aapcs_elf_gas.S 2024-04-18 13:53:40.562733880 +0800 +@@ -0,0 +1,86 @@ ++.text ++.align 2 ++.global ontop_fcontext ++.type ontop_fcontext, %function ++ontop_fcontext: ++ # prepare stack for GP + FPU ++ #ldih $29,0($27) ++ #ldi $29,0($29) ++ subl $sp, 0x98, $sp ++ ++ # save $f2-$f9 ++ fstd $f2, 0x00($sp) ++ fstd $f3, 0x08($sp) ++ fstd $f4, 0x10($sp) ++ fstd $f5, 0x18($sp) ++ fstd $f6, 0x20($sp) ++ fstd $f7, 0x28($sp) ++ fstd $f8, 0x30($sp) ++ fstd $f9, 0x38($sp) ++ ++ # save $9-$15, fp,$26 ++ stl $9, 0x40($sp) ++ stl $10, 0x48($sp) ++ stl $11, 0x50($sp) ++ stl $12, 0x58($sp) ++ stl $13, 0x60($sp) ++ stl $14, 0x68($sp) ++ stl $15, 0x70($sp) ++ stl $fp, 0x78($sp) ++ stl $16, 0x80($sp) #save ontop_fcontext return address ++ stl $26, 0x88($sp) ++ ++ # save LR as PC ++ stl $26, 0x90($sp) ++ ++ # store RSP (pointing to context-data) in $16 ++ mov $sp, $20 ++ ++ ++ # restore RSP (pointing to context-data) from $17 ++ mov $17, $sp ++ ++ # load $f2-$f9 ++ fldd $f2, 0x00($sp) ++ fldd $f3, 0x08($sp) ++ fldd $f4, 0x10($sp) ++ fldd $f5, 0x18($sp) ++ fldd $f6, 0x20($sp) ++ fldd $f7, 0x28($sp) ++ fldd $f8, 0x30($sp) ++ fldd $f9, 0x38($sp) ++ ++ # load $9-$15, fp,$26 ++ ldl $9, 0x40($sp) ++ ldl $10, 0x48($sp) ++ ldl $11, 0x50($sp) ++ ldl $12, 0x58($sp) ++ ldl $13, 0x60($sp) ++ ldl $14, 0x68($sp) ++ ldl $15, 0x70($sp) ++ ldl 
$fp, 0x78($sp) ++ ldl $26, 0x88($sp) ++ ++ # pass transfer_t as first arg in context function ++ # to store $1,$2 to $16 address ++ ldl $16, 0x80($sp) #load $16, store return struct do return address ++ stl $20,0($16) ++ stl $18,8($16) ++ ++ # pass transfer_t as first arg in context function,such as f1,f2,f3 ++ # $16 == FCTX, $17 == DATA ++ mov $20,$16 #$16 $17 $18 as first and second arg ++ mov $18,$17 ++ ++ ++ # skip pc ++ mov $19, $27 ++ ++ ++ # restore stack from GP + FPU ++ addl $sp, 0x98, $sp ++ ++ ret $31,($27),0x1 //jmp $31, ($27) //ret ($27) ++.size ontop_fcontext,.-ontop_fcontext ++# Mark that we don't need executable stack. ++.section .note.GNU-stack,"",%progbits +diff -uNar boost_1_66_0.org/libs/log/build/log-architecture.jam boost_1_66_0.new/libs/log/build/log-architecture.jam +--- boost_1_66_0.org/libs/log/build/log-architecture.jam 2017-12-14 07:56:44.000000000 +0800 ++++ boost_1_66_0.new/libs/log/build/log-architecture.jam 2024-04-18 13:53:40.562733880 +0800 +@@ -69,6 +69,10 @@ + { + return mips1 ; + } ++ else if [ configure.builds /boost/architecture//sw_64 : $(properties) : sw_64 ] ++ { ++ return sw_64 ; ++ } + else if [ configure.builds /boost/architecture//power : $(properties) : power ] + { + return power ; +diff -uNar boost_1_66_0.org/tools/build/doc/src/reference.xml boost_1_66_0.new/tools/build/doc/src/reference.xml +--- boost_1_66_0.org/tools/build/doc/src/reference.xml 2017-12-14 07:56:49.000000000 +0800 ++++ boost_1_66_0.new/tools/build/doc/src/reference.xml 2024-04-18 13:53:40.572733880 +0800 +@@ -728,6 +728,7 @@ + + Allowed values: + x86, ++ sw_64, + ia64, + sparc, + power, +diff -uNar boost_1_66_0.org/tools/build/src/engine/jam.h boost_1_66_0.new/tools/build/src/engine/jam.h +--- boost_1_66_0.org/tools/build/src/engine/jam.h 2017-12-14 07:56:50.000000000 +0800 ++++ boost_1_66_0.new/tools/build/src/engine/jam.h 2024-04-18 13:53:40.572733880 +0800 +@@ -379,6 +379,11 @@ + #define OSPLAT "OSPLAT=PPC" + #endif + ++#if defined( _SW_64_ ) || 
\ ++ defined( __sw_64__ ) ++ #define OSPLAT "OSPLAT=AXP" ++#endif ++ + #if defined( _ALPHA_ ) || \ + defined( __alpha__ ) + #define OSPLAT "OSPLAT=AXP" +diff -uNar boost_1_66_0.org/tools/build/src/tools/builtin.py boost_1_66_0.new/tools/build/src/tools/builtin.py +--- boost_1_66_0.org/tools/build/src/tools/builtin.py 2017-12-14 07:56:50.000000000 +0800 ++++ boost_1_66_0.new/tools/build/src/tools/builtin.py 2024-04-18 13:53:40.572733880 +0800 +@@ -247,6 +247,9 @@ + # x86 and x86-64 + 'x86', + ++ # sw_64 ++ 'sw_64', ++ + # ia64 + 'ia64', + +diff -uNar boost_1_66_0.org/tools/build/src/tools/features/architecture-feature.jam boost_1_66_0.new/tools/build/src/tools/features/architecture-feature.jam +--- boost_1_66_0.org/tools/build/src/tools/features/architecture-feature.jam 2017-12-14 07:56:50.000000000 +0800 ++++ boost_1_66_0.new/tools/build/src/tools/features/architecture-feature.jam 2024-04-18 13:53:40.572733880 +0800 +@@ -12,6 +12,9 @@ + # x86 and x86-64 + x86 + ++ # sw_64 ++ sw_64 ++ + # ia64 + ia64 + +diff -uNar boost_1_66_0.org/tools/build/tutorial.html boost_1_66_0.new/tools/build/tutorial.html +--- boost_1_66_0.org/tools/build/tutorial.html 2017-12-14 07:56:50.000000000 +0800 ++++ boost_1_66_0.new/tools/build/tutorial.html 2024-04-18 13:53:40.572733880 +0800 +@@ -1254,7 +1254,7 @@ + + <architecture> + +- x86, ia64, sparc, power, mips1, mips2, mips3, mips4, mips32, ++ x86, sw_64, ia64, sparc, power, mips1, mips2, mips3, mips4, mips32, + mips32r2, mips64, parisc, arm, combined, combined-x86-power + + Set processor family to generate code for.