From d63fff1f3233e798abd02d066a0b09d3fa1ed03e Mon Sep 17 00:00:00 2001
From: Jingyun Hua
Date: Tue, 23 May 2023 08:32:51 +0000
Subject: [PATCH] Add loongarch64 support

Signed-off-by: Jingyun Hua
(cherry picked from commit df91ebafd464a6b03b4c50e816fabf4ac63fd354)
---
 ...rotobuf2-add-support-for-loongarch64.patch | 379 ++++++++++++++++++
 protobuf2.spec                                |   9 +-
 2 files changed, 387 insertions(+), 1 deletion(-)
 create mode 100644 0001-protobuf2-add-support-for-loongarch64.patch

diff --git a/0001-protobuf2-add-support-for-loongarch64.patch b/0001-protobuf2-add-support-for-loongarch64.patch
new file mode 100644
index 0000000..7be1285
--- /dev/null
+++ b/0001-protobuf2-add-support-for-loongarch64.patch
@@ -0,0 +1,379 @@
+From 9c9e1156d8e2e8e0e30c32669e3ca67d5b10c902 Mon Sep 17 00:00:00 2001
+From: Jingyun Hua
+Date: Tue, 23 May 2023 08:15:12 +0000
+Subject: [PATCH] protobuf2: add support for loongarch64
+
+Signed-off-by: Jingyun Hua
+---
+ autogen.sh                                    |   5 +-
+ configure.ac                                  |   2 +-
+ src/Makefile.am                               |   1 +
+ src/google/protobuf/stubs/atomicops.h         |   2 +
+ .../atomicops_internals_loongarch64_gcc.h     | 286 ++++++++++++++++++
+ src/google/protobuf/stubs/platform_macros.h   |   3 +
+ 6 files changed, 296 insertions(+), 3 deletions(-)
+ create mode 100644 src/google/protobuf/stubs/atomicops_internals_loongarch64_gcc.h
+
+diff --git a/autogen.sh b/autogen.sh
+index c3e026d..d22d73c 100755
+--- a/autogen.sh
++++ b/autogen.sh
+@@ -19,8 +19,9 @@ fi
+ # directory is set up as an SVN external.
+ if test ! -e gtest; then
+   echo "Google Test not present. Fetching gtest-1.5.0 from the web..."
+-  curl http://googletest.googlecode.com/files/gtest-1.5.0.tar.bz2 | tar jx
+-  mv gtest-1.5.0 gtest
++#  curl http://googletest.googlecode.com/files/gtest-1.5.0.tar.bz2 | tar jx
++  curl -L https://github.com/google/googletest/archive/release-1.5.0.tar.gz | tar zx
++  mv googletest-release-1.5.0 gtest
+ fi
+ 
+ set -ex
+diff --git a/configure.ac b/configure.ac
+index b232529..48ef60a 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -29,7 +29,7 @@ AS_IF([test "x${ac_cv_env_CXXFLAGS_set}" = "x"],
+ 
+ AC_CANONICAL_TARGET
+ 
+-AM_INIT_AUTOMAKE
++AM_INIT_AUTOMAKE([subdir-objects])
+ 
+ AC_ARG_WITH([zlib],
+   [AS_HELP_STRING([--with-zlib],
+diff --git a/src/Makefile.am b/src/Makefile.am
+index 6f5c21d..00cce72 100644
+--- a/src/Makefile.am
++++ b/src/Makefile.am
+@@ -48,6 +48,7 @@ nobase_include_HEADERS = \
+   google/protobuf/stubs/atomicops_internals_pnacl.h \
+   google/protobuf/stubs/atomicops_internals_x86_gcc.h \
+   google/protobuf/stubs/atomicops_internals_x86_msvc.h \
++  google/protobuf/stubs/atomicops_internals_loongarch64_gcc.h \
+   google/protobuf/stubs/common.h \
+   google/protobuf/stubs/platform_macros.h \
+   google/protobuf/stubs/once.h \
+diff --git a/src/google/protobuf/stubs/atomicops.h b/src/google/protobuf/stubs/atomicops.h
+index 883b125..3c0e154 100644
+--- a/src/google/protobuf/stubs/atomicops.h
++++ b/src/google/protobuf/stubs/atomicops.h
+@@ -184,6 +184,8 @@ GOOGLE_PROTOBUF_ATOMICOPS_ERROR
+ #include
+ #elif defined(__pnacl__)
+ #include <google/protobuf/stubs/atomicops_internals_pnacl.h>
++#elif defined(GOOGLE_PROTOBUF_ARCH_LOONGARCH64)
++#include <google/protobuf/stubs/atomicops_internals_loongarch64_gcc.h>
+ #else
+ #include
+ #endif
+diff --git a/src/google/protobuf/stubs/atomicops_internals_loongarch64_gcc.h b/src/google/protobuf/stubs/atomicops_internals_loongarch64_gcc.h
+new file mode 100644
+index 0000000..4bbdf70
+--- /dev/null
++++ b/src/google/protobuf/stubs/atomicops_internals_loongarch64_gcc.h
+@@ -0,0 +1,286 @@
++// Protocol Buffers - Google's data interchange format
++// Copyright 2012 Google Inc. All rights reserved.
++// http://code.google.com/p/protobuf/
++//
++// Redistribution and use in source and binary forms, with or without
++// modification, are permitted provided that the following conditions are
++// met:
++//
++//     * Redistributions of source code must retain the above copyright
++// notice, this list of conditions and the following disclaimer.
++//     * Redistributions in binary form must reproduce the above
++// copyright notice, this list of conditions and the following disclaimer
++// in the documentation and/or other materials provided with the
++// distribution.
++//     * Neither the name of Google Inc. nor the names of its
++// contributors may be used to endorse or promote products derived from
++// this software without specific prior written permission.
++//
++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++
++// This file is an internal atomic implementation, use atomicops.h instead.
++
++#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_LOONGARCH64_GCC_H_
++#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_LOONGARCH64_GCC_H_
++
++#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
++
++namespace google {
++namespace protobuf {
++namespace internal {
++
++// Atomically execute:
++//      result = *ptr;
++//      if (*ptr == old_value)
++//        *ptr = new_value;
++//      return result;
++//
++// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
++// Always return the old value of "*ptr"
++//
++// This routine implies no memory barriers.
++
++inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
++                                         Atomic32 old_value,
++                                         Atomic32 new_value) {
++  Atomic32 prev, tmp;
++  __asm__ __volatile__("1:\n"
++                       "ll.w %0, %5\n"     // prev = *ptr
++                       "bne %0, %3, 2f\n"  // if (prev != old_value) goto 2
++                       "move %2, %4\n"     // tmp = new_value
++                       "sc.w %2, %1\n"     // *ptr = tmp (with atomic check)
++                       "beqz %2, 1b\n"     // start again on atomic error
++                       "2:\n"
++                       : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
++                       : "r" (old_value), "r" (new_value), "m" (*ptr)
++                       : "memory");
++  return prev;
++}
++
++// Atomically store new_value into *ptr, returning the previous value held in
++// *ptr. This routine implies no memory barriers.
++inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
++                                         Atomic32 new_value) {
++  Atomic32 temp, old;
++  __asm__ __volatile__("1:\n"
++                       "ll.w %1, %2\n"   // old = *ptr
++                       "move %0, %3\n"   // temp = new_value
++                       "sc.w %0, %2\n"   // *ptr = temp (with atomic check)
++                       "beqz %0, 1b\n"   // start again on atomic error
++                       : "=&r" (temp), "=&r" (old), "=m" (*ptr)
++                       : "r" (new_value), "m" (*ptr)
++                       : "memory");
++
++  return old;
++}
++
++// Atomically increment *ptr by "increment". Returns the new value of
++// *ptr with the increment applied. This routine implies no memory barriers.
++inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
++                                          Atomic32 increment) {
++  Atomic32 temp, temp2;
++
++  __asm__ __volatile__("1:\n"
++                       "ll.w %0, %2\n"       // temp = *ptr
++                       "add.w %1, %0, %3\n"  // temp2 = temp + increment
++                       "sc.w %1, %2\n"       // *ptr = temp2 (with atomic check)
++                       "beqz %1, 1b\n"       // start again on atomic error
++                       "add.w %1, %0, %3\n"  // temp2 = temp + increment
++                       : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
++                       : "r" (increment), "m" (*ptr)
++                       : "memory");
++  // temp2 now holds the final value.
++  return temp2;
++}
++
++inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
++                                        Atomic32 increment) {
++  MemoryBarrier();
++  Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
++  MemoryBarrier();
++  return res;
++}
++
++// "Acquire" operations
++// ensure that no later memory access can be reordered ahead of the operation.
++// "Release" operations ensure that no previous memory access can be reordered
++// after the operation. "Barrier" operations have both "Acquire" and "Release"
++// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
++// access.
++inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
++                                       Atomic32 old_value,
++                                       Atomic32 new_value) {
++  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
++  MemoryBarrier();
++  return res;
++}
++
++inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
++                                       Atomic32 old_value,
++                                       Atomic32 new_value) {
++  MemoryBarrier();
++  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
++}
++
++inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
++  *ptr = value;
++}
++
++inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
++  *ptr = value;
++  MemoryBarrier();
++}
++
++inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
++  MemoryBarrier();
++  *ptr = value;
++}
++
++inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
++  return *ptr;
++}
++
++inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
++  Atomic32 value = *ptr;
++  MemoryBarrier();
++  return value;
++}
++
++inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
++  MemoryBarrier();
++  return *ptr;
++}
++
++// 64-bit versions of the routines above.
++inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
++                                         Atomic64 old_value,
++                                         Atomic64 new_value) {
++  Atomic64 prev, tmp;
++  __asm__ __volatile__("1:\n"
++                       "ll.d %0, %5\n"     // prev = *ptr
++                       "bne %0, %3, 2f\n"  // if (prev != old_value) goto 2
++                       "move %2, %4\n"     // tmp = new_value
++                       "sc.d %2, %1\n"     // *ptr = tmp (with atomic check)
++                       "beqz %2, 1b\n"     // start again on atomic error
++                       "2:\n"
++                       : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
++                       : "r" (old_value), "r" (new_value), "m" (*ptr)
++                       : "memory");
++  return prev;
++}
++
++// Atomically store new_value into *ptr, returning the previous value held in
++// *ptr. This routine implies no memory barriers.
++inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
++                                         Atomic64 new_value) {
++  Atomic64 temp, old;
++  __asm__ __volatile__("1:\n"
++                       "ll.d %1, %2\n"   // old = *ptr
++                       "move %0, %3\n"   // temp = new_value
++                       "sc.d %0, %2\n"   // *ptr = temp (with atomic check)
++                       "beqz %0, 1b\n"   // start again on atomic error
++                       : "=&r" (temp), "=&r" (old), "=m" (*ptr)
++                       : "r" (new_value), "m" (*ptr)
++                       : "memory");
++
++  return old;
++}
++
++// Atomically increment *ptr by "increment". Returns the new value of
++// *ptr with the increment applied. This routine implies no memory barriers.
++inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
++                                          Atomic64 increment) {
++  Atomic64 temp, temp2;
++
++  __asm__ __volatile__("1:\n"
++                       "ll.d %0, %2\n"       // temp = *ptr
++                       "add.d %1, %0, %3\n"  // temp2 = temp + increment
++                       "sc.d %1, %2\n"       // *ptr = temp2 (with atomic check)
++                       "beqz %1, 1b\n"       // start again on atomic error
++                       "add.d %1, %0, %3\n"  // temp2 = temp + increment
++                       : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
++                       : "r" (increment), "m" (*ptr)
++                       : "memory");
++  // temp2 now holds the final value.
++  return temp2;
++}
++
++inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
++                                        Atomic64 increment) {
++  MemoryBarrier();
++  Atomic64 res = NoBarrier_AtomicIncrement(ptr, increment);
++  MemoryBarrier();
++  return res;
++}
++
++// "Acquire" operations
++// ensure that no later memory access can be reordered ahead of the operation.
++// "Release" operations ensure that no previous memory access can be reordered
++// after the operation. "Barrier" operations have both "Acquire" and "Release"
++// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
++// access.
++inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
++                                       Atomic64 old_value,
++                                       Atomic64 new_value) {
++  Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
++  MemoryBarrier();
++  return res;
++}
++
++inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
++                                       Atomic64 old_value,
++                                       Atomic64 new_value) {
++  MemoryBarrier();
++  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
++}
++
++inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
++  *ptr = value;
++}
++
++inline void MemoryBarrier() {
++  __asm__ __volatile__("dbar 0x0" : : : "memory");
++}
++
++inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
++  *ptr = value;
++  MemoryBarrier();
++}
++
++inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
++  MemoryBarrier();
++  *ptr = value;
++}
++
++inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
++  return *ptr;
++}
++
++inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
++  Atomic64 value = *ptr;
++  MemoryBarrier();
++  return value;
++}
++
++inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
++  MemoryBarrier();
++  return *ptr;
++}
++
++}  // namespace internal
++}  // namespace protobuf
++}  // namespace google
++
++#undef ATOMICOPS_COMPILER_BARRIER
++
++#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_LOONGARCH64_GCC_H_
+diff --git a/src/google/protobuf/stubs/platform_macros.h b/src/google/protobuf/stubs/platform_macros.h
+index db691d8..3e17fea 100644
+--- a/src/google/protobuf/stubs/platform_macros.h
++++ b/src/google/protobuf/stubs/platform_macros.h
+@@ -69,6 +69,9 @@
+ #elif defined(__s390__)
+ #define GOOGLE_PROTOBUF_ARCH_32_BIT 1
+ #define GOOGLE_PROTOBUF_ARCH_S390 1
++#elif defined(_LOONGARCH_ARCH_LOONGARCH64)
++#define GOOGLE_PROTOBUF_ARCH_LOONGARCH64 1
++#define GOOGLE_PROTOBUF_ARCH_64_BIT 1
+ #else
+ #error Host architecture was not detected as supported by protobuf
+ #endif
+-- 
+2.33.0
+
diff --git a/protobuf2.spec b/protobuf2.spec
index 9ec6fcb..3037ef3 100644
--- a/protobuf2.spec
+++ b/protobuf2.spec
@@ -6,7 +6,7 @@
 Summary: Protocol Buffers - Google's data interchange format
 Name: protobuf2
 Version: 2.5.0
-Release: 3
+Release: 4
 License: BSD
 Source: http://protobuf.googlecode.com/files/protobuf-%{version}.tar.bz2
 Source1: ftdetect-proto.vim
@@ -15,6 +15,7 @@ Patch1: protobuf-2.5.0-gtest.patch
 Patch2: protobuf-2.5.0-java-fixes.patch
 Patch3: 0001-Add-generic-GCC-support-for-atomic-operations.patch
 Patch4: protobuf-2.5.0-makefile.patch
+Patch5: 0001-protobuf2-add-support-for-loongarch64.patch
 URL: http://code.google.com/p/protobuf/
 BuildRequires: automake autoconf libtool pkgconfig zlib-devel emacs emacs-el >= 24.1 maven-plugin-bundle gcc-c++
 %if %{with gtest}
@@ -134,6 +135,9 @@ rm -rf java/src/test
 %endif
 %patch3 -p1 -b .generic-atomics
 %patch4 -p1 -b .generic-atomics-makefile
+%ifarch loongarch64
+%patch5 -p1
+%endif
 
 %build
 iconv -f iso8859-1 -t utf-8 CONTRIBUTORS.txt > CONTRIBUTORS.txt.utf8
@@ -241,6 +245,9 @@ install -p -m 0644 %{SOURCE2} $RPM_BUILD_ROOT%{emacs_startdir}
 %endif
 
 %changelog
+* Tue May 23 2023 huajingyun - 2.5.0-4
+- Add loongarch64 support
+
 * Wed Aug 18 2021 lingsheng - 2.5.0-3
 - Modify subpackage's install require
 
-- 
Gitee
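
Note on the ll/sc loops in atomicops_internals_loongarch64_gcc.h: every routine is a
load-linked/store-conditional retry loop (ll.w/sc.w for Atomic32, ll.d/sc.d for Atomic64)
that branches back to label "1" whenever the store-conditional fails. The sketch below is
not part of the patch; it restates the documented contracts (CompareAndSwap and
AtomicExchange return the old value, AtomicIncrement returns the new value) with GCC
__atomic builtins, which is convenient as a host-side cross-check when reviewing the
hand-written assembly. The file name, helper names and test values are illustrative, and
a GCC-compatible compiler is assumed.

// cas_reference.cc -- illustrative sketch, not part of the patch.
// Assumed build: g++ -std=c++11 -O2 cas_reference.cc -o cas_reference
#include <cstdint>
#include <cstdio>

typedef int32_t Atomic32;

// If *ptr == old_value, store new_value; always return the value *ptr held
// before the call (contract of the patch's NoBarrier_CompareAndSwap).
inline Atomic32 RefCompareAndSwap(volatile Atomic32* ptr,
                                  Atomic32 old_value, Atomic32 new_value) {
  Atomic32 expected = old_value;
  __atomic_compare_exchange_n(ptr, &expected, new_value, /*weak=*/false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  return expected;  // on a miss this holds the value actually found in *ptr
}

// Store new_value and return the previous value (NoBarrier_AtomicExchange).
inline Atomic32 RefAtomicExchange(volatile Atomic32* ptr, Atomic32 new_value) {
  return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
}

// Add increment and return the new value (NoBarrier_AtomicIncrement).
inline Atomic32 RefAtomicIncrement(volatile Atomic32* ptr, Atomic32 increment) {
  return __atomic_add_fetch(ptr, increment, __ATOMIC_RELAXED);
}

int main() {
  Atomic32 v = 5;
  Atomic32 prev = RefCompareAndSwap(&v, 5, 7);   // hit:  prev == 5, v becomes 7
  std::printf("cas hit:  prev=%d v=%d\n", (int)prev, (int)v);
  prev = RefCompareAndSwap(&v, 5, 9);            // miss: prev == 7, v stays 7
  std::printf("cas miss: prev=%d v=%d\n", (int)prev, (int)v);
  prev = RefAtomicExchange(&v, 1);               // prev == 7, v becomes 1
  std::printf("exchange: prev=%d v=%d\n", (int)prev, (int)v);
  Atomic32 now = RefAtomicIncrement(&v, 41);     // now == 42
  std::printf("add:      now=%d\n", (int)now);
  return 0;
}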
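
Note on the "Acquire"/"Release" comment block: Acquire_Load and Release_Store are the
usual publish/consume handoff, with MemoryBarrier() lowered to the LoongArch full
barrier "dbar 0x0". The standalone example below expresses the same pattern with
std::atomic and memory_order_acquire/release so it can be compiled and run on any host
with a C++11 compiler; it is an analogy for review purposes, not code taken from the
patch, and the names are illustrative.

// acquire_release_demo.cc -- illustrative sketch, not part of the patch.
// Assumed build: g++ -std=c++11 -O2 -pthread acquire_release_demo.cc -o demo
#include <atomic>
#include <cstdio>
#include <thread>

int payload = 0;            // plain data, published through the flag below
std::atomic<int> ready(0);  // plays the role of the Atomic32 flag

void producer() {
  payload = 42;                               // 1. write the data
  ready.store(1, std::memory_order_release);  // 2. Release_Store: publish it
}

void consumer() {
  while (ready.load(std::memory_order_acquire) == 0) {
    // spin; the acquire load keeps the payload read below from moving up
  }
  std::printf("payload = %d\n", payload);     // guaranteed to print 42
}

int main() {
  std::thread t1(producer);
  std::thread t2(consumer);
  t1.join();
  t2.join();
  return 0;
}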
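
Note on platform_macros.h: the new branch keys GOOGLE_PROTOBUF_ARCH_LOONGARCH64 and
GOOGLE_PROTOBUF_ARCH_64_BIT off the predefined macro _LOONGARCH_ARCH_LOONGARCH64. That
spelling is taken from the patch itself; it can be double-checked on the build machine
with "gcc -dM -E - < /dev/null | grep -i loongarch", or with a small probe such as the
one below (illustrative only).

// arch_probe.cc -- illustrative sketch, not part of the patch.
#include <cstdio>

int main() {
#if defined(_LOONGARCH_ARCH_LOONGARCH64)
  std::printf("loongarch64 toolchain: the new atomics branch would be selected\n");
#else
  std::printf("macro not defined: protobuf would fall through to another branch\n");
#endif
  return 0;
}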