diff --git a/3000-add-loongarch64-support.patch b/3000-add-loongarch64-support.patch
new file mode 100644
index 0000000000000000000000000000000000000000..ec6e6b78ea9f8d3ec69b0163f67c8e8e5cef02f1
--- /dev/null
+++ b/3000-add-loongarch64-support.patch
@@ -0,0 +1,66 @@
+From 5671abc0df13895d2f13a4646a6ebd2af29f37ca Mon Sep 17 00:00:00 2001
+From: Huang Yang
+Date: Tue, 14 Feb 2023 11:34:45 +0800
+Subject: [PATCH] vdo 6.2.0.298 add loongarch64 support
+
+---
+ utils/uds/atomicDefs.h | 8 +++++++-
+ utils/uds/cpu.h | 2 +-
+ 2 files changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/utils/uds/atomicDefs.h b/utils/uds/atomicDefs.h
+index 86d6a4a..2508f40 100644
+--- a/utils/uds/atomicDefs.h
++++ b/utils/uds/atomicDefs.h
+@@ -96,6 +96,8 @@ static INLINE void smp_mb(void)
+ __asm__ __volatile__("bcr 14,0" : : : "memory");
+ #elif defined __PPC__
+ __asm__ __volatile__("sync" : : : "memory");
++#elif defined __loongarch64
++ __asm__ __volatile__("dbar 0" : : : "memory");
+ #else
+ #error "no fence defined"
+ #endif
+@@ -121,6 +123,8 @@ static INLINE void smp_rmb(void)
+ __asm__ __volatile__("bcr 14,0" : : : "memory");
+ #elif defined __PPC__
+ __asm__ __volatile__("lwsync" : : : "memory");
++#elif defined __loongarch64
++ __asm__ __volatile__("dbar 0" : : : "memory");
+ #else
+ #error "no fence defined"
+ #endif
+@@ -146,6 +150,8 @@ static INLINE void smp_wmb(void)
+ __asm__ __volatile__("bcr 14,0" : : : "memory");
+ #elif defined __PPC__
+ __asm__ __volatile__("lwsync" : : : "memory");
++#elif defined __loongarch64
++ __asm__ __volatile__("dbar 0" : : : "memory");
+ #else
+ #error "no fence defined"
+ #endif
+@@ -172,7 +178,7 @@ static INLINE void smp_mb__before_atomic(void)
+ static INLINE void smp_read_barrier_depends(void)
+ {
+ #if defined(__x86_64__) || defined(__PPC__) || defined(__s390__) \
+- || defined(__aarch64__)
++ || defined(__aarch64__) || defined(__loongarch64)
+ // Nothing needed for these architectures.
+ #else
+ // Default to playing it safe.
+diff --git a/utils/uds/cpu.h b/utils/uds/cpu.h
+index 8b12a16..d987a1a 100644
+--- a/utils/uds/cpu.h
++++ b/utils/uds/cpu.h
+@@ -36,7 +36,7 @@
+ #define CACHE_LINE_BYTES 128
+ #elif defined(__s390x__)
+ #define CACHE_LINE_BYTES 256
+-#elif defined(__x86_64__) || defined(__aarch64__)
++#elif defined(__x86_64__) || defined(__aarch64__) || defined(__loongarch64)
+ #define CACHE_LINE_BYTES 64
+ #else
+ #error "unknown cache line size"
+--
+2.33.0
+
diff --git a/3001-add-sw_64-support.patch b/3001-add-sw_64-support.patch
new file mode 100644
index 0000000000000000000000000000000000000000..12d519f6d114f5a4ec4b761a4168e31e07b10c70
--- /dev/null
+++ b/3001-add-sw_64-support.patch
@@ -0,0 +1,53 @@
+diff --git a/utils/uds/atomicDefs.h b/utils/uds/atomicDefs.h
+index ea55f57..28de90d 100644
+--- a/utils/uds/atomicDefs.h
++++ b/utils/uds/atomicDefs.h
+@@ -98,6 +98,8 @@ static INLINE void smp_mb(void)
+ __asm__ __volatile__("sync" : : : "memory");
+ #elif defined __loongarch64
+ __asm__ __volatile__("dbar 0" : : : "memory");
++#elif defined __sw_64
++ __asm__ __volatile__("memb" : : : "memory");
+ #else
+ #error "no fence defined"
+ #endif
+@@ -125,6 +127,8 @@ static INLINE void smp_rmb(void)
+ __asm__ __volatile__("lwsync" : : : "memory");
+ #elif defined __loongarch64
+ __asm__ __volatile__("dbar 0" : : : "memory");
++#elif defined __sw_64
++ __asm__ __volatile__("memb" : : : "memory");
+ #else
+ #error "no fence defined"
+ #endif
+@@ -152,6 +156,8 @@ static INLINE void smp_wmb(void)
+ __asm__ __volatile__("lwsync" : : : "memory");
+ #elif defined __loongarch64
+ __asm__ __volatile__("dbar 0" : : : "memory");
++#elif defined __sw_64
++ __asm__ __volatile__("memb" : : : "memory");
+ #else
+ #error "no fence defined"
+ #endif
+@@ -178,7 +184,7 @@ static INLINE void smp_mb__before_atomic(void)
+ static INLINE void smp_read_barrier_depends(void)
+ {
+ #if defined(__x86_64__) || defined(__PPC__) || defined(__s390__) \
+- || defined(__aarch64__) || defined(__loongarch64)
++ || defined(__aarch64__) || defined(__loongarch64) || defined(__sw_64__)
+ // Nothing needed for these architectures.
+ #else
+ // Default to playing it safe.
+diff --git a/utils/uds/cpu.h b/utils/uds/cpu.h
+index e26d653..67ce745 100644
+--- a/utils/uds/cpu.h
++++ b/utils/uds/cpu.h
+@@ -36,7 +36,7 @@
+ #define CACHE_LINE_BYTES 128
+ #elif defined(__s390x__)
+ #define CACHE_LINE_BYTES 256
+-#elif defined(__x86_64__) || defined(__aarch64__) || defined(__loongarch64)
++#elif defined(__x86_64__) || defined(__aarch64__) || defined(__loongarch64) || defined(__sw_64__)
+ #define CACHE_LINE_BYTES 64
+ #else
+ #error "unknown cache line size"
diff --git a/3002-del-Wcast-align-for-sw_64.patch b/3002-del-Wcast-align-for-sw_64.patch
new file mode 100644
index 0000000000000000000000000000000000000000..6744027add1225c39511d59853bcfd0a5483ec9b
--- /dev/null
+++ b/3002-del-Wcast-align-for-sw_64.patch
@@ -0,0 +1,36 @@
+diff --git a/utils/uds/Makefile b/utils/uds/Makefile
+index e33d682..22ec52c 100644
+--- a/utils/uds/Makefile
++++ b/utils/uds/Makefile
+@@ -33,7 +33,6 @@ ifeq ($(origin CC), default)
+ endif
+
+ WARNS = -Wall \
+- -Wcast-align \
+ -Werror \
+ -Wextra \
+ -Winit-self \
+diff --git a/utils/vdo/base/Makefile b/utils/vdo/base/Makefile
+index fb009a7..9f43146 100644
+--- a/utils/vdo/base/Makefile
++++ b/utils/vdo/base/Makefile
+@@ -24,7 +24,6 @@ UDS_DIR = ../../uds
+
+ WARNS = \
+ -Wall \
+- -Wcast-align \
+ -Werror \
+ -Wextra \
+ -Winit-self \
+diff --git a/utils/vdo/user/Makefile b/utils/vdo/user/Makefile
+index 8946fb3..6dc547f 100644
+--- a/utils/vdo/user/Makefile
++++ b/utils/vdo/user/Makefile
+@@ -26,7 +26,6 @@ VDO_BASE_DIR = ../base
+
+ WARNS = \
+ -Wall \
+- -Wcast-align \
+ -Werror \
+ -Wextra \
+ -Winit-self \
diff --git a/vdo.spec b/vdo.spec
index 1d143cdd09d786fd0f191d179beae6ee49e0e474..3d0c29f31fb61cc1cfb5a1a64d018145f758e79b 100644
--- a/vdo.spec
+++ b/vdo.spec
@@ -1,14 +1,22 @@
 Name: vdo
 Version: 6.2.6.14
-Release: 1
+Release: 2
 Summary: Management tools for Virtual Data Optimizer
 License: GPLv2
 URL: http://github.com/dm-vdo/vdo
 Source0: https://github.com/dm-vdo/vdo/archive/refs/tags/%{version}.tar.gz
 Patch0002: 0002-Ignore-misaligned-pointers.patch
+Patch3000: 3000-add-loongarch64-support.patch
+Patch3001: 3001-add-sw_64-support.patch
+%ifarch sw_64
+Patch3002: 3002-del-Wcast-align-for-sw_64.patch
+%endif
 
 BuildRequires: gcc libuuid-devel device-mapper-devel device-mapper-event-devel
-BuildRequires: valgrind-devel python3 python3-devel zlib-devel systemd
+BuildRequires: python3 python3-devel zlib-devel systemd
+%ifarch %{valgrind_arches}
+BuildRequires: valgrind-devel
+%endif
 %{?systemd_requires}
 Requires: lvm2 >= 2.02 python3-PyYAML >= 3.10 libuuid >= 2.23 kmod-kvdo >= 6.2 util-linux >= 2.32.1
 Provides: kvdo-kmod-common = %{version}
@@ -80,6 +88,9 @@ done
 %{_mandir}/man8/*
 
 %changelog
+* Thu Aug 10 2023 yeqinglong - 6.2.6.14-2
+- add loongarch64 and sw_64 support
+
 * Fri May 20 2022 houyingchao - 6.2.6.14-1
 - Upgrade to 6.2.6.14