From 7a85bc44f1279ddcab0ab59f7d0a89c9c23866a3 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 28 Jan 2022 22:57:01 +0300 Subject: [PATCH 1/6] fanotify: Fix stale file descriptor in copy_event_to_user() stable inclusion from stable-5.10.97 commit 7b4741644cf718c422187e74fb07661ef1d68e85 category: bugfix issue: I5AFE6 CVE: CVE-2022-1998 Signed-off-by: lizongfeng --------------------------------------- fanotify: Fix stale file descriptor in copy_event_to_user() commit ee12595147ac1fbfb5bcb23837e26dd58d94b15d upstream. This code calls fd_install() which gives the userspace access to the fd. Then if copy_info_records_to_user() fails it calls put_unused_fd(fd) but that will not release it and leads to a stale entry in the file descriptor table. Generally you can't trust the fd after a call to fd_install(). The fix is to delay the fd_install() until everything else has succeeded. Fortunately it requires CAP_SYS_ADMIN to reach this code so the security impact is less. Fixes: f644bc449b37 ("fanotify: fix copy_event_to_user() fid error clean up") Link: https://lore.kernel.org/r/20220128195656.GA26981@kili Signed-off-by: Dan Carpenter Reviewed-by: Mathias Krause Signed-off-by: Jan Kara Signed-off-by: Greg Kroah-Hartman --- fs/notify/fanotify/fanotify_user.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index 086b6bacbad1..18e014fa0648 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -366,9 +366,6 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, if (fanotify_is_perm_event(event->mask)) FANOTIFY_PERM(event)->fd = fd; - if (f) - fd_install(fd, f); - /* Event info records order is: dir fid + name, child fid */ if (fanotify_event_dir_fh_len(event)) { info_type = info->name_len ? FAN_EVENT_INFO_TYPE_DFID_NAME : @@ -432,6 +429,9 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, count -= ret; } + if (f) + fd_install(fd, f); + return metadata.event_len; out_close_fd: -- Gitee From b64cf8c7f40f1463c1a8457ad7d4f7808b757eed Mon Sep 17 00:00:00 2001 From: Daniel Thompson Date: Mon, 23 May 2022 19:11:02 +0100 Subject: [PATCH 2/6] lockdown: also lock down previous kgdb use stable inclusion from stable-5.10.119 commit a8f4d63142f947cd22fa615b8b3b8921cdaf4991 category: bugfix issue: I5AFE6 CVE: CVE-2022-21499 Signed-off-by: lizongfeng --------------------------------------- lockdown: also lock down previous kgdb use commit eadb2f47a3ced5c64b23b90fd2a3463f63726066 upstream. KGDB and KDB allow read and write access to kernel memory, and thus should be restricted during lockdown. An attacker with access to a serial port (for example, via a hypervisor console, which some cloud vendors provide over the network) could trigger the debugger so it is important that the debugger respect the lockdown mode when/if it is triggered. Fix this by integrating lockdown into kdb's existing permissions mechanism. Unfortunately kgdb does not have any permissions mechanism (although it certainly could be added later) so, for now, kgdb is simply and brutally disabled by immediately exiting the gdb stub without taking any action. For lockdowns established early in the boot (e.g. the normal case) then this should be fine but on systems where kgdb has set breakpoints before the lockdown is enacted than "bad things" will happen. 
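As an aside, the shape of the check is conventional lockdown usage (a
minimal sketch, not code from this patch; the helper name is invented for
illustration): any debugger path that can write kernel memory consults the
lockdown LSM hook and bails out when the feature is locked down.

	/* hypothetical helper, for illustration only */
	static int dbg_try_write_kernel(void)
	{
		/* security_locked_down() returns -EPERM when locked down */
		if (security_locked_down(LOCKDOWN_DBG_WRITE_KERNEL))
			return -EPERM;
		/* ... perform the debugger-initiated write ... */
		return 0;
	}

kdb applies the same idea through its permission flags rather than a check
at every call site, which is why kdb_check_for_lockdown() below only needs
to mask bits out of kdb_cmd_enabled.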
CVE: CVE-2022-21499 Co-developed-by: Stephen Brennan Signed-off-by: Stephen Brennan Reviewed-by: Douglas Anderson Signed-off-by: Daniel Thompson Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- include/linux/security.h | 2 ++ kernel/debug/debug_core.c | 24 ++++++++++++++ kernel/debug/kdb/kdb_main.c | 62 +++++++++++++++++++++++++++++++++++-- security/security.c | 2 ++ 4 files changed, 87 insertions(+), 3 deletions(-) diff --git a/include/linux/security.h b/include/linux/security.h index 35355429648e..330029ef7e89 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -121,10 +121,12 @@ enum lockdown_reason { LOCKDOWN_DEBUGFS, LOCKDOWN_XMON_WR, LOCKDOWN_BPF_WRITE_USER, + LOCKDOWN_DBG_WRITE_KERNEL, LOCKDOWN_INTEGRITY_MAX, LOCKDOWN_KCORE, LOCKDOWN_KPROBES, LOCKDOWN_BPF_READ, + LOCKDOWN_DBG_READ_KERNEL, LOCKDOWN_PERF, LOCKDOWN_TRACEFS, LOCKDOWN_XMON_RW, diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index 8661eb2b1771..0f31b22abe8d 100644 --- a/kernel/debug/debug_core.c +++ b/kernel/debug/debug_core.c @@ -56,6 +56,7 @@ #include #include #include +#include #include #include @@ -756,6 +757,29 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs, continue; kgdb_connected = 0; } else { + /* + * This is a brutal way to interfere with the debugger + * and prevent gdb being used to poke at kernel memory. + * This could cause trouble if lockdown is applied when + * there is already an active gdb session. For now the + * answer is simply "don't do that". Typically lockdown + * *will* be applied before the debug core gets started + * so only developers using kgdb for fairly advanced + * early kernel debug can be biten by this. Hopefully + * they are sophisticated enough to take care of + * themselves, especially with help from the lockdown + * message printed on the console! + */ + if (security_locked_down(LOCKDOWN_DBG_WRITE_KERNEL)) { + if (IS_ENABLED(CONFIG_KGDB_KDB)) { + /* Switch back to kdb if possible... */ + dbg_kdb_mode = 1; + continue; + } else { + /* ... otherwise just bail */ + break; + } + } error = gdb_serial_stub(ks); } diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index 930ac1b25ec7..4e09fab52faf 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -45,6 +45,7 @@ #include #include #include +#include #include "kdb_private.h" #undef MODULE_PARAM_PREFIX @@ -197,10 +198,62 @@ struct task_struct *kdb_curr_task(int cpu) } /* - * Check whether the flags of the current command and the permissions - * of the kdb console has allow a command to be run. + * Update the permissions flags (kdb_cmd_enabled) to match the + * current lockdown state. + * + * Within this function the calls to security_locked_down() are "lazy". We + * avoid calling them if the current value of kdb_cmd_enabled already excludes + * flags that might be subject to lockdown. Additionally we deliberately check + * the lockdown flags independently (even though read lockdown implies write + * lockdown) since that results in both simpler code and clearer messages to + * the user on first-time debugger entry. + * + * The permission masks during a read+write lockdown permits the following + * flags: INSPECT, SIGNAL, REBOOT (and ALWAYS_SAFE). + * + * The INSPECT commands are not blocked during lockdown because they are + * not arbitrary memory reads. INSPECT covers the backtrace family (sometimes + * forcing them to have no arguments) and lsmod. 
These commands do expose + * some kernel state but do not allow the developer seated at the console to + * choose what state is reported. SIGNAL and REBOOT should not be controversial, + * given these are allowed for root during lockdown already. + */ +static void kdb_check_for_lockdown(void) +{ + const int write_flags = KDB_ENABLE_MEM_WRITE | + KDB_ENABLE_REG_WRITE | + KDB_ENABLE_FLOW_CTRL; + const int read_flags = KDB_ENABLE_MEM_READ | + KDB_ENABLE_REG_READ; + + bool need_to_lockdown_write = false; + bool need_to_lockdown_read = false; + + if (kdb_cmd_enabled & (KDB_ENABLE_ALL | write_flags)) + need_to_lockdown_write = + security_locked_down(LOCKDOWN_DBG_WRITE_KERNEL); + + if (kdb_cmd_enabled & (KDB_ENABLE_ALL | read_flags)) + need_to_lockdown_read = + security_locked_down(LOCKDOWN_DBG_READ_KERNEL); + + /* De-compose KDB_ENABLE_ALL if required */ + if (need_to_lockdown_write || need_to_lockdown_read) + if (kdb_cmd_enabled & KDB_ENABLE_ALL) + kdb_cmd_enabled = KDB_ENABLE_MASK & ~KDB_ENABLE_ALL; + + if (need_to_lockdown_write) + kdb_cmd_enabled &= ~write_flags; + + if (need_to_lockdown_read) + kdb_cmd_enabled &= ~read_flags; +} + +/* + * Check whether the flags of the current command, the permissions of the kdb + * console and the lockdown state allow a command to be run. */ -static inline bool kdb_check_flags(kdb_cmdflags_t flags, int permissions, +static bool kdb_check_flags(kdb_cmdflags_t flags, int permissions, bool no_args) { /* permissions comes from userspace so needs massaging slightly */ @@ -1194,6 +1247,9 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs, kdb_curr_task(raw_smp_processor_id()); KDB_DEBUG_STATE("kdb_local 1", reason); + + kdb_check_for_lockdown(); + kdb_go_count = 0; if (reason == KDB_REASON_DEBUG) { /* special case below */ diff --git a/security/security.c b/security/security.c index a864ff824dd3..0373a925e2c5 100644 --- a/security/security.c +++ b/security/security.c @@ -59,10 +59,12 @@ const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1] = { [LOCKDOWN_DEBUGFS] = "debugfs access", [LOCKDOWN_XMON_WR] = "xmon write access", [LOCKDOWN_BPF_WRITE_USER] = "use of bpf to write user RAM", + [LOCKDOWN_DBG_WRITE_KERNEL] = "use of kgdb/kdb to write kernel RAM", [LOCKDOWN_INTEGRITY_MAX] = "integrity", [LOCKDOWN_KCORE] = "/proc/kcore access", [LOCKDOWN_KPROBES] = "use of kprobes", [LOCKDOWN_BPF_READ] = "use of bpf to read kernel RAM", + [LOCKDOWN_DBG_READ_KERNEL] = "use of kgdb/kdb to read kernel RAM", [LOCKDOWN_PERF] = "unsafe use of perf", [LOCKDOWN_TRACEFS] = "use of tracefs", [LOCKDOWN_XMON_RW] = "xmon read and write access", -- Gitee From 475f416552bd3b3b07c4a612496d454632b3985d Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 3 Jun 2022 13:17:04 +0100 Subject: [PATCH 3/6] io_uring: don't re-import iovecs from callbacks stable inclusion from stable-5.10.120 commit 57d01bcae7041cfb86553091718d12bf36c082aa category: bugfix issue: I5AFE6 CVE: CVE-2022-1508 Signed-off-by: lizongfeng --------------------------------------- io_uring: don't re-import iovecs from callbacks We can't re-import or modify iterators from iocb callbacks, it's not safe as it might be reverted and/or reexpanded while unwinding stack. It's also not safe to resubmit as io-wq thread will race with stack undwinding for the iterator and other data. Disallow resubmission from callbacks, it can fail some cases that were handled before, but the possibility of such a failure was a part of the API from the beginning and so it should be fine. 
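For applications the visible effect is that a read or write which would
previously have been quietly resubmitted from the completion path may now
complete with an error such as -EAGAIN, and the submitter retries it
itself. A rough liburing-style sketch of that handling (illustrative only;
ring, fd, buf, len and offset are assumed to be set up elsewhere):

	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);

	io_uring_prep_read(sqe, fd, buf, len, offset);
	io_uring_submit(&ring);
	io_uring_wait_cqe(&ring, &cqe);
	if (cqe->res == -EAGAIN) {
		/* the kernel no longer resubmits on our behalf: requeue here */
	}
	io_uring_cqe_seen(&ring, cqe);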
Signed-off-by: Pavel Begunkov Signed-off-by: Greg Kroah-Hartman --- fs/io_uring.c | 39 --------------------------------------- 1 file changed, 39 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 69afabd97024..e8b5fab8b636 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2568,45 +2568,6 @@ static void io_complete_rw_common(struct kiocb *kiocb, long res, #ifdef CONFIG_BLOCK static bool io_resubmit_prep(struct io_kiocb *req, int error) { - struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; - ssize_t ret = -ECANCELED; - struct iov_iter iter; - int rw; - - if (error) { - ret = error; - goto end_req; - } - - switch (req->opcode) { - case IORING_OP_READV: - case IORING_OP_READ_FIXED: - case IORING_OP_READ: - rw = READ; - break; - case IORING_OP_WRITEV: - case IORING_OP_WRITE_FIXED: - case IORING_OP_WRITE: - rw = WRITE; - break; - default: - printk_once(KERN_WARNING "io_uring: bad opcode in resubmit %d\n", - req->opcode); - goto end_req; - } - - if (!req->async_data) { - ret = io_import_iovec(rw, req, &iovec, &iter, false); - if (ret < 0) - goto end_req; - ret = io_setup_async_rw(req, iovec, inline_vecs, &iter, false); - if (!ret) - return true; - kfree(iovec); - } else { - return true; - } -end_req: req_set_fail_links(req); return false; } -- Gitee From 9132cd60f7d56db2d50563e5da4cec9181ba12aa Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 3 Jun 2022 13:17:05 +0100 Subject: [PATCH 4/6] io_uring: fix using under-expanded iters stable inclusion from stable-5.10.120 commit 8adb751d294ed3b668f1c7e41bd7ebe49002a744 category: bugfix issue: I5AFE6 CVE: CVE-2022-1508 Signed-off-by: lizongfeng --------------------------------------- io_uring: fix using under-expanded iters [ upstream commit cd65869512ab5668a5d16f789bc4da1319c435c4 ] The issue was first described and addressed in 89c2b3b7491820 ("io_uring: reexpand under-reexpanded iters"), but shortly after reimplemented as. cd65869512ab56 ("io_uring: use iov_iter state save/restore helpers"). Here we follow the approach from the second patch but without in-callback resubmissions, fixups for not yet supported in 5.10 short read retries and replacing iov_iter_state with iter copies to not pull even more dependencies, and because it's just much simpler. 
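Concretely, the backport snapshots the iterator with plain structure
assignment and restores the snapshot instead of reverting (sketched here
out of context, with the surrounding code elided):

	struct iov_iter __iter, *iter = &__iter;
	struct iov_iter iter_cp;

	ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
	if (ret < 0)
		return ret;
	iter_cp = *iter;		/* snapshot before the I/O attempt */

	/* ... later, on the copy_iov / -EAGAIN paths ... */
	*iter = iter_cp;		/* restore instead of iov_iter_revert() */

Copying the whole struct is heavier than the iov_iter_state helpers used
upstream, but it keeps the backport free of extra dependencies.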
Signed-off-by: Pavel Begunkov Signed-off-by: Greg Kroah-Hartman --- fs/io_uring.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index e8b5fab8b636..71fea32caa72 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -3369,6 +3369,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock, struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; struct kiocb *kiocb = &req->rw.kiocb; struct iov_iter __iter, *iter = &__iter; + struct iov_iter iter_cp; struct io_async_rw *rw = req->async_data; ssize_t io_size, ret, ret2; bool no_async; @@ -3379,6 +3380,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock, ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock); if (ret < 0) return ret; + iter_cp = *iter; io_size = iov_iter_count(iter); req->result = io_size; ret = 0; @@ -3414,7 +3416,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock, if (req->file->f_flags & O_NONBLOCK) goto done; /* some cases will consume bytes even on error returns */ - iov_iter_revert(iter, io_size - iov_iter_count(iter)); + *iter = iter_cp; ret = 0; goto copy_iov; } else if (ret < 0) { @@ -3497,6 +3499,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock, struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; struct kiocb *kiocb = &req->rw.kiocb; struct iov_iter __iter, *iter = &__iter; + struct iov_iter iter_cp; struct io_async_rw *rw = req->async_data; ssize_t ret, ret2, io_size; @@ -3506,6 +3509,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock, ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock); if (ret < 0) return ret; + iter_cp = *iter; io_size = iov_iter_count(iter); req->result = io_size; @@ -3567,7 +3571,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock, } else { copy_iov: /* some cases will consume bytes even on error returns */ - iov_iter_revert(iter, io_size - iov_iter_count(iter)); + *iter = iter_cp; ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false); if (!ret) return -EAGAIN; -- Gitee From 7dd9c7527ea154777e768069f56dcc023a0b7f77 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Fri, 27 May 2022 09:56:18 +0200 Subject: [PATCH 5/6] netfilter: nf_tables: sanitize nft_set_desc_concat_parse() stable inclusion from stable-5.10.120 commit c0aff1faf66b6b7a19103f83e6a5d0fdc64b9048 category: bugfix issue: I5AFE6 CVE: CVE-2022-1972 Signed-off-by: lizongfeng --------------------------------------- netfilter: nf_tables: sanitize nft_set_desc_concat_parse() commit fecf31ee395b0295f2d7260aa29946b7605f7c85 upstream. Add several sanity checks for nft_set_desc_concat_parse(): - validate desc->field_count not larger than desc->field_len array. - field length cannot be larger than desc->field_len (ie. U8_MAX) - total length of the concatenation cannot be larger than register array. Joint work with Florian Westphal. 
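To make the register bound concrete (numbers chosen purely for
illustration): each field consumes DIV_ROUND_UP(len, sizeof(u32)) 32-bit
registers and the sum over all fields must fit in the NFT_REG32_COUNT
register file.

	/* e.g. an IPv6 address + port concatenation */
	u32 field_len[] = { 16, 2 };	/* bytes per field */
	u32 i, num_regs = 0;

	for (i = 0; i < ARRAY_SIZE(field_len); i++)
		num_regs += DIV_ROUND_UP(field_len[i], sizeof(u32));
	/* num_regs == 4 + 1 == 5, well under NFT_REG32_COUNT (16) */

A zero-length field, or one longer than U8_MAX bytes, is now rejected with
-EINVAL before it can reach this accounting at all.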
Fixes: f3a2181e16f1 ("netfilter: nf_tables: Support for sets with multiple ranged fields") Reported-by: Reviewed-by: Stefano Brivio Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso Signed-off-by: Greg Kroah-Hartman --- net/netfilter/nf_tables_api.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index caad63abfda1..f9bc20e53690 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -4051,6 +4051,9 @@ static int nft_set_desc_concat_parse(const struct nlattr *attr, u32 len; int err; + if (desc->field_count >= ARRAY_SIZE(desc->field_len)) + return -E2BIG; + err = nla_parse_nested_deprecated(tb, NFTA_SET_FIELD_MAX, attr, nft_concat_policy, NULL); if (err < 0) @@ -4060,9 +4063,8 @@ static int nft_set_desc_concat_parse(const struct nlattr *attr, return -EINVAL; len = ntohl(nla_get_be32(tb[NFTA_SET_FIELD_LEN])); - - if (len * BITS_PER_BYTE / 32 > NFT_REG32_COUNT) - return -E2BIG; + if (!len || len > U8_MAX) + return -EINVAL; desc->field_len[desc->field_count++] = len; @@ -4073,7 +4075,8 @@ static int nft_set_desc_concat(struct nft_set_desc *desc, const struct nlattr *nla) { struct nlattr *attr; - int rem, err; + u32 num_regs = 0; + int rem, err, i; nla_for_each_nested(attr, nla, rem) { if (nla_type(attr) != NFTA_LIST_ELEM) @@ -4084,6 +4087,12 @@ static int nft_set_desc_concat(struct nft_set_desc *desc, return err; } + for (i = 0; i < desc->field_count; i++) + num_regs += DIV_ROUND_UP(desc->field_len[i], sizeof(u32)); + + if (num_regs > NFT_REG32_COUNT) + return -E2BIG; + return 0; } -- Gitee From 71f3032136190ae7637534dc99b1a731d039a222 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 7 Jun 2022 00:34:56 +1000 Subject: [PATCH 6/6] powerpc/32: Fix overread/overwrite of thread_struct via ptrace stable inclusion from stable-5.10.122 commit 3be74fc0afbeadc2aff8dc69f3bf9716fbe66486 category: bugfix issue: I5AFE6 CVE: CVE-2022-32981 Signed-off-by: lizongfeng --------------------------------------- powerpc/32: Fix overread/overwrite of thread_struct via ptrace commit 8e1278444446fc97778a5e5c99bca1ce0bbc5ec9 upstream. The ptrace PEEKUSR/POKEUSR (aka PEEKUSER/POKEUSER) API allows a process to read/write registers of another process. To get/set a register, the API takes an index into an imaginary address space called the "USER area", where the registers of the process are laid out in some fashion. The kernel then maps that index to a particular register in its own data structures and gets/sets the value. The API only allows a single machine-word to be read/written at a time. So 4 bytes on 32-bit kernels and 8 bytes on 64-bit kernels. The way floating point registers (FPRs) are addressed is somewhat complicated, because double precision float values are 64-bit even on 32-bit CPUs. That means on 32-bit kernels each FPR occupies two word-sized locations in the USER area. On 64-bit kernels each FPR occupies one word-sized location in the USER area. Internally the kernel stores the FPRs in an array of u64s, or if VSX is enabled, an array of pairs of u64s where one half of each pair stores the FPR. Which half of the pair stores the FPR depends on the kernel's endianness. To handle the different layouts of the FPRs depending on VSX/no-VSX and big/little endian, the TS_FPR() macro was introduced. Unfortunately the TS_FPR() macro does not take into account the fact that the addressing of each FPR differs between 32-bit and 64-bit kernels. 
It just takes the index into the "USER area" passed from userspace and indexes into the fp_state.fpr array. On 32-bit there are 64 indexes that address FPRs, but only 32 entries in the fp_state.fpr array, meaning the user can read/write 256 bytes past the end of the array. Because the fp_state sits in the middle of the thread_struct there are various fields than can be overwritten, including some pointers. As such it may be exploitable. It has also been observed to cause systems to hang or otherwise misbehave when using gdbserver, and is probably the root cause of this report which could not be easily reproduced: https://lore.kernel.org/linuxppc-dev/dc38afe9-6b78-f3f5-666b-986939e40fc6@keymile.com/ Rather than trying to make the TS_FPR() macro even more complicated to fix the bug, or add more macros, instead add a special-case for 32-bit kernels. This is more obvious and hopefully avoids a similar bug happening again in future. Note that because 32-bit kernels never have VSX enabled the code doesn't need to consider TS_FPRWIDTH/OFFSET at all. Add a BUILD_BUG_ON() to ensure that 32-bit && VSX is never enabled. Fixes: 87fec0514f61 ("powerpc: PTRACE_PEEKUSR/PTRACE_POKEUSER of FPR registers in little endian builds") Cc: stable@vger.kernel.org # v3.13+ Reported-by: Ariel Miculas Tested-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220609133245.573565-1-mpe@ellerman.id.au Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/kernel/ptrace/ptrace.c | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/kernel/ptrace/ptrace.c b/arch/powerpc/kernel/ptrace/ptrace.c index f6e51be47c6e..9ea9ee513ae1 100644 --- a/arch/powerpc/kernel/ptrace/ptrace.c +++ b/arch/powerpc/kernel/ptrace/ptrace.c @@ -75,8 +75,13 @@ long arch_ptrace(struct task_struct *child, long request, flush_fp_to_thread(child); if (fpidx < (PT_FPSCR - PT_FPR0)) - memcpy(&tmp, &child->thread.TS_FPR(fpidx), - sizeof(long)); + if (IS_ENABLED(CONFIG_PPC32)) { + // On 32-bit the index we are passed refers to 32-bit words + tmp = ((u32 *)child->thread.fp_state.fpr)[fpidx]; + } else { + memcpy(&tmp, &child->thread.TS_FPR(fpidx), + sizeof(long)); + } else tmp = child->thread.fp_state.fpscr; } @@ -108,8 +113,13 @@ long arch_ptrace(struct task_struct *child, long request, flush_fp_to_thread(child); if (fpidx < (PT_FPSCR - PT_FPR0)) - memcpy(&child->thread.TS_FPR(fpidx), &data, - sizeof(long)); + if (IS_ENABLED(CONFIG_PPC32)) { + // On 32-bit the index we are passed refers to 32-bit words + ((u32 *)child->thread.fp_state.fpr)[fpidx] = data; + } else { + memcpy(&child->thread.TS_FPR(fpidx), &data, + sizeof(long)); + } else child->thread.fp_state.fpscr = data; ret = 0; @@ -478,4 +488,7 @@ void __init pt_regs_check(void) * real registers. */ BUILD_BUG_ON(PT_DSCR < sizeof(struct user_pt_regs) / sizeof(unsigned long)); + + // ptrace_get/put_fpr() rely on PPC32 and VSX being incompatible + BUILD_BUG_ON(IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_VSX)); } -- Gitee
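As context for the fix above, a hedged userspace sketch of the interface
involved (pid, the register number n, and the error handling are all
simplified placeholders): on a 32-bit kernel each FPR occupies two
word-sized slots in the "USER area", so the two halves of FPR n are fetched
with two PTRACE_PEEKUSER calls at byte offsets derived from PT_FPR0.

	#include <sys/ptrace.h>
	#include <asm/ptrace.h>		/* PT_FPR0 on powerpc */

	long w0 = ptrace(PTRACE_PEEKUSER, pid,
			 (void *)((PT_FPR0 + 2 * n) * sizeof(long)), NULL);
	long w1 = ptrace(PTRACE_PEEKUSER, pid,
			 (void *)((PT_FPR0 + 2 * n + 1) * sizeof(long)), NULL);

Before the fix a large n indexed past the 32-entry fp_state.fpr array; with
the fix the index is applied to the array viewed as 32-bit words, so every
permitted index stays inside the thread's floating point state.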