From a01e9f722d8e187493cda6acf645012793bc95fe Mon Sep 17 00:00:00 2001
From: gubin
Date: Wed, 25 Jun 2025 17:18:04 +0800
Subject: [PATCH] block/io: accept NULL qiov in bdrv_pad_request

cherry-pick from 3f934817c82c2f1bf1c238f8d1065a3be10a3c9e

Some operations, e.g. block-stream, perform reads while discarding the
results (only copy-on-read matters). In this case, they will pass NULL
as the target QEMUIOVector, which will however trip bdrv_pad_request,
since it wants to extend its passed vector. In particular, this is the
case for the blk_co_preadv() call in stream_populate().

If there is no qiov, no operation can be done with it, but the bytes
and offset still need to be updated, so the subsequent aligned read
will actually be aligned and not run into an assertion failure.

Originally-by: Stefan Reiter
Signed-off-by: Thomas Lamprecht
Signed-off-by: Fiona Ebner
Message-ID: <20240322095009.346989-2-f.ebner@proxmox.com>
Reviewed-by: Kevin Wolf
Reviewed-by: Stefan Hajnoczi
Signed-off-by: Kevin Wolf
Signed-off-by: gubin
---
 block/io.c | 33 ++++++++++++++++++++-------------
 1 file changed, 20 insertions(+), 13 deletions(-)

diff --git a/block/io.c b/block/io.c
index a280a5a4c9..27d6a1a04b 100644
--- a/block/io.c
+++ b/block/io.c
@@ -1756,22 +1756,29 @@ static int bdrv_pad_request(BlockDriverState *bs,
         return 0;
     }
 
-    sliced_iov = qemu_iovec_slice(*qiov, *qiov_offset, *bytes,
-                                  &sliced_head, &sliced_tail,
-                                  &sliced_niov);
-
-    /* Guaranteed by bdrv_check_request32() */
-    assert(*bytes <= SIZE_MAX);
-    ret = bdrv_create_padded_qiov(bs, pad, sliced_iov, sliced_niov,
-                                  sliced_head, *bytes);
-    if (ret < 0) {
-        bdrv_padding_finalize(pad);
-        return ret;
+    /*
+     * For prefetching in stream_populate(), no qiov is passed along, because
+     * only copy-on-read matters.
+     */
+    if (qiov && *qiov) {
+        sliced_iov = qemu_iovec_slice(*qiov, *qiov_offset, *bytes,
+                                      &sliced_head, &sliced_tail,
+                                      &sliced_niov);
+
+        /* Guaranteed by bdrv_check_request32() */
+        assert(*bytes <= SIZE_MAX);
+        ret = bdrv_create_padded_qiov(bs, pad, sliced_iov, sliced_niov,
+                                      sliced_head, *bytes);
+        if (ret < 0) {
+            bdrv_padding_finalize(pad);
+            return ret;
+        }
+        *qiov = &pad->local_qiov;
+        *qiov_offset = 0;
     }
+
     *bytes += pad->head + pad->tail;
     *offset -= pad->head;
-    *qiov = &pad->local_qiov;
-    *qiov_offset = 0;
     if (padded) {
         *padded = true;
     }
-- 
Gitee
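
For context, the call path the commit message refers to is the copy-on-read
prefetch in block/stream.c: stream_populate() issues a read with a NULL
QEMUIOVector. A rough sketch of that caller, as described by the commit
message (not copied from this tree), looks like:

    /* Prefetch-style read: the data itself is discarded, only the
     * copy-on-read side effect matters, so no destination qiov is given. */
    static int coroutine_fn stream_populate(BlockBackend *blk,
                                            int64_t offset, uint64_t bytes)
    {
        assert(bytes < SIZE_MAX);

        /* NULL qiov: with this patch, bdrv_pad_request() skips the
         * slicing/padding and only widens *bytes and shifts *offset
         * so the read stays aligned. */
        return blk_co_preadv(blk, offset, bytes, NULL,
                             BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH);
    }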