From 4aa03e4a8fd06e7b4d13337c54f196c27cd8104b Mon Sep 17 00:00:00 2001 From: wangyk <1093863723@qq.com> Date: Sat, 25 May 2024 20:39:20 +0800 Subject: [PATCH 1/2] =?UTF-8?q?=E8=A7=A3=E5=86=B3=E5=9C=A8=E5=A4=A7?= =?UTF-8?q?=E5=AE=B9=E9=87=8F=E7=8E=AF=E5=A2=83=E4=B8=8B=E8=84=8F=E9=A1=B5?= =?UTF-8?q?=E9=98=9F=E5=88=97=E6=BB=A1=20lwlock=E9=94=81=E6=97=A0=E6=B3=95?= =?UTF-8?q?=E9=87=8A=E6=94=BE=E5=AF=BC=E8=87=B4=E7=9A=84=E5=8D=A1=E6=AD=BB?= =?UTF-8?q?=E7=8E=B0=E8=B1=A1?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/storage/buffer/bufmgr.cpp | 66 +++++++++++------------ 1 file changed, 31 insertions(+), 35 deletions(-) diff --git a/src/gausskernel/storage/buffer/bufmgr.cpp b/src/gausskernel/storage/buffer/bufmgr.cpp index 5e96bec661..107c308185 100644 --- a/src/gausskernel/storage/buffer/bufmgr.cpp +++ b/src/gausskernel/storage/buffer/bufmgr.cpp @@ -4142,43 +4142,39 @@ bool SyncFlushOneBuffer(int buf_id, bool get_condition_lock) */ PinBuffer_Locked(buf_desc); - if ((dw_enabled() || ENABLE_DSS) && get_condition_lock) { - /* - * We must use a conditional lock acquisition here to avoid deadlock. If - * page_writer and double_write are enabled, only page_writer is allowed to - * flush the buffers. So the backends (BufferAlloc, FlushRelationBuffers, - * FlushDatabaseBuffers) are not allowed to flush the buffers, instead they - * will just wait for page_writer to flush the required buffer. In some cases - * (for example, btree split, heap_multi_insert), BufferAlloc will be called - * with holding exclusive lock on another buffer. So if we try to acquire - * the shared lock directly here (page_writer), it will block unconditionally - * and the backends will be blocked on the page_writer to flush the buffer, - * resulting in deadlock. 
- */ - int retry_times = 0; - int i = 0; - Buffer queue_head_buffer = get_dirty_page_queue_head_buffer(); - if (!BufferIsInvalid(queue_head_buffer) && (queue_head_buffer - 1 == buf_id)) { - retry_times = CONDITION_LOCK_RETRY_TIMES; - } - if (ENABLE_DMS) { - /* to speed the rate of flushing dirty page to disk */ - retry_times = CONDITION_LOCK_RETRY_TIMES; - } - for (;;) { - if (!LWLockConditionalAcquire(buf_desc->content_lock, LW_SHARED)) { - i++; - if (i >= retry_times) { - UnpinBuffer(buf_desc, true); - return false; - } - (void)sched_yield(); - continue; + /* + * We must use a conditional lock acquisition here to avoid deadlock. If + * page_writer and double_write are enabled, only page_writer is allowed to + * flush the buffers. So the backends (BufferAlloc, FlushRelationBuffers, + * FlushDatabaseBuffers) are not allowed to flush the buffers, instead they + * will just wait for page_writer to flush the required buffer. In some cases + * (for example, btree split, heap_multi_insert), BufferAlloc will be called + * with holding exclusive lock on another buffer. So if we try to acquire + * the shared lock directly here (page_writer), it will block unconditionally + * and the backends will be blocked on the page_writer to flush the buffer, + * resulting in deadlock. 
+ */ + int retry_times = 0; + int i = 0; + Buffer queue_head_buffer = get_dirty_page_queue_head_buffer(); + if (!BufferIsInvalid(queue_head_buffer) && (queue_head_buffer - 1 == buf_id)) { + retry_times = CONDITION_LOCK_RETRY_TIMES; + } + if (ENABLE_DMS) { + /* to speed the rate of flushing dirty page to disk */ + retry_times = CONDITION_LOCK_RETRY_TIMES; + } + for (;;) { + if (!LWLockConditionalAcquire(buf_desc->content_lock, LW_SHARED)) { + i++; + if (i >= retry_times) { + UnpinBuffer(buf_desc, true); + return false; } - break; + (void)sched_yield(); + continue; } - } else { - (void)LWLockAcquire(buf_desc->content_lock, LW_SHARED); + break; } if (ENABLE_DMS && buf_desc->extra->aio_in_progress) { -- Gitee From 4c0d224783c6b6bb3bbe6024282bbb228c322c63 Mon Sep 17 00:00:00 2001 From: wangyk <1093863723@qq.com> Date: Fri, 31 May 2024 13:01:20 +0800 Subject: [PATCH 2/2] =?UTF-8?q?=E8=A7=A3=E5=86=B3=E4=B8=8Emot=E5=AD=98?= =?UTF-8?q?=E5=82=A8=E5=BC=95=E6=93=8E=E5=86=B2=E7=AA=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/gausskernel/storage/buffer/bufmgr.cpp | 75 +++++++++++++---------- 1 file changed, 44 insertions(+), 31 deletions(-) diff --git a/src/gausskernel/storage/buffer/bufmgr.cpp b/src/gausskernel/storage/buffer/bufmgr.cpp index 107c308185..3f959e22ee 100644 --- a/src/gausskernel/storage/buffer/bufmgr.cpp +++ b/src/gausskernel/storage/buffer/bufmgr.cpp @@ -4141,41 +4141,54 @@ bool SyncFlushOneBuffer(int buf_id, bool get_condition_lock) * buffer is clean by the time we've locked it.) */ PinBuffer_Locked(buf_desc); - + /* - * We must use a conditional lock acquisition here to avoid deadlock. If - * page_writer and double_write are enabled, only page_writer is allowed to - * flush the buffers. So the backends (BufferAlloc, FlushRelationBuffers, - * FlushDatabaseBuffers) are not allowed to flush the buffers, instead they - * will just wait for page_writer to flush the required buffer. 
In some cases - * (for example, btree split, heap_multi_insert), BufferAlloc will be called - * with holding exclusive lock on another buffer. So if we try to acquire - * the shared lock directly here (page_writer), it will block unconditionally - * and the backends will be blocked on the page_writer to flush the buffer, - * resulting in deadlock. - */ - int retry_times = 0; - int i = 0; - Buffer queue_head_buffer = get_dirty_page_queue_head_buffer(); - if (!BufferIsInvalid(queue_head_buffer) && (queue_head_buffer - 1 == buf_id)) { - retry_times = CONDITION_LOCK_RETRY_TIMES; - } - if (ENABLE_DMS) { - /* to speed the rate of flushing dirty page to disk */ - retry_times = CONDITION_LOCK_RETRY_TIMES; - } - for (;;) { - if (!LWLockConditionalAcquire(buf_desc->content_lock, LW_SHARED)) { - i++; - if (i >= retry_times) { - UnpinBuffer(buf_desc, true); - return false; + * To pass the fastcheck_single_mot test, we have to keep the LWLockAcquire path. + */ + #ifdef ENABLE_MOT + if ((dw_enabled() || ENABLE_DSS) && get_condition_lock) + #endif + { + /* + * We must use a conditional lock acquisition here to avoid deadlock. If + * page_writer and double_write are enabled, only page_writer is allowed to + * flush the buffers. So the backends (BufferAlloc, FlushRelationBuffers, + * FlushDatabaseBuffers) are not allowed to flush the buffers, instead they + * will just wait for page_writer to flush the required buffer. In some cases + * (for example, btree split, heap_multi_insert), BufferAlloc will be called + * with holding exclusive lock on another buffer. So if we try to acquire + * the shared lock directly here (page_writer), it will block unconditionally + * and the backends will be blocked on the page_writer to flush the buffer, + * resulting in deadlock. 
+ */ + int retry_times = 0; + int i = 0; + Buffer queue_head_buffer = get_dirty_page_queue_head_buffer(); + if (!BufferIsInvalid(queue_head_buffer) && (queue_head_buffer - 1 == buf_id)) { + retry_times = CONDITION_LOCK_RETRY_TIMES; + } + if (ENABLE_DMS) { + /* to speed the rate of flushing dirty page to disk */ + retry_times = CONDITION_LOCK_RETRY_TIMES; + } + for (;;) { + if (!LWLockConditionalAcquire(buf_desc->content_lock, LW_SHARED)) { + i++; + if (i >= retry_times) { + UnpinBuffer(buf_desc, true); + return false; + } + (void)sched_yield(); + continue; } - (void)sched_yield(); - continue; + break; } - break; } + #ifdef ENABLE_MOT + else { + (void)LWLockAcquire(buf_desc->content_lock, LW_SHARED); + } + #endif if (ENABLE_DMS && buf_desc->extra->aio_in_progress) { LWLockRelease(buf_desc->content_lock); -- Gitee