From c5a388a1ef071b308ab0985fe831f94639fd4d7c Mon Sep 17 00:00:00 2001
From: youzhongyang
Date: Fri, 21 Oct 2022 13:05:13 -0400
Subject: [PATCH] Add delay between zpool add zvol and zpool destroy

As investigated in #14026, the zpool_add_004_pos test can reliably
hang if the timing is not right. This is caused by a race condition
between zed performing a zpool reopen (triggered by the zvol being
added to the pool) and the zpool destroy command.

This change adds a delay between zpool add zvol and zpool destroy to
avoid the issue, but it does not address the underlying problem.

Reviewed-by: Brian Behlendorf
Signed-off-by: Youzhong Yang
Issue #14026
Closes #14052
---
 .../tests/functional/cli_root/zpool_add/zpool_add_004_pos.ksh | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_004_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_004_pos.ksh
index fbaed2af1..646edc1a4 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_004_pos.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_004_pos.ksh
@@ -74,4 +74,8 @@ log_must zpool add $TESTPOOL $ZVOL_DEVDIR/$TESTPOOL1/$TESTVOL
 
 log_must vdevs_in_pool "$TESTPOOL" "$ZVOL_DEVDIR/$TESTPOOL1/$TESTVOL"
 
+# Give zed a chance to finish processing the event, otherwise
+# a race condition can lead to a stuck "zpool destroy $TESTPOOL"
+sleep 1
+
 log_pass "'zpool add ...' adds zfs volume to the pool successfully"
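
Note on the mitigation: the fixed one-second sleep is timing-based, so
the test could in principle still hang on a slow or heavily loaded
system. A more deterministic wait might poll "zpool events" (its -H
scripted mode is a real flag) until no new events arrive before the
cleanup path runs "zpool destroy". The sketch below is hypothetical
and not part of this patch or the ZFS test suite; the helper name
wait_for_zed_quiesce, the 10-try limit, and the one-second poll
interval are illustrative assumptions.

# Hypothetical sketch, not part of this patch: consider the wait done
# once the zed event count stops growing between two consecutive polls.
function wait_for_zed_quiesce
{
	typeset -i prev=-1 cur=0 tries=0
	while (( tries < 10 )); do
		# -H prints events without headers, one per line
		cur=$(zpool events -H | wc -l)
		(( cur == prev )) && return 0
		prev=$cur
		(( tries += 1 ))
		sleep 1
	done
	return 1
}

log_must wait_for_zed_quiesce

This only observes that no new events are arriving, not that zed has
finished handling them, so it remains a heuristic, like the sleep
itself; the underlying race would still need a proper fix.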