diff --git a/cmd/zpool/zpool_main.c b/cmd/zpool/zpool_main.c index b0c060aa5..23cc590cc 100644 --- a/cmd/zpool/zpool_main.c +++ b/cmd/zpool/zpool_main.c @@ -34,7 +34,7 @@ * Copyright (c) 2019, loli10K * Copyright (c) 2021, Colm Buckley * Copyright (c) 2021, 2023, Klara Inc. - * Copyright [2021] Hewlett Packard Enterprise Development LP + * Copyright (c) 2021, 2025 Hewlett Packard Enterprise Development LP. */ #include @@ -510,16 +510,16 @@ get_usage(zpool_help_t idx) case HELP_REOPEN: return (gettext("\treopen [-n] \n")); case HELP_INITIALIZE: - return (gettext("\tinitialize [-c | -s | -u] [-w] " - "[ ...]\n")); + return (gettext("\tinitialize [-c | -s | -u] [-w] <-a | " + "[ ...]>\n")); case HELP_SCRUB: - return (gettext("\tscrub [-e | -s | -p | -C] [-w] " - " ...\n")); + return (gettext("\tscrub [-e | -s | -p | -C] [-w] <-a | " + " [ ...]>\n")); case HELP_RESILVER: return (gettext("\tresilver ...\n")); case HELP_TRIM: - return (gettext("\ttrim [-dw] [-r ] [-c | -s] " - "[ ...]\n")); + return (gettext("\ttrim [-dw] [-r ] [-c | -s] " + "<-a | [ ...]>\n")); case HELP_STATUS: return (gettext("\tstatus [-DdegiLPpstvx] " "[-c script1[,script2,...]] ...\n" @@ -560,33 +560,6 @@ get_usage(zpool_help_t idx) } } -static void -zpool_collect_leaves(zpool_handle_t *zhp, nvlist_t *nvroot, nvlist_t *res) -{ - uint_t children = 0; - nvlist_t **child; - uint_t i; - - (void) nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, - &child, &children); - - if (children == 0) { - char *path = zpool_vdev_name(g_zfs, zhp, nvroot, - VDEV_NAME_PATH); - - if (strcmp(path, VDEV_TYPE_INDIRECT) != 0 && - strcmp(path, VDEV_TYPE_HOLE) != 0) - fnvlist_add_boolean(res, path); - - free(path); - return; - } - - for (i = 0; i < children; i++) { - zpool_collect_leaves(zhp, child[i], res); - } -} - /* * Callback routine that will print out a pool property value. 
*/ @@ -794,22 +767,26 @@ zpool_do_initialize(int argc, char **argv) int c; char *poolname; zpool_handle_t *zhp; - nvlist_t *vdevs; int err = 0; boolean_t wait = B_FALSE; + boolean_t initialize_all = B_FALSE; struct option long_options[] = { {"cancel", no_argument, NULL, 'c'}, {"suspend", no_argument, NULL, 's'}, {"uninit", no_argument, NULL, 'u'}, {"wait", no_argument, NULL, 'w'}, + {"all", no_argument, NULL, 'a'}, {0, 0, 0, 0} }; pool_initialize_func_t cmd_type = POOL_INITIALIZE_START; - while ((c = getopt_long(argc, argv, "csuw", long_options, + while ((c = getopt_long(argc, argv, "acsuw", long_options, NULL)) != -1) { switch (c) { + case 'a': + initialize_all = B_TRUE; + break; case 'c': if (cmd_type != POOL_INITIALIZE_START && cmd_type != POOL_INITIALIZE_CANCEL) { @@ -856,7 +833,18 @@ zpool_do_initialize(int argc, char **argv) argc -= optind; argv += optind; - if (argc < 1) { + initialize_cbdata_t cbdata = { + .wait = wait, + .cmd_type = cmd_type + }; + + if (initialize_all && argc > 0) { + (void) fprintf(stderr, gettext("-a cannot be combined with " + "individual pools or vdevs\n")); + usage(B_FALSE); + } + + if (argc < 1 && !initialize_all) { (void) fprintf(stderr, gettext("missing pool name argument\n")); usage(B_FALSE); return (-1); @@ -868,30 +856,35 @@ zpool_do_initialize(int argc, char **argv) usage(B_FALSE); } - poolname = argv[0]; - zhp = zpool_open(g_zfs, poolname); - if (zhp == NULL) - return (-1); - - vdevs = fnvlist_alloc(); - if (argc == 1) { - /* no individual leaf vdevs specified, so add them all */ - nvlist_t *config = zpool_get_config(zhp, NULL); - nvlist_t *nvroot = fnvlist_lookup_nvlist(config, - ZPOOL_CONFIG_VDEV_TREE); - zpool_collect_leaves(zhp, nvroot, vdevs); + if (argc == 0 && initialize_all) { + /* Initialize each pool recursively */ + err = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, + B_FALSE, zpool_initialize_one, &cbdata); + return (err); + } else if (argc == 1) { + /* no individual leaf vdevs specified, initialize the pool */ + poolname = argv[0]; + zhp = zpool_open(g_zfs, poolname); + if (zhp == NULL) + return (-1); + err = zpool_initialize_one(zhp, &cbdata); } else { + /* individual leaf vdevs specified, initialize them */ + poolname = argv[0]; + zhp = zpool_open(g_zfs, poolname); + if (zhp == NULL) + return (-1); + nvlist_t *vdevs = fnvlist_alloc(); for (int i = 1; i < argc; i++) { fnvlist_add_boolean(vdevs, argv[i]); } + if (wait) + err = zpool_initialize_wait(zhp, cmd_type, vdevs); + else + err = zpool_initialize(zhp, cmd_type, vdevs); + fnvlist_free(vdevs); } - if (wait) - err = zpool_initialize_wait(zhp, cmd_type, vdevs); - else - err = zpool_initialize(zhp, cmd_type, vdevs); - - fnvlist_free(vdevs); zpool_close(zhp); return (err); @@ -8452,10 +8445,14 @@ zpool_do_scrub(int argc, char **argv) boolean_t is_pause = B_FALSE; boolean_t is_stop = B_FALSE; boolean_t is_txg_continue = B_FALSE; + boolean_t scrub_all = B_FALSE; /* check options */ - while ((c = getopt(argc, argv, "spweC")) != -1) { + while ((c = getopt(argc, argv, "aspweC")) != -1) { switch (c) { + case 'a': + scrub_all = B_TRUE; + break; case 'e': is_error_scrub = B_TRUE; break; @@ -8519,7 +8516,7 @@ zpool_do_scrub(int argc, char **argv) argc -= optind; argv += optind; - if (argc < 1) { + if (argc < 1 && !scrub_all) { (void) fprintf(stderr, gettext("missing pool name argument\n")); usage(B_FALSE); } @@ -8591,6 +8588,7 @@ zpool_do_trim(int argc, char **argv) {"rate", required_argument, NULL, 'r'}, {"suspend", no_argument, NULL, 's'}, {"wait", no_argument, NULL, 'w'}, + {"all",
no_argument, NULL, 'a'}, {0, 0, 0, 0} }; @@ -8598,11 +8596,16 @@ zpool_do_trim(int argc, char **argv) uint64_t rate = 0; boolean_t secure = B_FALSE; boolean_t wait = B_FALSE; + boolean_t trimall = B_FALSE; + int error; int c; - while ((c = getopt_long(argc, argv, "cdr:sw", long_options, NULL)) + while ((c = getopt_long(argc, argv, "acdr:sw", long_options, NULL)) != -1) { switch (c) { + case 'a': + trimall = B_TRUE; + break; case 'c': if (cmd_type != POOL_TRIM_START && cmd_type != POOL_TRIM_CANCEL) { @@ -8661,7 +8664,18 @@ zpool_do_trim(int argc, char **argv) argc -= optind; argv += optind; - if (argc < 1) { + trimflags_t trim_flags = { + .secure = secure, + .rate = rate, + .wait = wait, + }; + + trim_cbdata_t cbdata = { + .trim_flags = trim_flags, + .cmd_type = cmd_type + }; + + if (argc < 1 && !trimall) { (void) fprintf(stderr, gettext("missing pool name argument\n")); usage(B_FALSE); return (-1); @@ -8669,41 +8683,46 @@ zpool_do_trim(int argc, char **argv) if (wait && (cmd_type != POOL_TRIM_START)) { (void) fprintf(stderr, gettext("-w cannot be used with -c or " - "-s\n")); + "-s options\n")); usage(B_FALSE); } - char *poolname = argv[0]; - zpool_handle_t *zhp = zpool_open(g_zfs, poolname); - if (zhp == NULL) - return (-1); + if (trimall && argc > 0) { + (void) fprintf(stderr, gettext("-a cannot be combined with " + "individual zpools or vdevs\n")); + usage(B_FALSE); + } - trimflags_t trim_flags = { - .secure = secure, - .rate = rate, - .wait = wait, - }; - - nvlist_t *vdevs = fnvlist_alloc(); - if (argc == 1) { + if (argc == 0 && trimall) { + cbdata.trim_flags.fullpool = B_TRUE; + /* Trim each pool recursively */ + error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, + B_FALSE, zpool_trim_one, &cbdata); + } else if (argc == 1) { + char *poolname = argv[0]; + zpool_handle_t *zhp = zpool_open(g_zfs, poolname); + if (zhp == NULL) + return (-1); /* no individual leaf vdevs specified, so add them all */ - nvlist_t *config = zpool_get_config(zhp, NULL); - nvlist_t *nvroot = fnvlist_lookup_nvlist(config, - ZPOOL_CONFIG_VDEV_TREE); - zpool_collect_leaves(zhp, nvroot, vdevs); - trim_flags.fullpool = B_TRUE; + error = zpool_trim_one(zhp, &cbdata); + zpool_close(zhp); } else { - trim_flags.fullpool = B_FALSE; + char *poolname = argv[0]; + zpool_handle_t *zhp = zpool_open(g_zfs, poolname); + if (zhp == NULL) + return (-1); + /* leaf vdevs specified, trim only those */ + cbdata.trim_flags.fullpool = B_FALSE; + nvlist_t *vdevs = fnvlist_alloc(); for (int i = 1; i < argc; i++) { fnvlist_add_boolean(vdevs, argv[i]); } + error = zpool_trim(zhp, cbdata.cmd_type, vdevs, + &cbdata.trim_flags); + fnvlist_free(vdevs); + zpool_close(zhp); } - int error = zpool_trim(zhp, cmd_type, vdevs, &trim_flags); - - fnvlist_free(vdevs); - zpool_close(zhp); - return (error); } diff --git a/include/libzfs.h b/include/libzfs.h index 485af7938..187d7b449 100644 --- a/include/libzfs.h +++ b/include/libzfs.h @@ -30,6 +30,7 @@ * Copyright (c) 2017 Open-E, Inc. All Rights Reserved. * Copyright (c) 2019 Datto Inc. * Copyright (c) 2021, Colm Buckley + * Copyright (c) 2025 Hewlett Packard Enterprise Development LP. 
*/ #ifndef _LIBZFS_H @@ -288,10 +289,20 @@ typedef struct trimflags { uint64_t rate; } trimflags_t; +typedef struct trim_cbdata { + trimflags_t trim_flags; + pool_trim_func_t cmd_type; +} trim_cbdata_t; + +typedef struct initialize_cbdata { + boolean_t wait; + pool_initialize_func_t cmd_type; +} initialize_cbdata_t; /* * Functions to manipulate pool and vdev state */ _LIBZFS_H int zpool_scan(zpool_handle_t *, pool_scan_func_t, pool_scrub_cmd_t); +_LIBZFS_H int zpool_initialize_one(zpool_handle_t *, void *); _LIBZFS_H int zpool_initialize(zpool_handle_t *, pool_initialize_func_t, nvlist_t *); _LIBZFS_H int zpool_initialize_wait(zpool_handle_t *, pool_initialize_func_t, @@ -304,7 +315,9 @@ _LIBZFS_H int zpool_reguid(zpool_handle_t *); _LIBZFS_H int zpool_set_guid(zpool_handle_t *, const uint64_t *); _LIBZFS_H int zpool_reopen_one(zpool_handle_t *, void *); +_LIBZFS_H void zpool_collect_leaves(zpool_handle_t *, nvlist_t *, nvlist_t *); _LIBZFS_H int zpool_sync_one(zpool_handle_t *, void *); +_LIBZFS_H int zpool_trim_one(zpool_handle_t *, void *); _LIBZFS_H int zpool_ddt_prune(zpool_handle_t *, zpool_ddt_prune_unit_t, uint64_t); diff --git a/lib/libuutil/libuutil.abi b/lib/libuutil/libuutil.abi index 744b53127..6c736c61e 100644 --- a/lib/libuutil/libuutil.abi +++ b/lib/libuutil/libuutil.abi @@ -1475,6 +1475,11 @@ + + + + + @@ -1486,11 +1491,6 @@ - - - - - diff --git a/lib/libzfs/libzfs.abi b/lib/libzfs/libzfs.abi index 06e74387f..0c3e8106c 100644 --- a/lib/libzfs/libzfs.abi +++ b/lib/libzfs/libzfs.abi @@ -483,6 +483,7 @@ + @@ -532,6 +533,7 @@ + @@ -581,6 +583,7 @@ + @@ -1655,6 +1658,11 @@ + + + + + @@ -1666,11 +1674,6 @@ - - - - - @@ -6901,6 +6904,11 @@ + + + + + @@ -6913,6 +6921,17 @@ + + + + + + + + + + + diff --git a/lib/libzfs/libzfs_pool.c b/lib/libzfs/libzfs_pool.c index c19e51f0f..b6fb153c4 100644 --- a/lib/libzfs/libzfs_pool.c +++ b/lib/libzfs/libzfs_pool.c @@ -31,6 +31,7 @@ * Copyright (c) 2018, loli10K * Copyright (c) 2021, Colm Buckley * Copyright (c) 2021, 2023, Klara Inc. + * Copyright (c) 2025 Hewlett Packard Enterprise Development LP. */ #include @@ -2436,6 +2437,30 @@ xlate_init_err(int err) return (err); } +int +zpool_initialize_one(zpool_handle_t *zhp, void *data) +{ + int error; + libzfs_handle_t *hdl = zpool_get_handle(zhp); + const char *pool_name = zpool_get_name(zhp); + if (zpool_open_silent(hdl, pool_name, &zhp) != 0) + return (-1); + initialize_cbdata_t *cb = data; + nvlist_t *vdevs = fnvlist_alloc(); + + nvlist_t *config = zpool_get_config(zhp, NULL); + nvlist_t *nvroot = fnvlist_lookup_nvlist(config, + ZPOOL_CONFIG_VDEV_TREE); + zpool_collect_leaves(zhp, nvroot, vdevs); + if (cb->wait) + error = zpool_initialize_wait(zhp, cb->cmd_type, vdevs); + else + error = zpool_initialize(zhp, cb->cmd_type, vdevs); + fnvlist_free(vdevs); + + return (error); +} + /* * Begin, suspend, cancel, or uninit (clear) the initialization (initializing * of all free blocks) for the given vdevs in the given pool. 
@@ -2556,6 +2581,58 @@ xlate_trim_err(int err) return (err); } +void +zpool_collect_leaves(zpool_handle_t *zhp, nvlist_t *nvroot, nvlist_t *res) +{ + libzfs_handle_t *hdl = zhp->zpool_hdl; + uint_t children = 0; + nvlist_t **child; + uint_t i; + + (void) nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, + &child, &children); + + if (children == 0) { + char *path = zpool_vdev_name(hdl, zhp, nvroot, + VDEV_NAME_PATH); + + if (strcmp(path, VDEV_TYPE_INDIRECT) != 0 && + strcmp(path, VDEV_TYPE_HOLE) != 0) + fnvlist_add_boolean(res, path); + + free(path); + return; + } + + for (i = 0; i < children; i++) { + zpool_collect_leaves(zhp, child[i], res); + } +} + +int +zpool_trim_one(zpool_handle_t *zhp, void *data) +{ + int error; + libzfs_handle_t *hdl = zpool_get_handle(zhp); + const char *pool_name = zpool_get_name(zhp); + if (zpool_open_silent(hdl, pool_name, &zhp) != 0) + return (-1); + + trim_cbdata_t *cb = data; + nvlist_t *vdevs = fnvlist_alloc(); + + /* no individual leaf vdevs specified, so add them all */ + nvlist_t *config = zpool_get_config(zhp, NULL); + nvlist_t *nvroot = fnvlist_lookup_nvlist(config, + ZPOOL_CONFIG_VDEV_TREE); + + zpool_collect_leaves(zhp, nvroot, vdevs); + error = zpool_trim(zhp, cb->cmd_type, vdevs, &cb->trim_flags); + fnvlist_free(vdevs); + + return (error); +} + static int zpool_trim_wait(zpool_handle_t *zhp, nvlist_t *vdev_guids) { diff --git a/lib/libzfs_core/libzfs_core.abi index 2af208948..7464b3adb 100644 --- a/lib/libzfs_core/libzfs_core.abi +++ b/lib/libzfs_core/libzfs_core.abi @@ -1426,6 +1426,11 @@ + + + + + @@ -1437,11 +1442,6 @@ - - - - - diff --git a/man/man8/zpool-initialize.8 b/man/man8/zpool-initialize.8 index d7c9d22ab..39579a580 100644 --- a/man/man8/zpool-initialize.8 +++ b/man/man8/zpool-initialize.8 @@ -26,6 +26,7 @@ .\" Copyright (c) 2018 George Melikov. All Rights Reserved. .\" Copyright 2017 Nexenta Systems, Inc. .\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved. +.\" Copyright (c) 2025 Hewlett Packard Enterprise Development LP. .\" .Dd May 27, 2021 .Dt ZPOOL-INITIALIZE 8 @@ -39,7 +40,7 @@ .Cm initialize .Op Fl c Ns | Ns Fl s | Ns Fl u .Op Fl w -.Ar pool +.Fl a Ns | Ns Ar pool .Oo Ar device Oc Ns … . .Sh DESCRIPTION @@ -48,6 +49,10 @@ devices, or all eligible devices in the pool if no individual devices are specified. Only leaf data or log devices may be initialized. .Bl -tag -width Ds +.It Fl a , -all +Begin, cancel, or suspend initializing on +all +pools. .It Fl c , -cancel Cancel initializing on the specified devices, or all eligible devices if none are specified. diff --git a/man/man8/zpool-scrub.8 b/man/man8/zpool-scrub.8 index 21bd6735e..9b4cf132c 100644 --- a/man/man8/zpool-scrub.8 +++ b/man/man8/zpool-scrub.8 @@ -26,6 +26,7 @@ .\" Copyright (c) 2018, 2021 George Melikov. All Rights Reserved. .\" Copyright 2017 Nexenta Systems, Inc. .\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved. +.\" Copyright (c) 2025 Hewlett Packard Enterprise Development LP. .\" .Dd November 18, 2024 .Dt ZPOOL-SCRUB 8 @@ -39,7 +40,7 @@ .Cm scrub .Op Ns Fl e | Ns Fl p | Fl s Ns | Fl C Ns .Op Fl w -.Ar pool Ns … +.Fl a Ns | Ns Ar pool Ns … . .Sh DESCRIPTION Begins a scrub or resumes a paused scrub. @@ -89,6 +90,12 @@ During this period, no completion time estimate will be provided. . .Sh OPTIONS .Bl -tag -width "-s" +.It Fl a , -all +Begin, pause, or stop a scrub on +all +pools.
+Initiating scrubs on multiple pools can put considerable load and memory +pressure on the system, so this operation should be performed with caution. .It Fl s Stop scrubbing. .It Fl p diff --git a/man/man8/zpool-trim.8 b/man/man8/zpool-trim.8 index 06cbd5abf..18723e1be 100644 --- a/man/man8/zpool-trim.8 +++ b/man/man8/zpool-trim.8 @@ -26,6 +26,7 @@ .\" Copyright (c) 2018 George Melikov. All Rights Reserved. .\" Copyright 2017 Nexenta Systems, Inc. .\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved. +.\" Copyright (c) 2025 Hewlett Packard Enterprise Development LP. .\" .Dd May 27, 2021 .Dt ZPOOL-TRIM 8 @@ -40,7 +41,7 @@ .Op Fl dw .Op Fl r Ar rate .Op Fl c Ns | Ns Fl s -.Ar pool +.Fl a Ns | Ns Ar pool .Oo Ar device Ns Oc Ns … . .Sh DESCRIPTION @@ -57,6 +58,10 @@ See the documentation for the .Sy autotrim property above for the types of vdev devices which can be trimmed. .Bl -tag -width Ds +.It Fl a , -all +Perform TRIM operation on +all +pools. .It Fl d , -secure Causes a secure TRIM to be initiated. When performing a secure TRIM, the diff --git a/tests/runfiles/common.run b/tests/runfiles/common.run index 3eda5d4d9..7cc7a3cf9 100644 --- a/tests/runfiles/common.run +++ b/tests/runfiles/common.run @@ -497,6 +497,7 @@ tags = ['functional', 'cli_root', 'zpool_labelclear'] tests = ['zpool_initialize_attach_detach_add_remove', 'zpool_initialize_fault_export_import_online', 'zpool_initialize_import_export', + 'zpool_initialize_multiple_pools', 'zpool_initialize_offline_export_import_online', 'zpool_initialize_online_offline', 'zpool_initialize_split', @@ -542,6 +543,7 @@ tests = ['zpool_scrub_001_neg', 'zpool_scrub_002_pos', 'zpool_scrub_003_pos', 'zpool_scrub_004_pos', 'zpool_scrub_005_pos', 'zpool_scrub_encrypted_unloaded', 'zpool_scrub_print_repairing', 'zpool_scrub_offline_device', 'zpool_scrub_multiple_copies', + 'zpool_scrub_multiple_pools', 'zpool_error_scrub_001_pos', 'zpool_error_scrub_002_pos', 'zpool_error_scrub_003_pos', 'zpool_error_scrub_004_pos'] tags = ['functional', 'cli_root', 'zpool_scrub'] @@ -574,8 +576,8 @@ tags = ['functional', 'cli_root', 'zpool_sync'] [tests/functional/cli_root/zpool_trim] tests = ['zpool_trim_attach_detach_add_remove', - 'zpool_trim_fault_export_import_online', - 'zpool_trim_import_export', 'zpool_trim_multiple', 'zpool_trim_neg', + 'zpool_trim_fault_export_import_online', 'zpool_trim_import_export', + 'zpool_trim_multiple', 'zpool_trim_multiple_pools', 'zpool_trim_neg', 'zpool_trim_offline_export_import_online', 'zpool_trim_online_offline', 'zpool_trim_partial', 'zpool_trim_rate', 'zpool_trim_rate_neg', 'zpool_trim_secure', 'zpool_trim_split', 'zpool_trim_start_and_cancel_neg', diff --git a/tests/zfs-tests/tests/Makefile.am b/tests/zfs-tests/tests/Makefile.am index 194ae4169..388a41607 100644 --- a/tests/zfs-tests/tests/Makefile.am +++ b/tests/zfs-tests/tests/Makefile.am @@ -1176,6 +1176,7 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \ functional/cli_root/zpool_initialize/zpool_initialize_attach_detach_add_remove.ksh \ functional/cli_root/zpool_initialize/zpool_initialize_fault_export_import_online.ksh \ functional/cli_root/zpool_initialize/zpool_initialize_import_export.ksh \ + functional/cli_root/zpool_initialize/zpool_initialize_multiple_pools.ksh \ functional/cli_root/zpool_initialize/zpool_initialize_offline_export_import_online.ksh \ functional/cli_root/zpool_initialize/zpool_initialize_online_offline.ksh \ functional/cli_root/zpool_initialize/zpool_initialize_split.ksh \ @@ -1239,6 +1240,7 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS 
+= \ functional/cli_root/zpool_scrub/zpool_scrub_005_pos.ksh \ functional/cli_root/zpool_scrub/zpool_scrub_encrypted_unloaded.ksh \ functional/cli_root/zpool_scrub/zpool_scrub_multiple_copies.ksh \ + functional/cli_root/zpool_scrub/zpool_scrub_multiple_pools.ksh \ functional/cli_root/zpool_scrub/zpool_scrub_offline_device.ksh \ functional/cli_root/zpool_scrub/zpool_scrub_print_repairing.ksh \ functional/cli_root/zpool_scrub/zpool_scrub_txg_continue_from_last.ksh \ @@ -1291,6 +1293,7 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \ functional/cli_root/zpool_trim/zpool_trim_fault_export_import_online.ksh \ functional/cli_root/zpool_trim/zpool_trim_import_export.ksh \ functional/cli_root/zpool_trim/zpool_trim_multiple.ksh \ + functional/cli_root/zpool_trim/zpool_trim_multiple_pools.ksh \ functional/cli_root/zpool_trim/zpool_trim_neg.ksh \ functional/cli_root/zpool_trim/zpool_trim_offline_export_import_online.ksh \ functional/cli_root/zpool_trim/zpool_trim_online_offline.ksh \ diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_initialize/zpool_initialize_multiple_pools.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_initialize/zpool_initialize_multiple_pools.ksh new file mode 100755 index 000000000..cc7bca544 --- /dev/null +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_initialize/zpool_initialize_multiple_pools.ksh @@ -0,0 +1,131 @@ +#!/bin/ksh -p +# SPDX-License-Identifier: CDDL-1.0 +# +# CDDL HEADER START +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# +# CDDL HEADER END +# + +# +# Copyright (c) 2025 Hewlett Packard Enterprise Development LP. +# + +. $STF_SUITE/include/libtest.shlib +. $STF_SUITE/tests/functional/cli_root/zpool_initialize/zpool_initialize.kshlib + +# +# DESCRIPTION: +# Verify 'zpool initialize -a' works correctly with multiple pools +# +# STRATEGY: +# 1. Create multiple pools. +# 2. Start an initialize operation on all pools using 'zpool initialize -a'. +# 3. Verify that initializing is active on all pools. +# 4. Wait for the initialize operation to complete. +# 5. Verify that the initialize operation is complete on all pools. +# 6. Start initializing on all pools using 'zpool initialize -w -a'. +# 7. Verify that the initialize operation is complete on all pools. +# 8. Now test the -u, -c and -s options on multiple pools with -a. +# 9. Verify that the initialize status is correctly updated on all pools. +# + +verify_runnable "global" + +cleanup() { + for pool in {1..4}; do + zpool destroy $TESTPOOL${pool} + rm -rf $TESTDIR${pool} + done + rm -f $DISK1 $DISK2 $DISK3 $DISK4 +} + +log_onexit cleanup + +log_assert "Verify if 'zpool initialize -a' works correctly with multiple pools."
+ +DEVSIZE='5G' +TESTDIR="$TEST_BASE_DIR/zpool_initialize_multiple_pools" +DISK1="$TEST_BASE_DIR/zpool_disk1.dat" +DISK2="$TEST_BASE_DIR/zpool_disk2.dat" +DISK3="$TEST_BASE_DIR/zpool_disk3.dat" +DISK4="$TEST_BASE_DIR/zpool_disk4.dat" + +truncate -s $DEVSIZE $DISK1 +truncate -s $DEVSIZE $DISK2 +truncate -s $DEVSIZE $DISK3 +truncate -s $DEVSIZE $DISK4 + +for pool in {1..4}; do + DISK[$pool]="$TEST_BASE_DIR/zpool_disk${pool}.dat" + truncate -s $DEVSIZE ${DISK[$pool]} + log_must zpool create $TESTPOOL${pool} ${DISK[$pool]} +done +sync_all_pools + +# Start an initialize operation on all pools using 'zpool initialize -a'. +log_must zpool initialize -a + +# Verify that the initializing is active on all pools. +for pool in {1..4}; do + if [[ -z "$(initialize_progress $TESTPOOL${pool} ${DISK[$pool]})" ]]; then + log_fail "Initializing did not start on pool $TESTPOOL${pool}" + fi +done + +# Wait for the initialize operation to complete on all pools. +for pool in {1..4}; do + log_must zpool wait -t initialize $TESTPOOL${pool} +done + +# Verify that the initialize operation is complete on all pools. +complete_count=$(zpool status -i | grep -c "completed") +if [[ $complete_count -ne 4 ]]; then + log_fail "Expected 4 pools to have initialize status 'completed', but found ${complete_count}." +fi + +# Start an initialize operation on all pools using 'zpool initialize -w -a'. +log_must zpool initialize -w -a + +# Verify that the initialize operation is complete on all pools. +complete_count=$(zpool status -i | grep -c "completed") +if [[ $complete_count -ne 4 ]]; then + log_fail "Expected 4 pools to have initialize status 'completed', but found ${complete_count}." +fi + +# Now test the -u, -c and -s options on multiple pools with -a. +log_must zpool initialize -u -a +complete_count=$(zpool status -i | grep -c "uninitialized") +if [[ $complete_count -ne 4 ]]; then + log_fail "Expected 4 pools to have initialize status 'uninitialized', but found ${complete_count}." +fi + +log_must zpool initialize -a + +for pool in {1..4}; do + if [[ -z "$(initialize_progress $TESTPOOL${pool} ${DISK[$pool]})" ]]; then + log_fail "Initializing did not start on pool $TESTPOOL${pool}" + fi +done + +log_must zpool initialize -a -s +complete_count=$(zpool status -i | grep -c "suspended") +if [[ $complete_count -ne 4 ]]; then + log_fail "Expected 4 pools to have initialize status 'suspended', but found ${complete_count}." +fi + +log_must zpool initialize -a -c +for pool in {1..4}; do + [[ -z "$(initialize_progress $TESTPOOL${pool} ${DISK[$pool]})" ]] || \ + log_fail "Initialize did not stop on pool $TESTPOOL${pool}" +done + +log_pass "Initialize '-a' works on multiple pools correctly." diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_001_neg.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_001_neg.ksh index 431568053..5ffba8033 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_001_neg.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_001_neg.ksh @@ -28,6 +28,7 @@ # # Copyright (c) 2016 by Delphix. All rights reserved. +# Copyright (c) 2025 Hewlett Packard Enterprise Development LP. # . $STF_SUITE/include/libtest.shlib @@ -46,7 +47,7 @@ verify_runnable "global" set -A args "" "-?" "blah blah" "-%" "--?" 
"-*" "-=" \ - "-a" "-b" "-c" "-d" "-e" "-f" "-g" "-h" "-i" "-j" "-k" "-l" \ + "-b" "-c" "-d" "-e" "-f" "-g" "-h" "-i" "-j" "-k" "-l" \ "-m" "-n" "-o" "-p" "-q" "-r" "-s" "-t" "-u" "-v" "-w" "-x" "-y" "-z" \ "-A" "-B" "-C" "-D" "-E" "-F" "-G" "-H" "-I" "-J" "-K" "-L" \ "-M" "-N" "-O" "-P" "-Q" "-R" "-S" "-T" "-U" "-V" "-W" "-X" "-W" "-Z" diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_multiple_pools.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_multiple_pools.ksh new file mode 100755 index 000000000..b8647e208 --- /dev/null +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_multiple_pools.ksh @@ -0,0 +1,128 @@ +#!/bin/ksh -p +# SPDX-License-Identifier: CDDL-1.0 +# +# CDDL HEADER START +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# +# CDDL HEADER END +# + +# +# Copyright (c) 2025 Hewlett Packard Enterprise Development LP. +# + +. $STF_SUITE/include/libtest.shlib +. $STF_SUITE/tests/functional/cli_root/zpool_scrub/zpool_scrub.cfg + +# +# DESCRIPTION: +# Verify 'zpool scrub -a' works correctly with multiple pools +# +# STRATEGY: +# 1. Create multiple pools. +# 2. Start a scrub on all pools using 'zpool scrub -a'. +# 3. Verify that the scrub is running on all pools. +# 4. Wait for the scrub to complete. +# 5. Verify that the scrub status is complete on all pools. +# 6. Start a scrub on all pools using 'zpool scrub -w -a'. +# 7. Verify that the scrub status is complete on all pools. +# 8. Now test the -p and -s options on multiple pools with -a. +# 9. Verify that the scrub status is correct for each option. +# + +verify_runnable "global" + +cleanup() { + log_must set_tunable32 SCAN_SUSPEND_PROGRESS 0 + for pool in {1..4}; do + zpool destroy $TESTPOOL${pool} + rm -rf $TESTDIR${pool} + done + rm -f $DISK1 $DISK2 $DISK3 $DISK4 + # Import the testpool + zpool import -a +} + +log_onexit cleanup + +log_assert "Verify if scrubbing multiple pools works correctly." + +# Export the testpool created by setup and Import them later. +log_must zpool export -a + +DEVSIZE='128m' +FILESIZE='50m' +TESTDIR="$TEST_BASE_DIR/zpool_scrub_multiple_pools" +DISK1="$TEST_BASE_DIR/zpool_disk1.dat" +DISK2="$TEST_BASE_DIR/zpool_disk2.dat" +DISK3="$TEST_BASE_DIR/zpool_disk3.dat" +DISK4="$TEST_BASE_DIR/zpool_disk4.dat" + +truncate -s $DEVSIZE $DISK1 +truncate -s $DEVSIZE $DISK2 +truncate -s $DEVSIZE $DISK3 +truncate -s $DEVSIZE $DISK4 + +for pool in {1..4}; do + DISK[$pool]="$TEST_BASE_DIR/zpool_disk${pool}.dat" + truncate -s $DEVSIZE ${DISK[$pool]} + log_must zpool create -O mountpoint=$TESTDIR${pool} $TESTPOOL${pool} ${DISK[$pool]} + log_must zfs create -o compression=off $TESTPOOL${pool}/testfs${pool} + typeset mntpnt=$(get_prop mountpoint $TESTPOOL${pool}/testfs${pool}) + # Fill some data into the filesystem. + log_must mkfile $FILESIZE $mntpnt/file${pool}.dat +done +sync_all_pools + +# Start a scrub on all pools using 'zpool scrub -a'. +log_must zpool scrub -a +# Wait for the scrub to complete on all pools. +for pool in {1..4}; do + log_must zpool wait -t scrub $TESTPOOL${pool} +done + +# Verify that the scrub status is complete on all pools. 
+complete_count=$(zpool status -v | grep -c "scrub repaired") +if [[ $complete_count -ne 4 ]]; then + log_fail "Expected 4 pools to have scrub status 'scrub repaired', but found $complete_count." +fi + +# Start a scrub on all pools using 'zpool scrub -w -a' and wait for completion +log_must zpool scrub -w -a + +# Verify that the scrub status is complete on all pools. +complete_count=$(zpool status -v | grep -c "scrub repaired") +if [[ $complete_count -ne 4 ]]; then + log_fail "Expected 4 pools to have scrub status 'scrub repaired', but found $complete_count." +fi + +# Now test the -p and -s options on multiple pools with -a. +log_must set_tunable32 SCAN_SUSPEND_PROGRESS 1 + +log_must zpool scrub -a +complete_count=$(zpool status -v | grep -c "scrub in progress since") +if [[ $complete_count -ne 4 ]]; then + log_fail "Expected 4 pools to have scrub status 'scrub in progress since', but found $complete_count." +fi + +log_must zpool scrub -a -p +complete_count=$(zpool status -v | grep -c "scrub paused since") +if [[ $complete_count -ne 4 ]]; then + log_fail "Expected 4 pools to have scrub status 'scrub paused since', but found $complete_count." +fi + +log_must zpool scrub -a -s +complete_count=$(zpool status -v | grep -c "scrub canceled") +if [[ $complete_count -ne 4 ]]; then + log_fail "Expected 4 pools to have scrub status 'scrub canceled', but found $complete_count." +fi + +log_pass "Scrubbing multiple pools works correctly." diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_trim/zpool_trim_multiple_pools.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_trim/zpool_trim_multiple_pools.ksh new file mode 100755 index 000000000..4348eecc6 --- /dev/null +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_trim/zpool_trim_multiple_pools.ksh @@ -0,0 +1,123 @@ +#!/bin/ksh -p +# SPDX-License-Identifier: CDDL-1.0 +# +# CDDL HEADER START +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# +# CDDL HEADER END +# + +# +# Copyright (c) 2025 Hewlett Packard Enterprise Development LP. +# + +. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zpool_trim/zpool_trim.kshlib + +# +# DESCRIPTION: +# Verify 'zpool trim -a' works correctly with multiple pools +# +# STRATEGY: +# 1. Create multiple pools. +# 2. Start a trim on all pools using 'zpool trim -a'. +# 3. Verify that the trim is started on all pools. +# 4. Wait for the trim to complete. +# 5. Verify that the trim is complete on all pools. +# 6. Start a trim on all pools using 'zpool trim -w -a'. +# 7. Verify that the trim is complete on all pools. +# 8. Now test the -c and -s options on multiple pools with -a. +# 9. Verify that the trim status is correct for each option. +# + +verify_runnable "global" + +cleanup() { + for pool in {1..4}; do + zpool destroy $TESTPOOL${pool} + rm -rf $TESTDIR${pool} + done + rm -f $DISK1 $DISK2 $DISK3 $DISK4 +} + +log_onexit cleanup + +log_assert "Verify if trim '-a' works on multiple pools correctly."
+ +DEVSIZE='5G' +TESTDIR="$TEST_BASE_DIR/zpool_trim_multiple_pools" +DISK1="$TEST_BASE_DIR/zpool_disk1.dat" +DISK2="$TEST_BASE_DIR/zpool_disk2.dat" +DISK3="$TEST_BASE_DIR/zpool_disk3.dat" +DISK4="$TEST_BASE_DIR/zpool_disk4.dat" + +truncate -s $DEVSIZE $DISK1 +truncate -s $DEVSIZE $DISK2 +truncate -s $DEVSIZE $DISK3 +truncate -s $DEVSIZE $DISK4 + +for pool in {1..4}; do + DISK[$pool]="$TEST_BASE_DIR/zpool_disk${pool}.dat" + truncate -s $DEVSIZE ${DISK[$pool]} + log_must zpool create $TESTPOOL${pool} ${DISK[$pool]} +done +sync_all_pools + +# Start a trim on all pools using 'zpool trim -a'. +log_must zpool trim -a + +# Verify that the trim is started on all pools. +for pool in {1..4}; do + [[ -z "$(trim_progress $TESTPOOL${pool} ${DISK[$pool]})" ]] && \ + log_fail "Trim did not start on pool $TESTPOOL${pool}" +done + +# Wait for the trim to complete on all pools. +for pool in {1..4}; do + log_must zpool wait -t trim $TESTPOOL${pool} +done + +# Verify that the trim status is complete on all pools. +complete_count=$(zpool status -t | grep -c "completed") +if [[ $complete_count -ne 4 ]]; then + log_fail "Expected 4 pools to have trim status 'completed', but found ${complete_count}." +fi + +# Start a trim on all pools using 'zpool trim -w -a' +log_must zpool trim -w -a + +# Verify that the trim status is complete on all pools. +complete_count=$(zpool status -t | grep -c "completed") +if [[ $complete_count -ne 4 ]]; then + log_fail "Expected 4 pools to have trim status 'completed', but found ${complete_count}." +fi + +# Now test the -s and -c options on multiple pools with -a. +log_must zpool trim -r 1 -a + +for pool in {1..4}; do + [[ -z "$(trim_progress $TESTPOOL${pool} ${DISK[$pool]})" ]] && \ + log_fail "Trim did not start" +done + +log_must zpool trim -a -s +complete_count=$(zpool status -t | grep -c "suspended") +if [[ $complete_count -ne 4 ]]; then + log_fail "Expected 4 pools to have trim status 'suspended', but found $complete_count." +fi + +log_must zpool trim -a -c +for pool in {1..4}; do + [[ -z "$(trim_progress $TESTPOOL${pool} ${DISK[$pool]})" ]] || \ + log_fail "TRIM did not stop on pool $TESTPOOL${pool}" +done + +log_pass "Trim '-a' works on multiple pools correctly."
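
For reference, a brief usage sketch of the flag this patch adds (the pool name tank is hypothetical; the rejected case reflects the argument checks added in zpool_main.c above):

    zpool initialize -a        # start initializing on every imported pool
    zpool initialize -w -a     # same, but wait for the operation to complete
    zpool initialize -a -s     # suspend initializing on all pools
    zpool scrub -a             # scrub all pools
    zpool trim -a -c           # cancel an in-progress TRIM on all pools
    zpool trim -a tank         # rejected: -a cannot be combined with pool or vdev arguments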