ztest: Fix false positive of ENOSPC handling

Before running a pass, zs_enospc_count is checked and, if it is
non-zero, some space is freed by destroying a random dataset. But the
freed space may still not be re-usable during the TXG_DEFER window,
which breaks the next dataset creation in ztest_generic_run().
    
Sponsored-by: Klara, Inc.
Sponsored-by: Wasabi Technology, Inc.
Reviewed-by: Alexander Motin <mav@FreeBSD.org>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Igor Ostapenko <igor.ostapenko@klarasystems.com>
Closes #17506
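
For context on the failure mode: ZFS does not hand freed blocks straight
back to the allocator. Blocks released by a destroy sit in a per-metaslab
defer window for a couple of TXGs before they become allocatable again. A
toy model of that timing (illustrative only, not the actual metaslab code;
every name below is made up, and the real code holds frees for
TXG_DEFER_SIZE txgs rather than releasing them all on one sync):

#include <stdint.h>

/* Toy model of the TXG_DEFER window -- not ZFS code, just the timing. */
typedef struct {
	uint64_t allocatable;	/* space a new dataset can use right now */
	uint64_t deferred;	/* freed space still held in the defer window */
} toy_space_t;

/* Destroying a dataset frees space, but only into the deferred bucket. */
static void
toy_destroy(toy_space_t *s, uint64_t freed)
{
	s->deferred += freed;
}

/* Deferred space becomes allocatable only after a later TXG syncs. */
static void
toy_txg_sync(toy_space_t *s)
{
	s->allocatable += s->deferred;
	s->deferred = 0;
}

Without an explicit txg_wait_synced() between the destroy and the next
create, ztest could ask for space that is still sitting in the deferred
bucket and report a spurious ENOSPC.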
@@ -7812,6 +7812,9 @@ ztest_dataset_open(int d)
 
 	ztest_dataset_name(name, ztest_opts.zo_pool, d);
 
+	if (ztest_opts.zo_verbose >= 6)
+		(void) printf("Opening %s\n", name);
+
 	(void) pthread_rwlock_rdlock(&ztest_name_lock);
 
 	error = ztest_dataset_create(name);
@@ -8307,41 +8310,44 @@ static void
 ztest_generic_run(ztest_shared_t *zs, spa_t *spa)
 {
 	kthread_t **run_threads;
-	int t;
+	int i, ndatasets;
 
 	run_threads = umem_zalloc(ztest_opts.zo_threads * sizeof (kthread_t *),
 	    UMEM_NOFAIL);
 
+	/*
+	 * Actual number of datasets to be used.
+	 */
+	ndatasets = MIN(ztest_opts.zo_datasets, ztest_opts.zo_threads);
+
+	/*
+	 * Prepare the datasets first.
+	 */
+	for (i = 0; i < ndatasets; i++)
+		VERIFY0(ztest_dataset_open(i));
+
 	/*
 	 * Kick off all the tests that run in parallel.
 	 */
-	for (t = 0; t < ztest_opts.zo_threads; t++) {
-		if (t < ztest_opts.zo_datasets && ztest_dataset_open(t) != 0) {
-			umem_free(run_threads, ztest_opts.zo_threads *
-			    sizeof (kthread_t *));
-			return;
-		}
-
-		run_threads[t] = thread_create(NULL, 0, ztest_thread,
-		    (void *)(uintptr_t)t, 0, NULL, TS_RUN | TS_JOINABLE,
+	for (i = 0; i < ztest_opts.zo_threads; i++) {
+		run_threads[i] = thread_create(NULL, 0, ztest_thread,
+		    (void *)(uintptr_t)i, 0, NULL, TS_RUN | TS_JOINABLE,
 		    defclsyspri);
 	}
 
 	/*
 	 * Wait for all of the tests to complete.
 	 */
-	for (t = 0; t < ztest_opts.zo_threads; t++)
-		VERIFY0(thread_join(run_threads[t]));
+	for (i = 0; i < ztest_opts.zo_threads; i++)
+		VERIFY0(thread_join(run_threads[i]));
 
 	/*
 	 * Close all datasets. This must be done after all the threads
 	 * are joined so we can be sure none of the datasets are in-use
 	 * by any of the threads.
 	 */
-	for (t = 0; t < ztest_opts.zo_threads; t++) {
-		if (t < ztest_opts.zo_datasets)
-			ztest_dataset_close(t);
-	}
+	for (i = 0; i < ndatasets; i++)
+		ztest_dataset_close(i);
 
 	txg_wait_synced(spa_get_dsl(spa), 0);
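
Aside from the rename of t to i, this hunk also changes the failure mode:
a failed ztest_dataset_open() used to make ztest_generic_run() free
run_threads and return silently, skipping the whole pass, whereas now all
ndatasets are opened before any worker thread is created and a failure
trips VERIFY0(). That is the dataset-creation path the commit message says
was being broken by space still stuck in the TXG_DEFER window.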
@@ -8464,6 +8470,7 @@ ztest_run(ztest_shared_t *zs)
 		int d = ztest_random(ztest_opts.zo_datasets);
 
 		ztest_dataset_destroy(d);
+		txg_wait_synced(spa_get_dsl(spa), 0);
 	}
 
 	zs->zs_enospc_count = 0;
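
This last hunk is the core of the fix. Condensed, the recovery path in
ztest_run() now reads roughly as follows (the enclosing if on
zs_enospc_count is implied by the context lines above, so its exact form
here is an assumption):

	if (zs->zs_enospc_count != 0) {
		int d = ztest_random(ztest_opts.zo_datasets);

		ztest_dataset_destroy(d);
		/*
		 * Wait for the destroy to sync out so that space freed
		 * into the TXG_DEFER window is allocatable again before
		 * the next pass opens and creates its datasets.
		 */
		txg_wait_synced(spa_get_dsl(spa), 0);
	}

	zs->zs_enospc_count = 0;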