dmu_tx_assign: make all VERIFY0 calls use DMU_TX_SUSPEND

These call sites VERIFY0 the result of dmu_tx_assign(), so they cannot
handle an assignment error. Passing DMU_TX_SUSPEND is the cheap way to
keep these non-user functions working after break-on-suspend becomes
the default.

Sponsored-by: Klara, Inc.
Sponsored-by: Wasabi Technology, Inc.
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Alexander Motin <mav@FreeBSD.org>
Reviewed-by: Paul Dagnelie <paul.dagnelie@klarasystems.com>
Signed-off-by: Rob Norris <rob.norris@klarasystems.com>
Closes #17355
commit 55d035e866 (parent 4653e2f7d3)
Rob Norris, 2024-07-09 13:59:57 +10:00; committed by Brian Behlendorf
11 changed files with 23 additions and 19 deletions
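
For context, here is the shape of the change every hunk below makes.
This is a sketch rather than code from the commit: "spa" and "size"
stand in for each caller's own state, and the comments summarize the
flag semantics as this series describes them.

	/*
	 * Sketch only. These call sites VERIFY0() the return value,
	 * and VERIFY0() panics on any nonzero result, so the
	 * assignment must not be able to fail.
	 */
	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	dmu_tx_hold_space(tx, size);

	/*
	 * DMU_TX_WAIT: block until the tx can be assigned to an open
	 * txg. DMU_TX_SUSPEND: keep blocking even while the pool is
	 * suspended, instead of returning an error once
	 * break-on-suspend is the default. Together they make the
	 * assignment effectively infallible, so VERIFY0() stays safe.
	 */
	VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT | DMU_TX_SUSPEND));

	uint64_t txg = dmu_tx_get_txg(tx);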


@@ -568,7 +568,7 @@ commit_rl_updates(objset_t *os, struct merge_data *md, uint64_t object,
{
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(os->os_spa)->dp_mos_dir);
dmu_tx_hold_space(tx, sizeof (struct redact_block_list_node));
-VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
+VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT | DMU_TX_SUSPEND));
uint64_t txg = dmu_tx_get_txg(tx);
if (!md->md_synctask_txg[txg & TXG_MASK]) {
dsl_sync_task_nowait(dmu_tx_pool(tx),


@@ -1437,7 +1437,7 @@ dsl_scan_restart_resilver(dsl_pool_t *dp, uint64_t txg)
if (txg == 0) {
dmu_tx_t *tx;
tx = dmu_tx_create_dd(dp->dp_mos_dir);
-VERIFY(0 == dmu_tx_assign(tx, DMU_TX_WAIT));
+VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT | DMU_TX_SUSPEND));
txg = dmu_tx_get_txg(tx);
dp->dp_scan->scn_restart_txg = txg;


@@ -58,7 +58,7 @@ dsl_sync_task_common(const char *pool, dsl_checkfunc_t *checkfunc,
top:
tx = dmu_tx_create_dd(dp->dp_mos_dir);
-VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
+VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT | DMU_TX_SUSPEND));
dst.dst_pool = dp;
dst.dst_txg = dmu_tx_get_txg(tx);


@@ -1984,7 +1984,7 @@ static void
spa_unload_log_sm_flush_all(spa_t *spa)
{
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
-VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
+VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT | DMU_TX_SUSPEND));
ASSERT3U(spa->spa_log_flushall_txg, ==, 0);
spa->spa_log_flushall_txg = dmu_tx_get_txg(tx);


@@ -569,7 +569,7 @@ spa_condense_indirect_commit_entry(spa_t *spa,
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
dmu_tx_hold_space(tx, sizeof (*vimep) + sizeof (count));
-VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
+VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT | DMU_TX_SUSPEND));
int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
/*


@@ -158,7 +158,7 @@ vdev_initialize_change_state(vdev_t *vd, vdev_initializing_state_t new_state)
vd->vdev_initialize_state = new_state;
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
-VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
+VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT | DMU_TX_SUSPEND));
if (new_state != VDEV_INITIALIZE_NONE) {
dsl_sync_task_nowait(spa_get_dsl(spa),
@@ -250,7 +250,7 @@ vdev_initialize_write(vdev_t *vd, uint64_t start, uint64_t size, abd_t *data)
mutex_exit(&vd->vdev_initialize_io_lock);
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
-VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
+VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT | DMU_TX_SUSPEND));
uint64_t txg = dmu_tx_get_txg(tx);
spa_config_enter(spa, SCL_STATE_ALL, vd, RW_READER);


@@ -4652,7 +4652,8 @@ spa_raidz_expand_thread(void *arg, zthr_t *zthr)
dmu_tx_t *tx =
dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
-VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
+VERIFY0(dmu_tx_assign(tx,
+    DMU_TX_WAIT | DMU_TX_SUSPEND));
uint64_t txg = dmu_tx_get_txg(tx);
/*


@@ -287,7 +287,7 @@ vdev_rebuild_initiate(vdev_t *vd)
ASSERT(!vd->vdev_rebuilding);
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
-VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
+VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT | DMU_TX_SUSPEND));
vd->vdev_rebuilding = B_TRUE;
@@ -592,7 +592,7 @@ vdev_rebuild_range(vdev_rebuild_t *vr, uint64_t start, uint64_t size)
mutex_exit(&vr->vr_io_lock);
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
-VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
+VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT | DMU_TX_SUSPEND));
uint64_t txg = dmu_tx_get_txg(tx);
spa_config_enter(spa, SCL_STATE_ALL, vd, RW_READER);
@@ -932,7 +932,7 @@ vdev_rebuild_thread(void *arg)
dsl_pool_t *dp = spa_get_dsl(spa);
dmu_tx_t *tx = dmu_tx_create_dd(dp->dp_mos_dir);
-VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
+VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT | DMU_TX_SUSPEND));
mutex_enter(&vd->vdev_rebuild_lock);
if (error == 0) {


@@ -1724,7 +1724,8 @@ again:
dmu_tx_t *tx =
dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
-VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
+VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT |
+    DMU_TX_SUSPEND));
uint64_t txg = dmu_tx_get_txg(tx);
/*


@@ -342,7 +342,7 @@ vdev_trim_change_state(vdev_t *vd, vdev_trim_state_t new_state,
vd->vdev_trim_state = new_state;
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
-VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
+VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT | DMU_TX_SUSPEND));
dsl_sync_task_nowait(spa_get_dsl(spa), vdev_trim_zap_update_sync,
guid, tx);
@@ -527,7 +527,7 @@ vdev_trim_range(trim_args_t *ta, uint64_t start, uint64_t size)
mutex_exit(&vd->vdev_trim_io_lock);
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
-VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
+VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT | DMU_TX_SUSPEND));
uint64_t txg = dmu_tx_get_txg(tx);
spa_config_enter(spa, SCL_STATE_ALL, vd, RW_READER);


@@ -957,7 +957,7 @@ zil_commit_activate_saxattr_feature(zilog_t *zilog)
dmu_objset_type(zilog->zl_os) != DMU_OST_ZVOL &&
!dsl_dataset_feature_is_active(ds, SPA_FEATURE_ZILSAXATTR)) {
tx = dmu_tx_create(zilog->zl_os);
-VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
+VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT | DMU_TX_SUSPEND));
dsl_dataset_dirty(ds, tx);
txg = dmu_tx_get_txg(tx);
@@ -1003,7 +1003,7 @@ zil_create(zilog_t *zilog)
*/
if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
tx = dmu_tx_create(zilog->zl_os);
-VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
+VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT | DMU_TX_SUSPEND));
dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
txg = dmu_tx_get_txg(tx);
@@ -1093,7 +1093,7 @@ zil_destroy(zilog_t *zilog, boolean_t keep_first)
return (B_FALSE);
tx = dmu_tx_create(zilog->zl_os);
-VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
+VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT | DMU_TX_SUSPEND));
dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
txg = dmu_tx_get_txg(tx);
@@ -1977,7 +1977,8 @@ next_lwb:
* Open transaction to allocate the next block pointer.
*/
dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
-VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT | DMU_TX_NOTHROTTLE));
+VERIFY0(dmu_tx_assign(tx,
+    DMU_TX_WAIT | DMU_TX_NOTHROTTLE | DMU_TX_SUSPEND));
dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
uint64_t txg = dmu_tx_get_txg(tx);
@@ -3456,7 +3457,8 @@ zil_commit_itx_assign(zilog_t *zilog, zil_commit_waiter_t *zcw)
* should not be subject to the dirty data based delays. We
* use DMU_TX_NOTHROTTLE to bypass the delay mechanism.
*/
-VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT | DMU_TX_NOTHROTTLE));
+VERIFY0(dmu_tx_assign(tx,
+    DMU_TX_WAIT | DMU_TX_NOTHROTTLE | DMU_TX_SUSPEND));
itx_t *itx = zil_itx_create(TX_COMMIT, sizeof (lr_t));
itx->itx_sync = B_TRUE;
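
One variation in the zil.c hunks above is worth calling out: the ZIL
commit paths also pass DMU_TX_NOTHROTTLE because, as the in-tree
comment above notes, commit itxs should not be subject to the
dirty-data-based delay. A sketch of that combined form, with the same
caveats as the earlier one:

	/* ZIL commit path: also bypass the dirty-data throttle. */
	dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
	VERIFY0(dmu_tx_assign(tx,
	    DMU_TX_WAIT | DMU_TX_NOTHROTTLE | DMU_TX_SUSPEND));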