From 09492e0f219fcb601c82e76abac598eaf233c7ee Mon Sep 17 00:00:00 2001 From: Alexander Motin Date: Tue, 9 Dec 2025 12:18:09 -0500 Subject: [PATCH] Reduce dataset buffers re-dirtying For each block written or freed ZFS dirties ds_dbuf of the dataset. While dbuf_dirty() has a fast path for already dirty dbufs, it still requires taking the lock and doing some things visible in profiler. Investigation showed that ds_dbuf dirtying by dsl_dataset_block_born() and some of dsl_dataset_block_kill() are just not needed, since by the time they are called in sync context the ds_dbuf is already dirtied by dsl_dataset_sync(). Tests show this reduces large file deletion time by ~3% by saving CPU time in the single-threaded part of the sync thread. Reviewed-by: Brian Behlendorf Signed-off-by: Alexander Motin Closes #18028 --- module/zfs/dsl_dataset.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/module/zfs/dsl_dataset.c b/module/zfs/dsl_dataset.c index 420687480..1da596427 100644 --- a/module/zfs/dsl_dataset.c +++ b/module/zfs/dsl_dataset.c @@ -161,7 +161,8 @@ dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx) ASSERT3U(BP_GET_BIRTH(bp), >, dsl_dataset_phys(ds)->ds_prev_snap_txg); - dmu_buf_will_dirty(ds->ds_dbuf, tx); + /* ds_dbuf is pre-dirtied in dsl_dataset_sync(). */ + ASSERT(dmu_buf_is_dirty(ds->ds_dbuf, tx)); mutex_enter(&ds->ds_lock); delta = parent_delta(ds, used); dsl_dataset_phys(ds)->ds_referenced_bytes += used; @@ -274,7 +275,8 @@ dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx, ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool); ASSERT(!ds->ds_is_snapshot); - dmu_buf_will_dirty(ds->ds_dbuf, tx); + /* ds_dbuf is pre-dirtied in dsl_dataset_sync(). */ + ASSERT(dmu_buf_is_dirty(ds->ds_dbuf, tx)); /* * Track block for livelist, but ignore embedded blocks because