dnode: remove dn_dirtyctx and dnode_dirtycontext

It was only used for a couple of debug assertions, which had very little value.

Setting it required taking certain locks, so we can remove all of that too.

Sponsored-by: Klara, Inc.
Sponsored-by: Wasabi Technology, Inc.
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Robert Evans <evansr@google.com>
Reviewed-by: Adam Moss <c@yotes.com>
Reviewed-by: Alexander Motin <alexander.motin@TrueNAS.com>
Signed-off-by: Rob Norris <rob.norris@klarasystems.com>
Closes #16297
Closes #17652
Closes #17658
Commit 574eec2964 (parent aa6f0f878b)
Rob Norris, 2025-08-19 16:18:20 +10:00; committed by Brian Behlendorf
4 changed files with 0 additions and 59 deletions
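Before the per-file hunks, a note on what is going away: dn_dirtyctx recorded the context (open or syncing) in which a dnode was first dirtied, purely so dbuf_dirty() could later assert that re-dirties happened in the matching context. Below is a minimal userspace sketch of that pattern, with invented names and pthreads standing in for dn_mtx and ds_bp_rwlock; it is a model of the removed code, not OpenZFS source:

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

/* Hypothetical model types; not the real OpenZFS definitions. */
enum dirtyctx { UNDIRTIED, DIRTY_OPEN, DIRTY_SYNC };

struct objset_model {
	pthread_rwlock_t bp_rwlock;	/* stands in for ds_bp_rwlock */
	bool rootbp_hole;		/* stands in for BP_IS_HOLE(os_rootbp) */
};

struct dnode_model {
	pthread_mutex_t mtx;		/* stands in for dn_mtx */
	enum dirtyctx ctx;		/* stands in for dn_dirtyctx */
	struct objset_model *os;
};

/*
 * Models the removed dnode_set_dirtyctx(): recording the first dirty
 * context meant taking ds_bp_rwlock as reader (with the caller already
 * holding dn_mtx) -- the "certain locks" the commit message refers to.
 */
static void
set_dirtyctx(struct dnode_model *dn, bool syncing)
{
	if (dn->ctx != UNDIRTIED)
		return;
	pthread_rwlock_rdlock(&dn->os->bp_rwlock);
	if (!dn->os->rootbp_hole)
		dn->ctx = syncing ? DIRTY_SYNC : DIRTY_OPEN;
	pthread_rwlock_unlock(&dn->os->bp_rwlock);
}

/*
 * Models the removed check in dbuf_dirty(): once dirtied in one context,
 * re-dirtying is only expected in that same context (the real check also
 * exempted the meta-dnode). The field existed only to feed this assertion.
 */
static void
dirty(struct dnode_model *dn, bool syncing)
{
	assert(dn->ctx == UNDIRTIED ||
	    dn->ctx == (syncing ? DIRTY_SYNC : DIRTY_OPEN));
	pthread_mutex_lock(&dn->mtx);
	set_dirtyctx(dn, syncing);
	pthread_mutex_unlock(&dn->mtx);
}

int
main(void)
{
	struct objset_model os = { PTHREAD_RWLOCK_INITIALIZER, false };
	struct dnode_model dn = { PTHREAD_MUTEX_INITIALIZER, UNDIRTIED, &os };
	dirty(&dn, false);	/* first dirty in open context */
	dirty(&dn, false);	/* re-dirty, same context: assertion holds */
	return (0);
}

Every dirty paid for the locked setter just to keep a debug assertion honest; deleting the field deletes the assertion, the setter, and the extra lock traffic, which is all this commit does.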

include/sys/dmu_impl.h

@@ -172,7 +172,6 @@ extern "C" {
  * dd_assigned_tx
  * dn_notxholds
  * dn_nodnholds
- * dn_dirtyctx
  * (dn_phys copy fields?)
  * (dn_phys contents?)
  * held from:

include/sys/dnode.h

@@ -141,12 +141,6 @@ struct dmu_buf_impl;
 struct objset;
 struct zio;
 
-enum dnode_dirtycontext {
-	DN_UNDIRTIED,
-	DN_DIRTY_OPEN,
-	DN_DIRTY_SYNC
-};
-
 /* Is dn_used in bytes? if not, it's in multiples of SPA_MINBLOCKSIZE */
 #define	DNODE_FLAG_USED_BYTES		(1 << 0)
 #define	DNODE_FLAG_USERUSED_ACCOUNTED	(1 << 1)
@@ -343,7 +337,6 @@ struct dnode {
 	uint8_t dn_dirtycnt;
 	kcondvar_t dn_notxholds;
 	kcondvar_t dn_nodnholds;
-	enum dnode_dirtycontext dn_dirtyctx;
 
 	/* protected by own devices */
 	zfs_refcount_t dn_tx_holds;
@@ -439,7 +432,6 @@ void dnode_rele_and_unlock(dnode_t *dn, const void *tag, boolean_t evicting);
 int dnode_try_claim(objset_t *os, uint64_t object, int slots);
 boolean_t dnode_is_dirty(dnode_t *dn);
 void dnode_setdirty(dnode_t *dn, dmu_tx_t *tx);
-void dnode_set_dirtyctx(dnode_t *dn, dmu_tx_t *tx, const void *tag);
 void dnode_sync(dnode_t *dn, dmu_tx_t *tx);
 void dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
     dmu_object_type_t bonustype, int bonuslen, int dn_slots, dmu_tx_t *tx);

module/zfs/dbuf.c

@@ -2270,14 +2270,6 @@ dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
 	if (dn->dn_objset->os_dsl_dataset != NULL)
 		rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG);
 #endif
-	/*
-	 * We make this assert for private objects as well, but after we
-	 * check if we're already dirty. They are allowed to re-dirty
-	 * in syncing context.
-	 */
-	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
-	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
-	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
 
 	mutex_enter(&db->db_mtx);
 	/*
@@ -2289,10 +2281,6 @@ dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
 	    db->db_state == DB_CACHED || db->db_state == DB_FILL ||
 	    db->db_state == DB_NOFILL);
 
-	mutex_enter(&dn->dn_mtx);
-	dnode_set_dirtyctx(dn, tx, db);
-	mutex_exit(&dn->dn_mtx);
-
 	if (db->db_blkid == DMU_SPILL_BLKID)
 		dn->dn_have_spill = B_TRUE;
@@ -2311,13 +2299,6 @@ dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
 		return (dr_next);
 	}
 
-	/*
-	 * Only valid if not already dirty.
-	 */
-	ASSERT(dn->dn_object == 0 ||
-	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
-	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
-
 	ASSERT3U(dn->dn_nlevels, >, db->db_level);
 
 	/*

module/zfs/dnode.c

@@ -173,7 +173,6 @@ dnode_cons(void *arg, void *unused, int kmflag)
 	dn->dn_allocated_txg = 0;
 	dn->dn_free_txg = 0;
 	dn->dn_assigned_txg = 0;
-	dn->dn_dirtyctx = 0;
 	dn->dn_dirtycnt = 0;
 	dn->dn_bonus = NULL;
 	dn->dn_have_spill = B_FALSE;
@@ -228,7 +227,6 @@ dnode_dest(void *arg, void *unused)
 	ASSERT0(dn->dn_allocated_txg);
 	ASSERT0(dn->dn_free_txg);
 	ASSERT0(dn->dn_assigned_txg);
-	ASSERT0(dn->dn_dirtyctx);
 	ASSERT0(dn->dn_dirtycnt);
 	ASSERT0P(dn->dn_bonus);
 	ASSERT(!dn->dn_have_spill);
@@ -692,7 +690,6 @@ dnode_destroy(dnode_t *dn)
 	dn->dn_assigned_txg = 0;
 	dn->dn_dirtycnt = 0;
-	dn->dn_dirtyctx = 0;
 
 	if (dn->dn_bonus != NULL) {
 		mutex_enter(&dn->dn_bonus->db_mtx);
 		dbuf_destroy(dn->dn_bonus);
@@ -797,7 +794,6 @@ dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
 	dn->dn_bonuslen = bonuslen;
 	dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
 	dn->dn_compress = ZIO_COMPRESS_INHERIT;
-	dn->dn_dirtyctx = 0;
 	dn->dn_free_txg = 0;
 	dn->dn_dirtycnt = 0;
@@ -951,7 +947,6 @@ dnode_move_impl(dnode_t *odn, dnode_t *ndn)
 	ndn->dn_allocated_txg = odn->dn_allocated_txg;
 	ndn->dn_free_txg = odn->dn_free_txg;
 	ndn->dn_assigned_txg = odn->dn_assigned_txg;
-	ndn->dn_dirtyctx = odn->dn_dirtyctx;
 	ndn->dn_dirtycnt = odn->dn_dirtycnt;
 	ASSERT0(zfs_refcount_count(&odn->dn_tx_holds));
 	zfs_refcount_transfer(&ndn->dn_holds, &odn->dn_holds);
@@ -1015,7 +1010,6 @@ dnode_move_impl(dnode_t *odn, dnode_t *ndn)
 	odn->dn_allocated_txg = 0;
 	odn->dn_free_txg = 0;
 	odn->dn_assigned_txg = 0;
-	odn->dn_dirtyctx = 0;
 	odn->dn_dirtycnt = 0;
 	odn->dn_have_spill = B_FALSE;
 	odn->dn_zio = NULL;
@@ -2214,31 +2208,6 @@ dnode_dirty_l1range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
 	mutex_exit(&dn->dn_dbufs_mtx);
 }
 
-void
-dnode_set_dirtyctx(dnode_t *dn, dmu_tx_t *tx, const void *tag)
-{
-	/*
-	 * Don't set dirtyctx to SYNC if we're just modifying this as we
-	 * initialize the objset.
-	 */
-	if (dn->dn_dirtyctx == DN_UNDIRTIED) {
-		dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
-
-		if (ds != NULL) {
-			rrw_enter(&ds->ds_bp_rwlock, RW_READER, tag);
-		}
-		if (!BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
-			if (dmu_tx_is_syncing(tx))
-				dn->dn_dirtyctx = DN_DIRTY_SYNC;
-			else
-				dn->dn_dirtyctx = DN_DIRTY_OPEN;
-		}
-		if (ds != NULL) {
-			rrw_exit(&ds->ds_bp_rwlock, tag);
-		}
-	}
-}
-
 static void
 dnode_partial_zero(dnode_t *dn, uint64_t off, uint64_t blkoff, uint64_t len,
     dmu_tx_t *tx)