range_tree: Provide more debug details upon unexpected add/remove

Sponsored-by: Klara, Inc.
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Alexander Motin <alexander.motin@TrueNAS.com>
Signed-off-by: Igor Ostapenko <igor.ostapenko@klarasystems.com>
Closes #17581
Authored by Igor Ostapenko on 2025-07-31 17:44:42 +03:00; committed by Alexander Motin
parent fc658b9935
commit 95abbc71c3
13 changed files with 227 additions and 87 deletions
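In short: range tree creation sites now pass a human-readable name through the new zfs_range_tree_create_flags(), and the add/remove sanity checks print that name, so a panic message identifies which tree was corrupted instead of reporting only raw offsets. As a purely illustrative example (pool name, GUID, metaslab id, and offsets are invented here), a double remove that previously reported only "removing nonexistent segment from range tree (offset=... size=...)" now reads:

    zfs: rt={spa=tank vdev_guid=12345 ms_id=7 ms_allocatable}: removing
        nonexistent segment from range tree (offset=1a000 size=2000)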

cmd/zdb/zdb.c

@@ -619,8 +619,9 @@ livelist_metaslab_validate(spa_t *spa)
 		    metaslab_calculate_range_tree_type(vd, m,
 		    &start, &shift);
 		metaslab_verify_t mv;
-		mv.mv_allocated = zfs_range_tree_create(NULL,
-		    type, NULL, start, shift);
+		mv.mv_allocated = zfs_range_tree_create_flags(
+		    NULL, type, NULL, start, shift,
+		    0, "livelist_metaslab_validate:mv_allocated");
 		mv.mv_vdid = vd->vdev_id;
 		mv.mv_msid = m->ms_id;
 		mv.mv_start = m->ms_start;
@@ -6320,8 +6321,9 @@ zdb_claim_removing(spa_t *spa, zdb_cb_t *zcb)
 	ASSERT0(zfs_range_tree_space(svr->svr_allocd_segs));
 
-	zfs_range_tree_t *allocs = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
-	    NULL, 0, 0);
+	zfs_range_tree_t *allocs = zfs_range_tree_create_flags(
+	    NULL, ZFS_RANGE_SEG64, NULL, 0, 0,
+	    0, "zdb_claim_removing:allocs");
 
 	for (uint64_t msi = 0; msi < vd->vdev_ms_count; msi++) {
 		metaslab_t *msp = vd->vdev_ms[msi];
@@ -8449,8 +8451,9 @@ dump_zpool(spa_t *spa)
 	if (dump_opt['d'] || dump_opt['i']) {
 		spa_feature_t f;
-		mos_refd_objs = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
-		    NULL, 0, 0);
+		mos_refd_objs = zfs_range_tree_create_flags(
+		    NULL, ZFS_RANGE_SEG64, NULL, 0, 0,
+		    0, "dump_zpool:mos_refd_objs");
 		dump_objset(dp->dp_meta_objset);
 
 		if (dump_opt['d'] >= 3) {

include/sys/metaslab_impl.h

@@ -568,6 +568,8 @@ typedef struct metaslab_unflushed_phys {
 	uint64_t msp_unflushed_txg;
 } metaslab_unflushed_phys_t;
 
+char *metaslab_rt_name(metaslab_group_t *, metaslab_t *, const char *);
+
 #ifdef __cplusplus
 }
 #endif

include/sys/range_tree.h

@@ -49,6 +49,9 @@ typedef enum zfs_range_seg_type {
 	ZFS_RANGE_SEG_NUM_TYPES,
 } zfs_range_seg_type_t;
 
+#define	ZFS_RT_NAME(rt)		(((rt)->rt_name != NULL) ? (rt)->rt_name : "")
+#define	ZFS_RT_F_DYN_NAME	(1ULL << 0)	/* if rt_name must be freed */
+
 /*
  * Note: the range_tree may not be accessed concurrently; consumers
  * must provide external locking if required.
@@ -68,6 +71,9 @@ typedef struct zfs_range_tree {
 	void *rt_arg;
 	uint64_t rt_gap;	/* allowable inter-segment gap */
 
+	uint64_t rt_flags;
+	const char *rt_name;	/* details for debugging */
+
 	/*
 	 * The rt_histogram maintains a histogram of ranges. Each bucket,
 	 * rt_histogram[i], contains the number of ranges whose size is:
@@ -281,6 +287,9 @@ zfs_range_tree_t *zfs_range_tree_create_gap(const zfs_range_tree_ops_t *ops,
     uint64_t gap);
 zfs_range_tree_t *zfs_range_tree_create(const zfs_range_tree_ops_t *ops,
     zfs_range_seg_type_t type, void *arg, uint64_t start, uint64_t shift);
+zfs_range_tree_t *zfs_range_tree_create_flags(const zfs_range_tree_ops_t *ops,
+    zfs_range_seg_type_t type, void *arg, uint64_t start, uint64_t shift,
+    uint64_t flags, const char *name);
 void zfs_range_tree_destroy(zfs_range_tree_t *rt);
 boolean_t zfs_range_tree_contains(zfs_range_tree_t *rt, uint64_t start,
     uint64_t size);
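A minimal usage sketch of the two naming modes this header exposes (the tree parameters are placeholders, and spa is assumed to be in scope; this is not code from the patch): a string literal needs no flag, while a dynamically built name must be tagged with ZFS_RT_F_DYN_NAME so zfs_range_tree_destroy() knows to free it.

    /* Static name: string literal, flags = 0, never freed. */
    zfs_range_tree_t *a = zfs_range_tree_create_flags(
        NULL, ZFS_RANGE_SEG64, NULL, 0, 0,
        0, "example:static");

    /* Dynamic name: the tree takes ownership and frees it on destroy. */
    zfs_range_tree_t *b = zfs_range_tree_create_flags(
        NULL, ZFS_RANGE_SEG64, NULL, 0, 0,
        ZFS_RT_F_DYN_NAME,
        kmem_asprintf("{spa=%s example:dynamic}", spa_name(spa)));

    zfs_range_tree_destroy(a);
    zfs_range_tree_destroy(b);	/* kmem_strfree()s the asprintf'd name */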

include/sys/vdev_impl.h

@@ -651,6 +651,7 @@ uint64_t vdev_best_ashift(uint64_t logical, uint64_t a, uint64_t b);
 int param_get_raidz_impl(char *buf, zfs_kernel_param_t *kp);
 #endif
 int param_set_raidz_impl(ZFS_MODULE_PARAM_ARGS);
+char *vdev_rt_name(vdev_t *vd, const char *name);
 
 /*
  * Vdev ashift optimization tunables

module/zfs/dnode.c

@@ -86,6 +86,19 @@ int zfs_default_ibs = DN_MAX_INDBLKSHIFT;
 static kmem_cbrc_t dnode_move(void *, void *, size_t, void *);
 #endif /* _KERNEL */
 
+static char *
+rt_name(dnode_t *dn, const char *name)
+{
+	struct objset *os = dn->dn_objset;
+
+	return (kmem_asprintf("{spa=%s objset=%llu obj=%llu %s}",
+	    spa_name(os->os_spa),
+	    (u_longlong_t)(os->os_dsl_dataset ?
+	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET),
+	    (u_longlong_t)dn->dn_object,
+	    name));
+}
+
 static int
 dbuf_compare(const void *x1, const void *x2)
 {
@@ -2436,8 +2449,10 @@ done:
 	{
 		int txgoff = tx->tx_txg & TXG_MASK;
 		if (dn->dn_free_ranges[txgoff] == NULL) {
-			dn->dn_free_ranges[txgoff] = zfs_range_tree_create(NULL,
-			    ZFS_RANGE_SEG64, NULL, 0, 0);
+			dn->dn_free_ranges[txgoff] =
+			    zfs_range_tree_create_flags(
+			    NULL, ZFS_RANGE_SEG64, NULL, 0, 0,
+			    ZFS_RT_F_DYN_NAME, rt_name(dn, "dn_free_ranges"));
 		}
 		zfs_range_tree_clear(dn->dn_free_ranges[txgoff], blkid, nblks);
 		zfs_range_tree_add(dn->dn_free_ranges[txgoff], blkid, nblks);
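For illustration only (pool name and object numbers invented): with this helper, the dn_free_ranges tree of dnode 8 in a dataset whose object number is 54 in pool "tank" would be named

    {spa=tank objset=54 obj=8 dn_free_ranges}

and for a dnode of the meta-objset the objset field falls back to DMU_META_OBJSET (0).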

module/zfs/metaslab.c

@@ -370,6 +370,16 @@ static metaslab_stats_t metaslab_stats = {
 #define	METASLABSTAT_BUMP(stat) \
 	atomic_inc_64(&metaslab_stats.stat.value.ui64);
 
+char *
+metaslab_rt_name(metaslab_group_t *mg, metaslab_t *ms, const char *name)
+{
+	return (kmem_asprintf("{spa=%s vdev_guid=%llu ms_id=%llu %s}",
+	    spa_name(mg->mg_vd->vdev_spa),
+	    (u_longlong_t)mg->mg_vd->vdev_guid,
+	    (u_longlong_t)ms->ms_id,
+	    name));
+}
+
 
 static kstat_t *metaslab_ksp;
@@ -2757,30 +2767,43 @@ metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object,
 	zfs_range_seg_type_t type =
 	    metaslab_calculate_range_tree_type(vd, ms, &start, &shift);
 
-	ms->ms_allocatable = zfs_range_tree_create(NULL, type, NULL, start,
-	    shift);
+	ms->ms_allocatable = zfs_range_tree_create_flags(
+	    NULL, type, NULL, start, shift,
+	    ZFS_RT_F_DYN_NAME, metaslab_rt_name(mg, ms, "ms_allocatable"));
 	for (int t = 0; t < TXG_SIZE; t++) {
-		ms->ms_allocating[t] = zfs_range_tree_create(NULL, type,
-		    NULL, start, shift);
+		ms->ms_allocating[t] = zfs_range_tree_create_flags(
+		    NULL, type, NULL, start, shift,
+		    ZFS_RT_F_DYN_NAME,
+		    metaslab_rt_name(mg, ms, "ms_allocating"));
 	}
-	ms->ms_freeing = zfs_range_tree_create(NULL, type, NULL, start, shift);
-	ms->ms_freed = zfs_range_tree_create(NULL, type, NULL, start, shift);
+	ms->ms_freeing = zfs_range_tree_create_flags(
+	    NULL, type, NULL, start, shift,
+	    ZFS_RT_F_DYN_NAME, metaslab_rt_name(mg, ms, "ms_freeing"));
+	ms->ms_freed = zfs_range_tree_create_flags(
+	    NULL, type, NULL, start, shift,
+	    ZFS_RT_F_DYN_NAME, metaslab_rt_name(mg, ms, "ms_freed"));
 	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
-		ms->ms_defer[t] = zfs_range_tree_create(NULL, type, NULL,
-		    start, shift);
+		ms->ms_defer[t] = zfs_range_tree_create_flags(
+		    NULL, type, NULL, start, shift,
+		    ZFS_RT_F_DYN_NAME, metaslab_rt_name(mg, ms, "ms_defer"));
 	}
-	ms->ms_checkpointing =
-	    zfs_range_tree_create(NULL, type, NULL, start, shift);
-	ms->ms_unflushed_allocs =
-	    zfs_range_tree_create(NULL, type, NULL, start, shift);
+	ms->ms_checkpointing = zfs_range_tree_create_flags(
+	    NULL, type, NULL, start, shift,
+	    ZFS_RT_F_DYN_NAME, metaslab_rt_name(mg, ms, "ms_checkpointing"));
+	ms->ms_unflushed_allocs = zfs_range_tree_create_flags(
+	    NULL, type, NULL, start, shift,
+	    ZFS_RT_F_DYN_NAME, metaslab_rt_name(mg, ms, "ms_unflushed_allocs"));
 
 	metaslab_rt_arg_t *mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP);
 	mrap->mra_bt = &ms->ms_unflushed_frees_by_size;
 	mrap->mra_floor_shift = metaslab_by_size_min_shift;
-	ms->ms_unflushed_frees = zfs_range_tree_create(&metaslab_rt_ops,
-	    type, mrap, start, shift);
+	ms->ms_unflushed_frees = zfs_range_tree_create_flags(
+	    &metaslab_rt_ops, type, mrap, start, shift,
+	    ZFS_RT_F_DYN_NAME, metaslab_rt_name(mg, ms, "ms_unflushed_frees"));
 
-	ms->ms_trim = zfs_range_tree_create(NULL, type, NULL, start, shift);
+	ms->ms_trim = zfs_range_tree_create_flags(
+	    NULL, type, NULL, start, shift,
+	    ZFS_RT_F_DYN_NAME, metaslab_rt_name(mg, ms, "ms_trim"));
 
 	metaslab_group_add(mg, ms);
 	metaslab_set_fragmentation(ms, B_FALSE);
@@ -3754,7 +3777,10 @@ metaslab_condense(metaslab_t *msp, dmu_tx_t *tx)
 	type = metaslab_calculate_range_tree_type(msp->ms_group->mg_vd, msp,
 	    &start, &shift);
-	condense_tree = zfs_range_tree_create(NULL, type, NULL, start, shift);
+	condense_tree = zfs_range_tree_create_flags(
+	    NULL, type, NULL, start, shift,
+	    ZFS_RT_F_DYN_NAME,
+	    metaslab_rt_name(msp->ms_group, msp, "condense_tree"));
 
 	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
 		zfs_range_tree_walk(msp->ms_defer[t],
@@ -3811,8 +3837,10 @@ metaslab_condense(metaslab_t *msp, dmu_tx_t *tx)
 	 * followed by FREES (due to space_map_write() in metaslab_sync()) for
 	 * sync pass 1.
 	 */
-	zfs_range_tree_t *tmp_tree = zfs_range_tree_create(NULL, type, NULL,
-	    start, shift);
+	zfs_range_tree_t *tmp_tree = zfs_range_tree_create_flags(
+	    NULL, type, NULL, start, shift,
+	    ZFS_RT_F_DYN_NAME,
+	    metaslab_rt_name(msp->ms_group, msp, "tmp_tree"));
 	zfs_range_tree_add(tmp_tree, msp->ms_start, msp->ms_size);
 	space_map_write(sm, tmp_tree, SM_ALLOC, SM_NO_VDEVID, tx);
 	space_map_write(sm, msp->ms_allocatable, SM_FREE, SM_NO_VDEVID, tx);
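All of the metaslab trees above follow the same ownership pattern: the name is built once by metaslab_rt_name() at creation and later freed by the tree itself because ZFS_RT_F_DYN_NAME is set. A hypothetical rendered name (values invented) for the trim tree of metaslab 7 on a vdev with GUID 1234567890 in pool "tank":

    {spa=tank vdev_guid=1234567890 ms_id=7 ms_trim}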

module/zfs/range_tree.c

@@ -201,10 +201,10 @@ ZFS_BTREE_FIND_IN_BUF_FUNC(zfs_range_tree_seg64_find_in_buf, zfs_range_seg64_t,
 ZFS_BTREE_FIND_IN_BUF_FUNC(zfs_range_tree_seg_gap_find_in_buf,
     zfs_range_seg_gap_t, zfs_range_tree_seg_gap_compare)
 
-zfs_range_tree_t *
-zfs_range_tree_create_gap(const zfs_range_tree_ops_t *ops,
+static zfs_range_tree_t *
+zfs_range_tree_create_impl(const zfs_range_tree_ops_t *ops,
     zfs_range_seg_type_t type, void *arg, uint64_t start, uint64_t shift,
-    uint64_t gap)
+    uint64_t gap, uint64_t flags, const char *name)
 {
 	zfs_range_tree_t *rt = kmem_zalloc(sizeof (zfs_range_tree_t), KM_SLEEP);
@@ -236,6 +236,8 @@ zfs_range_tree_create_gap(const zfs_range_tree_ops_t *ops,
 	rt->rt_ops = ops;
 	rt->rt_gap = gap;
+	rt->rt_flags = flags;
+	rt->rt_name = name;
 	rt->rt_arg = arg;
 	rt->rt_type = type;
 	rt->rt_start = start;
@@ -247,11 +249,30 @@ zfs_range_tree_create_gap(const zfs_range_tree_ops_t *ops,
 	return (rt);
 }
 
+zfs_range_tree_t *
+zfs_range_tree_create_gap(const zfs_range_tree_ops_t *ops,
+    zfs_range_seg_type_t type, void *arg, uint64_t start, uint64_t shift,
+    uint64_t gap)
+{
+	return (zfs_range_tree_create_impl(ops, type, arg, start, shift, gap,
+	    0, NULL));
+}
+
 zfs_range_tree_t *
 zfs_range_tree_create(const zfs_range_tree_ops_t *ops,
     zfs_range_seg_type_t type, void *arg, uint64_t start, uint64_t shift)
 {
-	return (zfs_range_tree_create_gap(ops, type, arg, start, shift, 0));
+	return (zfs_range_tree_create_impl(ops, type, arg, start, shift, 0,
+	    0, NULL));
+}
+
+zfs_range_tree_t *
+zfs_range_tree_create_flags(const zfs_range_tree_ops_t *ops,
+    zfs_range_seg_type_t type, void *arg, uint64_t start, uint64_t shift,
+    uint64_t flags, const char *name)
+{
+	return (zfs_range_tree_create_impl(ops, type, arg, start, shift, 0,
+	    flags, name));
 }
 
 void
@@ -262,6 +283,9 @@ zfs_range_tree_destroy(zfs_range_tree_t *rt)
 	if (rt->rt_ops != NULL && rt->rt_ops->rtop_destroy != NULL)
 		rt->rt_ops->rtop_destroy(rt, rt->rt_arg);
 
+	if (rt->rt_name != NULL && (rt->rt_flags & ZFS_RT_F_DYN_NAME))
+		kmem_strfree((char *)(uintptr_t)rt->rt_name);
+
 	zfs_btree_destroy(&rt->rt_root);
 	kmem_free(rt, sizeof (*rt));
 }
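Note the wrapper design: the existing zfs_range_tree_create() and zfs_range_tree_create_gap() keep their signatures and simply delegate to zfs_range_tree_create_impl() with flags = 0 and name = NULL, so no existing caller has to change. For such unnamed trees ZFS_RT_NAME() substitutes an empty string, so the rt=%s messages stay well formed. A sketch (not from the patch):

    /* Unnamed tree: still legal; messages print as "zfs: rt=: ...". */
    zfs_range_tree_t *rt = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
        NULL, 0, 0);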
@@ -271,15 +295,17 @@ zfs_range_tree_adjust_fill(zfs_range_tree_t *rt, zfs_range_seg_t *rs,
     int64_t delta)
 {
 	if (delta < 0 && delta * -1 >= zfs_rs_get_fill(rs, rt)) {
-		zfs_panic_recover("zfs: attempting to decrease fill to or "
-		    "below 0; probable double remove in segment [%llx:%llx]",
+		zfs_panic_recover("zfs: rt=%s: attempting to decrease fill to "
+		    "or below 0; probable double remove in segment [%llx:%llx]",
+		    ZFS_RT_NAME(rt),
 		    (longlong_t)zfs_rs_get_start(rs, rt),
 		    (longlong_t)zfs_rs_get_end(rs, rt));
 	}
 	if (zfs_rs_get_fill(rs, rt) + delta > zfs_rs_get_end(rs, rt) -
 	    zfs_rs_get_start(rs, rt)) {
-		zfs_panic_recover("zfs: attempting to increase fill beyond "
-		    "max; probable double add in segment [%llx:%llx]",
+		zfs_panic_recover("zfs: rt=%s: attempting to increase fill "
+		    "beyond max; probable double add in segment [%llx:%llx]",
+		    ZFS_RT_NAME(rt),
 		    (longlong_t)zfs_rs_get_start(rs, rt),
 		    (longlong_t)zfs_rs_get_end(rs, rt));
 	}
@@ -319,14 +345,17 @@ zfs_range_tree_add_impl(void *arg, uint64_t start, uint64_t size, uint64_t fill)
 	 * the normal code paths.
 	 */
 	if (rs != NULL) {
-		if (gap == 0) {
-			zfs_panic_recover("zfs: adding existent segment to "
-			    "range tree (offset=%llx size=%llx)",
-			    (longlong_t)start, (longlong_t)size);
-			return;
-		}
 		uint64_t rstart = zfs_rs_get_start(rs, rt);
 		uint64_t rend = zfs_rs_get_end(rs, rt);
+		if (gap == 0) {
+			zfs_panic_recover("zfs: rt=%s: adding segment "
+			    "(offset=%llx size=%llx) overlapping with existing "
+			    "one (offset=%llx size=%llx)",
+			    ZFS_RT_NAME(rt),
+			    (longlong_t)start, (longlong_t)size,
+			    (longlong_t)rstart, (longlong_t)(rend - rstart));
+			return;
+		}
 		if (rstart <= start && rend >= end) {
 			zfs_range_tree_adjust_fill(rt, rs, fill);
 			return;
@@ -451,6 +480,7 @@ zfs_range_tree_remove_impl(zfs_range_tree_t *rt, uint64_t start, uint64_t size,
 	zfs_range_seg_t *rs;
 	zfs_range_seg_max_t rsearch, rs_tmp;
 	uint64_t end = start + size;
+	uint64_t rstart, rend;
 	boolean_t left_over, right_over;
 
 	VERIFY3U(size, !=, 0);
@@ -464,12 +494,15 @@ zfs_range_tree_remove_impl(zfs_range_tree_t *rt, uint64_t start, uint64_t size,
 	/* Make sure we completely overlap with someone */
 	if (rs == NULL) {
-		zfs_panic_recover("zfs: removing nonexistent segment from "
-		    "range tree (offset=%llx size=%llx)",
-		    (longlong_t)start, (longlong_t)size);
+		zfs_panic_recover("zfs: rt=%s: removing nonexistent segment "
+		    "from range tree (offset=%llx size=%llx)",
+		    ZFS_RT_NAME(rt), (longlong_t)start, (longlong_t)size);
 		return;
 	}
 
+	rstart = zfs_rs_get_start(rs, rt);
+	rend = zfs_rs_get_end(rs, rt);
+
 	/*
 	 * Range trees with gap support must only remove complete segments
 	 * from the tree. This allows us to maintain accurate fill accounting
@@ -479,31 +512,36 @@ zfs_range_tree_remove_impl(zfs_range_tree_t *rt, uint64_t start, uint64_t size,
 	if (rt->rt_gap != 0) {
 		if (do_fill) {
 			if (zfs_rs_get_fill(rs, rt) == size) {
-				start = zfs_rs_get_start(rs, rt);
-				end = zfs_rs_get_end(rs, rt);
+				start = rstart;
+				end = rend;
 				size = end - start;
 			} else {
 				zfs_range_tree_adjust_fill(rt, rs, -size);
 				return;
 			}
-		} else if (zfs_rs_get_start(rs, rt) != start ||
-		    zfs_rs_get_end(rs, rt) != end) {
-			zfs_panic_recover("zfs: freeing partial segment of "
-			    "gap tree (offset=%llx size=%llx) of "
+		} else if (rstart != start || rend != end) {
+			zfs_panic_recover("zfs: rt=%s: freeing partial segment "
+			    "of gap tree (offset=%llx size=%llx) of "
 			    "(offset=%llx size=%llx)",
+			    ZFS_RT_NAME(rt),
 			    (longlong_t)start, (longlong_t)size,
-			    (longlong_t)zfs_rs_get_start(rs, rt),
-			    (longlong_t)zfs_rs_get_end(rs, rt) -
-			    zfs_rs_get_start(rs, rt));
+			    (longlong_t)rstart, (longlong_t)(rend - rstart));
 			return;
 		}
 	}
 
-	VERIFY3U(zfs_rs_get_start(rs, rt), <=, start);
-	VERIFY3U(zfs_rs_get_end(rs, rt), >=, end);
+	if (!(rstart <= start && rend >= end)) {
+		panic("zfs: rt=%s: removing segment "
+		    "(offset=%llx size=%llx) not completely overlapped by "
+		    "existing one (offset=%llx size=%llx)",
+		    ZFS_RT_NAME(rt),
+		    (longlong_t)start, (longlong_t)size,
+		    (longlong_t)rstart, (longlong_t)(rend - rstart));
+		return;
+	}
 
-	left_over = (zfs_rs_get_start(rs, rt) != start);
-	right_over = (zfs_rs_get_end(rs, rt) != end);
+	left_over = (rstart != start);
+	right_over = (rend != end);
 
 	zfs_range_tree_stat_decr(rt, rs);

module/zfs/vdev.c

@@ -243,6 +243,25 @@ vdev_dbgmsg_print_tree(vdev_t *vd, int indent)
 		vdev_dbgmsg_print_tree(vd->vdev_child[i], indent + 2);
 }
 
+char *
+vdev_rt_name(vdev_t *vd, const char *name)
+{
+	return (kmem_asprintf("{spa=%s vdev_guid=%llu %s}",
+	    spa_name(vd->vdev_spa),
+	    (u_longlong_t)vd->vdev_guid,
+	    name));
+}
+
+static char *
+vdev_rt_name_dtl(vdev_t *vd, const char *name, vdev_dtl_type_t dtl_type)
+{
+	return (kmem_asprintf("{spa=%s vdev_guid=%llu %s[%d]}",
+	    spa_name(vd->vdev_spa),
+	    (u_longlong_t)vd->vdev_guid,
+	    name,
+	    dtl_type));
+}
+
 /*
  * Virtual device management.
  */
@@ -679,8 +698,9 @@ vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
 	rw_init(&vd->vdev_indirect_rwlock, NULL, RW_DEFAULT, NULL);
 	mutex_init(&vd->vdev_obsolete_lock, NULL, MUTEX_DEFAULT, NULL);
-	vd->vdev_obsolete_segments = zfs_range_tree_create(NULL,
-	    ZFS_RANGE_SEG64, NULL, 0, 0);
+	vd->vdev_obsolete_segments = zfs_range_tree_create_flags(
+	    NULL, ZFS_RANGE_SEG64, NULL, 0, 0,
+	    ZFS_RT_F_DYN_NAME, vdev_rt_name(vd, "vdev_obsolete_segments"));
 
 	/*
 	 * Initialize rate limit structs for events. We rate limit ZIO delay
@@ -734,8 +754,9 @@ vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
 	cv_init(&vd->vdev_rebuild_cv, NULL, CV_DEFAULT, NULL);
 
 	for (int t = 0; t < DTL_TYPES; t++) {
-		vd->vdev_dtl[t] = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
-		    NULL, 0, 0);
+		vd->vdev_dtl[t] = zfs_range_tree_create_flags(
+		    NULL, ZFS_RANGE_SEG64, NULL, 0, 0,
+		    ZFS_RT_F_DYN_NAME, vdev_rt_name_dtl(vd, "vdev_dtl", t));
 	}
 
 	txg_list_create(&vd->vdev_ms_list, spa,
@@ -3437,7 +3458,9 @@ vdev_dtl_load(vdev_t *vd)
 			return (error);
 		ASSERT(vd->vdev_dtl_sm != NULL);
 
-		rt = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64, NULL, 0, 0);
+		rt = zfs_range_tree_create_flags(
+		    NULL, ZFS_RANGE_SEG64, NULL, 0, 0,
+		    ZFS_RT_F_DYN_NAME, vdev_rt_name(vd, "vdev_dtl_load:rt"));
 		error = space_map_load(vd->vdev_dtl_sm, rt, SM_ALLOC);
 		if (error == 0) {
 			mutex_enter(&vd->vdev_dtl_lock);
@@ -3585,7 +3608,8 @@ vdev_dtl_sync(vdev_t *vd, uint64_t txg)
 		ASSERT(vd->vdev_dtl_sm != NULL);
 	}
 
-	rtsync = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64, NULL, 0, 0);
+	rtsync = zfs_range_tree_create_flags(NULL, ZFS_RANGE_SEG64, NULL, 0, 0,
+	    ZFS_RT_F_DYN_NAME, vdev_rt_name(vd, "rtsync"));
 
 	mutex_enter(&vd->vdev_dtl_lock);
 	zfs_range_tree_walk(rt, zfs_range_tree_add, rtsync);
module/zfs/vdev_initialize.c

@@ -541,8 +541,9 @@ vdev_initialize_thread(void *arg)
 	abd_t *deadbeef = vdev_initialize_block_alloc();
 
-	vd->vdev_initialize_tree = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
-	    NULL, 0, 0);
+	vd->vdev_initialize_tree = zfs_range_tree_create_flags(
+	    NULL, ZFS_RANGE_SEG64, NULL, 0, 0,
+	    ZFS_RT_F_DYN_NAME, vdev_rt_name(vd, "vdev_initialize_tree"));
 
 	for (uint64_t i = 0; !vd->vdev_detached &&
 	    i < vd->vdev_top->vdev_ms_count; i++) {

module/zfs/vdev_raidz.c

@@ -4556,8 +4556,10 @@ spa_raidz_expand_thread(void *arg, zthr_t *zthr)
 		uint64_t shift, start;
 		zfs_range_seg_type_t type = metaslab_calculate_range_tree_type(
 		    raidvd, msp, &start, &shift);
-		zfs_range_tree_t *rt = zfs_range_tree_create(NULL, type, NULL,
-		    start, shift);
+		zfs_range_tree_t *rt = zfs_range_tree_create_flags(
+		    NULL, type, NULL, start, shift, ZFS_RT_F_DYN_NAME,
+		    metaslab_rt_name(msp->ms_group, msp,
+		    "spa_raidz_expand_thread:rt"));
 		zfs_range_tree_add(rt, msp->ms_start, msp->ms_size);
 		zfs_range_tree_walk(msp->ms_allocatable, zfs_range_tree_remove,
 		    rt);

module/zfs/vdev_rebuild.c

@@ -787,8 +787,9 @@ vdev_rebuild_thread(void *arg)
 	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
 	vr->vr_top_vdev = vd;
 	vr->vr_scan_msp = NULL;
-	vr->vr_scan_tree = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64, NULL,
-	    0, 0);
+	vr->vr_scan_tree = zfs_range_tree_create_flags(
+	    NULL, ZFS_RANGE_SEG64, NULL, 0, 0,
+	    ZFS_RT_F_DYN_NAME, vdev_rt_name(vd, "vr_scan_tree"));
 	mutex_init(&vr->vr_io_lock, NULL, MUTEX_DEFAULT, NULL);
 	cv_init(&vr->vr_io_cv, NULL, CV_DEFAULT, NULL);

module/zfs/vdev_removal.c

@@ -364,13 +364,15 @@ spa_vdev_removal_create(vdev_t *vd)
 	spa_vdev_removal_t *svr = kmem_zalloc(sizeof (*svr), KM_SLEEP);
 	mutex_init(&svr->svr_lock, NULL, MUTEX_DEFAULT, NULL);
 	cv_init(&svr->svr_cv, NULL, CV_DEFAULT, NULL);
-	svr->svr_allocd_segs = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
-	    NULL, 0, 0);
+	svr->svr_allocd_segs = zfs_range_tree_create_flags(
+	    NULL, ZFS_RANGE_SEG64, NULL, 0, 0,
+	    ZFS_RT_F_DYN_NAME, vdev_rt_name(vd, "svr_allocd_segs"));
 	svr->svr_vdev_id = vd->vdev_id;
 
 	for (int i = 0; i < TXG_SIZE; i++) {
-		svr->svr_frees[i] = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
-		    NULL, 0, 0);
+		svr->svr_frees[i] = zfs_range_tree_create_flags(
+		    NULL, ZFS_RANGE_SEG64, NULL, 0, 0,
+		    ZFS_RT_F_DYN_NAME, vdev_rt_name(vd, "svr_frees"));
 		list_create(&svr->svr_new_segments[i],
 		    sizeof (vdev_indirect_mapping_entry_t),
 		    offsetof(vdev_indirect_mapping_entry_t, vime_node));
@@ -1179,8 +1181,9 @@ spa_vdev_copy_segment(vdev_t *vd, zfs_range_tree_t *segs,
 	 * relative to the start of the range to be copied (i.e. relative to the
 	 * local variable "start").
 	 */
-	zfs_range_tree_t *obsolete_segs = zfs_range_tree_create(NULL,
-	    ZFS_RANGE_SEG64, NULL, 0, 0);
+	zfs_range_tree_t *obsolete_segs = zfs_range_tree_create_flags(
+	    NULL, ZFS_RANGE_SEG64, NULL, 0, 0,
+	    ZFS_RT_F_DYN_NAME, vdev_rt_name(vd, "obsolete_segs"));
 	zfs_btree_index_t where;
 	zfs_range_seg_t *rs = zfs_btree_first(&segs->rt_root, &where);
@@ -1448,8 +1451,9 @@ spa_vdev_copy_impl(vdev_t *vd, spa_vdev_removal_t *svr, vdev_copy_arg_t *vca,
 	 * allocated segments that we are copying. We may also be copying
 	 * free segments (of up to vdev_removal_max_span bytes).
 	 */
-	zfs_range_tree_t *segs = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
-	    NULL, 0, 0);
+	zfs_range_tree_t *segs = zfs_range_tree_create_flags(
+	    NULL, ZFS_RANGE_SEG64, NULL, 0, 0,
+	    ZFS_RT_F_DYN_NAME, vdev_rt_name(vd, "spa_vdev_copy_impl:segs"));
 	for (;;) {
 		zfs_range_tree_t *rt = svr->svr_allocd_segs;
 		zfs_range_seg_t *rs = zfs_range_tree_first(rt);
@@ -1610,8 +1614,9 @@ spa_vdev_remove_thread(void *arg)
 	vca.vca_read_error_bytes = 0;
 	vca.vca_write_error_bytes = 0;
 
-	zfs_range_tree_t *segs = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
-	    NULL, 0, 0);
+	zfs_range_tree_t *segs = zfs_range_tree_create_flags(
+	    NULL, ZFS_RANGE_SEG64, NULL, 0, 0,
+	    ZFS_RT_F_DYN_NAME, vdev_rt_name(vd, "spa_vdev_remove_thread:segs"));
 
 	mutex_enter(&svr->svr_lock);
@@ -1894,8 +1899,9 @@ spa_vdev_remove_cancel_sync(void *arg, dmu_tx_t *tx)
 		    vdev_indirect_mapping_max_offset(vim));
 	}
 
-	zfs_range_tree_t *segs = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
-	    NULL, 0, 0);
+	zfs_range_tree_t *segs = zfs_range_tree_create_flags(
+	    NULL, ZFS_RANGE_SEG64, NULL, 0, 0, ZFS_RT_F_DYN_NAME,
+	    vdev_rt_name(vd, "spa_vdev_remove_cancel_sync:segs"));
 	for (uint64_t msi = 0; msi < vd->vdev_ms_count; msi++) {
 		metaslab_t *msp = vd->vdev_ms[msi];

module/zfs/vdev_trim.c

@@ -902,7 +902,9 @@ vdev_trim_thread(void *arg)
 	ta.trim_vdev = vd;
 	ta.trim_extent_bytes_max = zfs_trim_extent_bytes_max;
 	ta.trim_extent_bytes_min = zfs_trim_extent_bytes_min;
-	ta.trim_tree = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64, NULL, 0, 0);
+	ta.trim_tree = zfs_range_tree_create_flags(
+	    NULL, ZFS_RANGE_SEG64, NULL, 0, 0,
+	    ZFS_RT_F_DYN_NAME, vdev_rt_name(vd, "trim_tree"));
 	ta.trim_type = TRIM_TYPE_MANUAL;
 	ta.trim_flags = 0;
@@ -1305,8 +1307,10 @@ vdev_autotrim_thread(void *arg)
 			 * Allocate an empty range tree which is swapped in
 			 * for the existing ms_trim tree while it is processed.
 			 */
-			trim_tree = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
-			    NULL, 0, 0);
+			trim_tree = zfs_range_tree_create_flags(
+			    NULL, ZFS_RANGE_SEG64, NULL, 0, 0,
+			    ZFS_RT_F_DYN_NAME,
+			    vdev_rt_name(vd, "autotrim_tree"));
 			zfs_range_tree_swap(&msp->ms_trim, &trim_tree);
 			ASSERT(zfs_range_tree_is_empty(msp->ms_trim));
@@ -1360,8 +1364,10 @@ vdev_autotrim_thread(void *arg)
 				if (!cvd->vdev_ops->vdev_op_leaf)
 					continue;
 
-				ta->trim_tree = zfs_range_tree_create(NULL,
-				    ZFS_RANGE_SEG64, NULL, 0, 0);
+				ta->trim_tree = zfs_range_tree_create_flags(
+				    NULL, ZFS_RANGE_SEG64, NULL, 0, 0,
+				    ZFS_RT_F_DYN_NAME,
+				    vdev_rt_name(vd, "autotrim_tree"));
 				zfs_range_tree_walk(trim_tree,
 				    vdev_trim_range_add, ta);
 			}
@@ -1600,7 +1606,9 @@ vdev_trim_l2arc_thread(void *arg)
 	vd->vdev_trim_secure = 0;
 
 	ta.trim_vdev = vd;
-	ta.trim_tree = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64, NULL, 0, 0);
+	ta.trim_tree = zfs_range_tree_create_flags(
+	    NULL, ZFS_RANGE_SEG64, NULL, 0, 0,
+	    ZFS_RT_F_DYN_NAME, vdev_rt_name(vd, "trim_tree"));
 	ta.trim_type = TRIM_TYPE_MANUAL;
 	ta.trim_extent_bytes_max = zfs_trim_extent_bytes_max;
 	ta.trim_extent_bytes_min = SPA_MINBLOCKSIZE;
@@ -1735,7 +1743,9 @@ vdev_trim_simple(vdev_t *vd, uint64_t start, uint64_t size)
 	ASSERT(!vd->vdev_top->vdev_rz_expanding);
 
 	ta.trim_vdev = vd;
-	ta.trim_tree = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64, NULL, 0, 0);
+	ta.trim_tree = zfs_range_tree_create_flags(
+	    NULL, ZFS_RANGE_SEG64, NULL, 0, 0,
+	    ZFS_RT_F_DYN_NAME, vdev_rt_name(vd, "trim_tree"));
 	ta.trim_type = TRIM_TYPE_SIMPLE;
 	ta.trim_extent_bytes_max = zfs_trim_extent_bytes_max;
 	ta.trim_extent_bytes_min = SPA_MINBLOCKSIZE;
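Taken together, the call sites settle on a simple convention: long-lived trees are named after the struct field that holds them ("ms_trim", "vr_scan_tree", "svr_frees"), while short-lived locals are prefixed with the owning function ("spa_vdev_remove_thread:segs", "vdev_dtl_load:rt"). A hypothetical new call site would follow the same shape (sketch, not part of the patch):

    /* Hypothetical example following the naming convention above. */
    zfs_range_tree_t *tmp = zfs_range_tree_create_flags(
        NULL, ZFS_RANGE_SEG64, NULL, 0, 0,
        ZFS_RT_F_DYN_NAME, vdev_rt_name(vd, "my_function:tmp"));
    /* ... use the tree ... */
    zfs_range_tree_destroy(tmp);	/* also frees the generated name */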