range_tree: convert remaining range_* defs to zfs_range_*

Signed-off-by: Rob Norris <robn@despairlabs.com>
Reviewed-by: Tony Hutter <hutter2@llnl.gov>
Reviewed-by: Rob Norris <robn@despairlabs.com>
commit 68473c4fd8 (parent d4a5a7e3aa)
Author: Rob Norris <robn@despairlabs.com>
Date: 2025-02-11 08:50:10 +11:00
Committed by: Tony Hutter
16 changed files with 126 additions and 124 deletions
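
For quick reference, these are the identifier renames applied across the hunks below (old name on the left, new name on the right):

    RANGE_TREE_HISTOGRAM_SIZE  ->  ZFS_RANGE_TREE_HISTOGRAM_SIZE
    range_seg32_t              ->  zfs_range_seg32_t
    range_seg64_t              ->  zfs_range_seg64_t
    range_seg_gap_t            ->  zfs_range_seg_gap_t
    range_seg_max_t            ->  zfs_range_seg_max_t

Lines that grow past 80 columns under the new names are also re-wrapped, which is why a few hunks add one more line than they remove.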


@ -1646,7 +1646,7 @@ dump_metaslab_stats(metaslab_t *msp)
"segments", zfs_btree_numnodes(t), "maxsize", maxbuf,
"freepct", free_pct);
(void) printf("\tIn-memory histogram:\n");
dump_histogram(rt->rt_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
dump_histogram(rt->rt_histogram, ZFS_RANGE_TREE_HISTOGRAM_SIZE, 0);
}
static void
@ -1769,7 +1769,8 @@ dump_metaslab_groups(spa_t *spa, boolean_t show_special)
(void) printf("%3llu%%\n",
(u_longlong_t)mg->mg_fragmentation);
}
dump_histogram(mg->mg_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
dump_histogram(mg->mg_histogram,
ZFS_RANGE_TREE_HISTOGRAM_SIZE, 0);
}
(void) printf("\tpool %s\tfragmentation", spa_name(spa));
@ -1778,7 +1779,7 @@ dump_metaslab_groups(spa_t *spa, boolean_t show_special)
(void) printf("\t%3s\n", "-");
else
(void) printf("\t%3llu%%\n", (u_longlong_t)fragmentation);
dump_histogram(mc->mc_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
dump_histogram(mc->mc_histogram, ZFS_RANGE_TREE_HISTOGRAM_SIZE, 0);
}
static void


@ -200,7 +200,7 @@ struct metaslab_class {
uint64_t mc_deferred; /* total deferred frees */
uint64_t mc_space; /* total space (alloc + free) */
uint64_t mc_dspace; /* total deflated space */
uint64_t mc_histogram[RANGE_TREE_HISTOGRAM_SIZE];
uint64_t mc_histogram[ZFS_RANGE_TREE_HISTOGRAM_SIZE];
/*
* List of all loaded metaslabs in the class, sorted in order of most
@ -290,7 +290,7 @@ struct metaslab_group {
uint64_t mg_allocations;
uint64_t mg_failed_allocations;
uint64_t mg_fragmentation;
uint64_t mg_histogram[RANGE_TREE_HISTOGRAM_SIZE];
uint64_t mg_histogram[ZFS_RANGE_TREE_HISTOGRAM_SIZE];
int mg_ms_disabled;
boolean_t mg_disabled_updating;


@ -37,7 +37,7 @@
extern "C" {
#endif
#define RANGE_TREE_HISTOGRAM_SIZE 64
#define ZFS_RANGE_TREE_HISTOGRAM_SIZE 64
typedef struct zfs_range_tree_ops zfs_range_tree_ops_t;
@ -72,34 +72,34 @@ typedef struct zfs_range_tree {
* rt_histogram[i], contains the number of ranges whose size is:
* 2^i <= size of range in bytes < 2^(i+1)
*/
uint64_t rt_histogram[RANGE_TREE_HISTOGRAM_SIZE];
uint64_t rt_histogram[ZFS_RANGE_TREE_HISTOGRAM_SIZE];
} zfs_range_tree_t;
typedef struct range_seg32 {
typedef struct zfs_range_seg32 {
uint32_t rs_start; /* starting offset of this segment */
uint32_t rs_end; /* ending offset (non-inclusive) */
} range_seg32_t;
} zfs_range_seg32_t;
/*
* Extremely large metaslabs, vdev-wide trees, and dnode-wide trees may
* require 64-bit integers for ranges.
*/
typedef struct range_seg64 {
typedef struct zfs_range_seg64 {
uint64_t rs_start; /* starting offset of this segment */
uint64_t rs_end; /* ending offset (non-inclusive) */
} range_seg64_t;
} zfs_range_seg64_t;
typedef struct range_seg_gap {
typedef struct zfs_range_seg_gap {
uint64_t rs_start; /* starting offset of this segment */
uint64_t rs_end; /* ending offset (non-inclusive) */
uint64_t rs_fill; /* actual fill if gap mode is on */
} range_seg_gap_t;
} zfs_range_seg_gap_t;
/*
* This type needs to be the largest of the range segs, since it will be stack
* allocated and then cast the actual type to do tree operations.
*/
typedef range_seg_gap_t range_seg_max_t;
typedef zfs_range_seg_gap_t zfs_range_seg_max_t;
/*
* This is just for clarity of code purposes, so we can make it clear that a
@ -122,11 +122,11 @@ zfs_rs_get_start_raw(const zfs_range_seg_t *rs, const zfs_range_tree_t *rt)
ASSERT3U(rt->rt_type, <=, ZFS_RANGE_SEG_NUM_TYPES);
switch (rt->rt_type) {
case ZFS_RANGE_SEG32:
return (((const range_seg32_t *)rs)->rs_start);
return (((const zfs_range_seg32_t *)rs)->rs_start);
case ZFS_RANGE_SEG64:
return (((const range_seg64_t *)rs)->rs_start);
return (((const zfs_range_seg64_t *)rs)->rs_start);
case ZFS_RANGE_SEG_GAP:
return (((const range_seg_gap_t *)rs)->rs_start);
return (((const zfs_range_seg_gap_t *)rs)->rs_start);
default:
VERIFY(0);
return (0);
@ -139,11 +139,11 @@ zfs_rs_get_end_raw(const zfs_range_seg_t *rs, const zfs_range_tree_t *rt)
ASSERT3U(rt->rt_type, <=, ZFS_RANGE_SEG_NUM_TYPES);
switch (rt->rt_type) {
case ZFS_RANGE_SEG32:
return (((const range_seg32_t *)rs)->rs_end);
return (((const zfs_range_seg32_t *)rs)->rs_end);
case ZFS_RANGE_SEG64:
return (((const range_seg64_t *)rs)->rs_end);
return (((const zfs_range_seg64_t *)rs)->rs_end);
case ZFS_RANGE_SEG_GAP:
return (((const range_seg_gap_t *)rs)->rs_end);
return (((const zfs_range_seg_gap_t *)rs)->rs_end);
default:
VERIFY(0);
return (0);
@ -156,15 +156,15 @@ zfs_rs_get_fill_raw(const zfs_range_seg_t *rs, const zfs_range_tree_t *rt)
ASSERT3U(rt->rt_type, <=, ZFS_RANGE_SEG_NUM_TYPES);
switch (rt->rt_type) {
case ZFS_RANGE_SEG32: {
const range_seg32_t *r32 = (const range_seg32_t *)rs;
const zfs_range_seg32_t *r32 = (const zfs_range_seg32_t *)rs;
return (r32->rs_end - r32->rs_start);
}
case ZFS_RANGE_SEG64: {
const range_seg64_t *r64 = (const range_seg64_t *)rs;
const zfs_range_seg64_t *r64 = (const zfs_range_seg64_t *)rs;
return (r64->rs_end - r64->rs_start);
}
case ZFS_RANGE_SEG_GAP:
return (((const range_seg_gap_t *)rs)->rs_fill);
return (((const zfs_range_seg_gap_t *)rs)->rs_fill);
default:
VERIFY(0);
return (0);
@ -197,13 +197,13 @@ zfs_rs_set_start_raw(zfs_range_seg_t *rs, zfs_range_tree_t *rt, uint64_t start)
switch (rt->rt_type) {
case ZFS_RANGE_SEG32:
ASSERT3U(start, <=, UINT32_MAX);
((range_seg32_t *)rs)->rs_start = (uint32_t)start;
((zfs_range_seg32_t *)rs)->rs_start = (uint32_t)start;
break;
case ZFS_RANGE_SEG64:
((range_seg64_t *)rs)->rs_start = start;
((zfs_range_seg64_t *)rs)->rs_start = start;
break;
case ZFS_RANGE_SEG_GAP:
((range_seg_gap_t *)rs)->rs_start = start;
((zfs_range_seg_gap_t *)rs)->rs_start = start;
break;
default:
VERIFY(0);
@ -217,13 +217,13 @@ zfs_rs_set_end_raw(zfs_range_seg_t *rs, zfs_range_tree_t *rt, uint64_t end)
switch (rt->rt_type) {
case ZFS_RANGE_SEG32:
ASSERT3U(end, <=, UINT32_MAX);
((range_seg32_t *)rs)->rs_end = (uint32_t)end;
((zfs_range_seg32_t *)rs)->rs_end = (uint32_t)end;
break;
case ZFS_RANGE_SEG64:
((range_seg64_t *)rs)->rs_end = end;
((zfs_range_seg64_t *)rs)->rs_end = end;
break;
case ZFS_RANGE_SEG_GAP:
((range_seg_gap_t *)rs)->rs_end = end;
((zfs_range_seg_gap_t *)rs)->rs_end = end;
break;
default:
VERIFY(0);
@ -243,7 +243,7 @@ zfs_zfs_rs_set_fill_raw(zfs_range_seg_t *rs, zfs_range_tree_t *rt,
zfs_rs_get_start_raw(rs, rt));
break;
case ZFS_RANGE_SEG_GAP:
((range_seg_gap_t *)rs)->rs_fill = fill;
((zfs_range_seg_gap_t *)rs)->rs_fill = fill;
break;
default:
VERIFY(0);
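
To make the header comments above concrete, here is a small standalone sketch. It uses simplified stand-in types and names (seg32_t, seg_get_start(), and so on are illustrations, not the real ZFS identifiers) to show the two ideas at work: callers stack-allocate the widest segment type and the accessors cast it to whichever type the tree actually uses, and a segment of size s is counted in histogram bucket i where 2^i <= s < 2^(i+1).

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Simplified stand-ins for zfs_range_seg32_t / zfs_range_seg64_t /
 * zfs_range_seg_gap_t; illustration only, not the real ZFS definitions.
 */
typedef enum { SEG32, SEG64, SEG_GAP } seg_type_t;
typedef struct { uint32_t rs_start, rs_end; } seg32_t;
typedef struct { uint64_t rs_start, rs_end; } seg64_t;
typedef struct { uint64_t rs_start, rs_end, rs_fill; } seg_gap_t;
typedef seg_gap_t seg_max_t;	/* largest of the three, like zfs_range_seg_max_t */

/* Accessor in the style of zfs_rs_get_start_raw(): cast by the tree's type. */
static uint64_t
seg_get_start(const void *rs, seg_type_t type)
{
	switch (type) {
	case SEG32:
		return (((const seg32_t *)rs)->rs_start);
	case SEG64:
		return (((const seg64_t *)rs)->rs_start);
	case SEG_GAP:
		return (((const seg_gap_t *)rs)->rs_start);
	}
	return (0);
}

/* Histogram bucket for a segment of size s: 2^i <= s < 2^(i+1). */
static int
seg_histogram_bucket(uint64_t size)
{
	int i = -1;
	while (size != 0) {
		size >>= 1;
		i++;
	}
	return (i);
}

int
main(void)
{
	/* Stack-allocate the widest type, then read it through a narrower one. */
	seg_max_t seg = { .rs_start = 0x1000, .rs_end = 0x2400, .rs_fill = 0 };
	assert(seg_get_start(&seg, SEG64) == 0x1000);

	/* 0x1400 bytes = 5120, and 2^12 <= 5120 < 2^13, so bucket 12. */
	printf("bucket = %d\n", seg_histogram_bucket(0x2400 - 0x1000));
	return (0);
}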


@ -106,12 +106,12 @@ extern void vdev_expand(vdev_t *vd, uint64_t txg);
extern void vdev_split(vdev_t *vd);
extern void vdev_deadman(vdev_t *vd, const char *tag);
typedef void vdev_xlate_func_t(void *arg, range_seg64_t *physical_rs);
typedef void vdev_xlate_func_t(void *arg, zfs_range_seg64_t *physical_rs);
extern boolean_t vdev_xlate_is_empty(range_seg64_t *rs);
extern void vdev_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
range_seg64_t *physical_rs, range_seg64_t *remain_rs);
extern void vdev_xlate_walk(vdev_t *vd, const range_seg64_t *logical_rs,
extern boolean_t vdev_xlate_is_empty(zfs_range_seg64_t *rs);
extern void vdev_xlate(vdev_t *vd, const zfs_range_seg64_t *logical_rs,
zfs_range_seg64_t *physical_rs, zfs_range_seg64_t *remain_rs);
extern void vdev_xlate_walk(vdev_t *vd, const zfs_range_seg64_t *logical_rs,
vdev_xlate_func_t *func, void *arg);
extern void vdev_get_stats_ex(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx);


@ -91,8 +91,8 @@ typedef void vdev_remap_func_t(vdev_t *vd, uint64_t offset, uint64_t size,
* Given a target vdev, translates the logical range "in" to the physical
* range "res"
*/
typedef void vdev_xlation_func_t(vdev_t *cvd, const range_seg64_t *logical,
range_seg64_t *physical, range_seg64_t *remain);
typedef void vdev_xlation_func_t(vdev_t *cvd, const zfs_range_seg64_t *logical,
zfs_range_seg64_t *physical, zfs_range_seg64_t *remain);
typedef uint64_t vdev_rebuild_asize_func_t(vdev_t *vd, uint64_t start,
uint64_t size, uint64_t max_segment);
typedef void vdev_metaslab_init_func_t(vdev_t *vd, uint64_t *startp,
@ -616,8 +616,8 @@ extern vdev_ops_t vdev_indirect_ops;
/*
* Common size functions
*/
extern void vdev_default_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
range_seg64_t *physical_rs, range_seg64_t *remain_rs);
extern void vdev_default_xlate(vdev_t *vd, const zfs_range_seg64_t *logical_rs,
zfs_range_seg64_t *physical_rs, zfs_range_seg64_t *remain_rs);
extern uint64_t vdev_default_asize(vdev_t *vd, uint64_t psize, uint64_t txg);
extern uint64_t vdev_default_min_asize(vdev_t *vd);
extern uint64_t vdev_get_min_asize(vdev_t *vd);


@ -1602,8 +1602,8 @@ dsl_scan_should_clear(dsl_scan_t *scn)
* # of extents in exts_by_addr = # in exts_by_size.
* B-tree efficiency is ~75%, but can be as low as 50%.
*/
mused += zfs_btree_numnodes(&queue->q_exts_by_size) *
((sizeof (range_seg_gap_t) + sizeof (uint64_t)) *
mused += zfs_btree_numnodes(&queue->q_exts_by_size) * ((
sizeof (zfs_range_seg_gap_t) + sizeof (uint64_t)) *
3 / 2) + queue->q_sio_memused;
}
mutex_exit(&tvd->vdev_scan_io_queue_lock);
@ -5006,7 +5006,7 @@ ext_size_destroy(zfs_range_tree_t *rt, void *arg)
}
static uint64_t
ext_size_value(zfs_range_tree_t *rt, range_seg_gap_t *rsg)
ext_size_value(zfs_range_tree_t *rt, zfs_range_seg_gap_t *rsg)
{
(void) rt;
uint64_t size = rsg->rs_end - rsg->rs_start;
@ -5021,7 +5021,7 @@ ext_size_add(zfs_range_tree_t *rt, zfs_range_seg_t *rs, void *arg)
{
zfs_btree_t *size_tree = arg;
ASSERT3U(rt->rt_type, ==, ZFS_RANGE_SEG_GAP);
uint64_t v = ext_size_value(rt, (range_seg_gap_t *)rs);
uint64_t v = ext_size_value(rt, (zfs_range_seg_gap_t *)rs);
zfs_btree_add(size_tree, &v);
}
@ -5030,7 +5030,7 @@ ext_size_remove(zfs_range_tree_t *rt, zfs_range_seg_t *rs, void *arg)
{
zfs_btree_t *size_tree = arg;
ASSERT3U(rt->rt_type, ==, ZFS_RANGE_SEG_GAP);
uint64_t v = ext_size_value(rt, (range_seg_gap_t *)rs);
uint64_t v = ext_size_value(rt, (zfs_range_seg_gap_t *)rs);
zfs_btree_remove(size_tree, &v);
}
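
As a worked example of the q_exts_by_size memory estimate above: assuming the usual LP64 sizes, sizeof (zfs_range_seg_gap_t) is 24 bytes and sizeof (uint64_t) is 8, so each tracked extent is charged (24 + 8) * 3 / 2 = 48 bytes, the 3 / 2 factor padding for B-tree nodes that are not fully packed (per the efficiency note in the comment). A quick sketch of the scale this implies, with a hypothetical extent count:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* Hypothetical extent count, only to show the scale of the estimate. */
	uint64_t nextents = 1000000;
	uint64_t per_extent = (24 + 8) * 3 / 2;	/* 48 bytes per tracked extent */
	uint64_t mused = nextents * per_extent;	/* q_sio_memused not included */

	printf("%llu extents ~= %llu MiB of extent-tree metadata\n",
	    (unsigned long long)nextents,
	    (unsigned long long)(mused >> 20));
	return (0);
}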


@ -518,7 +518,7 @@ metaslab_class_histogram_verify(metaslab_class_t *mc)
if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
return;
mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
mc_hist = kmem_zalloc(sizeof (uint64_t) * ZFS_RANGE_TREE_HISTOGRAM_SIZE,
KM_SLEEP);
mutex_enter(&mc->mc_lock);
@ -538,16 +538,16 @@ metaslab_class_histogram_verify(metaslab_class_t *mc)
IMPLY(mg == mg->mg_vd->vdev_log_mg,
mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
for (i = 0; i < ZFS_RANGE_TREE_HISTOGRAM_SIZE; i++)
mc_hist[i] += mg->mg_histogram[i];
}
for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
for (i = 0; i < ZFS_RANGE_TREE_HISTOGRAM_SIZE; i++) {
VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);
}
mutex_exit(&mc->mc_lock);
kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
kmem_free(mc_hist, sizeof (uint64_t) * ZFS_RANGE_TREE_HISTOGRAM_SIZE);
}
/*
@ -1029,10 +1029,10 @@ metaslab_group_histogram_verify(metaslab_group_t *mg)
if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
return;
mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
mg_hist = kmem_zalloc(sizeof (uint64_t) * ZFS_RANGE_TREE_HISTOGRAM_SIZE,
KM_SLEEP);
ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
ASSERT3U(ZFS_RANGE_TREE_HISTOGRAM_SIZE, >=,
SPACE_MAP_HISTOGRAM_SIZE + ashift);
mutex_enter(&mg->mg_lock);
@ -1049,12 +1049,12 @@ metaslab_group_histogram_verify(metaslab_group_t *mg)
}
}
for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i ++)
for (int i = 0; i < ZFS_RANGE_TREE_HISTOGRAM_SIZE; i ++)
VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);
mutex_exit(&mg->mg_lock);
kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
kmem_free(mg_hist, sizeof (uint64_t) * ZFS_RANGE_TREE_HISTOGRAM_SIZE);
}
static void
@ -1344,8 +1344,8 @@ __attribute__((always_inline)) inline
static int
metaslab_rangesize32_compare(const void *x1, const void *x2)
{
const range_seg32_t *r1 = x1;
const range_seg32_t *r2 = x2;
const zfs_range_seg32_t *r1 = x1;
const zfs_range_seg32_t *r2 = x2;
uint64_t rs_size1 = r1->rs_end - r1->rs_start;
uint64_t rs_size2 = r2->rs_end - r2->rs_start;
@ -1363,8 +1363,8 @@ __attribute__((always_inline)) inline
static int
metaslab_rangesize64_compare(const void *x1, const void *x2)
{
const range_seg64_t *r1 = x1;
const range_seg64_t *r2 = x2;
const zfs_range_seg64_t *r1 = x1;
const zfs_range_seg64_t *r2 = x2;
uint64_t rs_size1 = r1->rs_end - r1->rs_start;
uint64_t rs_size2 = r2->rs_end - r2->rs_start;
@ -1390,7 +1390,7 @@ metaslab_size_sorted_add(void *arg, uint64_t start, uint64_t size)
struct mssa_arg *mssap = arg;
zfs_range_tree_t *rt = mssap->rt;
metaslab_rt_arg_t *mrap = mssap->mra;
range_seg_max_t seg = {0};
zfs_range_seg_max_t seg = {0};
zfs_rs_set_start(&seg, rt, start);
zfs_rs_set_end(&seg, rt, start + size);
metaslab_rt_add(rt, &seg, mrap);
@ -1411,10 +1411,10 @@ metaslab_size_tree_full_load(zfs_range_tree_t *rt)
ZFS_BTREE_FIND_IN_BUF_FUNC(metaslab_rt_find_rangesize32_in_buf,
range_seg32_t, metaslab_rangesize32_compare)
zfs_range_seg32_t, metaslab_rangesize32_compare)
ZFS_BTREE_FIND_IN_BUF_FUNC(metaslab_rt_find_rangesize64_in_buf,
range_seg64_t, metaslab_rangesize64_compare)
zfs_range_seg64_t, metaslab_rangesize64_compare)
/*
* Create any block allocator specific components. The current allocators
@ -1432,12 +1432,12 @@ metaslab_rt_create(zfs_range_tree_t *rt, void *arg)
bt_find_in_buf_f bt_find;
switch (rt->rt_type) {
case ZFS_RANGE_SEG32:
size = sizeof (range_seg32_t);
size = sizeof (zfs_range_seg32_t);
compare = metaslab_rangesize32_compare;
bt_find = metaslab_rt_find_rangesize32_in_buf;
break;
case ZFS_RANGE_SEG64:
size = sizeof (range_seg64_t);
size = sizeof (zfs_range_seg64_t);
compare = metaslab_rangesize64_compare;
bt_find = metaslab_rt_find_rangesize64_in_buf;
break;
@ -1603,7 +1603,7 @@ metaslab_block_find(zfs_btree_t *t, zfs_range_tree_t *rt, uint64_t start,
uint64_t size, zfs_btree_index_t *where)
{
zfs_range_seg_t *rs;
range_seg_max_t rsearch;
zfs_range_seg_max_t rsearch;
zfs_rs_set_start(&rsearch, rt, start);
zfs_rs_set_end(&rsearch, rt, start + size);
@ -1857,7 +1857,7 @@ metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
zfs_range_tree_t *rt = msp->ms_allocatable;
zfs_btree_index_t where;
zfs_range_seg_t *rs;
range_seg_max_t rsearch;
zfs_range_seg_max_t rsearch;
uint64_t hbit = highbit64(size);
uint64_t *cursor = &msp->ms_lbas[hbit - 1];
uint64_t max_size = metaslab_largest_allocatable(msp);
@ -2035,7 +2035,7 @@ metaslab_aux_histogram_add(uint64_t *histogram, uint64_t shift,
* from the space map histogram.
*/
int idx = 0;
for (int i = shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
for (int i = shift; i < ZFS_RANGE_TREE_HISTOGRAM_SIZE; i++) {
ASSERT3U(i, >=, idx + shift);
histogram[idx] += rt->rt_histogram[i] << (i - idx - shift);
@ -3110,7 +3110,7 @@ metaslab_weight_from_range_tree(metaslab_t *msp)
ASSERT(msp->ms_loaded);
for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT;
for (int i = ZFS_RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT;
i--) {
uint8_t shift = msp->ms_group->mg_vd->vdev_ashift;
int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;


@ -82,13 +82,13 @@ zfs_rs_copy(zfs_range_seg_t *src, zfs_range_seg_t *dest, zfs_range_tree_t *rt)
size_t size = 0;
switch (rt->rt_type) {
case ZFS_RANGE_SEG32:
size = sizeof (range_seg32_t);
size = sizeof (zfs_range_seg32_t);
break;
case ZFS_RANGE_SEG64:
size = sizeof (range_seg64_t);
size = sizeof (zfs_range_seg64_t);
break;
case ZFS_RANGE_SEG_GAP:
size = sizeof (range_seg_gap_t);
size = sizeof (zfs_range_seg_gap_t);
break;
default:
__builtin_unreachable();
@ -101,7 +101,7 @@ zfs_range_tree_stat_verify(zfs_range_tree_t *rt)
{
zfs_range_seg_t *rs;
zfs_btree_index_t where;
uint64_t hist[RANGE_TREE_HISTOGRAM_SIZE] = { 0 };
uint64_t hist[ZFS_RANGE_TREE_HISTOGRAM_SIZE] = { 0 };
int i;
for (rs = zfs_btree_first(&rt->rt_root, &where); rs != NULL;
@ -114,7 +114,7 @@ zfs_range_tree_stat_verify(zfs_range_tree_t *rt)
ASSERT3U(hist[idx], !=, 0);
}
for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
for (i = 0; i < ZFS_RANGE_TREE_HISTOGRAM_SIZE; i++) {
if (hist[i] != rt->rt_histogram[i]) {
zfs_dbgmsg("i=%d, hist=%px, hist=%llu, rt_hist=%llu",
i, hist, (u_longlong_t)hist[i],
@ -156,8 +156,8 @@ __attribute__((always_inline)) inline
static int
zfs_range_tree_seg32_compare(const void *x1, const void *x2)
{
const range_seg32_t *r1 = x1;
const range_seg32_t *r2 = x2;
const zfs_range_seg32_t *r1 = x1;
const zfs_range_seg32_t *r2 = x2;
ASSERT3U(r1->rs_start, <=, r1->rs_end);
ASSERT3U(r2->rs_start, <=, r2->rs_end);
@ -169,8 +169,8 @@ __attribute__((always_inline)) inline
static int
zfs_range_tree_seg64_compare(const void *x1, const void *x2)
{
const range_seg64_t *r1 = x1;
const range_seg64_t *r2 = x2;
const zfs_range_seg64_t *r1 = x1;
const zfs_range_seg64_t *r2 = x2;
ASSERT3U(r1->rs_start, <=, r1->rs_end);
ASSERT3U(r2->rs_start, <=, r2->rs_end);
@ -182,8 +182,8 @@ __attribute__((always_inline)) inline
static int
zfs_range_tree_seg_gap_compare(const void *x1, const void *x2)
{
const range_seg_gap_t *r1 = x1;
const range_seg_gap_t *r2 = x2;
const zfs_range_seg_gap_t *r1 = x1;
const zfs_range_seg_gap_t *r2 = x2;
ASSERT3U(r1->rs_start, <=, r1->rs_end);
ASSERT3U(r2->rs_start, <=, r2->rs_end);
@ -191,14 +191,14 @@ zfs_range_tree_seg_gap_compare(const void *x1, const void *x2)
return ((r1->rs_start >= r2->rs_end) - (r1->rs_end <= r2->rs_start));
}
ZFS_BTREE_FIND_IN_BUF_FUNC(zfs_range_tree_seg32_find_in_buf, range_seg32_t,
ZFS_BTREE_FIND_IN_BUF_FUNC(zfs_range_tree_seg32_find_in_buf, zfs_range_seg32_t,
zfs_range_tree_seg32_compare)
ZFS_BTREE_FIND_IN_BUF_FUNC(zfs_range_tree_seg64_find_in_buf, range_seg64_t,
ZFS_BTREE_FIND_IN_BUF_FUNC(zfs_range_tree_seg64_find_in_buf, zfs_range_seg64_t,
zfs_range_tree_seg64_compare)
ZFS_BTREE_FIND_IN_BUF_FUNC(zfs_range_tree_seg_gap_find_in_buf, range_seg_gap_t,
zfs_range_tree_seg_gap_compare)
ZFS_BTREE_FIND_IN_BUF_FUNC(zfs_range_tree_seg_gap_find_in_buf,
zfs_range_seg_gap_t, zfs_range_tree_seg_gap_compare)
zfs_range_tree_t *
zfs_range_tree_create_gap(const zfs_range_tree_ops_t *ops,
@ -214,17 +214,17 @@ zfs_range_tree_create_gap(const zfs_range_tree_ops_t *ops,
bt_find_in_buf_f bt_find;
switch (type) {
case ZFS_RANGE_SEG32:
size = sizeof (range_seg32_t);
size = sizeof (zfs_range_seg32_t);
compare = zfs_range_tree_seg32_compare;
bt_find = zfs_range_tree_seg32_find_in_buf;
break;
case ZFS_RANGE_SEG64:
size = sizeof (range_seg64_t);
size = sizeof (zfs_range_seg64_t);
compare = zfs_range_tree_seg64_compare;
bt_find = zfs_range_tree_seg64_find_in_buf;
break;
case ZFS_RANGE_SEG_GAP:
size = sizeof (range_seg_gap_t);
size = sizeof (zfs_range_seg_gap_t);
compare = zfs_range_tree_seg_gap_compare;
bt_find = zfs_range_tree_seg_gap_find_in_buf;
break;
@ -296,7 +296,7 @@ zfs_range_tree_add_impl(void *arg, uint64_t start, uint64_t size, uint64_t fill)
zfs_range_tree_t *rt = arg;
zfs_btree_index_t where;
zfs_range_seg_t *rs_before, *rs_after, *rs;
range_seg_max_t tmp, rsearch;
zfs_range_seg_max_t tmp, rsearch;
uint64_t end = start + size, gap = rt->rt_gap;
uint64_t bridge_size = 0;
boolean_t merge_before, merge_after;
@ -448,7 +448,7 @@ zfs_range_tree_remove_impl(zfs_range_tree_t *rt, uint64_t start, uint64_t size,
{
zfs_btree_index_t where;
zfs_range_seg_t *rs;
range_seg_max_t rsearch, rs_tmp;
zfs_range_seg_max_t rsearch, rs_tmp;
uint64_t end = start + size;
boolean_t left_over, right_over;
@ -510,7 +510,7 @@ zfs_range_tree_remove_impl(zfs_range_tree_t *rt, uint64_t start, uint64_t size,
rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);
if (left_over && right_over) {
range_seg_max_t newseg;
zfs_range_seg_max_t newseg;
zfs_rs_set_start(&newseg, rt, end);
zfs_rs_set_end_raw(&newseg, rt, zfs_rs_get_end_raw(rs, rt));
zfs_rs_set_fill(&newseg, rt, zfs_rs_get_end(rs, rt) - end);
@ -593,7 +593,7 @@ zfs_range_tree_resize_segment(zfs_range_tree_t *rt, zfs_range_seg_t *rs,
static zfs_range_seg_t *
zfs_range_tree_find_impl(zfs_range_tree_t *rt, uint64_t start, uint64_t size)
{
range_seg_max_t rsearch;
zfs_range_seg_max_t rsearch;
uint64_t end = start + size;
VERIFY(size != 0);
@ -644,7 +644,7 @@ zfs_range_tree_find_in(zfs_range_tree_t *rt, uint64_t start, uint64_t size,
if (rt->rt_type == ZFS_RANGE_SEG64)
ASSERT3U(start + size, >, start);
range_seg_max_t rsearch;
zfs_range_seg_max_t rsearch;
zfs_rs_set_start(&rsearch, rt, start);
zfs_rs_set_end_raw(&rsearch, rt, zfs_rs_get_start_raw(&rsearch, rt) +
1);
@ -772,7 +772,7 @@ zfs_range_tree_remove_xor_add_segment(uint64_t start, uint64_t end,
zfs_range_tree_t *removefrom, zfs_range_tree_t *addto)
{
zfs_btree_index_t where;
range_seg_max_t starting_rs;
zfs_range_seg_max_t starting_rs;
zfs_rs_set_start(&starting_rs, removefrom, start);
zfs_rs_set_end_raw(&starting_rs, removefrom,
zfs_rs_get_start_raw(&starting_rs, removefrom) + 1);
@ -801,7 +801,7 @@ zfs_range_tree_remove_xor_add_segment(uint64_t start, uint64_t end,
end);
uint64_t overlap_size = overlap_end - overlap_start;
ASSERT3S(overlap_size, >, 0);
range_seg_max_t rs;
zfs_range_seg_max_t rs;
zfs_rs_copy(curr, &rs, removefrom);
zfs_range_tree_remove(removefrom, overlap_start, overlap_size);
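
The segment comparators above use a compact idiom worth spelling out: for half-open segments, (r1->rs_start >= r2->rs_end) - (r1->rs_end <= r2->rs_start) evaluates to -1, 0, or 1, and any two overlapping segments compare as equal. That is what lets zfs_range_tree_find_in() and zfs_range_tree_remove_xor_add_segment() probe the tree with a minimal one-unit search segment and land on whatever overlaps it. A standalone sketch with simplified stand-in types (not the ZFS ones):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the ZFS segment types; illustration only. */
typedef struct { uint64_t rs_start, rs_end; } seg_t;

static int
seg_compare(const seg_t *r1, const seg_t *r2)
{
	/* -1, 0, or 1; overlapping segments compare as equal. */
	return ((r1->rs_start >= r2->rs_end) - (r1->rs_end <= r2->rs_start));
}

int
main(void)
{
	seg_t a = { 100, 200 };
	seg_t probe = { 150, 151 };	/* minimal probe inside [100, 200) */
	seg_t after = { 300, 400 };

	printf("probe vs a: %d (overlap, so equal)\n", seg_compare(&probe, &a));
	printf("a vs after: %d (a sorts first)\n", seg_compare(&a, &after));
	return (0);
}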


@ -497,7 +497,7 @@ space_map_histogram_add(space_map_t *sm, zfs_range_tree_t *rt, dmu_tx_t *tx)
* map only cares about allocatable blocks (minimum of sm_shift) we
* can safely ignore all ranges in the range tree smaller than sm_shift.
*/
for (int i = sm->sm_shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
for (int i = sm->sm_shift; i < ZFS_RANGE_TREE_HISTOGRAM_SIZE; i++) {
/*
* Since the largest histogram bucket in the space map is
@ -1050,7 +1050,7 @@ space_map_estimate_optimal_size(space_map_t *sm, zfs_range_tree_t *rt,
size += histogram[idx] * entry_size;
if (!spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2)) {
for (; idx < RANGE_TREE_HISTOGRAM_SIZE; idx++) {
for (; idx < ZFS_RANGE_TREE_HISTOGRAM_SIZE; idx++) {
ASSERT3U(idx, >=, single_entry_max_bucket);
entries_for_seg =
1ULL << (idx - single_entry_max_bucket);
@ -1067,7 +1067,7 @@ space_map_estimate_optimal_size(space_map_t *sm, zfs_range_tree_t *rt,
for (; idx <= double_entry_max_bucket; idx++)
size += histogram[idx] * 2 * sizeof (uint64_t);
for (; idx < RANGE_TREE_HISTOGRAM_SIZE; idx++) {
for (; idx < ZFS_RANGE_TREE_HISTOGRAM_SIZE; idx++) {
ASSERT3U(idx, >=, double_entry_max_bucket);
entries_for_seg = 1ULL << (idx - double_entry_max_bucket);
size += histogram[idx] *


@ -294,8 +294,8 @@ vdev_get_mg(vdev_t *vd, metaslab_class_t *mc)
}
void
vdev_default_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
range_seg64_t *physical_rs, range_seg64_t *remain_rs)
vdev_default_xlate(vdev_t *vd, const zfs_range_seg64_t *logical_rs,
zfs_range_seg64_t *physical_rs, zfs_range_seg64_t *remain_rs)
{
(void) vd, (void) remain_rs;
@ -1677,7 +1677,7 @@ vdev_metaslab_fini(vdev_t *vd)
vd->vdev_ms = NULL;
vd->vdev_ms_count = 0;
for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
for (int i = 0; i < ZFS_RANGE_TREE_HISTOGRAM_SIZE; i++) {
ASSERT0(mg->mg_histogram[i]);
if (vd->vdev_log_mg != NULL)
ASSERT0(vd->vdev_log_mg->mg_histogram[i]);
@ -5689,7 +5689,7 @@ vdev_clear_resilver_deferred(vdev_t *vd, dmu_tx_t *tx)
}
boolean_t
vdev_xlate_is_empty(range_seg64_t *rs)
vdev_xlate_is_empty(zfs_range_seg64_t *rs)
{
return (rs->rs_start == rs->rs_end);
}
@ -5703,8 +5703,8 @@ vdev_xlate_is_empty(range_seg64_t *rs)
* specific translation function to do the real conversion.
*/
void
vdev_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
range_seg64_t *physical_rs, range_seg64_t *remain_rs)
vdev_xlate(vdev_t *vd, const zfs_range_seg64_t *logical_rs,
zfs_range_seg64_t *physical_rs, zfs_range_seg64_t *remain_rs)
{
/*
* Walk up the vdev tree
@ -5736,7 +5736,7 @@ vdev_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
* range into its physical and any remaining components by calling
* the vdev specific translate function.
*/
range_seg64_t intermediate = { 0 };
zfs_range_seg64_t intermediate = { 0 };
pvd->vdev_ops->vdev_op_xlate(vd, physical_rs, &intermediate, remain_rs);
physical_rs->rs_start = intermediate.rs_start;
@ -5744,12 +5744,12 @@ vdev_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
}
void
vdev_xlate_walk(vdev_t *vd, const range_seg64_t *logical_rs,
vdev_xlate_walk(vdev_t *vd, const zfs_range_seg64_t *logical_rs,
vdev_xlate_func_t *func, void *arg)
{
range_seg64_t iter_rs = *logical_rs;
range_seg64_t physical_rs;
range_seg64_t remain_rs;
zfs_range_seg64_t iter_rs = *logical_rs;
zfs_range_seg64_t physical_rs;
zfs_range_seg64_t remain_rs;
while (!vdev_xlate_is_empty(&iter_rs)) {


@ -1823,7 +1823,7 @@ static void
vdev_draid_io_verify(vdev_t *vd, raidz_row_t *rr, int col)
{
#ifdef ZFS_DEBUG
range_seg64_t logical_rs, physical_rs, remain_rs;
zfs_range_seg64_t logical_rs, physical_rs, remain_rs;
logical_rs.rs_start = rr->rr_offset;
logical_rs.rs_end = logical_rs.rs_start +
vdev_draid_asize(vd, rr->rr_size, 0);
@ -2080,8 +2080,8 @@ vdev_draid_state_change(vdev_t *vd, int faulted, int degraded)
}
static void
vdev_draid_xlate(vdev_t *cvd, const range_seg64_t *logical_rs,
range_seg64_t *physical_rs, range_seg64_t *remain_rs)
vdev_draid_xlate(vdev_t *cvd, const zfs_range_seg64_t *logical_rs,
zfs_range_seg64_t *physical_rs, zfs_range_seg64_t *remain_rs)
{
vdev_t *raidvd = cvd->vdev_parent;
ASSERT(raidvd->vdev_ops == &vdev_draid_ops);


@ -359,7 +359,7 @@ vdev_initialize_ranges(vdev_t *vd, abd_t *data)
}
static void
vdev_initialize_xlate_last_rs_end(void *arg, range_seg64_t *physical_rs)
vdev_initialize_xlate_last_rs_end(void *arg, zfs_range_seg64_t *physical_rs)
{
uint64_t *last_rs_end = (uint64_t *)arg;
@ -368,7 +368,7 @@ vdev_initialize_xlate_last_rs_end(void *arg, range_seg64_t *physical_rs)
}
static void
vdev_initialize_xlate_progress(void *arg, range_seg64_t *physical_rs)
vdev_initialize_xlate_progress(void *arg, zfs_range_seg64_t *physical_rs)
{
vdev_t *vd = (vdev_t *)arg;
@ -407,7 +407,7 @@ vdev_initialize_calculate_progress(vdev_t *vd)
* on our vdev. We use this to determine if we are
* in the middle of this metaslab range.
*/
range_seg64_t logical_rs, physical_rs, remain_rs;
zfs_range_seg64_t logical_rs, physical_rs, remain_rs;
logical_rs.rs_start = msp->ms_start;
logical_rs.rs_end = msp->ms_start + msp->ms_size;
@ -481,7 +481,7 @@ vdev_initialize_load(vdev_t *vd)
}
static void
vdev_initialize_xlate_range_add(void *arg, range_seg64_t *physical_rs)
vdev_initialize_xlate_range_add(void *arg, zfs_range_seg64_t *physical_rs)
{
vdev_t *vd = arg;
@ -516,7 +516,7 @@ static void
vdev_initialize_range_add(void *arg, uint64_t start, uint64_t size)
{
vdev_t *vd = arg;
range_seg64_t logical_rs;
zfs_range_seg64_t logical_rs;
logical_rs.rs_start = start;
logical_rs.rs_end = start + size;


@ -643,7 +643,8 @@ vdev_config_generate(spa_t *spa, vdev_t *vd, boolean_t getstats,
* will be combined with adjacent allocated segments
* as a single mapping.
*/
for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
for (int i = 0; i < ZFS_RANGE_TREE_HISTOGRAM_SIZE;
i++) {
if (i + 1 < highbit64(vdev_removal_max_span)
- 1) {
to_alloc +=


@ -2305,7 +2305,7 @@ vdev_raidz_io_verify(zio_t *zio, raidz_map_t *rm, raidz_row_t *rr, int col)
{
(void) rm;
#ifdef ZFS_DEBUG
range_seg64_t logical_rs, physical_rs, remain_rs;
zfs_range_seg64_t logical_rs, physical_rs, remain_rs;
logical_rs.rs_start = rr->rr_offset;
logical_rs.rs_end = logical_rs.rs_start +
vdev_raidz_asize(zio->io_vd, rr->rr_size,
@ -3650,8 +3650,8 @@ vdev_raidz_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
}
static void
vdev_raidz_xlate(vdev_t *cvd, const range_seg64_t *logical_rs,
range_seg64_t *physical_rs, range_seg64_t *remain_rs)
vdev_raidz_xlate(vdev_t *cvd, const zfs_range_seg64_t *logical_rs,
zfs_range_seg64_t *physical_rs, zfs_range_seg64_t *remain_rs)
{
(void) remain_rs;


@ -1143,7 +1143,7 @@ spa_vdev_copy_segment(vdev_t *vd, zfs_range_tree_t *segs,
* the allocation at the end of a segment, thus avoiding
* additional split blocks.
*/
range_seg_max_t search;
zfs_range_seg_max_t search;
zfs_btree_index_t where;
zfs_rs_set_start(&search, segs, start + maxalloc);
zfs_rs_set_end(&search, segs, start + maxalloc);


@ -645,7 +645,7 @@ done:
}
static void
vdev_trim_xlate_last_rs_end(void *arg, range_seg64_t *physical_rs)
vdev_trim_xlate_last_rs_end(void *arg, zfs_range_seg64_t *physical_rs)
{
uint64_t *last_rs_end = (uint64_t *)arg;
@ -654,7 +654,7 @@ vdev_trim_xlate_last_rs_end(void *arg, range_seg64_t *physical_rs)
}
static void
vdev_trim_xlate_progress(void *arg, range_seg64_t *physical_rs)
vdev_trim_xlate_progress(void *arg, zfs_range_seg64_t *physical_rs)
{
vdev_t *vd = (vdev_t *)arg;
@ -696,7 +696,7 @@ vdev_trim_calculate_progress(vdev_t *vd)
* on our vdev. We use this to determine if we are
* in the middle of this metaslab range.
*/
range_seg64_t logical_rs, physical_rs, remain_rs;
zfs_range_seg64_t logical_rs, physical_rs, remain_rs;
logical_rs.rs_start = msp->ms_start;
logical_rs.rs_end = msp->ms_start + msp->ms_size;
@ -807,7 +807,7 @@ vdev_trim_load(vdev_t *vd)
}
static void
vdev_trim_xlate_range_add(void *arg, range_seg64_t *physical_rs)
vdev_trim_xlate_range_add(void *arg, zfs_range_seg64_t *physical_rs)
{
trim_args_t *ta = arg;
vdev_t *vd = ta->trim_vdev;
@ -845,7 +845,7 @@ vdev_trim_range_add(void *arg, uint64_t start, uint64_t size)
{
trim_args_t *ta = arg;
vdev_t *vd = ta->trim_vdev;
range_seg64_t logical_rs;
zfs_range_seg64_t logical_rs;
logical_rs.rs_start = start;
logical_rs.rs_end = start + size;
@ -1588,7 +1588,7 @@ vdev_trim_l2arc_thread(void *arg)
spa_t *spa = vd->vdev_spa;
l2arc_dev_t *dev = l2arc_vdev_get(vd);
trim_args_t ta = {0};
range_seg64_t physical_rs;
zfs_range_seg64_t physical_rs;
ASSERT(vdev_is_concrete(vd));
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
@ -1722,7 +1722,7 @@ int
vdev_trim_simple(vdev_t *vd, uint64_t start, uint64_t size)
{
trim_args_t ta = {0};
range_seg64_t physical_rs;
zfs_range_seg64_t physical_rs;
int error;
physical_rs.rs_start = start;
physical_rs.rs_end = start + size;