Undo c89 workarounds to match with upstream

With PR 5756 the zfs module now supports c99, so the
remaining c89 workarounds can be undone.

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: George Melikov <mail@gmelikov.ru>
Signed-off-by: Don Brady <don.brady@delphix.com>
Closes #6816
Authored by Don Brady on 2017-11-04 14:25:13 -06:00; committed by Brian Behlendorf
parent df1f129bc4
commit 1c27024e22
48 changed files with 424 additions and 764 deletions
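
Every hunk below applies one of two c99 idioms that c89 forbade: declaring a loop counter in the `for` initializer, or deferring a declaration to its point of first use. A minimal before/after sketch of the pattern, using a made-up helper rather than code from this commit:

/* c89 style: block-scope declarations forced to the top of the function */
static int
sum_c89(const int *vals, int n)
{
    int i;
    int total = 0;

    for (i = 0; i < n; i++)
        total += vals[i];
    return (total);
}

/* c99 style: the index is scoped to the loop, so the standalone decl goes away */
static int
sum_c99(const int *vals, int n)
{
    int total = 0;

    for (int i = 0; i < n; i++)
        total += vals[i];
    return (total);
}

Narrowing each variable to the loop or statement that uses it is what lets the hunks below delete the `int i;`-style declarations outright (and move initializations like `abd_t *abd = abd_alloc_struct();` to first use) with no behavioral change.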


@@ -638,7 +638,6 @@ _NOTE(CONSTCOND) } while (0)
     { "zero", "single", "double", "triple" }; \
     int len = 0; \
     int copies = 0; \
-    int d; \
     \
     if (bp == NULL) { \
         len += func(buf + len, size - len, "<NULL>"); \
@@ -662,7 +661,7 @@ _NOTE(CONSTCOND) } while (0)
             (u_longlong_t)BPE_GET_PSIZE(bp), \
             (u_longlong_t)bp->blk_birth); \
     } else { \
-        for (d = 0; d < BP_GET_NDVAS(bp); d++) { \
+        for (int d = 0; d < BP_GET_NDVAS(bp); d++) { \
             const dva_t *dva = &bp->blk_dva[d]; \
             if (DVA_IS_VALID(dva)) \
                 copies++; \


@@ -91,26 +91,21 @@ zfeature_is_valid_guid(const char *name)
 boolean_t
 zfeature_is_supported(const char *guid)
 {
-    spa_feature_t i;
     if (zfeature_checks_disable)
         return (B_TRUE);
-    for (i = 0; i < SPA_FEATURES; i++) {
+    for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
         zfeature_info_t *feature = &spa_feature_table[i];
         if (strcmp(guid, feature->fi_guid) == 0)
             return (B_TRUE);
     }
     return (B_FALSE);
 }
 int
 zfeature_lookup_name(const char *name, spa_feature_t *res)
 {
-    spa_feature_t i;
-    for (i = 0; i < SPA_FEATURES; i++) {
+    for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
         zfeature_info_t *feature = &spa_feature_table[i];
         if (strcmp(name, feature->fi_uname) == 0) {
             if (res != NULL)
@@ -126,9 +121,8 @@ boolean_t
 zfeature_depends_on(spa_feature_t fid, spa_feature_t check)
 {
     zfeature_info_t *feature = &spa_feature_table[fid];
-    int i;
-    for (i = 0; feature->fi_depends[i] != SPA_FEATURE_NONE; i++) {
+    for (int i = 0; feature->fi_depends[i] != SPA_FEATURE_NONE; i++) {
         if (feature->fi_depends[i] == check)
             return (B_TRUE);
     }
@@ -138,9 +132,7 @@ zfeature_depends_on(spa_feature_t fid, spa_feature_t check)
 static boolean_t
 deps_contains_feature(const spa_feature_t *deps, const spa_feature_t feature)
 {
-    int i;
-    for (i = 0; deps[i] != SPA_FEATURE_NONE; i++)
+    for (int i = 0; deps[i] != SPA_FEATURE_NONE; i++)
         if (deps[i] == feature)
             return (B_TRUE);


@@ -136,12 +136,13 @@ permset_namecheck(const char *path, namecheck_err_t *why, char *what)
 int
 entity_namecheck(const char *path, namecheck_err_t *why, char *what)
 {
-    const char *start, *end, *loc;
+    const char *start, *end;
     int found_delim;
     /*
      * Make sure the name is not too long.
      */
     if (strlen(path) >= ZFS_MAX_DATASET_NAME_LEN) {
         if (why)
             *why = NAME_ERR_TOOLONG;
@@ -178,7 +179,7 @@ entity_namecheck(const char *path, namecheck_err_t *why, char *what)
     }
     /* Validate the contents of this component */
-    for (loc = start; loc != end; loc++) {
+    for (const char *loc = start; loc != end; loc++) {
         if (!valid_char(*loc) && *loc != '%') {
             if (why) {
                 *why = NAME_ERR_INVALCHAR;


@@ -166,7 +166,7 @@ int
 zprop_iter_common(zprop_func func, void *cb, boolean_t show_all,
     boolean_t ordered, zfs_type_t type)
 {
-    int i, j, num_props, size, prop;
+    int i, num_props, size, prop;
     zprop_desc_t *prop_tbl;
     zprop_desc_t **order;
@@ -181,7 +181,7 @@ zprop_iter_common(zprop_func func, void *cb, boolean_t show_all,
         return (ZPROP_CONT);
 #endif
-    for (j = 0; j < num_props; j++)
+    for (int j = 0; j < num_props; j++)
         order[j] = &prop_tbl[j];
     if (ordered) {


@@ -581,14 +581,12 @@ abd_free_struct(abd_t *abd)
 abd_t *
 abd_alloc(size_t size, boolean_t is_metadata)
 {
-    abd_t *abd;
     if (!zfs_abd_scatter_enabled || size <= PAGESIZE)
         return (abd_alloc_linear(size, is_metadata));
     VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);
-    abd = abd_alloc_struct();
+    abd_t *abd = abd_alloc_struct();
     abd->abd_flags = ABD_FLAG_OWNER;
     abd_alloc_pages(abd, size);
@@ -1108,10 +1106,9 @@ abd_iterate_func(abd_t *abd, size_t off, size_t size,
     abd_iter_advance(&aiter, off);
     while (size > 0) {
-        size_t len;
         abd_iter_map(&aiter);
-        len = MIN(aiter.iter_mapsize, size);
+        size_t len = MIN(aiter.iter_mapsize, size);
         ASSERT3U(len, >, 0);
         ret = func(aiter.iter_mapaddr, len, private);
@@ -1242,13 +1239,12 @@ abd_iterate_func2(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff,
     abd_iter_advance(&saiter, soff);
     while (size > 0) {
-        size_t dlen, slen, len;
         abd_iter_map(&daiter);
         abd_iter_map(&saiter);
-        dlen = MIN(daiter.iter_mapsize, size);
-        slen = MIN(saiter.iter_mapsize, size);
-        len = MIN(dlen, slen);
+        size_t dlen = MIN(daiter.iter_mapsize, size);
+        size_t slen = MIN(saiter.iter_mapsize, size);
+        size_t len = MIN(dlen, slen);
         ASSERT(dlen > 0 || slen > 0);
         ret = func(daiter.iter_mapaddr, saiter.iter_mapaddr, len,


@@ -2266,7 +2266,6 @@ static void
 arc_evictable_space_increment(arc_buf_hdr_t *hdr, arc_state_t *state)
 {
     arc_buf_contents_t type = arc_buf_type(hdr);
-    arc_buf_t *buf;
     ASSERT(HDR_HAS_L1HDR(hdr));
@@ -2290,7 +2289,8 @@ arc_evictable_space_increment(arc_buf_hdr_t *hdr, arc_state_t *state)
             HDR_GET_PSIZE(hdr), hdr);
     }
-    for (buf = hdr->b_l1hdr.b_buf; buf != NULL; buf = buf->b_next) {
+    for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
+        buf = buf->b_next) {
         if (arc_buf_is_shared(buf))
             continue;
         (void) refcount_add_many(&state->arcs_esize[type],
@@ -2307,7 +2307,6 @@ static void
 arc_evictable_space_decrement(arc_buf_hdr_t *hdr, arc_state_t *state)
 {
     arc_buf_contents_t type = arc_buf_type(hdr);
-    arc_buf_t *buf;
     ASSERT(HDR_HAS_L1HDR(hdr));
@@ -2331,7 +2330,8 @@ arc_evictable_space_decrement(arc_buf_hdr_t *hdr, arc_state_t *state)
             HDR_GET_PSIZE(hdr), hdr);
     }
-    for (buf = hdr->b_l1hdr.b_buf; buf != NULL; buf = buf->b_next) {
+    for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
+        buf = buf->b_next) {
         if (arc_buf_is_shared(buf))
             continue;
         (void) refcount_remove_many(&state->arcs_esize[type],
@@ -2547,7 +2547,6 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
         ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
         ASSERT(!HDR_HAS_RABD(hdr));
     } else {
-        arc_buf_t *buf;
         uint32_t buffers = 0;
         /*
@@ -2555,7 +2554,7 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
          * thus we must remove each of these references one
          * at a time.
          */
-        for (buf = hdr->b_l1hdr.b_buf; buf != NULL;
+        for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
             buf = buf->b_next) {
             ASSERT3U(bufcnt, !=, 0);
             buffers++;
@@ -2605,7 +2604,6 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
         (void) refcount_remove_many(&old_state->arcs_size,
             HDR_GET_LSIZE(hdr), hdr);
     } else {
-        arc_buf_t *buf;
         uint32_t buffers = 0;
         /*
@@ -2613,7 +2611,7 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
          * thus we must remove each of these references one
          * at a time.
          */
-        for (buf = hdr->b_l1hdr.b_buf; buf != NULL;
+        for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
             buf = buf->b_next) {
             ASSERT3U(bufcnt, !=, 0);
             buffers++;
@@ -3344,10 +3342,11 @@ arc_hdr_alloc(uint64_t spa, int32_t psize, int32_t lsize,
 static arc_buf_hdr_t *
 arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old, kmem_cache_t *new)
 {
-    ASSERT(HDR_HAS_L2HDR(hdr));
     arc_buf_hdr_t *nhdr;
     l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;
+    ASSERT(HDR_HAS_L2HDR(hdr));
     ASSERT((old == hdr_full_cache && new == hdr_l2only_cache) ||
         (old == hdr_l2only_cache && new == hdr_full_cache));
@@ -4021,7 +4020,6 @@ arc_evict_state(arc_state_t *state, uint64_t spa, int64_t bytes,
     multilist_t *ml = state->arcs_list[type];
     int num_sublists;
     arc_buf_hdr_t **markers;
-    int i;
     IMPLY(bytes < 0, bytes == ARC_EVICT_ALL);
@@ -4035,7 +4033,7 @@ arc_evict_state(arc_state_t *state, uint64_t spa, int64_t bytes,
      * than starting from the tail each time.
      */
     markers = kmem_zalloc(sizeof (*markers) * num_sublists, KM_SLEEP);
-    for (i = 0; i < num_sublists; i++) {
+    for (int i = 0; i < num_sublists; i++) {
         multilist_sublist_t *mls;
         markers[i] = kmem_cache_alloc(hdr_full_cache, KM_SLEEP);
@@ -4076,7 +4074,7 @@ arc_evict_state(arc_state_t *state, uint64_t spa, int64_t bytes,
      * (e.g. index 0) would cause evictions to favor certain
      * sublists over others.
      */
-    for (i = 0; i < num_sublists; i++) {
+    for (int i = 0; i < num_sublists; i++) {
         uint64_t bytes_remaining;
         uint64_t bytes_evicted;
@@ -4122,7 +4120,7 @@ arc_evict_state(arc_state_t *state, uint64_t spa, int64_t bytes,
         }
     }
-    for (i = 0; i < num_sublists; i++) {
+    for (int i = 0; i < num_sublists; i++) {
         multilist_sublist_t *mls = multilist_sublist_lock(ml, i);
         multilist_sublist_remove(mls, markers[i]);
         multilist_sublist_unlock(mls);
@@ -4947,7 +4945,6 @@ arc_reclaim_thread(void *unused)
     mutex_enter(&arc_reclaim_lock);
     while (!arc_reclaim_thread_exit) {
-        int64_t to_free;
         uint64_t evicted = 0;
         uint64_t need_free = arc_need_free;
         arc_tuning_update();
@@ -4996,7 +4993,8 @@ arc_reclaim_thread(void *unused)
              */
             free_memory = arc_available_memory();
-            to_free = (arc_c >> arc_shrink_shift) - free_memory;
+            int64_t to_free =
+                (arc_c >> arc_shrink_shift) - free_memory;
             if (to_free > 0) {
 #ifdef _KERNEL
                 to_free = MAX(to_free, need_free);
@@ -5665,15 +5663,14 @@ arc_read_done(zio_t *zio)
      * read.
      */
     if (HDR_IN_HASH_TABLE(hdr)) {
-        arc_buf_hdr_t *found;
         ASSERT3U(hdr->b_birth, ==, BP_PHYSICAL_BIRTH(zio->io_bp));
         ASSERT3U(hdr->b_dva.dva_word[0], ==,
             BP_IDENTITY(zio->io_bp)->dva_word[0]);
         ASSERT3U(hdr->b_dva.dva_word[1], ==,
             BP_IDENTITY(zio->io_bp)->dva_word[1]);
-        found = buf_hash_find(hdr->b_spa, zio->io_bp, &hash_lock);
+        arc_buf_hdr_t *found = buf_hash_find(hdr->b_spa, zio->io_bp,
+            &hash_lock);
         ASSERT((found == hdr &&
             DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
@@ -6372,8 +6369,6 @@ arc_freed(spa_t *spa, const blkptr_t *bp)
 void
 arc_release(arc_buf_t *buf, void *tag)
 {
-    kmutex_t *hash_lock;
-    arc_state_t *state;
     arc_buf_hdr_t *hdr = buf->b_hdr;
     /*
@@ -6414,7 +6409,7 @@ arc_release(arc_buf_t *buf, void *tag)
         return;
     }
-    hash_lock = HDR_LOCK(hdr);
+    kmutex_t *hash_lock = HDR_LOCK(hdr);
     mutex_enter(hash_lock);
     /*
@@ -6422,7 +6417,7 @@ arc_release(arc_buf_t *buf, void *tag)
      * held, we must be careful not to reference state or the
      * b_state field after dropping the lock.
      */
-    state = hdr->b_l1hdr.b_state;
+    arc_state_t *state = hdr->b_l1hdr.b_state;
     ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
     ASSERT3P(state, !=, arc_anon);
@@ -6622,7 +6617,6 @@ arc_write_ready(zio_t *zio)
     arc_buf_hdr_t *hdr = buf->b_hdr;
     blkptr_t *bp = zio->io_bp;
     uint64_t psize = BP_IS_HOLE(bp) ? 0 : BP_GET_PSIZE(bp);
-    enum zio_compress compress;
     fstrans_cookie_t cookie = spl_fstrans_mark();
     ASSERT(HDR_HAS_L1HDR(hdr));
@@ -6689,6 +6683,7 @@ arc_write_ready(zio_t *zio)
     /* this must be done after the buffer flags are adjusted */
     arc_cksum_compute(buf);
+    enum zio_compress compress;
     if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) {
         compress = ZIO_COMPRESS_OFF;
     } else {
@@ -8459,7 +8454,6 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
     l2arc_write_callback_t *cb;
     zio_t *pio, *wzio;
     uint64_t guid = spa_load_guid(spa);
-    int try;
     ASSERT3P(dev->l2ad_vdev, !=, NULL);
@@ -8472,7 +8466,7 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
     /*
      * Copy buffers for L2ARC writing.
      */
-    for (try = 0; try < L2ARC_FEED_TYPES; try++) {
+    for (int try = 0; try < L2ARC_FEED_TYPES; try++) {
         multilist_sublist_t *mls = l2arc_sublist_lock(try);
         uint64_t passed_sz = 0;


@@ -50,7 +50,6 @@ encode_embedded_bp_compressed(blkptr_t *bp, void *data,
     uint64_t *bp64 = (uint64_t *)bp;
     uint64_t w = 0;
     uint8_t *data8 = data;
-    int i;
     ASSERT3U(compressed_size, <=, BPE_PAYLOAD_SIZE);
     ASSERT(uncompressed_size == compressed_size ||
@@ -69,7 +68,7 @@ encode_embedded_bp_compressed(blkptr_t *bp, void *data,
      * Encode the byte array into the words of the block pointer.
      * First byte goes into low bits of first word (little endian).
      */
-    for (i = 0; i < compressed_size; i++) {
+    for (int i = 0; i < compressed_size; i++) {
         BF64_SET(w, (i % sizeof (w)) * NBBY, NBBY, data8[i]);
         if (i % sizeof (w) == sizeof (w) - 1) {
             /* we've reached the end of a word */
@@ -97,7 +96,6 @@ decode_embedded_bp_compressed(const blkptr_t *bp, void *buf)
     uint8_t *buf8 = buf;
     uint64_t w = 0;
     const uint64_t *bp64 = (const uint64_t *)bp;
-    int i;
     ASSERT(BP_IS_EMBEDDED(bp));
@@ -107,7 +105,7 @@ decode_embedded_bp_compressed(const blkptr_t *bp, void *buf)
      * Decode the words of the block pointer into the byte array.
      * Low bits of first word are the first byte (little endian).
      */
-    for (i = 0; i < psize; i++) {
+    for (int i = 0; i < psize; i++) {
         if (i % sizeof (w) == 0) {
             /* beginning of a word */
             ASSERT3P(bp64, <, bp + 1);


@@ -378,7 +378,6 @@ bpobj_enqueue_subobj(bpobj_t *bpo, uint64_t subobj, dmu_tx_t *tx)
 {
     bpobj_t subbpo;
     uint64_t used, comp, uncomp, subsubobjs;
-    ASSERTV(dmu_object_info_t doi);
     ASSERT(bpo->bpo_havesubobj);
     ASSERT(bpo->bpo_havecomp);
@@ -407,6 +406,7 @@ bpobj_enqueue_subobj(bpobj_t *bpo, uint64_t subobj, dmu_tx_t *tx)
             DMU_OT_NONE, 0, tx);
     }
+    ASSERTV(dmu_object_info_t doi);
     ASSERT0(dmu_object_info(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs, &doi));
     ASSERT3U(doi.doi_type, ==, DMU_OT_BPOBJ_SUBOBJ);


@@ -502,7 +502,7 @@ dbuf_evict_one(void)
 {
     int idx = multilist_get_random_index(dbuf_cache);
     multilist_sublist_t *mls = multilist_sublist_lock(dbuf_cache, idx);
-    dmu_buf_impl_t *db;
     ASSERT(!MUTEX_HELD(&dbuf_evict_lock));
     /*
@@ -513,7 +513,7 @@ dbuf_evict_one(void)
     ASSERT3P(tsd_get(zfs_dbuf_evict_key), ==, NULL);
     (void) tsd_set(zfs_dbuf_evict_key, (void *)B_TRUE);
-    db = multilist_sublist_tail(mls);
+    dmu_buf_impl_t *db = multilist_sublist_tail(mls);
     while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
         db = multilist_sublist_prev(mls, db);
     }
@@ -844,7 +844,6 @@ dbuf_verify(dmu_buf_impl_t *db)
             ASSERT(buf[i] == 0);
         }
     } else {
-        int i;
         blkptr_t *bps = db->db.db_data;
         ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
             db->db.db_size);
@@ -855,7 +854,7 @@ dbuf_verify(dmu_buf_impl_t *db)
          * We iterate through each blkptr and verify
          * they only have those fields set.
          */
-        for (i = 0;
+        for (int i = 0;
             i < db->db.db_size / sizeof (blkptr_t);
             i++) {
             blkptr_t *bp = &bps[i];
@@ -1080,8 +1079,7 @@ dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
         BP_IS_HOLE(db->db_blkptr) &&
         db->db_blkptr->blk_birth != 0) {
         blkptr_t *bps = db->db.db_data;
-        int i;
-        for (i = 0; i < ((1 <<
+        for (int i = 0; i < ((1 <<
             DB_DNODE(db)->dn_indblkshift) / sizeof (blkptr_t));
             i++) {
             blkptr_t *bp = &bps[i];
@@ -1974,7 +1972,6 @@ static void
 dmu_buf_will_dirty_impl(dmu_buf_t *db_fake, int flags, dmu_tx_t *tx)
 {
     dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
-    dbuf_dirty_record_t *dr;
     ASSERT(tx->tx_txg != 0);
     ASSERT(!refcount_is_zero(&db->db_holds));
@@ -1987,6 +1984,7 @@ dmu_buf_will_dirty_impl(dmu_buf_t *db_fake, int flags, dmu_tx_t *tx)
      */
     mutex_enter(&db->db_mtx);
+    dbuf_dirty_record_t *dr;
     for (dr = db->db_last_dirty;
         dr != NULL && dr->dr_txg >= tx->tx_txg; dr = dr->dr_next) {
         /*
@@ -2307,8 +2305,6 @@ static inline int
 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
     dmu_buf_impl_t **parentp, blkptr_t **bpp, struct dbuf_hold_impl_data *dh)
 {
-    int nlevels, epbs;
     *parentp = NULL;
     *bpp = NULL;
@@ -2327,9 +2323,9 @@ dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
         return (0);
     }
-    nlevels =
+    int nlevels =
         (dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels;
-    epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
+    int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
     ASSERT3U(level * epbs, <, 64);
     ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
@@ -2497,11 +2493,11 @@ typedef struct dbuf_prefetch_arg {
 static void
 dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp)
 {
-    arc_flags_t aflags;
     if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
         return;
-    aflags = dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;
+    arc_flags_t aflags =
+        dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;
     ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
     ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level);
@@ -2520,8 +2516,6 @@ static void
 dbuf_prefetch_indirect_done(zio_t *zio, int err, arc_buf_t *abuf, void *private)
 {
     dbuf_prefetch_arg_t *dpa = private;
-    uint64_t nextblkid;
-    blkptr_t *bp;
     ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel);
     ASSERT3S(dpa->dpa_curlevel, >, 0);
@@ -2560,9 +2554,9 @@ dbuf_prefetch_indirect_done(zio_t *zio, int err, arc_buf_t *abuf, void *private)
     dpa->dpa_curlevel--;
-    nextblkid = dpa->dpa_zb.zb_blkid >>
+    uint64_t nextblkid = dpa->dpa_zb.zb_blkid >>
         (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level));
-    bp = ((blkptr_t *)abuf->b_data) +
+    blkptr_t *bp = ((blkptr_t *)abuf->b_data) +
         P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs);
     if (BP_IS_HOLE(bp) || err != 0) {
         kmem_free(dpa, sizeof (*dpa));
@@ -2602,10 +2596,6 @@ dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
     blkptr_t bp;
     int epbs, nlevels, curlevel;
     uint64_t curblkid;
-    dmu_buf_impl_t *db;
-    zio_t *pio;
-    dbuf_prefetch_arg_t *dpa;
-    dsl_dataset_t *ds;
     ASSERT(blkid != DMU_BONUS_BLKID);
     ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
@@ -2628,7 +2618,7 @@ dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
     if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level))
         return;
-    db = dbuf_find(dn->dn_objset, dn->dn_object,
+    dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object,
         level, blkid);
     if (db != NULL) {
         mutex_exit(&db->db_mtx);
@@ -2673,11 +2663,11 @@ dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
     ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp));
-    pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL,
+    zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL,
         ZIO_FLAG_CANFAIL);
-    dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP);
-    ds = dn->dn_objset->os_dsl_dataset;
+    dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP);
+    dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
     SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
         dn->dn_object, level, blkid);
     dpa->dpa_curlevel = curlevel;
@@ -3933,7 +3923,6 @@ dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
         ZIO_PRIORITY_ASYNC_WRITE,
         ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
     } else {
-        arc_write_done_func_t *children_ready_cb = NULL;
         ASSERT(arc_released(data));
         /*
@@ -3941,6 +3930,7 @@ dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
          * ready callback so that we can properly handle an indirect
          * block that only contains holes.
          */
+        arc_write_done_func_t *children_ready_cb = NULL;
         if (db->db_level != 0)
             children_ready_cb = dbuf_write_children_ready;


@@ -261,10 +261,9 @@ ddt_object_name(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
 void
 ddt_bp_fill(const ddt_phys_t *ddp, blkptr_t *bp, uint64_t txg)
 {
-    int d;
     ASSERT(txg != 0);
-    for (d = 0; d < SPA_DVAS_PER_BP; d++)
+    for (int d = 0; d < SPA_DVAS_PER_BP; d++)
         bp->blk_dva[d] = ddp->ddp_dva[d];
     BP_SET_BIRTH(bp, txg, ddp->ddp_phys_birth);
 }
@@ -313,10 +312,9 @@ ddt_key_fill(ddt_key_t *ddk, const blkptr_t *bp)
 void
 ddt_phys_fill(ddt_phys_t *ddp, const blkptr_t *bp)
 {
-    int d;
     ASSERT(ddp->ddp_phys_birth == 0);
-    for (d = 0; d < SPA_DVAS_PER_BP; d++)
+    for (int d = 0; d < SPA_DVAS_PER_BP; d++)
         ddp->ddp_dva[d] = bp->blk_dva[d];
     ddp->ddp_phys_birth = BP_PHYSICAL_BIRTH(bp);
 }
@@ -356,9 +354,8 @@ ddt_phys_t *
 ddt_phys_select(const ddt_entry_t *dde, const blkptr_t *bp)
 {
     ddt_phys_t *ddp = (ddt_phys_t *)dde->dde_phys;
-    int p;
-    for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
+    for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
         if (DVA_EQUAL(BP_IDENTITY(bp), &ddp->ddp_dva[0]) &&
             BP_PHYSICAL_BIRTH(bp) == ddp->ddp_phys_birth)
             return (ddp);
@@ -370,9 +367,8 @@ uint64_t
 ddt_phys_total_refcnt(const ddt_entry_t *dde)
 {
     uint64_t refcnt = 0;
-    int p;
-    for (p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++)
+    for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++)
         refcnt += dde->dde_phys[p].ddp_refcnt;
     return (refcnt);
@@ -386,18 +382,17 @@ ddt_stat_generate(ddt_t *ddt, ddt_entry_t *dde, ddt_stat_t *dds)
     ddt_key_t *ddk = &dde->dde_key;
     uint64_t lsize = DDK_GET_LSIZE(ddk);
     uint64_t psize = DDK_GET_PSIZE(ddk);
-    int p, d;
     bzero(dds, sizeof (*dds));
-    for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
+    for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
         uint64_t dsize = 0;
         uint64_t refcnt = ddp->ddp_refcnt;
         if (ddp->ddp_phys_birth == 0)
             continue;
-        for (d = 0; d < DDE_GET_NDVAS(dde); d++)
+        for (int d = 0; d < DDE_GET_NDVAS(dde); d++)
             dsize += dva_get_dsize_sync(spa, &ddp->ddp_dva[d]);
         dds->dds_blocks += 1;
@@ -445,20 +440,16 @@ ddt_stat_update(ddt_t *ddt, ddt_entry_t *dde, uint64_t neg)
 void
 ddt_histogram_add(ddt_histogram_t *dst, const ddt_histogram_t *src)
 {
-    int h;
-    for (h = 0; h < 64; h++)
+    for (int h = 0; h < 64; h++)
         ddt_stat_add(&dst->ddh_stat[h], &src->ddh_stat[h], 0);
 }
 void
 ddt_histogram_stat(ddt_stat_t *dds, const ddt_histogram_t *ddh)
 {
-    int h;
     bzero(dds, sizeof (*dds));
-    for (h = 0; h < 64; h++)
+    for (int h = 0; h < 64; h++)
         ddt_stat_add(dds, &ddh->ddh_stat[h], 0);
 }
@@ -478,15 +469,11 @@ ddt_histogram_empty(const ddt_histogram_t *ddh)
 void
 ddt_get_dedup_object_stats(spa_t *spa, ddt_object_t *ddo_total)
 {
-    enum zio_checksum c;
-    enum ddt_type type;
-    enum ddt_class class;
     /* Sum the statistics we cached in ddt_object_sync(). */
-    for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
+    for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
         ddt_t *ddt = spa->spa_ddt[c];
-        for (type = 0; type < DDT_TYPES; type++) {
-            for (class = 0; class < DDT_CLASSES;
+        for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
+            for (enum ddt_class class = 0; class < DDT_CLASSES;
                 class++) {
                 ddt_object_t *ddo =
                     &ddt->ddt_object_stats[type][class];
@@ -507,14 +494,10 @@ ddt_get_dedup_object_stats(spa_t *spa, ddt_object_t *ddo_total)
 void
 ddt_get_dedup_histogram(spa_t *spa, ddt_histogram_t *ddh)
 {
-    enum zio_checksum c;
-    enum ddt_type type;
-    enum ddt_class class;
-    for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
+    for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
         ddt_t *ddt = spa->spa_ddt[c];
-        for (type = 0; type < DDT_TYPES; type++) {
-            for (class = 0; class < DDT_CLASSES;
+        for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
+            for (enum ddt_class class = 0; class < DDT_CLASSES;
                 class++) {
                 ddt_histogram_add(ddh,
                     &ddt->ddt_histogram_cache[type][class]);
@@ -571,9 +554,8 @@ ddt_ditto_copies_needed(ddt_t *ddt, ddt_entry_t *dde, ddt_phys_t *ddp_willref)
     int total_copies = 0;
     int desired_copies = 0;
     int copies_needed = 0;
-    int p;
-    for (p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
+    for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
         ddt_phys_t *ddp = &dde->dde_phys[p];
         zio_t *zio = dde->dde_lead_zio[p];
         uint64_t refcnt = ddp->ddp_refcnt;	/* committed refs */
@@ -612,9 +594,8 @@ ddt_ditto_copies_present(ddt_entry_t *dde)
     ddt_phys_t *ddp = &dde->dde_phys[DDT_PHYS_DITTO];
     dva_t *dva = ddp->ddp_dva;
     int copies = 0 - DVA_GET_GANG(dva);
-    int d;
-    for (d = 0; d < DDE_GET_NDVAS(dde); d++, dva++)
+    for (int d = 0; d < DDE_GET_NDVAS(dde); d++, dva++)
         if (DVA_IS_VALID(dva))
             copies++;
@@ -722,11 +703,9 @@ ddt_alloc(const ddt_key_t *ddk)
 static void
 ddt_free(ddt_entry_t *dde)
 {
-    int p;
     ASSERT(!dde->dde_loading);
-    for (p = 0; p < DDT_PHYS_TYPES; p++)
+    for (int p = 0; p < DDT_PHYS_TYPES; p++)
         ASSERT(dde->dde_lead_zio[p] == NULL);
     if (dde->dde_repair_abd != NULL)
@@ -813,8 +792,6 @@ ddt_prefetch(spa_t *spa, const blkptr_t *bp)
 {
     ddt_t *ddt;
     ddt_entry_t dde;
-    enum ddt_type type;
-    enum ddt_class class;
     if (!zfs_dedup_prefetch || bp == NULL || !BP_GET_DEDUP(bp))
         return;
@@ -827,8 +804,8 @@ ddt_prefetch(spa_t *spa, const blkptr_t *bp)
     ddt = ddt_select(spa, bp);
     ddt_key_fill(&dde.dde_key, bp);
-    for (type = 0; type < DDT_TYPES; type++) {
-        for (class = 0; class < DDT_CLASSES; class++) {
+    for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
+        for (enum ddt_class class = 0; class < DDT_CLASSES; class++) {
             ddt_object_prefetch(ddt, type, class, &dde);
         }
     }
@@ -851,9 +828,8 @@ ddt_entry_compare(const void *x1, const void *x2)
     const ddt_key_cmp_t *k1 = (const ddt_key_cmp_t *)&dde1->dde_key;
     const ddt_key_cmp_t *k2 = (const ddt_key_cmp_t *)&dde2->dde_key;
     int32_t cmp = 0;
-    int i;
-    for (i = 0; i < DDT_KEY_CMP_LEN; i++) {
+    for (int i = 0; i < DDT_KEY_CMP_LEN; i++) {
         cmp = (int32_t)k1->u16[i] - (int32_t)k2->u16[i];
         if (likely(cmp))
             break;
@@ -896,20 +872,15 @@ ddt_table_free(ddt_t *ddt)
 void
 ddt_create(spa_t *spa)
 {
-    enum zio_checksum c;
     spa->spa_dedup_checksum = ZIO_DEDUPCHECKSUM;
-    for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++)
+    for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++)
         spa->spa_ddt[c] = ddt_table_alloc(spa, c);
 }
 int
 ddt_load(spa_t *spa)
 {
-    enum zio_checksum c;
-    enum ddt_type type;
-    enum ddt_class class;
     int error;
     ddt_create(spa);
@@ -921,10 +892,10 @@ ddt_load(spa_t *spa)
     if (error)
         return (error == ENOENT ? 0 : error);
-    for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
+    for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
         ddt_t *ddt = spa->spa_ddt[c];
-        for (type = 0; type < DDT_TYPES; type++) {
-            for (class = 0; class < DDT_CLASSES;
+        for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
+            for (enum ddt_class class = 0; class < DDT_CLASSES;
                 class++) {
                 error = ddt_object_load(ddt, type, class);
                 if (error != 0 && error != ENOENT)
@@ -946,9 +917,7 @@ ddt_load(spa_t *spa)
 void
 ddt_unload(spa_t *spa)
 {
-    enum zio_checksum c;
-    for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
+    for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
         if (spa->spa_ddt[c]) {
             ddt_table_free(spa->spa_ddt[c]);
             spa->spa_ddt[c] = NULL;
@@ -961,8 +930,6 @@ ddt_class_contains(spa_t *spa, enum ddt_class max_class, const blkptr_t *bp)
 {
     ddt_t *ddt;
     ddt_entry_t *dde;
-    enum ddt_type type;
-    enum ddt_class class;
     if (!BP_GET_DEDUP(bp))
         return (B_FALSE);
@@ -975,8 +942,8 @@ ddt_class_contains(spa_t *spa, enum ddt_class max_class, const blkptr_t *bp)
     ddt_key_fill(&(dde->dde_key), bp);
-    for (type = 0; type < DDT_TYPES; type++) {
-        for (class = 0; class <= max_class; class++) {
+    for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
+        for (enum ddt_class class = 0; class <= max_class; class++) {
             if (ddt_object_lookup(ddt, type, class, dde) == 0) {
                 kmem_cache_free(ddt_entry_cache, dde);
                 return (B_TRUE);
@@ -993,15 +960,13 @@ ddt_repair_start(ddt_t *ddt, const blkptr_t *bp)
 {
     ddt_key_t ddk;
     ddt_entry_t *dde;
-    enum ddt_type type;
-    enum ddt_class class;
     ddt_key_fill(&ddk, bp);
     dde = ddt_alloc(&ddk);
-    for (type = 0; type < DDT_TYPES; type++) {
-        for (class = 0; class < DDT_CLASSES; class++) {
+    for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
+        for (enum ddt_class class = 0; class < DDT_CLASSES; class++) {
             /*
              * We can only do repair if there are multiple copies
              * of the block. For anything in the UNIQUE class,
@@ -1051,12 +1016,11 @@ ddt_repair_entry(ddt_t *ddt, ddt_entry_t *dde, ddt_entry_t *rdde, zio_t *rio)
     ddt_key_t *rddk = &rdde->dde_key;
     zio_t *zio;
     blkptr_t blk;
-    int p;
     zio = zio_null(rio, rio->io_spa, NULL,
         ddt_repair_entry_done, rdde, rio->io_flags);
-    for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++, rddp++) {
+    for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++, rddp++) {
         if (ddp->ddp_phys_birth == 0 ||
             ddp->ddp_phys_birth != rddp->ddp_phys_birth ||
             bcmp(ddp->ddp_dva, rddp->ddp_dva, sizeof (ddp->ddp_dva)))
@@ -1106,12 +1070,11 @@ ddt_sync_entry(ddt_t *ddt, ddt_entry_t *dde, dmu_tx_t *tx, uint64_t txg)
     enum ddt_class oclass = dde->dde_class;
     enum ddt_class nclass;
     uint64_t total_refcnt = 0;
-    int p;
     ASSERT(dde->dde_loaded);
     ASSERT(!dde->dde_loading);
-    for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
+    for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
         ASSERT(dde->dde_lead_zio[p] == NULL);
         if (ddp->ddp_phys_birth == 0) {
             ASSERT(ddp->ddp_refcnt == 0);
@@ -1168,8 +1131,6 @@ ddt_sync_table(ddt_t *ddt, dmu_tx_t *tx, uint64_t txg)
     spa_t *spa = ddt->ddt_spa;
     ddt_entry_t *dde;
     void *cookie = NULL;
-    enum ddt_type type;
-    enum ddt_class class;
     if (avl_numnodes(&ddt->ddt_tree) == 0)
         return;
@@ -1187,9 +1148,9 @@ ddt_sync_table(ddt_t *ddt, dmu_tx_t *tx, uint64_t txg)
         ddt_free(dde);
     }
-    for (type = 0; type < DDT_TYPES; type++) {
+    for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
         uint64_t add, count = 0;
-        for (class = 0; class < DDT_CLASSES; class++) {
+        for (enum ddt_class class = 0; class < DDT_CLASSES; class++) {
             if (ddt_object_exists(ddt, type, class)) {
                 ddt_object_sync(ddt, type, class, tx);
                 VERIFY(ddt_object_count(ddt, type, class,
@@ -1197,7 +1158,7 @@ ddt_sync_table(ddt_t *ddt, dmu_tx_t *tx, uint64_t txg)
             count += add;
         }
     }
-    for (class = 0; class < DDT_CLASSES; class++) {
+    for (enum ddt_class class = 0; class < DDT_CLASSES; class++) {
         if (count == 0 && ddt_object_exists(ddt, type, class))
             ddt_object_destroy(ddt, type, class, tx);
     }
@@ -1214,13 +1175,12 @@ ddt_sync(spa_t *spa, uint64_t txg)
     dmu_tx_t *tx;
     zio_t *rio = zio_root(spa, NULL, NULL,
         ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
-    enum zio_checksum c;
     ASSERT(spa_syncing_txg(spa) == txg);
     tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
-    for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
+    for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
         ddt_t *ddt = spa->spa_ddt[c];
         if (ddt == NULL)
             continue;


@@ -675,10 +675,8 @@ dmu_prefetch(objset_t *os, uint64_t object, int64_t level, uint64_t offset,
     }
     if (nblks != 0) {
-        int i;
         blkid = dbuf_whichblock(dn, level, offset);
-        for (i = 0; i < nblks; i++)
+        for (int i = 0; i < nblks; i++)
             dbuf_prefetch(dn, level, blkid + i, pri, 0);
     }
@@ -704,7 +702,6 @@ get_next_chunk(dnode_t *dn, uint64_t *start, uint64_t minimum)
     /* bytes of data covered by a level-1 indirect block */
     uint64_t iblkrange =
         dn->dn_datablksz * EPB(dn->dn_indblkshift, SPA_BLKPTRSHIFT);
-    uint64_t blks;
     ASSERT3U(minimum, <=, *start);
@@ -714,7 +711,7 @@ get_next_chunk(dnode_t *dn, uint64_t *start, uint64_t minimum)
     }
     ASSERT(ISP2(iblkrange));
-    for (blks = 0; *start > minimum && blks < maxblks; blks++) {
+    for (uint64_t blks = 0; *start > minimum && blks < maxblks; blks++) {
         int err;
         /*
@@ -767,7 +764,6 @@ dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
     int err;
     uint64_t dirty_frees_threshold;
     dsl_pool_t *dp = dmu_objset_pool(os);
-    int t;
     if (dn == NULL)
         return (SET_ERROR(EINVAL));
@@ -805,7 +801,7 @@ dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
         chunk_len = chunk_end - chunk_begin;
         mutex_enter(&dp->dp_lock);
-        for (t = 0; t < TXG_SIZE; t++) {
+        for (int t = 0; t < TXG_SIZE; t++) {
             long_free_dirty_all_txgs +=
                 dp->dp_long_free_dirty_pertxg[t];
         }
@@ -2291,7 +2287,6 @@ void
 __dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
 {
     dnode_phys_t *dnp = dn->dn_phys;
-    int i;
     doi->doi_data_block_size = dn->dn_datablksz;
     doi->doi_metadata_block_size = dn->dn_indblkshift ?
@@ -2307,7 +2302,7 @@ __dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
     doi->doi_physical_blocks_512 = (DN_USED_BYTES(dnp) + 256) >> 9;
     doi->doi_max_offset = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
     doi->doi_fill_count = 0;
-    for (i = 0; i < dnp->dn_nblkptr; i++)
+    for (int i = 0; i < dnp->dn_nblkptr; i++)
         doi->doi_fill_count += BP_GET_FILL(&dnp->dn_blkptr[i]);
 }


@@ -852,11 +852,9 @@ dmu_objset_evict_dbufs(objset_t *os)
 void
 dmu_objset_evict(objset_t *os)
 {
-    int t;
     dsl_dataset_t *ds = os->os_dsl_dataset;
-    for (t = 0; t < TXG_SIZE; t++)
+    for (int t = 0; t < TXG_SIZE; t++)
         ASSERT(!dmu_objset_is_dirty(os, t));
     if (ds)
@@ -1384,8 +1382,6 @@ dmu_objset_sync_dnodes(multilist_sublist_t *list, dmu_tx_t *tx)
 static void
 dmu_objset_write_ready(zio_t *zio, arc_buf_t *abuf, void *arg)
 {
-    int i;
     blkptr_t *bp = zio->io_bp;
     objset_t *os = arg;
     dnode_phys_t *dnp = &os->os_phys->os_meta_dnode;
@@ -1401,7 +1397,7 @@ dmu_objset_write_ready(zio_t *zio, arc_buf_t *abuf, void *arg)
      * objects that are stored in the objset_phys_t -- the meta
      * dnode and user/group accounting objects).
      */
-    for (i = 0; i < dnp->dn_nblkptr; i++)
+    for (int i = 0; i < dnp->dn_nblkptr; i++)
         fill += BP_GET_FILL(&dnp->dn_blkptr[i]);
     BP_SET_FILL(bp, fill);
@@ -2273,7 +2269,6 @@ static void
 dmu_objset_find_dp_impl(dmu_objset_find_ctx_t *dcp)
 {
     dsl_pool_t *dp = dcp->dc_dp;
-    dmu_objset_find_ctx_t *child_dcp;
     dsl_dir_t *dd;
     dsl_dataset_t *ds;
     zap_cursor_t zc;
@@ -2315,7 +2310,7 @@ dmu_objset_find_dp_impl(dmu_objset_find_ctx_t *dcp)
             sizeof (uint64_t));
         ASSERT3U(attr->za_num_integers, ==, 1);
-        child_dcp =
+        dmu_objset_find_ctx_t *child_dcp =
             kmem_alloc(sizeof (*child_dcp), KM_SLEEP);
         *child_dcp = *dcp;
         child_dcp->dc_ddobj = attr->za_first_integer;


@@ -744,7 +744,6 @@ do_dump(dmu_sendarg_t *dsa, struct send_block_record *data)
     spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
     dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
     int err = 0;
-    uint64_t dnobj;
     ASSERT3U(zb->zb_level, >=, 0);
@@ -780,12 +779,10 @@ do_dump(dmu_sendarg_t *dsa, struct send_block_record *data)
     } else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
         return (0);
     } else if (type == DMU_OT_DNODE) {
-        dnode_phys_t *blk;
         int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
         arc_flags_t aflags = ARC_FLAG_WAIT;
         arc_buf_t *abuf;
         enum zio_flag zioflags = ZIO_FLAG_CANFAIL;
-        int i;
         if (dsa->dsa_featureflags & DMU_BACKUP_FEATURE_RAW) {
             ASSERT(BP_IS_ENCRYPTED(bp));
@@ -799,8 +796,8 @@ do_dump(dmu_sendarg_t *dsa, struct send_block_record *data)
             ZIO_PRIORITY_ASYNC_READ, zioflags, &aflags, zb) != 0)
             return (SET_ERROR(EIO));
-        blk = abuf->b_data;
-        dnobj = zb->zb_blkid * epb;
+        dnode_phys_t *blk = abuf->b_data;
+        uint64_t dnobj = zb->zb_blkid * epb;
         /*
          * Raw sends require sending encryption parameters for the
@@ -813,7 +810,8 @@ do_dump(dmu_sendarg_t *dsa, struct send_block_record *data)
         }
         if (err == 0) {
-            for (i = 0; i < epb; i += blk[i].dn_extra_slots + 1) {
+            for (int i = 0; i < epb;
+                i += blk[i].dn_extra_slots + 1) {
                 err = dump_dnode(dsa, bp, dnobj + i, blk + i);
                 if (err != 0)
                     break;
@@ -1836,9 +1834,6 @@ dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
     VERIFY0(dmu_objset_from_ds(newds, &os));
     if (drba->drba_cookie->drc_resumable) {
-        uint64_t one = 1;
-        uint64_t zero = 0;
         dsl_dataset_zapify(newds, tx);
         if (drrb->drr_fromguid != 0) {
             VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_FROMGUID,
@@ -1848,6 +1843,8 @@ dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
             8, 1, &drrb->drr_toguid, tx));
         VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TONAME,
             1, strlen(drrb->drr_toname) + 1, drrb->drr_toname, tx));
+        uint64_t one = 1;
+        uint64_t zero = 0;
         VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OBJECT,
             8, 1, &one, tx));
         VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OFFSET,
@@ -1915,10 +1912,6 @@ dmu_recv_resume_begin_check(void *arg, dmu_tx_t *tx)
     uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
     dsl_dataset_t *ds;
     const char *tofs = drba->drba_cookie->drc_tofs;
-    uint64_t val;
-    /* 6 extra bytes for /%recv */
-    char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];
     /* already checked */
     ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
@@ -1960,6 +1953,8 @@ dmu_recv_resume_begin_check(void *arg, dmu_tx_t *tx)
         !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_DNODE))
         return (SET_ERROR(ENOTSUP));
+    /* 6 extra bytes for /%recv */
+    char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];
     (void) snprintf(recvname, sizeof (recvname), "%s/%s",
         tofs, recv_clone_name);
@@ -1984,6 +1979,7 @@ dmu_recv_resume_begin_check(void *arg, dmu_tx_t *tx)
         dsl_dataset_rele_flags(ds, dsflags, FTAG);
         return (SET_ERROR(EINVAL));
     }
+    uint64_t val;
     error = zap_lookup(dp->dp_meta_objset, ds->ds_object,
         DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val);
     if (error != 0 || drrb->drr_toguid != val) {
@@ -3085,9 +3081,7 @@ objlist_create(struct objlist *list)
 static void
 objlist_destroy(struct objlist *list)
 {
-    struct receive_objnode *n;
-    for (n = list_remove_head(&list->list);
+    for (struct receive_objnode *n = list_remove_head(&list->list);
         n != NULL; n = list_remove_head(&list->list)) {
         kmem_free(n, sizeof (*n));
     }


@@ -702,7 +702,6 @@ traverse_pool(spa_t *spa, uint64_t txg_start, int flags,
     blkptr_cb_t func, void *arg)
 {
     int err;
-    uint64_t obj;
     dsl_pool_t *dp = spa_get_dsl(spa);
     objset_t *mos = dp->dp_meta_objset;
     boolean_t hard = (flags & TRAVERSE_HARD);
@@ -714,7 +713,7 @@ traverse_pool(spa_t *spa, uint64_t txg_start, int flags,
         return (err);
     /* visit each dataset */
-    for (obj = 1; err == 0;
+    for (uint64_t obj = 1; err == 0;
         err = dmu_object_next(mos, &obj, B_FALSE, txg_start)) {
         dmu_object_info_t doi;


@@ -392,7 +392,6 @@ dmu_tx_hold_free_impl(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
         SPA_BLKPTRSHIFT;
     uint64_t start = off >> shift;
     uint64_t end = (off + len) >> shift;
-    uint64_t i;
     ASSERT(dn->dn_indblkshift != 0);
@@ -406,7 +405,7 @@ dmu_tx_hold_free_impl(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
     zio_t *zio = zio_root(tx->tx_pool->dp_spa,
         NULL, NULL, ZIO_FLAG_CANFAIL);
-    for (i = start; i <= end; i++) {
+    for (uint64_t i = start; i <= end; i++) {
         uint64_t ibyte = i << shift;
         err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
         i = ibyte >> shift;
@@ -1114,15 +1113,13 @@ dmu_tx_destroy(dmu_tx_t *tx)
 void
 dmu_tx_commit(dmu_tx_t *tx)
 {
-    dmu_tx_hold_t *txh;
     ASSERT(tx->tx_txg != 0);
     /*
      * Go through the transaction's hold list and remove holds on
      * associated dnodes, notifying waiters if no holds remain.
      */
-    for (txh = list_head(&tx->tx_holds); txh != NULL;
+    for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
         txh = list_next(&tx->tx_holds, txh)) {
         dnode_t *dn = txh->txh_dnode;


@ -152,17 +152,15 @@ dmu_zfetch_fini(zfetch_t *zf)
static void static void
dmu_zfetch_stream_create(zfetch_t *zf, uint64_t blkid) dmu_zfetch_stream_create(zfetch_t *zf, uint64_t blkid)
{ {
zstream_t *zs;
zstream_t *zs_next; zstream_t *zs_next;
int numstreams = 0; int numstreams = 0;
uint32_t max_streams;
ASSERT(RW_WRITE_HELD(&zf->zf_rwlock)); ASSERT(RW_WRITE_HELD(&zf->zf_rwlock));
/* /*
* Clean up old streams. * Clean up old streams.
*/ */
for (zs = list_head(&zf->zf_stream); for (zstream_t *zs = list_head(&zf->zf_stream);
zs != NULL; zs = zs_next) { zs != NULL; zs = zs_next) {
zs_next = list_next(&zf->zf_stream, zs); zs_next = list_next(&zf->zf_stream, zs);
if (((gethrtime() - zs->zs_atime) / NANOSEC) > if (((gethrtime() - zs->zs_atime) / NANOSEC) >
@ -180,7 +178,7 @@ dmu_zfetch_stream_create(zfetch_t *zf, uint64_t blkid)
* If we are already at the maximum number of streams for this file, * If we are already at the maximum number of streams for this file,
* even after removing old streams, then don't create this stream. * even after removing old streams, then don't create this stream.
*/ */
max_streams = MAX(1, MIN(zfetch_max_streams, uint32_t max_streams = MAX(1, MIN(zfetch_max_streams,
zf->zf_dnode->dn_maxblkid * zf->zf_dnode->dn_datablksz / zf->zf_dnode->dn_maxblkid * zf->zf_dnode->dn_datablksz /
zfetch_max_distance)); zfetch_max_distance));
if (numstreams >= max_streams) { if (numstreams >= max_streams) {
@ -188,7 +186,7 @@ dmu_zfetch_stream_create(zfetch_t *zf, uint64_t blkid)
return; return;
} }
zs = kmem_zalloc(sizeof (*zs), KM_SLEEP); zstream_t *zs = kmem_zalloc(sizeof (*zs), KM_SLEEP);
zs->zs_blkid = blkid; zs->zs_blkid = blkid;
zs->zs_pf_blkid = blkid; zs->zs_pf_blkid = blkid;
zs->zs_ipf_blkid = blkid; zs->zs_ipf_blkid = blkid;
@ -211,8 +209,8 @@ dmu_zfetch(zfetch_t *zf, uint64_t blkid, uint64_t nblks, boolean_t fetch_data)
{ {
zstream_t *zs; zstream_t *zs;
int64_t pf_start, ipf_start, ipf_istart, ipf_iend; int64_t pf_start, ipf_start, ipf_istart, ipf_iend;
int64_t pf_ahead_blks, max_blks, iblk; int64_t pf_ahead_blks, max_blks;
int epbs, max_dist_blks, pf_nblks, ipf_nblks, i; int epbs, max_dist_blks, pf_nblks, ipf_nblks;
uint64_t end_of_access_blkid; uint64_t end_of_access_blkid;
end_of_access_blkid = blkid + nblks; end_of_access_blkid = blkid + nblks;
@ -324,11 +322,11 @@ dmu_zfetch(zfetch_t *zf, uint64_t blkid, uint64_t nblks, boolean_t fetch_data)
* calling it to reduce the time we hold them. * calling it to reduce the time we hold them.
*/ */
for (i = 0; i < pf_nblks; i++) { for (int i = 0; i < pf_nblks; i++) {
dbuf_prefetch(zf->zf_dnode, 0, pf_start + i, dbuf_prefetch(zf->zf_dnode, 0, pf_start + i,
ZIO_PRIORITY_ASYNC_READ, ARC_FLAG_PREDICTIVE_PREFETCH); ZIO_PRIORITY_ASYNC_READ, ARC_FLAG_PREDICTIVE_PREFETCH);
} }
for (iblk = ipf_istart; iblk < ipf_iend; iblk++) { for (int64_t iblk = ipf_istart; iblk < ipf_iend; iblk++) {
dbuf_prefetch(zf->zf_dnode, 1, iblk, dbuf_prefetch(zf->zf_dnode, 1, iblk,
ZIO_PRIORITY_ASYNC_READ, ARC_FLAG_PREDICTIVE_PREFETCH); ZIO_PRIORITY_ASYNC_READ, ARC_FLAG_PREDICTIVE_PREFETCH);
} }
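In the dmu_zfetch.c hunks the temporaries (zs, max_streams) stop being forward-declared and are instead initialized where their values are computed. A standalone sketch of the declare-at-first-use shape (invented names and limits, not the zfetch code):

#include <stdio.h>

/*
 * Clamp a requested stream count, in the spirit of the max_streams
 * computation above; the limits here are invented.
 */
static unsigned
clamp_streams(unsigned requested, unsigned ceiling)
{
	/* C99: declare the local where its value is first computed. */
	unsigned allowed = (requested < ceiling) ? requested : ceiling;

	return (allowed > 1 ? allowed : 1);
}

int
main(void)
{
	printf("%u\n", clamp_streams(8, 4));	/* prints 4 */
	return (0);
}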

View File

@ -1921,8 +1921,7 @@ dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx)
* amount of space if we copy the freed BPs into deadlists. * amount of space if we copy the freed BPs into deadlists.
*/ */
if (dn->dn_nlevels > 1) { if (dn->dn_nlevels > 1) {
uint64_t first, last, i, ibyte; uint64_t first, last;
int shift, err;
first = blkid >> epbs; first = blkid >> epbs;
dnode_dirty_l1(dn, first, tx); dnode_dirty_l1(dn, first, tx);
@ -1933,17 +1932,17 @@ dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx)
if (last != first) if (last != first)
dnode_dirty_l1(dn, last, tx); dnode_dirty_l1(dn, last, tx);
shift = dn->dn_datablkshift + dn->dn_indblkshift - int shift = dn->dn_datablkshift + dn->dn_indblkshift -
SPA_BLKPTRSHIFT; SPA_BLKPTRSHIFT;
for (i = first + 1; i < last; i++) { for (uint64_t i = first + 1; i < last; i++) {
/* /*
* Set i to the blockid of the next non-hole * Set i to the blockid of the next non-hole
* level-1 indirect block at or after i. Note * level-1 indirect block at or after i. Note
* that dnode_next_offset() operates in terms of * that dnode_next_offset() operates in terms of
* level-0-equivalent bytes. * level-0-equivalent bytes.
*/ */
ibyte = i << shift; uint64_t ibyte = i << shift;
err = dnode_next_offset(dn, DNODE_FIND_HAVELOCK, int err = dnode_next_offset(dn, DNODE_FIND_HAVELOCK,
&ibyte, 2, 1, 0); &ibyte, 2, 1, 0);
i = ibyte >> shift; i = ibyte >> shift;
if (i >= last) if (i >= last)

View File

@ -116,14 +116,10 @@ free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx)
{ {
dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset; dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
uint64_t bytesfreed = 0; uint64_t bytesfreed = 0;
int i;
dprintf("ds=%p obj=%llx num=%d\n", ds, dn->dn_object, num); dprintf("ds=%p obj=%llx num=%d\n", ds, dn->dn_object, num);
for (i = 0; i < num; i++, bp++) { for (int i = 0; i < num; i++, bp++) {
uint64_t lsize, lvl;
dmu_object_type_t type;
if (BP_IS_HOLE(bp)) if (BP_IS_HOLE(bp))
continue; continue;
@ -138,9 +134,9 @@ free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx)
* records transmitted during a zfs send. * records transmitted during a zfs send.
*/ */
lsize = BP_GET_LSIZE(bp); uint64_t lsize = BP_GET_LSIZE(bp);
type = BP_GET_TYPE(bp); dmu_object_type_t type = BP_GET_TYPE(bp);
lvl = BP_GET_LEVEL(bp); uint64_t lvl = BP_GET_LEVEL(bp);
bzero(bp, sizeof (blkptr_t)); bzero(bp, sizeof (blkptr_t));
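In free_blocks the per-block temporaries (lsize, type, lvl) move inside the loop body, so each iteration gets freshly initialized, tightly scoped values. A minimal sketch of that shape (invented data):

#include <stdio.h>

int
main(void)
{
	const int sizes[] = { 512, 4096, 16384 };

	for (int i = 0; i < 3; i++) {
		/*
		 * C99: these are fresh, loop-body-scoped values on every
		 * iteration, like lsize/type/lvl in free_blocks above.
		 */
		int kb = sizes[i] / 1024;
		int rem = sizes[i] % 1024;

		printf("%d KiB + %d B\n", kb, rem);
	}
	return (0);
}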
@ -243,7 +239,6 @@ free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks,
dmu_buf_impl_t *subdb; dmu_buf_impl_t *subdb;
uint64_t start, end, dbstart, dbend; uint64_t start, end, dbstart, dbend;
unsigned int epbs, shift, i; unsigned int epbs, shift, i;
uint64_t id;
/* /*
* There is a small possibility that this block will not be cached: * There is a small possibility that this block will not be cached:
@ -280,7 +275,7 @@ free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks,
FREE_VERIFY(db, start, end, tx); FREE_VERIFY(db, start, end, tx);
free_blocks(dn, bp, end-start+1, tx); free_blocks(dn, bp, end-start+1, tx);
} else { } else {
for (id = start; id <= end; id++, bp++) { for (uint64_t id = start; id <= end; id++, bp++) {
if (BP_IS_HOLE(bp)) if (BP_IS_HOLE(bp))
continue; continue;
rw_enter(&dn->dn_struct_rwlock, RW_READER); rw_enter(&dn->dn_struct_rwlock, RW_READER);
@ -356,11 +351,10 @@ dnode_sync_free_range_impl(dnode_t *dn, uint64_t blkid, uint64_t nblks,
int start = blkid >> shift; int start = blkid >> shift;
int end = (blkid + nblks - 1) >> shift; int end = (blkid + nblks - 1) >> shift;
dmu_buf_impl_t *db; dmu_buf_impl_t *db;
int i;
ASSERT(start < dn->dn_phys->dn_nblkptr); ASSERT(start < dn->dn_phys->dn_nblkptr);
bp += start; bp += start;
for (i = start; i <= end; i++, bp++) { for (int i = start; i <= end; i++, bp++) {
if (BP_IS_HOLE(bp)) if (BP_IS_HOLE(bp))
continue; continue;
rw_enter(&dn->dn_struct_rwlock, RW_READER); rw_enter(&dn->dn_struct_rwlock, RW_READER);
@ -562,9 +556,8 @@ dnode_sync(dnode_t *dn, dmu_tx_t *tx)
dnode_phys_t *dnp = dn->dn_phys; dnode_phys_t *dnp = dn->dn_phys;
int txgoff = tx->tx_txg & TXG_MASK; int txgoff = tx->tx_txg & TXG_MASK;
list_t *list = &dn->dn_dirty_records[txgoff]; list_t *list = &dn->dn_dirty_records[txgoff];
boolean_t kill_spill = B_FALSE;
boolean_t freeing_dnode;
ASSERTV(static const dnode_phys_t zerodn = { 0 }); ASSERTV(static const dnode_phys_t zerodn = { 0 });
boolean_t kill_spill = B_FALSE;
ASSERT(dmu_tx_is_syncing(tx)); ASSERT(dmu_tx_is_syncing(tx));
ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg); ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg);
@ -657,7 +650,8 @@ dnode_sync(dnode_t *dn, dmu_tx_t *tx)
dn->dn_next_bonustype[txgoff] = 0; dn->dn_next_bonustype[txgoff] = 0;
} }
freeing_dnode = dn->dn_free_txg > 0 && dn->dn_free_txg <= tx->tx_txg; boolean_t freeing_dnode = dn->dn_free_txg > 0 &&
dn->dn_free_txg <= tx->tx_txg;
/* /*
* Remove the spill block if we have been explicitly asked to * Remove the spill block if we have been explicitly asked to

View File

@ -149,12 +149,11 @@ dsl_bookmark_create_check(void *arg, dmu_tx_t *tx)
dsl_bookmark_create_arg_t *dbca = arg; dsl_bookmark_create_arg_t *dbca = arg;
dsl_pool_t *dp = dmu_tx_pool(tx); dsl_pool_t *dp = dmu_tx_pool(tx);
int rv = 0; int rv = 0;
nvpair_t *pair;
if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_BOOKMARKS)) if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_BOOKMARKS))
return (SET_ERROR(ENOTSUP)); return (SET_ERROR(ENOTSUP));
for (pair = nvlist_next_nvpair(dbca->dbca_bmarks, NULL); for (nvpair_t *pair = nvlist_next_nvpair(dbca->dbca_bmarks, NULL);
pair != NULL; pair = nvlist_next_nvpair(dbca->dbca_bmarks, pair)) { pair != NULL; pair = nvlist_next_nvpair(dbca->dbca_bmarks, pair)) {
dsl_dataset_t *snapds; dsl_dataset_t *snapds;
int error; int error;
@ -183,11 +182,10 @@ dsl_bookmark_create_sync(void *arg, dmu_tx_t *tx)
dsl_bookmark_create_arg_t *dbca = arg; dsl_bookmark_create_arg_t *dbca = arg;
dsl_pool_t *dp = dmu_tx_pool(tx); dsl_pool_t *dp = dmu_tx_pool(tx);
objset_t *mos = dp->dp_meta_objset; objset_t *mos = dp->dp_meta_objset;
nvpair_t *pair;
ASSERT(spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_BOOKMARKS)); ASSERT(spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_BOOKMARKS));
for (pair = nvlist_next_nvpair(dbca->dbca_bmarks, NULL); for (nvpair_t *pair = nvlist_next_nvpair(dbca->dbca_bmarks, NULL);
pair != NULL; pair = nvlist_next_nvpair(dbca->dbca_bmarks, pair)) { pair != NULL; pair = nvlist_next_nvpair(dbca->dbca_bmarks, pair)) {
dsl_dataset_t *snapds, *bmark_fs; dsl_dataset_t *snapds, *bmark_fs;
zfs_bookmark_phys_t bmark_phys; zfs_bookmark_phys_t bmark_phys;
@ -268,7 +266,6 @@ dsl_get_bookmarks_impl(dsl_dataset_t *ds, nvlist_t *props, nvlist_t *outnvl)
for (zap_cursor_init(&zc, dp->dp_meta_objset, bmark_zapobj); for (zap_cursor_init(&zc, dp->dp_meta_objset, bmark_zapobj);
zap_cursor_retrieve(&zc, &attr) == 0; zap_cursor_retrieve(&zc, &attr) == 0;
zap_cursor_advance(&zc)) { zap_cursor_advance(&zc)) {
nvlist_t *out_props;
char *bmark_name = attr.za_name; char *bmark_name = attr.za_name;
zfs_bookmark_phys_t bmark_phys; zfs_bookmark_phys_t bmark_phys;
@ -277,7 +274,7 @@ dsl_get_bookmarks_impl(dsl_dataset_t *ds, nvlist_t *props, nvlist_t *outnvl)
if (err != 0) if (err != 0)
break; break;
out_props = fnvlist_alloc(); nvlist_t *out_props = fnvlist_alloc();
if (nvlist_exists(props, if (nvlist_exists(props,
zfs_prop_to_name(ZFS_PROP_GUID))) { zfs_prop_to_name(ZFS_PROP_GUID))) {
dsl_prop_nvlist_add_uint64(out_props, dsl_prop_nvlist_add_uint64(out_props,
@ -356,7 +353,6 @@ dsl_bookmark_destroy_check(void *arg, dmu_tx_t *tx)
dsl_bookmark_destroy_arg_t *dbda = arg; dsl_bookmark_destroy_arg_t *dbda = arg;
dsl_pool_t *dp = dmu_tx_pool(tx); dsl_pool_t *dp = dmu_tx_pool(tx);
int rv = 0; int rv = 0;
nvpair_t *pair;
ASSERT(nvlist_empty(dbda->dbda_success)); ASSERT(nvlist_empty(dbda->dbda_success));
ASSERT(nvlist_empty(dbda->dbda_errors)); ASSERT(nvlist_empty(dbda->dbda_errors));
@ -364,7 +360,7 @@ dsl_bookmark_destroy_check(void *arg, dmu_tx_t *tx)
if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_BOOKMARKS)) if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_BOOKMARKS))
return (0); return (0);
for (pair = nvlist_next_nvpair(dbda->dbda_bmarks, NULL); for (nvpair_t *pair = nvlist_next_nvpair(dbda->dbda_bmarks, NULL);
pair != NULL; pair = nvlist_next_nvpair(dbda->dbda_bmarks, pair)) { pair != NULL; pair = nvlist_next_nvpair(dbda->dbda_bmarks, pair)) {
const char *fullname = nvpair_name(pair); const char *fullname = nvpair_name(pair);
dsl_dataset_t *ds; dsl_dataset_t *ds;
@ -408,9 +404,8 @@ dsl_bookmark_destroy_sync(void *arg, dmu_tx_t *tx)
dsl_bookmark_destroy_arg_t *dbda = arg; dsl_bookmark_destroy_arg_t *dbda = arg;
dsl_pool_t *dp = dmu_tx_pool(tx); dsl_pool_t *dp = dmu_tx_pool(tx);
objset_t *mos = dp->dp_meta_objset; objset_t *mos = dp->dp_meta_objset;
nvpair_t *pair;
for (pair = nvlist_next_nvpair(dbda->dbda_success, NULL); for (nvpair_t *pair = nvlist_next_nvpair(dbda->dbda_success, NULL);
pair != NULL; pair = nvlist_next_nvpair(dbda->dbda_success, pair)) { pair != NULL; pair = nvlist_next_nvpair(dbda->dbda_success, pair)) {
dsl_dataset_t *ds; dsl_dataset_t *ds;
char *shortname; char *shortname;
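The bookmark loops drop the function-scope nvpair_t *pair and declare the cursor in the for-initializer, limiting it to the walk itself. A standalone sketch of the same cursor shape using a plain linked list (not the real libnvpair API):

#include <stdio.h>
#include <stddef.h>

struct node {
	const char *name;
	struct node *next;
};

int
main(void)
{
	struct node third = { "third", NULL };
	struct node second = { "second", &third };
	struct node first = { "first", &second };

	/*
	 * C99: the cursor is scoped to the walk, the same shape as the
	 * "nvpair_t *pair" loops above.
	 */
	for (struct node *n = &first; n != NULL; n = n->next)
		printf("%s\n", n->name);

	return (0);
}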

View File

@ -113,7 +113,6 @@ dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
{ {
int used, compressed, uncompressed; int used, compressed, uncompressed;
int64_t delta; int64_t delta;
spa_feature_t f;
used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp); used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
compressed = BP_GET_PSIZE(bp); compressed = BP_GET_PSIZE(bp);
@ -147,7 +146,7 @@ dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
B_TRUE; B_TRUE;
} }
f = zio_checksum_to_feature(BP_GET_CHECKSUM(bp)); spa_feature_t f = zio_checksum_to_feature(BP_GET_CHECKSUM(bp));
if (f != SPA_FEATURE_NONE) if (f != SPA_FEATURE_NONE)
ds->ds_feature_activation_needed[f] = B_TRUE; ds->ds_feature_activation_needed[f] = B_TRUE;
@ -719,10 +718,9 @@ dsl_dataset_name(dsl_dataset_t *ds, char *name)
int int
dsl_dataset_namelen(dsl_dataset_t *ds) dsl_dataset_namelen(dsl_dataset_t *ds)
{ {
int len;
VERIFY0(dsl_dataset_get_snapname(ds)); VERIFY0(dsl_dataset_get_snapname(ds));
mutex_enter(&ds->ds_lock); mutex_enter(&ds->ds_lock);
len = strlen(ds->ds_snapname); int len = strlen(ds->ds_snapname);
/* add '@' if ds is a snap */ /* add '@' if ds is a snap */
if (len > 0) if (len > 0)
len++; len++;
@ -854,7 +852,6 @@ dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
if (origin == NULL) { if (origin == NULL) {
dsphys->ds_deadlist_obj = dsl_deadlist_alloc(mos, tx); dsphys->ds_deadlist_obj = dsl_deadlist_alloc(mos, tx);
} else { } else {
spa_feature_t f;
dsl_dataset_t *ohds; /* head of the origin snapshot */ dsl_dataset_t *ohds; /* head of the origin snapshot */
dsphys->ds_prev_snap_obj = origin->ds_object; dsphys->ds_prev_snap_obj = origin->ds_object;
@ -877,7 +874,7 @@ dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
dsphys->ds_flags |= dsl_dataset_phys(origin)->ds_flags & dsphys->ds_flags |= dsl_dataset_phys(origin)->ds_flags &
(DS_FLAG_INCONSISTENT | DS_FLAG_CI_DATASET); (DS_FLAG_INCONSISTENT | DS_FLAG_CI_DATASET);
for (f = 0; f < SPA_FEATURES; f++) { for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
if (origin->ds_feature_inuse[f]) if (origin->ds_feature_inuse[f])
dsl_dataset_activate_feature(dsobj, f, tx); dsl_dataset_activate_feature(dsobj, f, tx);
} }
@ -1045,8 +1042,8 @@ dsl_dataset_remove_from_next_clones(dsl_dataset_t *ds, uint64_t obj,
dmu_tx_t *tx) dmu_tx_t *tx)
{ {
objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset; objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
int err;
ASSERTV(uint64_t count); ASSERTV(uint64_t count);
int err;
ASSERT(dsl_dataset_phys(ds)->ds_num_children >= 2); ASSERT(dsl_dataset_phys(ds)->ds_num_children >= 2);
err = zap_remove_int(mos, dsl_dataset_phys(ds)->ds_next_clones_obj, err = zap_remove_int(mos, dsl_dataset_phys(ds)->ds_next_clones_obj,
@ -1107,9 +1104,7 @@ dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx)
boolean_t boolean_t
dsl_dataset_is_dirty(dsl_dataset_t *ds) dsl_dataset_is_dirty(dsl_dataset_t *ds)
{ {
int t; for (int t = 0; t < TXG_SIZE; t++) {
for (t = 0; t < TXG_SIZE; t++) {
if (txg_list_member(&ds->ds_dir->dd_pool->dp_dirty_datasets, if (txg_list_member(&ds->ds_dir->dd_pool->dp_dirty_datasets,
ds, t)) ds, t))
return (B_TRUE); return (B_TRUE);
@ -1363,7 +1358,6 @@ dsl_dataset_snapshot_sync_impl(dsl_dataset_t *ds, const char *snapname,
dsl_dataset_phys_t *dsphys; dsl_dataset_phys_t *dsphys;
uint64_t dsobj, crtxg; uint64_t dsobj, crtxg;
objset_t *mos = dp->dp_meta_objset; objset_t *mos = dp->dp_meta_objset;
spa_feature_t f;
ASSERTV(static zil_header_t zero_zil); ASSERTV(static zil_header_t zero_zil);
ASSERTV(objset_t *os); ASSERTV(objset_t *os);
@ -1419,7 +1413,7 @@ dsl_dataset_snapshot_sync_impl(dsl_dataset_t *ds, const char *snapname,
rrw_exit(&ds->ds_bp_rwlock, FTAG); rrw_exit(&ds->ds_bp_rwlock, FTAG);
dmu_buf_rele(dbuf, FTAG); dmu_buf_rele(dbuf, FTAG);
for (f = 0; f < SPA_FEATURES; f++) { for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
if (ds->ds_feature_inuse[f]) if (ds->ds_feature_inuse[f])
dsl_dataset_activate_feature(dsobj, f, tx); dsl_dataset_activate_feature(dsobj, f, tx);
} }
@ -1691,8 +1685,6 @@ dsl_dataset_snapshot_tmp(const char *fsname, const char *snapname,
void void
dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx) dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
{ {
spa_feature_t f;
ASSERT(dmu_tx_is_syncing(tx)); ASSERT(dmu_tx_is_syncing(tx));
ASSERT(ds->ds_objset != NULL); ASSERT(ds->ds_objset != NULL);
ASSERT(dsl_dataset_phys(ds)->ds_next_snap_obj == 0); ASSERT(dsl_dataset_phys(ds)->ds_next_snap_obj == 0);
@ -1721,7 +1713,7 @@ dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
dmu_objset_sync(ds->ds_objset, zio, tx); dmu_objset_sync(ds->ds_objset, zio, tx);
for (f = 0; f < SPA_FEATURES; f++) { for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
if (ds->ds_feature_activation_needed[f]) { if (ds->ds_feature_activation_needed[f]) {
if (ds->ds_feature_inuse[f]) if (ds->ds_feature_inuse[f])
continue; continue;
@ -1823,10 +1815,6 @@ get_receive_resume_stats(dsl_dataset_t *ds, nvlist_t *nv)
uint64_t val; uint64_t val;
nvlist_t *token_nv = fnvlist_alloc(); nvlist_t *token_nv = fnvlist_alloc();
size_t packed_size, compressed_size; size_t packed_size, compressed_size;
zio_cksum_t cksum;
char *propval;
char buf[MAXNAMELEN];
int i;
if (zap_lookup(dp->dp_meta_objset, ds->ds_object, if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val) == 0) { DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val) == 0) {
@ -1848,6 +1836,7 @@ get_receive_resume_stats(dsl_dataset_t *ds, nvlist_t *nv)
DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val) == 0) { DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val) == 0) {
fnvlist_add_uint64(token_nv, "toguid", val); fnvlist_add_uint64(token_nv, "toguid", val);
} }
char buf[MAXNAMELEN];
if (zap_lookup(dp->dp_meta_objset, ds->ds_object, if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_TONAME, 1, sizeof (buf), buf) == 0) { DS_FIELD_RESUME_TONAME, 1, sizeof (buf), buf) == 0) {
fnvlist_add_string(token_nv, "toname", buf); fnvlist_add_string(token_nv, "toname", buf);
@ -1875,14 +1864,15 @@ get_receive_resume_stats(dsl_dataset_t *ds, nvlist_t *nv)
compressed_size = gzip_compress(packed, compressed, compressed_size = gzip_compress(packed, compressed,
packed_size, packed_size, 6); packed_size, packed_size, 6);
zio_cksum_t cksum;
fletcher_4_native_varsize(compressed, compressed_size, &cksum); fletcher_4_native_varsize(compressed, compressed_size, &cksum);
str = kmem_alloc(compressed_size * 2 + 1, KM_SLEEP); str = kmem_alloc(compressed_size * 2 + 1, KM_SLEEP);
for (i = 0; i < compressed_size; i++) { for (int i = 0; i < compressed_size; i++) {
(void) sprintf(str + i * 2, "%02x", compressed[i]); (void) sprintf(str + i * 2, "%02x", compressed[i]);
} }
str[compressed_size * 2] = '\0'; str[compressed_size * 2] = '\0';
propval = kmem_asprintf("%u-%llx-%llx-%s", char *propval = kmem_asprintf("%u-%llx-%llx-%s",
ZFS_SEND_RESUME_TOKEN_VERSION, ZFS_SEND_RESUME_TOKEN_VERSION,
(longlong_t)cksum.zc_word[0], (longlong_t)cksum.zc_word[0],
(longlong_t)packed_size, str); (longlong_t)packed_size, str);
@ -1970,10 +1960,6 @@ dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv)
} }
if (!dsl_dataset_is_snapshot(ds)) { if (!dsl_dataset_is_snapshot(ds)) {
/* 6 extra bytes for /%recv */
char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];
dsl_dataset_t *recv_ds;
/* /*
* A failed "newfs" (e.g. full) resumable receive leaves * A failed "newfs" (e.g. full) resumable receive leaves
* the stats set on this dataset. Check here for the prop. * the stats set on this dataset. Check here for the prop.
@ -1985,6 +1971,9 @@ dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv)
* stats set on our child named "%recv". Check the child * stats set on our child named "%recv". Check the child
* for the prop. * for the prop.
*/ */
/* 6 extra bytes for /%recv */
char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];
dsl_dataset_t *recv_ds;
dsl_dataset_name(ds, recvname); dsl_dataset_name(ds, recvname);
if (strlcat(recvname, "/", sizeof (recvname)) < if (strlcat(recvname, "/", sizeof (recvname)) <
sizeof (recvname) && sizeof (recvname) &&
@ -2271,8 +2260,6 @@ dsl_dataset_rollback_check(void *arg, dmu_tx_t *tx)
dsl_dataset_t *ds; dsl_dataset_t *ds;
int64_t unused_refres_delta; int64_t unused_refres_delta;
int error; int error;
nvpair_t *pair;
nvlist_t *proprequest, *bookmarks;
error = dsl_dataset_hold(dp, ddra->ddra_fsname, FTAG, &ds); error = dsl_dataset_hold(dp, ddra->ddra_fsname, FTAG, &ds);
if (error != 0) if (error != 0)
@ -2315,14 +2302,14 @@ dsl_dataset_rollback_check(void *arg, dmu_tx_t *tx)
} }
/* must not have any bookmarks after the most recent snapshot */ /* must not have any bookmarks after the most recent snapshot */
proprequest = fnvlist_alloc(); nvlist_t *proprequest = fnvlist_alloc();
fnvlist_add_boolean(proprequest, zfs_prop_to_name(ZFS_PROP_CREATETXG)); fnvlist_add_boolean(proprequest, zfs_prop_to_name(ZFS_PROP_CREATETXG));
bookmarks = fnvlist_alloc(); nvlist_t *bookmarks = fnvlist_alloc();
error = dsl_get_bookmarks_impl(ds, proprequest, bookmarks); error = dsl_get_bookmarks_impl(ds, proprequest, bookmarks);
fnvlist_free(proprequest); fnvlist_free(proprequest);
if (error != 0) if (error != 0)
return (error); return (error);
for (pair = nvlist_next_nvpair(bookmarks, NULL); for (nvpair_t *pair = nvlist_next_nvpair(bookmarks, NULL);
pair != NULL; pair = nvlist_next_nvpair(bookmarks, pair)) { pair != NULL; pair = nvlist_next_nvpair(bookmarks, pair)) {
nvlist_t *valuenv = nvlist_t *valuenv =
fnvlist_lookup_nvlist(fnvpair_value_nvlist(pair), fnvlist_lookup_nvlist(fnvpair_value_nvlist(pair),
@ -3067,7 +3054,6 @@ dsl_dataset_clone_swap_sync_impl(dsl_dataset_t *clone,
{ {
dsl_pool_t *dp = dmu_tx_pool(tx); dsl_pool_t *dp = dmu_tx_pool(tx);
int64_t unused_refres_delta; int64_t unused_refres_delta;
blkptr_t tmp;
ASSERT(clone->ds_reserved == 0); ASSERT(clone->ds_reserved == 0);
/* /*
@ -3083,9 +3069,6 @@ dsl_dataset_clone_swap_sync_impl(dsl_dataset_t *clone,
* Swap per-dataset feature flags. * Swap per-dataset feature flags.
*/ */
for (spa_feature_t f = 0; f < SPA_FEATURES; f++) { for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
boolean_t clone_inuse;
boolean_t origin_head_inuse;
if (!(spa_feature_table[f].fi_flags & if (!(spa_feature_table[f].fi_flags &
ZFEATURE_FLAG_PER_DATASET)) { ZFEATURE_FLAG_PER_DATASET)) {
ASSERT(!clone->ds_feature_inuse[f]); ASSERT(!clone->ds_feature_inuse[f]);
@ -3093,8 +3076,8 @@ dsl_dataset_clone_swap_sync_impl(dsl_dataset_t *clone,
continue; continue;
} }
clone_inuse = clone->ds_feature_inuse[f]; boolean_t clone_inuse = clone->ds_feature_inuse[f];
origin_head_inuse = origin_head->ds_feature_inuse[f]; boolean_t origin_head_inuse = origin_head->ds_feature_inuse[f];
if (clone_inuse) { if (clone_inuse) {
dsl_dataset_deactivate_feature(clone->ds_object, f, tx); dsl_dataset_deactivate_feature(clone->ds_object, f, tx);
@ -3152,6 +3135,7 @@ dsl_dataset_clone_swap_sync_impl(dsl_dataset_t *clone,
{ {
rrw_enter(&clone->ds_bp_rwlock, RW_WRITER, FTAG); rrw_enter(&clone->ds_bp_rwlock, RW_WRITER, FTAG);
rrw_enter(&origin_head->ds_bp_rwlock, RW_WRITER, FTAG); rrw_enter(&origin_head->ds_bp_rwlock, RW_WRITER, FTAG);
blkptr_t tmp;
tmp = dsl_dataset_phys(origin_head)->ds_bp; tmp = dsl_dataset_phys(origin_head)->ds_bp;
dsl_dataset_phys(origin_head)->ds_bp = dsl_dataset_phys(origin_head)->ds_bp =
dsl_dataset_phys(clone)->ds_bp; dsl_dataset_phys(clone)->ds_bp;
@ -3689,7 +3673,6 @@ dsl_dataset_is_before(dsl_dataset_t *later, dsl_dataset_t *earlier,
dsl_pool_t *dp = later->ds_dir->dd_pool; dsl_pool_t *dp = later->ds_dir->dd_pool;
int error; int error;
boolean_t ret; boolean_t ret;
dsl_dataset_t *origin;
ASSERT(dsl_pool_config_held(dp)); ASSERT(dsl_pool_config_held(dp));
ASSERT(earlier->ds_is_snapshot || earlier_txg != 0); ASSERT(earlier->ds_is_snapshot || earlier_txg != 0);
@ -3708,6 +3691,7 @@ dsl_dataset_is_before(dsl_dataset_t *later, dsl_dataset_t *earlier,
if (dsl_dir_phys(later->ds_dir)->dd_origin_obj == earlier->ds_object) if (dsl_dir_phys(later->ds_dir)->dd_origin_obj == earlier->ds_object)
return (B_TRUE); return (B_TRUE);
dsl_dataset_t *origin;
error = dsl_dataset_hold_obj(dp, error = dsl_dataset_hold_obj(dp,
dsl_dir_phys(later->ds_dir)->dd_origin_obj, FTAG, &origin); dsl_dir_phys(later->ds_dir)->dd_origin_obj, FTAG, &origin);
if (error != 0) if (error != 0)
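The spa_feature_t loops in dsl_dataset.c show that C99 for-initializers work for typed counters, not just int. A self-contained sketch with an invented enum type standing in for spa_feature_t:

#include <stdio.h>

typedef enum { FEAT_A, FEAT_B, FEAT_C, FEAT_COUNT } feature_t;

static const char *feat_names[FEAT_COUNT] = { "a", "b", "c" };

int
main(void)
{
	/*
	 * C99: an enum-typed counter may live in the for-initializer,
	 * mirroring the spa_feature_t loops (feature_t is invented).
	 */
	for (feature_t f = 0; f < FEAT_COUNT; f++)
		printf("feature %s\n", feat_names[f]);

	return (0);
}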

View File

@ -245,14 +245,11 @@ dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx)
void void
dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx) dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
{ {
spa_feature_t f;
int after_branch_point = FALSE; int after_branch_point = FALSE;
dsl_pool_t *dp = ds->ds_dir->dd_pool; dsl_pool_t *dp = ds->ds_dir->dd_pool;
objset_t *mos = dp->dp_meta_objset; objset_t *mos = dp->dp_meta_objset;
dsl_dataset_t *ds_prev = NULL; dsl_dataset_t *ds_prev = NULL;
uint64_t obj, old_unique, used = 0, comp = 0, uncomp = 0; uint64_t obj;
dsl_dataset_t *ds_next, *ds_head, *hds;
ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock)); ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG); rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
@ -279,7 +276,7 @@ dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
obj = ds->ds_object; obj = ds->ds_object;
for (f = 0; f < SPA_FEATURES; f++) { for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
if (ds->ds_feature_inuse[f]) { if (ds->ds_feature_inuse[f]) {
dsl_dataset_deactivate_feature(obj, f, tx); dsl_dataset_deactivate_feature(obj, f, tx);
ds->ds_feature_inuse[f] = B_FALSE; ds->ds_feature_inuse[f] = B_FALSE;
@ -310,6 +307,10 @@ dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
} }
} }
dsl_dataset_t *ds_next;
uint64_t old_unique;
uint64_t used = 0, comp = 0, uncomp = 0;
VERIFY0(dsl_dataset_hold_obj(dp, VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_next_snap_obj, FTAG, &ds_next)); dsl_dataset_phys(ds)->ds_next_snap_obj, FTAG, &ds_next));
ASSERT3U(dsl_dataset_phys(ds_next)->ds_prev_snap_obj, ==, obj); ASSERT3U(dsl_dataset_phys(ds_next)->ds_prev_snap_obj, ==, obj);
@ -388,6 +389,7 @@ dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
ASSERT3P(ds_next->ds_prev, ==, NULL); ASSERT3P(ds_next->ds_prev, ==, NULL);
/* Collapse range in this head. */ /* Collapse range in this head. */
dsl_dataset_t *hds;
VERIFY0(dsl_dataset_hold_obj(dp, VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &hds)); dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &hds));
dsl_deadlist_remove_key(&hds->ds_deadlist, dsl_deadlist_remove_key(&hds->ds_deadlist,
@ -435,6 +437,7 @@ dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
} }
/* remove from snapshot namespace */ /* remove from snapshot namespace */
dsl_dataset_t *ds_head;
ASSERT(dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0); ASSERT(dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0);
VERIFY0(dsl_dataset_hold_obj(dp, VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &ds_head)); dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &ds_head));
@ -726,11 +729,9 @@ void
dsl_destroy_head_sync_impl(dsl_dataset_t *ds, dmu_tx_t *tx) dsl_destroy_head_sync_impl(dsl_dataset_t *ds, dmu_tx_t *tx)
{ {
dsl_pool_t *dp = dmu_tx_pool(tx); dsl_pool_t *dp = dmu_tx_pool(tx);
spa_feature_t f;
objset_t *mos = dp->dp_meta_objset; objset_t *mos = dp->dp_meta_objset;
uint64_t obj, ddobj, prevobj = 0; uint64_t obj, ddobj, prevobj = 0;
boolean_t rmorigin; boolean_t rmorigin;
objset_t *os;
ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1); ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
ASSERT(ds->ds_prev == NULL || ASSERT(ds->ds_prev == NULL ||
@ -758,7 +759,7 @@ dsl_destroy_head_sync_impl(dsl_dataset_t *ds, dmu_tx_t *tx)
obj = ds->ds_object; obj = ds->ds_object;
for (f = 0; f < SPA_FEATURES; f++) { for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
if (ds->ds_feature_inuse[f]) { if (ds->ds_feature_inuse[f]) {
dsl_dataset_deactivate_feature(obj, f, tx); dsl_dataset_deactivate_feature(obj, f, tx);
ds->ds_feature_inuse[f] = B_FALSE; ds->ds_feature_inuse[f] = B_FALSE;
@ -794,6 +795,7 @@ dsl_destroy_head_sync_impl(dsl_dataset_t *ds, dmu_tx_t *tx)
dmu_buf_will_dirty(ds->ds_dbuf, tx); dmu_buf_will_dirty(ds->ds_dbuf, tx);
dsl_dataset_phys(ds)->ds_deadlist_obj = 0; dsl_dataset_phys(ds)->ds_deadlist_obj = 0;
objset_t *os;
VERIFY0(dmu_objset_from_ds(ds, &os)); VERIFY0(dmu_objset_from_ds(ds, &os));
if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY)) { if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY)) {
@ -959,11 +961,10 @@ dsl_destroy_head(const char *name)
error = dmu_objset_own(name, DMU_OST_ANY, B_FALSE, B_FALSE, error = dmu_objset_own(name, DMU_OST_ANY, B_FALSE, B_FALSE,
FTAG, &os); FTAG, &os);
if (error == 0) { if (error == 0) {
uint64_t obj;
uint64_t prev_snap_txg = uint64_t prev_snap_txg =
dsl_dataset_phys(dmu_objset_ds(os))-> dsl_dataset_phys(dmu_objset_ds(os))->
ds_prev_snap_txg; ds_prev_snap_txg;
for (obj = 0; error == 0; for (uint64_t obj = 0; error == 0;
error = dmu_object_next(os, &obj, FALSE, error = dmu_object_next(os, &obj, FALSE,
prev_snap_txg)) prev_snap_txg))
(void) dmu_free_long_object(os, obj); (void) dmu_free_long_object(os, obj);
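dsl_destroy.c mostly moves declarations (ds_next, hds, ds_head, os) down to the point in the control flow where they are first needed, which c89 forbade once a statement had appeared in the block. A minimal sketch of a declaration placed after statements:

#include <stdio.h>

int
main(void)
{
	int x = 1, y = 2;

	printf("before: %d %d\n", x, y);

	/*
	 * C99 permits declarations after statements, so the temporary
	 * appears only at the point it is needed; c89 required every
	 * declaration before the first statement of the block.
	 */
	int tmp = x;
	x = y;
	y = tmp;

	printf("after: %d %d\n", x, y);	/* prints "after: 2 1" */
	return (0);
}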

View File

@ -537,10 +537,6 @@ extern int zfs_vdev_async_write_active_min_dirty_percent;
static boolean_t static boolean_t
dsl_scan_check_suspend(dsl_scan_t *scn, const zbookmark_phys_t *zb) dsl_scan_check_suspend(dsl_scan_t *scn, const zbookmark_phys_t *zb)
{ {
uint64_t elapsed_nanosecs;
int mintime;
int dirty_pct;
/* we never skip user/group accounting objects */ /* we never skip user/group accounting objects */
if (zb && (int64_t)zb->zb_object < 0) if (zb && (int64_t)zb->zb_object < 0)
return (B_FALSE); return (B_FALSE);
@ -569,10 +565,10 @@ dsl_scan_check_suspend(dsl_scan_t *scn, const zbookmark_phys_t *zb)
* - the spa is shutting down because this pool is being exported * - the spa is shutting down because this pool is being exported
* or the machine is rebooting. * or the machine is rebooting.
*/ */
mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ? int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
zfs_resilver_min_time_ms : zfs_scan_min_time_ms; zfs_resilver_min_time_ms : zfs_scan_min_time_ms;
elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time; uint64_t elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
dirty_pct = scn->scn_dp->dp_dirty_total * 100 / zfs_dirty_data_max; int dirty_pct = scn->scn_dp->dp_dirty_total * 100 / zfs_dirty_data_max;
if (elapsed_nanosecs / NANOSEC >= zfs_txg_timeout || if (elapsed_nanosecs / NANOSEC >= zfs_txg_timeout ||
(NSEC2MSEC(elapsed_nanosecs) > mintime && (NSEC2MSEC(elapsed_nanosecs) > mintime &&
(txg_sync_waiting(scn->scn_dp) || (txg_sync_waiting(scn->scn_dp) ||
@ -1183,7 +1179,6 @@ dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx)
dsl_pool_t *dp = scn->scn_dp; dsl_pool_t *dp = scn->scn_dp;
dsl_dataset_t *ds; dsl_dataset_t *ds;
objset_t *os; objset_t *os;
char *dsname;
VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds)); VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
@ -1248,7 +1243,7 @@ dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx)
dsl_scan_visit_rootbp(scn, ds, &dsl_dataset_phys(ds)->ds_bp, tx); dsl_scan_visit_rootbp(scn, ds, &dsl_dataset_phys(ds)->ds_bp, tx);
rrw_exit(&ds->ds_bp_rwlock, FTAG); rrw_exit(&ds->ds_bp_rwlock, FTAG);
dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP); char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
dsl_dataset_name(ds, dsname); dsl_dataset_name(ds, dsname);
zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; " zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; "
"suspending=%u", "suspending=%u",
@ -1446,12 +1441,11 @@ dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum,
ddt_phys_t *ddp = dde->dde_phys; ddt_phys_t *ddp = dde->dde_phys;
blkptr_t bp; blkptr_t bp;
zbookmark_phys_t zb = { 0 }; zbookmark_phys_t zb = { 0 };
int p;
if (scn->scn_phys.scn_state != DSS_SCANNING) if (scn->scn_phys.scn_state != DSS_SCANNING)
return; return;
for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
if (ddp->ddp_phys_birth == 0 || if (ddp->ddp_phys_birth == 0 ||
ddp->ddp_phys_birth > scn->scn_phys.scn_max_txg) ddp->ddp_phys_birth > scn->scn_phys.scn_max_txg)
continue; continue;
@ -1890,13 +1884,12 @@ count_block(zfs_all_blkstats_t *zab, const blkptr_t *bp)
for (i = 0; i < 4; i++) { for (i = 0; i < 4; i++) {
int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS; int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS;
int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL; int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL;
int equal;
zfs_blkstat_t *zb;
if (t & DMU_OT_NEWTYPE) if (t & DMU_OT_NEWTYPE)
t = DMU_OT_OTHER; t = DMU_OT_OTHER;
zfs_blkstat_t *zb = &zab->zab_type[l][t];
int equal;
zb = &zab->zab_type[l][t];
zb->zb_count++; zb->zb_count++;
zb->zb_asize += BP_GET_ASIZE(bp); zb->zb_asize += BP_GET_ASIZE(bp);
zb->zb_lsize += BP_GET_LSIZE(bp); zb->zb_lsize += BP_GET_LSIZE(bp);
@ -1993,7 +1986,6 @@ dsl_scan_scrub_cb(dsl_pool_t *dp,
boolean_t needs_io = B_FALSE; boolean_t needs_io = B_FALSE;
int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL; int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL;
int scan_delay = 0; int scan_delay = 0;
int d;
if (phys_birth <= scn->scn_phys.scn_min_txg || if (phys_birth <= scn->scn_phys.scn_min_txg ||
phys_birth >= scn->scn_phys.scn_max_txg) phys_birth >= scn->scn_phys.scn_max_txg)
@ -2020,7 +2012,7 @@ dsl_scan_scrub_cb(dsl_pool_t *dp,
if (zb->zb_level == ZB_ZIL_LEVEL) if (zb->zb_level == ZB_ZIL_LEVEL)
zio_flags |= ZIO_FLAG_SPECULATIVE; zio_flags |= ZIO_FLAG_SPECULATIVE;
for (d = 0; d < BP_GET_NDVAS(bp); d++) { for (int d = 0; d < BP_GET_NDVAS(bp); d++) {
const dva_t *dva = &bp->blk_dva[d]; const dva_t *dva = &bp->blk_dva[d];
/* /*

View File

@ -83,7 +83,6 @@ dsl_dataset_user_hold_check(void *arg, dmu_tx_t *tx)
{ {
dsl_dataset_user_hold_arg_t *dduha = arg; dsl_dataset_user_hold_arg_t *dduha = arg;
dsl_pool_t *dp = dmu_tx_pool(tx); dsl_pool_t *dp = dmu_tx_pool(tx);
nvpair_t *pair;
if (spa_version(dp->dp_spa) < SPA_VERSION_USERREFS) if (spa_version(dp->dp_spa) < SPA_VERSION_USERREFS)
return (SET_ERROR(ENOTSUP)); return (SET_ERROR(ENOTSUP));
@ -91,7 +90,7 @@ dsl_dataset_user_hold_check(void *arg, dmu_tx_t *tx)
if (!dmu_tx_is_syncing(tx)) if (!dmu_tx_is_syncing(tx))
return (0); return (0);
for (pair = nvlist_next_nvpair(dduha->dduha_holds, NULL); for (nvpair_t *pair = nvlist_next_nvpair(dduha->dduha_holds, NULL);
pair != NULL; pair = nvlist_next_nvpair(dduha->dduha_holds, pair)) { pair != NULL; pair = nvlist_next_nvpair(dduha->dduha_holds, pair)) {
dsl_dataset_t *ds; dsl_dataset_t *ds;
int error = 0; int error = 0;
@ -255,14 +254,13 @@ dsl_dataset_user_hold_sync(void *arg, dmu_tx_t *tx)
dsl_dataset_user_hold_arg_t *dduha = arg; dsl_dataset_user_hold_arg_t *dduha = arg;
dsl_pool_t *dp = dmu_tx_pool(tx); dsl_pool_t *dp = dmu_tx_pool(tx);
nvlist_t *tmpholds; nvlist_t *tmpholds;
nvpair_t *pair;
uint64_t now = gethrestime_sec(); uint64_t now = gethrestime_sec();
if (dduha->dduha_minor != 0) if (dduha->dduha_minor != 0)
tmpholds = fnvlist_alloc(); tmpholds = fnvlist_alloc();
else else
tmpholds = NULL; tmpholds = NULL;
for (pair = nvlist_next_nvpair(dduha->dduha_chkholds, NULL); for (nvpair_t *pair = nvlist_next_nvpair(dduha->dduha_chkholds, NULL);
pair != NULL; pair != NULL;
pair = nvlist_next_nvpair(dduha->dduha_chkholds, pair)) { pair = nvlist_next_nvpair(dduha->dduha_chkholds, pair)) {
dsl_dataset_t *ds; dsl_dataset_t *ds;
@ -351,7 +349,6 @@ dsl_dataset_user_release_check_one(dsl_dataset_user_release_arg_t *ddura,
{ {
uint64_t zapobj; uint64_t zapobj;
nvlist_t *holds_found; nvlist_t *holds_found;
nvpair_t *pair;
objset_t *mos; objset_t *mos;
int numholds; int numholds;
@ -366,7 +363,7 @@ dsl_dataset_user_release_check_one(dsl_dataset_user_release_arg_t *ddura,
zapobj = dsl_dataset_phys(ds)->ds_userrefs_obj; zapobj = dsl_dataset_phys(ds)->ds_userrefs_obj;
VERIFY0(nvlist_alloc(&holds_found, NV_UNIQUE_NAME, KM_SLEEP)); VERIFY0(nvlist_alloc(&holds_found, NV_UNIQUE_NAME, KM_SLEEP));
for (pair = nvlist_next_nvpair(holds, NULL); pair != NULL; for (nvpair_t *pair = nvlist_next_nvpair(holds, NULL); pair != NULL;
pair = nvlist_next_nvpair(holds, pair)) { pair = nvlist_next_nvpair(holds, pair)) {
uint64_t tmp; uint64_t tmp;
int error; int error;
@ -427,7 +424,6 @@ dsl_dataset_user_release_check(void *arg, dmu_tx_t *tx)
dsl_dataset_user_release_arg_t *ddura; dsl_dataset_user_release_arg_t *ddura;
dsl_holdfunc_t *holdfunc; dsl_holdfunc_t *holdfunc;
dsl_pool_t *dp; dsl_pool_t *dp;
nvpair_t *pair;
if (!dmu_tx_is_syncing(tx)) if (!dmu_tx_is_syncing(tx))
return (0); return (0);
@ -439,7 +435,7 @@ dsl_dataset_user_release_check(void *arg, dmu_tx_t *tx)
ddura = arg; ddura = arg;
holdfunc = ddura->ddura_holdfunc; holdfunc = ddura->ddura_holdfunc;
for (pair = nvlist_next_nvpair(ddura->ddura_holds, NULL); for (nvpair_t *pair = nvlist_next_nvpair(ddura->ddura_holds, NULL);
pair != NULL; pair = nvlist_next_nvpair(ddura->ddura_holds, pair)) { pair != NULL; pair = nvlist_next_nvpair(ddura->ddura_holds, pair)) {
int error; int error;
dsl_dataset_t *ds; dsl_dataset_t *ds;
@ -479,9 +475,8 @@ dsl_dataset_user_release_sync_one(dsl_dataset_t *ds, nvlist_t *holds,
{ {
dsl_pool_t *dp = ds->ds_dir->dd_pool; dsl_pool_t *dp = ds->ds_dir->dd_pool;
objset_t *mos = dp->dp_meta_objset; objset_t *mos = dp->dp_meta_objset;
nvpair_t *pair;
for (pair = nvlist_next_nvpair(holds, NULL); pair != NULL; for (nvpair_t *pair = nvlist_next_nvpair(holds, NULL); pair != NULL;
pair = nvlist_next_nvpair(holds, pair)) { pair = nvlist_next_nvpair(holds, pair)) {
int error; int error;
const char *holdname = nvpair_name(pair); const char *holdname = nvpair_name(pair);
@ -505,11 +500,10 @@ dsl_dataset_user_release_sync(void *arg, dmu_tx_t *tx)
dsl_dataset_user_release_arg_t *ddura = arg; dsl_dataset_user_release_arg_t *ddura = arg;
dsl_holdfunc_t *holdfunc = ddura->ddura_holdfunc; dsl_holdfunc_t *holdfunc = ddura->ddura_holdfunc;
dsl_pool_t *dp = dmu_tx_pool(tx); dsl_pool_t *dp = dmu_tx_pool(tx);
nvpair_t *pair;
ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock)); ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
for (pair = nvlist_next_nvpair(ddura->ddura_chkholds, NULL); for (nvpair_t *pair = nvlist_next_nvpair(ddura->ddura_chkholds, NULL);
pair != NULL; pair = nvlist_next_nvpair(ddura->ddura_chkholds, pair != NULL; pair = nvlist_next_nvpair(ddura->ddura_chkholds,
pair)) { pair)) {
dsl_dataset_t *ds; dsl_dataset_t *ds;

View File

@ -307,7 +307,7 @@ metaslab_class_histogram_verify(metaslab_class_t *mc)
{ {
vdev_t *rvd = mc->mc_spa->spa_root_vdev; vdev_t *rvd = mc->mc_spa->spa_root_vdev;
uint64_t *mc_hist; uint64_t *mc_hist;
int i, c; int i;
if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0) if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
return; return;
@ -315,7 +315,7 @@ metaslab_class_histogram_verify(metaslab_class_t *mc)
mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE, mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
KM_SLEEP); KM_SLEEP);
for (c = 0; c < rvd->vdev_children; c++) { for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c]; vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg; metaslab_group_t *mg = tvd->vdev_mg;
@ -350,11 +350,10 @@ metaslab_class_fragmentation(metaslab_class_t *mc)
{ {
vdev_t *rvd = mc->mc_spa->spa_root_vdev; vdev_t *rvd = mc->mc_spa->spa_root_vdev;
uint64_t fragmentation = 0; uint64_t fragmentation = 0;
int c;
spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER); spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
for (c = 0; c < rvd->vdev_children; c++) { for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c]; vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg; metaslab_group_t *mg = tvd->vdev_mg;
@ -401,10 +400,9 @@ metaslab_class_expandable_space(metaslab_class_t *mc)
{ {
vdev_t *rvd = mc->mc_spa->spa_root_vdev; vdev_t *rvd = mc->mc_spa->spa_root_vdev;
uint64_t space = 0; uint64_t space = 0;
int c;
spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER); spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
for (c = 0; c < rvd->vdev_children; c++) { for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c]; vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg; metaslab_group_t *mg = tvd->vdev_mg;
@ -449,7 +447,6 @@ metaslab_verify_space(metaslab_t *msp, uint64_t txg)
spa_t *spa = msp->ms_group->mg_vd->vdev_spa; spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
uint64_t allocated = 0; uint64_t allocated = 0;
uint64_t sm_free_space, msp_free_space; uint64_t sm_free_space, msp_free_space;
int t;
ASSERT(MUTEX_HELD(&msp->ms_lock)); ASSERT(MUTEX_HELD(&msp->ms_lock));
@ -474,7 +471,7 @@ metaslab_verify_space(metaslab_t *msp, uint64_t txg)
* Account for future allocations since we would have already * Account for future allocations since we would have already
* deducted that space from the ms_freetree. * deducted that space from the ms_freetree.
*/ */
for (t = 0; t < TXG_CONCURRENT_STATES; t++) { for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
allocated += allocated +=
range_tree_space(msp->ms_alloctree[(txg + t) & TXG_MASK]); range_tree_space(msp->ms_alloctree[(txg + t) & TXG_MASK]);
} }
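Several metaslab.c hunks split combined declarations such as "int i, c;" so that each loop owns its counter and nothing leaks between loops. A standalone sketch (invented data):

#include <stdio.h>

int
main(void)
{
	int grid[2][3] = { { 1, 2, 3 }, { 4, 5, 6 } };
	int total = 0;

	/*
	 * C99: each loop declares its own counter instead of sharing
	 * one "int i, c;" declared at the top of the function.
	 */
	for (int r = 0; r < 2; r++) {
		for (int c = 0; c < 3; c++)
			total += grid[r][c];
	}

	printf("%d\n", total);	/* prints 21 */
	return (0);
}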
@ -699,7 +696,7 @@ metaslab_group_histogram_verify(metaslab_group_t *mg)
uint64_t *mg_hist; uint64_t *mg_hist;
vdev_t *vd = mg->mg_vd; vdev_t *vd = mg->mg_vd;
uint64_t ashift = vd->vdev_ashift; uint64_t ashift = vd->vdev_ashift;
int i, m; int i;
if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0) if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
return; return;
@ -710,7 +707,7 @@ metaslab_group_histogram_verify(metaslab_group_t *mg)
ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=, ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
SPACE_MAP_HISTOGRAM_SIZE + ashift); SPACE_MAP_HISTOGRAM_SIZE + ashift);
for (m = 0; m < vd->vdev_ms_count; m++) { for (int m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *msp = vd->vdev_ms[m]; metaslab_t *msp = vd->vdev_ms[m];
if (msp->ms_sm == NULL) if (msp->ms_sm == NULL)
@ -732,14 +729,13 @@ metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
{ {
metaslab_class_t *mc = mg->mg_class; metaslab_class_t *mc = mg->mg_class;
uint64_t ashift = mg->mg_vd->vdev_ashift; uint64_t ashift = mg->mg_vd->vdev_ashift;
int i;
ASSERT(MUTEX_HELD(&msp->ms_lock)); ASSERT(MUTEX_HELD(&msp->ms_lock));
if (msp->ms_sm == NULL) if (msp->ms_sm == NULL)
return; return;
mutex_enter(&mg->mg_lock); mutex_enter(&mg->mg_lock);
for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) { for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
mg->mg_histogram[i + ashift] += mg->mg_histogram[i + ashift] +=
msp->ms_sm->sm_phys->smp_histogram[i]; msp->ms_sm->sm_phys->smp_histogram[i];
mc->mc_histogram[i + ashift] += mc->mc_histogram[i + ashift] +=
@ -753,14 +749,13 @@ metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
{ {
metaslab_class_t *mc = mg->mg_class; metaslab_class_t *mc = mg->mg_class;
uint64_t ashift = mg->mg_vd->vdev_ashift; uint64_t ashift = mg->mg_vd->vdev_ashift;
int i;
ASSERT(MUTEX_HELD(&msp->ms_lock)); ASSERT(MUTEX_HELD(&msp->ms_lock));
if (msp->ms_sm == NULL) if (msp->ms_sm == NULL)
return; return;
mutex_enter(&mg->mg_lock); mutex_enter(&mg->mg_lock);
for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) { for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
ASSERT3U(mg->mg_histogram[i + ashift], >=, ASSERT3U(mg->mg_histogram[i + ashift], >=,
msp->ms_sm->sm_phys->smp_histogram[i]); msp->ms_sm->sm_phys->smp_histogram[i]);
ASSERT3U(mc->mc_histogram[i + ashift], >=, ASSERT3U(mc->mc_histogram[i + ashift], >=,
@ -834,9 +829,8 @@ metaslab_group_fragmentation(metaslab_group_t *mg)
vdev_t *vd = mg->mg_vd; vdev_t *vd = mg->mg_vd;
uint64_t fragmentation = 0; uint64_t fragmentation = 0;
uint64_t valid_ms = 0; uint64_t valid_ms = 0;
int m;
for (m = 0; m < vd->vdev_ms_count; m++) { for (int m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *msp = vd->vdev_ms[m]; metaslab_t *msp = vd->vdev_ms[m];
if (msp->ms_fragmentation == ZFS_FRAG_INVALID) if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
@ -1347,7 +1341,6 @@ int
metaslab_load(metaslab_t *msp) metaslab_load(metaslab_t *msp)
{ {
int error = 0; int error = 0;
int t;
boolean_t success = B_FALSE; boolean_t success = B_FALSE;
ASSERT(MUTEX_HELD(&msp->ms_lock)); ASSERT(MUTEX_HELD(&msp->ms_lock));
@ -1373,7 +1366,7 @@ metaslab_load(metaslab_t *msp)
ASSERT3P(msp->ms_group, !=, NULL); ASSERT3P(msp->ms_group, !=, NULL);
msp->ms_loaded = B_TRUE; msp->ms_loaded = B_TRUE;
for (t = 0; t < TXG_DEFER_SIZE; t++) { for (int t = 0; t < TXG_DEFER_SIZE; t++) {
range_tree_walk(msp->ms_defertree[t], range_tree_walk(msp->ms_defertree[t],
range_tree_remove, msp->ms_tree); range_tree_remove, msp->ms_tree);
} }
@ -1473,8 +1466,6 @@ metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg,
void void
metaslab_fini(metaslab_t *msp) metaslab_fini(metaslab_t *msp)
{ {
int t;
metaslab_group_t *mg = msp->ms_group; metaslab_group_t *mg = msp->ms_group;
metaslab_group_remove(mg, msp); metaslab_group_remove(mg, msp);
@ -1490,11 +1481,11 @@ metaslab_fini(metaslab_t *msp)
range_tree_destroy(msp->ms_freeingtree); range_tree_destroy(msp->ms_freeingtree);
range_tree_destroy(msp->ms_freedtree); range_tree_destroy(msp->ms_freedtree);
for (t = 0; t < TXG_SIZE; t++) { for (int t = 0; t < TXG_SIZE; t++) {
range_tree_destroy(msp->ms_alloctree[t]); range_tree_destroy(msp->ms_alloctree[t]);
} }
for (t = 0; t < TXG_DEFER_SIZE; t++) { for (int t = 0; t < TXG_DEFER_SIZE; t++) {
range_tree_destroy(msp->ms_defertree[t]); range_tree_destroy(msp->ms_defertree[t]);
} }
@ -1561,7 +1552,6 @@ metaslab_set_fragmentation(metaslab_t *msp)
uint64_t total = 0; uint64_t total = 0;
boolean_t feature_enabled = spa_feature_is_enabled(spa, boolean_t feature_enabled = spa_feature_is_enabled(spa,
SPA_FEATURE_SPACEMAP_HISTOGRAM); SPA_FEATURE_SPACEMAP_HISTOGRAM);
int i;
if (!feature_enabled) { if (!feature_enabled) {
msp->ms_fragmentation = ZFS_FRAG_INVALID; msp->ms_fragmentation = ZFS_FRAG_INVALID;
@ -1603,7 +1593,7 @@ metaslab_set_fragmentation(metaslab_t *msp)
return; return;
} }
for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) { for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
uint64_t space = 0; uint64_t space = 0;
uint8_t shift = msp->ms_sm->sm_shift; uint8_t shift = msp->ms_sm->sm_shift;
@ -1710,11 +1700,11 @@ metaslab_weight_from_range_tree(metaslab_t *msp)
{ {
uint64_t weight = 0; uint64_t weight = 0;
uint32_t segments = 0; uint32_t segments = 0;
int i;
ASSERT(msp->ms_loaded); ASSERT(msp->ms_loaded);
for (i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT; i--) { for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT;
i--) {
uint8_t shift = msp->ms_group->mg_vd->vdev_ashift; uint8_t shift = msp->ms_group->mg_vd->vdev_ashift;
int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1; int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
@ -1750,9 +1740,8 @@ static uint64_t
metaslab_weight_from_spacemap(metaslab_t *msp) metaslab_weight_from_spacemap(metaslab_t *msp)
{ {
uint64_t weight = 0; uint64_t weight = 0;
int i;
for (i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) { for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) {
if (msp->ms_sm->sm_phys->smp_histogram[i] != 0) { if (msp->ms_sm->sm_phys->smp_histogram[i] != 0) {
WEIGHT_SET_COUNT(weight, WEIGHT_SET_COUNT(weight,
msp->ms_sm->sm_phys->smp_histogram[i]); msp->ms_sm->sm_phys->smp_histogram[i]);
@ -1962,8 +1951,6 @@ void
metaslab_segment_may_passivate(metaslab_t *msp) metaslab_segment_may_passivate(metaslab_t *msp)
{ {
spa_t *spa = msp->ms_group->mg_vd->vdev_spa; spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
uint64_t weight;
int activation_idx, current_idx;
if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1) if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1)
return; return;
@ -1973,9 +1960,9 @@ metaslab_segment_may_passivate(metaslab_t *msp)
* information that is accessible to us is the in-core range tree * information that is accessible to us is the in-core range tree
* histogram; calculate the new weight based on that information. * histogram; calculate the new weight based on that information.
*/ */
weight = metaslab_weight_from_range_tree(msp); uint64_t weight = metaslab_weight_from_range_tree(msp);
activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight); int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight);
current_idx = WEIGHT_GET_INDEX(weight); int current_idx = WEIGHT_GET_INDEX(weight);
if (current_idx <= activation_idx - zfs_metaslab_switch_threshold) if (current_idx <= activation_idx - zfs_metaslab_switch_threshold)
metaslab_passivate(msp, weight); metaslab_passivate(msp, weight);
@ -2118,7 +2105,6 @@ metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx)
spa_t *spa = msp->ms_group->mg_vd->vdev_spa; spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
range_tree_t *condense_tree; range_tree_t *condense_tree;
space_map_t *sm = msp->ms_sm; space_map_t *sm = msp->ms_sm;
int t;
ASSERT(MUTEX_HELD(&msp->ms_lock)); ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT3U(spa_sync_pass(spa), ==, 1); ASSERT3U(spa_sync_pass(spa), ==, 1);
@ -2151,12 +2137,12 @@ metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx)
*/ */
range_tree_walk(msp->ms_freeingtree, range_tree_remove, condense_tree); range_tree_walk(msp->ms_freeingtree, range_tree_remove, condense_tree);
for (t = 0; t < TXG_DEFER_SIZE; t++) { for (int t = 0; t < TXG_DEFER_SIZE; t++) {
range_tree_walk(msp->ms_defertree[t], range_tree_walk(msp->ms_defertree[t],
range_tree_remove, condense_tree); range_tree_remove, condense_tree);
} }
for (t = 1; t < TXG_CONCURRENT_STATES; t++) { for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
range_tree_walk(msp->ms_alloctree[(txg + t) & TXG_MASK], range_tree_walk(msp->ms_alloctree[(txg + t) & TXG_MASK],
range_tree_remove, condense_tree); range_tree_remove, condense_tree);
} }
@ -2280,8 +2266,6 @@ metaslab_sync(metaslab_t *msp, uint64_t txg)
} }
if (msp->ms_loaded) { if (msp->ms_loaded) {
int t;
/* /*
* When the space map is loaded, we have an accurate * When the space map is loaded, we have an accurate
* histogram in the range tree. This gives us an opportunity * histogram in the range tree. This gives us an opportunity
@ -2307,7 +2291,7 @@ metaslab_sync(metaslab_t *msp, uint64_t txg)
* that is completely empty unless the metaslab is fully * that is completely empty unless the metaslab is fully
* allocated. * allocated.
*/ */
for (t = 0; t < TXG_DEFER_SIZE; t++) { for (int t = 0; t < TXG_DEFER_SIZE; t++) {
space_map_histogram_add(msp->ms_sm, space_map_histogram_add(msp->ms_sm,
msp->ms_defertree[t], tx); msp->ms_defertree[t], tx);
} }
@ -2366,9 +2350,7 @@ metaslab_sync_done(metaslab_t *msp, uint64_t txg)
spa_t *spa = vd->vdev_spa; spa_t *spa = vd->vdev_spa;
range_tree_t **defer_tree; range_tree_t **defer_tree;
int64_t alloc_delta, defer_delta; int64_t alloc_delta, defer_delta;
uint64_t free_space;
boolean_t defer_allowed = B_TRUE; boolean_t defer_allowed = B_TRUE;
int t;
ASSERT(!vd->vdev_ishole); ASSERT(!vd->vdev_ishole);
@ -2379,7 +2361,7 @@ metaslab_sync_done(metaslab_t *msp, uint64_t txg)
* range trees and add its capacity to the vdev. * range trees and add its capacity to the vdev.
*/ */
if (msp->ms_freedtree == NULL) { if (msp->ms_freedtree == NULL) {
for (t = 0; t < TXG_SIZE; t++) { for (int t = 0; t < TXG_SIZE; t++) {
ASSERT(msp->ms_alloctree[t] == NULL); ASSERT(msp->ms_alloctree[t] == NULL);
msp->ms_alloctree[t] = range_tree_create(NULL, msp, msp->ms_alloctree[t] = range_tree_create(NULL, msp,
@ -2394,7 +2376,7 @@ metaslab_sync_done(metaslab_t *msp, uint64_t txg)
msp->ms_freedtree = range_tree_create(NULL, msp, msp->ms_freedtree = range_tree_create(NULL, msp,
&msp->ms_lock); &msp->ms_lock);
for (t = 0; t < TXG_DEFER_SIZE; t++) { for (int t = 0; t < TXG_DEFER_SIZE; t++) {
ASSERT(msp->ms_defertree[t] == NULL); ASSERT(msp->ms_defertree[t] == NULL);
msp->ms_defertree[t] = range_tree_create(NULL, msp, msp->ms_defertree[t] = range_tree_create(NULL, msp,
@ -2406,7 +2388,7 @@ metaslab_sync_done(metaslab_t *msp, uint64_t txg)
defer_tree = &msp->ms_defertree[txg % TXG_DEFER_SIZE]; defer_tree = &msp->ms_defertree[txg % TXG_DEFER_SIZE];
free_space = metaslab_class_get_space(spa_normal_class(spa)) - uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) -
metaslab_class_get_alloc(spa_normal_class(spa)); metaslab_class_get_alloc(spa_normal_class(spa));
if (free_space <= spa_get_slop_space(spa)) { if (free_space <= spa_get_slop_space(spa)) {
defer_allowed = B_FALSE; defer_allowed = B_FALSE;
@ -2470,7 +2452,7 @@ metaslab_sync_done(metaslab_t *msp, uint64_t txg)
if (msp->ms_loaded && if (msp->ms_loaded &&
msp->ms_selected_txg + metaslab_unload_delay < txg) { msp->ms_selected_txg + metaslab_unload_delay < txg) {
for (t = 1; t < TXG_CONCURRENT_STATES; t++) { for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
VERIFY0(range_tree_space( VERIFY0(range_tree_space(
msp->ms_alloctree[(txg + t) & TXG_MASK])); msp->ms_alloctree[(txg + t) & TXG_MASK]));
} }
@ -2654,13 +2636,11 @@ metaslab_trace_fini(zio_alloc_list_t *zal)
static void static void
metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, void *tag, int flags) metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, void *tag, int flags)
{ {
metaslab_group_t *mg;
if (!(flags & METASLAB_ASYNC_ALLOC) || if (!(flags & METASLAB_ASYNC_ALLOC) ||
flags & METASLAB_DONT_THROTTLE) flags & METASLAB_DONT_THROTTLE)
return; return;
mg = vdev_lookup_top(spa, vdev)->vdev_mg; metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
if (!mg->mg_class->mc_alloc_throttle_enabled) if (!mg->mg_class->mc_alloc_throttle_enabled)
return; return;
@ -2670,13 +2650,11 @@ metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, void *tag, int flags)
void void
metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, void *tag, int flags) metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, void *tag, int flags)
{ {
metaslab_group_t *mg;
if (!(flags & METASLAB_ASYNC_ALLOC) || if (!(flags & METASLAB_ASYNC_ALLOC) ||
flags & METASLAB_DONT_THROTTLE) flags & METASLAB_DONT_THROTTLE)
return; return;
mg = vdev_lookup_top(spa, vdev)->vdev_mg; metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
if (!mg->mg_class->mc_alloc_throttle_enabled) if (!mg->mg_class->mc_alloc_throttle_enabled)
return; return;
@ -2689,9 +2667,8 @@ metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, void *tag)
#ifdef ZFS_DEBUG #ifdef ZFS_DEBUG
const dva_t *dva = bp->blk_dva; const dva_t *dva = bp->blk_dva;
int ndvas = BP_GET_NDVAS(bp); int ndvas = BP_GET_NDVAS(bp);
int d;
for (d = 0; d < ndvas; d++) { for (int d = 0; d < ndvas; d++) {
     uint64_t vdev = DVA_GET_VDEV(&dva[d]);
     metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
     VERIFY(refcount_not_held(&mg->mg_alloc_queue_depth, tag));
@@ -2741,7 +2718,6 @@ metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
     uint64_t asize, uint64_t txg, uint64_t min_distance, dva_t *dva, int d)
 {
     metaslab_t *msp = NULL;
-    metaslab_t *search;
     uint64_t offset = -1ULL;
     uint64_t activation_weight;
     uint64_t target_distance;
@@ -2755,7 +2731,7 @@ metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
         }
     }

-    search = kmem_alloc(sizeof (*search), KM_SLEEP);
+    metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP);
     search->ms_weight = UINT64_MAX;
     search->ms_start = 0;
     for (;;) {
@@ -3062,8 +3038,6 @@ metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
 top:
     do {
         boolean_t allocatable;
-        uint64_t offset;
-        uint64_t distance, asize;

         ASSERT(mg->mg_activation_count == 1);
         vd = mg->mg_vd;
@@ -3120,7 +3094,7 @@ top:
          * in this BP. If we are trying hard, allow any offset
          * to be used (distance=0).
          */
-        distance = 0;
+        uint64_t distance = 0;
         if (!try_hard) {
             distance = vd->vdev_asize >>
                 ditto_same_vdev_distance_shift;
@@ -3128,11 +3102,11 @@ top:
                 distance = 0;
         }

-        asize = vdev_psize_to_asize(vd, psize);
+        uint64_t asize = vdev_psize_to_asize(vd, psize);
         ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);

-        offset = metaslab_group_alloc(mg, zal, asize, txg, distance,
-            dva, d);
+        uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg,
+            distance, dva, d);

         if (offset != -1ULL) {
             /*
@@ -3343,24 +3317,21 @@ metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, zio_t *zio,
     int flags)
 {
     uint64_t available_slots = 0;
-    uint64_t reserved_slots;
     boolean_t slot_reserved = B_FALSE;

     ASSERT(mc->mc_alloc_throttle_enabled);
     mutex_enter(&mc->mc_lock);

-    reserved_slots = refcount_count(&mc->mc_alloc_slots);
+    uint64_t reserved_slots = refcount_count(&mc->mc_alloc_slots);
     if (reserved_slots < mc->mc_alloc_max_slots)
         available_slots = mc->mc_alloc_max_slots - reserved_slots;

     if (slots <= available_slots || GANG_ALLOCATION(flags)) {
-        int d;
-
         /*
          * We reserve the slots individually so that we can unreserve
          * them individually when an I/O completes.
          */
-        for (d = 0; d < slots; d++) {
+        for (int d = 0; d < slots; d++) {
             reserved_slots = refcount_add(&mc->mc_alloc_slots, zio);
         }
         zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
@@ -3374,11 +3345,9 @@ metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, zio_t *zio,
 void
 metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots, zio_t *zio)
 {
-    int d;
-
     ASSERT(mc->mc_alloc_throttle_enabled);
     mutex_enter(&mc->mc_lock);
-    for (d = 0; d < slots; d++) {
+    for (int d = 0; d < slots; d++) {
         (void) refcount_remove(&mc->mc_alloc_slots, zio);
     }
     mutex_exit(&mc->mc_lock);
@@ -3391,7 +3360,7 @@ metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
 {
     dva_t *dva = bp->blk_dva;
     dva_t *hintdva = hintbp->blk_dva;
-    int d, error = 0;
+    int error = 0;

     ASSERT(bp->blk_birth == 0);
     ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);
@@ -3408,7 +3377,7 @@ metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
     ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
     ASSERT3P(zal, !=, NULL);

-    for (d = 0; d < ndvas; d++) {
+    for (int d = 0; d < ndvas; d++) {
         error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
             txg, flags, zal);
         if (error != 0) {
@@ -3444,14 +3413,14 @@ void
 metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
 {
     const dva_t *dva = bp->blk_dva;
-    int d, ndvas = BP_GET_NDVAS(bp);
+    int ndvas = BP_GET_NDVAS(bp);

     ASSERT(!BP_IS_HOLE(bp));
     ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));

     spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);

-    for (d = 0; d < ndvas; d++)
+    for (int d = 0; d < ndvas; d++)
         metaslab_free_dva(spa, &dva[d], txg, now);

     spa_config_exit(spa, SCL_FREE, FTAG);
@@ -3462,7 +3431,7 @@ metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
 {
     const dva_t *dva = bp->blk_dva;
     int ndvas = BP_GET_NDVAS(bp);
-    int d, error = 0;
+    int error = 0;

     ASSERT(!BP_IS_HOLE(bp));
@@ -3477,7 +3446,7 @@ metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
     spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

-    for (d = 0; d < ndvas; d++)
+    for (int d = 0; d < ndvas; d++)
         if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
             break;
@@ -3540,13 +3509,11 @@ metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp)
 void
 metaslab_check_free(spa_t *spa, const blkptr_t *bp)
 {
-    int i, j;
-
     if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
         return;

     spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
-    for (i = 0; i < BP_GET_NDVAS(bp); i++) {
+    for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
         uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
         vdev_t *vd = vdev_lookup_top(spa, vdev);
         uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
@@ -3558,7 +3525,7 @@ metaslab_check_free(spa_t *spa, const blkptr_t *bp)
         range_tree_verify(msp->ms_freeingtree, offset, size);
         range_tree_verify(msp->ms_freedtree, offset, size);

-        for (j = 0; j < TXG_DEFER_SIZE; j++)
+        for (int j = 0; j < TXG_DEFER_SIZE; j++)
             range_tree_verify(msp->ms_defertree[j], offset, size);
     }
     spa_config_exit(spa, SCL_VDEV, FTAG);
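
Every hunk above follows the same c99 pattern: a counter such as "int d;" declared at the top of a function is deleted, and the for statement declares it instead, limiting its scope to the loop body. A minimal standalone sketch of the before/after (illustrative only, not taken from the patch; build with -std=c99 or later):

    #include <stdio.h>

    static int
    sum_c89(const int *vals, int n)
    {
        int i;                  /* c89: counter must be declared up front */
        int total = 0;

        for (i = 0; i < n; i++)
            total += vals[i];
        return (total);
    }

    static int
    sum_c99(const int *vals, int n)
    {
        int total = 0;

        for (int i = 0; i < n; i++)     /* c99: scoped to the loop */
            total += vals[i];
        return (total);
    }

    int
    main(void)
    {
        int v[] = { 1, 2, 3 };

        printf("%d %d\n", sum_c89(v, 3), sum_c99(v, 3));
        return (0);
    }

Besides being shorter, the c99 form makes an accidental use of the counter after the loop a compile error instead of a silent bug.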


@@ -121,11 +121,9 @@ multilist_create(size_t size, size_t offset,
 void
 multilist_destroy(multilist_t *ml)
 {
-    int i;
-
     ASSERT(multilist_is_empty(ml));

-    for (i = 0; i < ml->ml_num_sublists; i++) {
+    for (int i = 0; i < ml->ml_num_sublists; i++) {
         multilist_sublist_t *mls = &ml->ml_sublists[i];

         ASSERT(list_is_empty(&mls->mls_list));
@@ -243,9 +241,7 @@ multilist_remove(multilist_t *ml, void *obj)
 int
 multilist_is_empty(multilist_t *ml)
 {
-    int i;
-
-    for (i = 0; i < ml->ml_num_sublists; i++) {
+    for (int i = 0; i < ml->ml_num_sublists; i++) {
         multilist_sublist_t *mls = &ml->ml_sublists[i];
         /* See comment in multilist_insert(). */
         boolean_t need_lock = !MUTEX_HELD(&mls->mls_lock);


@@ -757,10 +757,10 @@ spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
 static int
 spa_change_guid_check(void *arg, dmu_tx_t *tx)
 {
-    ASSERTV(uint64_t *newguid = arg);
     spa_t *spa = dmu_tx_pool(tx)->dp_spa;
     vdev_t *rvd = spa->spa_root_vdev;
     uint64_t vdev_state;
+    ASSERTV(uint64_t *newguid = arg);

     spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
     vdev_state = rvd->vdev_state;
@@ -875,7 +875,7 @@ spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
     uint_t count = ztip->zti_count;
     spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
     char name[32];
-    uint_t i, flags = 0;
+    uint_t flags = 0;
     boolean_t batch = B_FALSE;

     if (mode == ZTI_MODE_NULL) {
@@ -909,7 +909,7 @@ spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
         break;
     }

-    for (i = 0; i < count; i++) {
+    for (uint_t i = 0; i < count; i++) {
         taskq_t *tq;

         if (count > 1) {
@@ -950,14 +950,13 @@ static void
 spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
 {
     spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
-    uint_t i;

     if (tqs->stqs_taskq == NULL) {
         ASSERT3U(tqs->stqs_count, ==, 0);
         return;
     }

-    for (i = 0; i < tqs->stqs_count; i++) {
+    for (uint_t i = 0; i < tqs->stqs_count; i++) {
         ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
         taskq_destroy(tqs->stqs_taskq[i]);
     }
@@ -1019,10 +1018,8 @@ spa_taskq_dispatch_sync(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
 static void
 spa_create_zio_taskqs(spa_t *spa)
 {
-    int t, q;
-
-    for (t = 0; t < ZIO_TYPES; t++) {
-        for (q = 0; q < ZIO_TASKQ_TYPES; q++) {
+    for (int t = 0; t < ZIO_TYPES; t++) {
+        for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
             spa_taskqs_init(spa, t, q);
         }
     }
@@ -1203,8 +1200,6 @@ spa_activate(spa_t *spa, int mode)
 static void
 spa_deactivate(spa_t *spa)
 {
-    int t, q;
-
     ASSERT(spa->spa_sync_on == B_FALSE);
     ASSERT(spa->spa_dsl_pool == NULL);
     ASSERT(spa->spa_root_vdev == NULL);
@@ -1231,8 +1226,8 @@ spa_deactivate(spa_t *spa)
     taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);

-    for (t = 0; t < ZIO_TYPES; t++) {
-        for (q = 0; q < ZIO_TASKQ_TYPES; q++) {
+    for (int t = 0; t < ZIO_TYPES; t++) {
+        for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
             spa_taskqs_fini(spa, t, q);
         }
     }
@@ -1294,7 +1289,6 @@ spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
     nvlist_t **child;
     uint_t children;
     int error;
-    int c;

     if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
         return (error);
@@ -1314,7 +1308,7 @@ spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
         return (SET_ERROR(EINVAL));
     }

-    for (c = 0; c < children; c++) {
+    for (int c = 0; c < children; c++) {
         vdev_t *vd;

         if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
             atype)) != 0) {
@@ -1335,7 +1329,7 @@ spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
 static void
 spa_unload(spa_t *spa)
 {
-    int i, c;
+    int i;

     ASSERT(MUTEX_HELD(&spa_namespace_lock));
@@ -1360,7 +1354,7 @@ spa_unload(spa_t *spa)
      */
     if (spa->spa_root_vdev != NULL) {
         spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
-        for (c = 0; c < spa->spa_root_vdev->vdev_children; c++)
+        for (int c = 0; c < spa->spa_root_vdev->vdev_children; c++)
             vdev_metaslab_fini(spa->spa_root_vdev->vdev_child[c]);
         spa_config_exit(spa, SCL_ALL, FTAG);
     }
@@ -1372,7 +1366,7 @@ spa_unload(spa_t *spa)
      * Wait for any outstanding async I/O to complete.
      */
     if (spa->spa_async_zio_root != NULL) {
-        for (i = 0; i < max_ncpus; i++)
+        for (int i = 0; i < max_ncpus; i++)
             (void) zio_wait(spa->spa_async_zio_root[i]);
         kmem_free(spa->spa_async_zio_root, max_ncpus * sizeof (void *));
         spa->spa_async_zio_root = NULL;
@@ -1721,9 +1715,7 @@ load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
 static void
 spa_check_removed(vdev_t *vd)
 {
-    int c;
-
-    for (c = 0; c < vd->vdev_children; c++)
+    for (int c = 0; c < vd->vdev_children; c++)
         spa_check_removed(vd->vdev_child[c]);

     if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) &&
@@ -1736,14 +1728,12 @@ spa_check_removed(vdev_t *vd)
 static void
 spa_config_valid_zaps(vdev_t *vd, vdev_t *mvd)
 {
-    uint64_t i;
-
     ASSERT3U(vd->vdev_children, ==, mvd->vdev_children);

     vd->vdev_top_zap = mvd->vdev_top_zap;
     vd->vdev_leaf_zap = mvd->vdev_leaf_zap;

-    for (i = 0; i < vd->vdev_children; i++) {
+    for (uint64_t i = 0; i < vd->vdev_children; i++) {
         spa_config_valid_zaps(vd->vdev_child[i], mvd->vdev_child[i]);
     }
 }
@@ -1756,7 +1746,6 @@ spa_config_valid(spa_t *spa, nvlist_t *config)
 {
     vdev_t *mrvd, *rvd = spa->spa_root_vdev;
     nvlist_t *nv;
-    int c, i;

     VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nv) == 0);
@@ -1778,7 +1767,7 @@ spa_config_valid(spa_t *spa, nvlist_t *config)
             KM_SLEEP);
         VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);

-        for (c = 0; c < rvd->vdev_children; c++) {
+        for (int c = 0; c < rvd->vdev_children; c++) {
             vdev_t *tvd = rvd->vdev_child[c];
             vdev_t *mtvd = mrvd->vdev_child[c];
@@ -1795,7 +1784,7 @@ spa_config_valid(spa_t *spa, nvlist_t *config)
         VERIFY(nvlist_add_nvlist(spa->spa_load_info,
             ZPOOL_CONFIG_MISSING_DEVICES, nv) == 0);

-        for (i = 0; i < idx; i++)
+        for (int i = 0; i < idx; i++)
             nvlist_free(child[i]);
     }
     nvlist_free(nv);
@@ -1807,7 +1796,7 @@ spa_config_valid(spa_t *spa, nvlist_t *config)
      * from the MOS config (mrvd). Check each top-level vdev
      * with the corresponding MOS config top-level (mtvd).
      */
-    for (c = 0; c < rvd->vdev_children; c++) {
+    for (int c = 0; c < rvd->vdev_children; c++) {
         vdev_t *tvd = rvd->vdev_child[c];
         vdev_t *mtvd = mrvd->vdev_child[c];
@@ -1909,14 +1898,13 @@ spa_passivate_log(spa_t *spa)
 {
     vdev_t *rvd = spa->spa_root_vdev;
     boolean_t slog_found = B_FALSE;
-    int c;

     ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));

     if (!spa_has_slogs(spa))
         return (B_FALSE);

-    for (c = 0; c < rvd->vdev_children; c++) {
+    for (int c = 0; c < rvd->vdev_children; c++) {
         vdev_t *tvd = rvd->vdev_child[c];
         metaslab_group_t *mg = tvd->vdev_mg;
@@ -1933,11 +1921,10 @@ static void
 spa_activate_log(spa_t *spa)
 {
     vdev_t *rvd = spa->spa_root_vdev;
-    int c;

     ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));

-    for (c = 0; c < rvd->vdev_children; c++) {
+    for (int c = 0; c < rvd->vdev_children; c++) {
         vdev_t *tvd = rvd->vdev_child[c];
         metaslab_group_t *mg = tvd->vdev_mg;
@@ -1967,9 +1954,7 @@ spa_offline_log(spa_t *spa)
 static void
 spa_aux_check_removed(spa_aux_vdev_t *sav)
 {
-    int i;
-
-    for (i = 0; i < sav->sav_count; i++)
+    for (int i = 0; i < sav->sav_count; i++)
         spa_check_removed(sav->sav_vdevs[i]);
 }
@@ -2029,9 +2014,6 @@ static int
 spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
     const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
 {
-    zio_t *rio;
-    size_t size;
-
     if (bp == NULL || BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
         return (0);
     /*
@@ -2044,8 +2026,8 @@ spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
     if (!BP_IS_METADATA(bp) && !spa_load_verify_data)
         return (0);

-    rio = arg;
-    size = BP_GET_PSIZE(bp);
+    zio_t *rio = arg;
+    size_t size = BP_GET_PSIZE(bp);

     mutex_enter(&spa->spa_scrub_lock);
     while (spa->spa_scrub_inflight >= spa_load_verify_maxinflight)
@@ -2324,7 +2306,6 @@ vdev_count_verify_zaps(vdev_t *vd)
 {
     spa_t *spa = vd->vdev_spa;
     uint64_t total = 0;
-    uint64_t i;

     if (vd->vdev_top_zap != 0) {
         total++;
@@ -2337,7 +2318,7 @@ vdev_count_verify_zaps(vdev_t *vd)
             spa->spa_all_vdev_zaps, vd->vdev_leaf_zap));
     }

-    for (i = 0; i < vd->vdev_children; i++) {
+    for (uint64_t i = 0; i < vd->vdev_children; i++) {
         total += vdev_count_verify_zaps(vd->vdev_child[i]);
     }
@@ -2553,11 +2534,10 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
     uberblock_t *ub = &spa->spa_uberblock;
     uint64_t children, config_cache_txg = spa->spa_config_txg;
     int orig_mode = spa->spa_mode;
-    int parse, i;
+    int parse;
     uint64_t obj;
     boolean_t missing_feat_write = B_FALSE;
     boolean_t activity_check = B_FALSE;
-    nvlist_t *mos_config;

     /*
      * If this is an untrusted config, access the pool in read-only mode.
@@ -2581,7 +2561,7 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
      */
     spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
         KM_SLEEP);
-    for (i = 0; i < max_ncpus; i++) {
+    for (int i = 0; i < max_ncpus; i++) {
         spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
             ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
             ZIO_FLAG_GODFATHER);
@@ -2720,13 +2700,12 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
      */
     if (ub->ub_version >= SPA_VERSION_FEATURES) {
         nvlist_t *unsup_feat;
-        nvpair_t *nvp;

         VERIFY(nvlist_alloc(&unsup_feat, NV_UNIQUE_NAME, KM_SLEEP) ==
             0);

-        for (nvp = nvlist_next_nvpair(spa->spa_label_features, NULL);
-            nvp != NULL;
+        for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features,
+            NULL); nvp != NULL;
             nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) {
             if (!zfeature_is_supported(nvpair_name(nvp))) {
                 VERIFY(nvlist_add_string(unsup_feat,
@@ -2788,7 +2767,6 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
     if (spa_version(spa) >= SPA_VERSION_FEATURES) {
         boolean_t missing_feat_read = B_FALSE;
         nvlist_t *unsup_feat, *enabled_feat;
-        spa_feature_t i;

         if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ,
             &spa->spa_feat_for_read_obj) != 0) {
@@ -2864,7 +2842,7 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
          * Load refcounts for ZFS features from disk into an in-memory
          * cache during SPA initialization.
          */
-        for (i = 0; i < SPA_FEATURES; i++) {
+        for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
             uint64_t refcount;

             error = feature_get_refcount_from_disk(spa,
@@ -2989,6 +2967,7 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
          */

         /* The sentinel is only available in the MOS config. */
+        nvlist_t *mos_config;
         if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0)
             return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
@@ -3196,7 +3175,6 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
         dmu_tx_t *tx;
         int need_update = B_FALSE;
         dsl_pool_t *dp = spa_get_dsl(spa);
-        int c;

         ASSERT(state != SPA_LOAD_TRYIMPORT);
@@ -3243,7 +3221,7 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
             (spa->spa_import_flags & ZFS_IMPORT_VERBATIM))
             need_update = B_TRUE;

-        for (c = 0; c < rvd->vdev_children; c++)
+        for (int c = 0; c < rvd->vdev_children; c++)
             if (rvd->vdev_child[c]->vdev_ms_array == 0)
                 need_update = B_TRUE;
@@ -4018,8 +3996,6 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
     boolean_t has_encryption;
     spa_feature_t feat;
     char *feat_name;
-    nvpair_t *elem;
-    int c, i;
     char *poolname;
     nvlist_t *nvl;
@@ -4061,7 +4037,7 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
     has_features = B_FALSE;
     has_encryption = B_FALSE;
-    for (elem = nvlist_next_nvpair(props, NULL);
+    for (nvpair_t *elem = nvlist_next_nvpair(props, NULL);
         elem != NULL; elem = nvlist_next_nvpair(props, elem)) {
         if (zpool_prop_feature(nvpair_name(elem))) {
             has_features = B_TRUE;
@@ -4101,7 +4077,7 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
      */
     spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
         KM_SLEEP);
-    for (i = 0; i < max_ncpus; i++) {
+    for (int i = 0; i < max_ncpus; i++) {
         spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
             ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
             ZIO_FLAG_GODFATHER);
@@ -4124,7 +4100,7 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
         (error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
         (error = spa_validate_aux(spa, nvroot, txg,
         VDEV_ALLOC_ADD)) == 0) {
-        for (c = 0; c < rvd->vdev_children; c++) {
+        for (int c = 0; c < rvd->vdev_children; c++) {
             vdev_metaslab_set_size(rvd->vdev_child[c]);
             vdev_expand(rvd->vdev_child[c], txg);
         }
@@ -4743,7 +4719,6 @@ spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
     vdev_t *vd, *tvd;
     nvlist_t **spares, **l2cache;
     uint_t nspares, nl2cache;
-    int c;

     ASSERT(spa_writeable(spa));
@@ -4780,7 +4755,7 @@ spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
     /*
      * Transfer each new top-level vdev from vd to rvd.
      */
-    for (c = 0; c < vd->vdev_children; c++) {
+    for (int c = 0; c < vd->vdev_children; c++) {

         /*
          * Set the vdev id to the first hole, if one exists.
@@ -4852,12 +4827,12 @@ int
 spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
 {
     uint64_t txg, dtl_max_txg;
-    ASSERTV(vdev_t *rvd = spa->spa_root_vdev);
     vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
     vdev_ops_t *pvops;
     char *oldvdpath, *newvdpath;
     int newvd_isspare;
     int error;
+    ASSERTV(vdev_t *rvd = spa->spa_root_vdev);

     ASSERT(spa_writeable(spa));
@@ -5062,12 +5037,12 @@ spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
 {
     uint64_t txg;
     int error;
-    ASSERTV(vdev_t *rvd = spa->spa_root_vdev);
     vdev_t *vd, *pvd, *cvd, *tvd;
     boolean_t unspare = B_FALSE;
     uint64_t unspare_guid = 0;
     char *vdpath;
-    int c, t;
+    ASSERTV(vdev_t *rvd = spa->spa_root_vdev);

     ASSERT(spa_writeable(spa));

     txg = spa_vdev_enter(spa);
@@ -5134,7 +5109,7 @@ spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
         vd->vdev_path != NULL) {
         size_t len = strlen(vd->vdev_path);

-        for (c = 0; c < pvd->vdev_children; c++) {
+        for (int c = 0; c < pvd->vdev_children; c++) {
             cvd = pvd->vdev_child[c];

             if (cvd == vd || cvd->vdev_path == NULL)
@@ -5241,7 +5216,7 @@ spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
      * prevent vd from being accessed after it's freed.
      */
     vdpath = spa_strdup(vd->vdev_path ? vd->vdev_path : "none");
-    for (t = 0; t < TXG_SIZE; t++)
+    for (int t = 0; t < TXG_SIZE; t++)
         (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
     vd->vdev_detached = B_TRUE;
     vdev_dirty(tvd, VDD_DTL, vd, txg);
@@ -5589,9 +5564,7 @@ out:
 static nvlist_t *
 spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
 {
-    int i;
-
-    for (i = 0; i < count; i++) {
+    for (int i = 0; i < count; i++) {
         uint64_t guid;

         VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID,
@@ -5609,12 +5582,11 @@ spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
     nvlist_t *dev_to_remove)
 {
     nvlist_t **newdev = NULL;
-    int i, j;

     if (count > 1)
         newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);

-    for (i = 0, j = 0; i < count; i++) {
+    for (int i = 0, j = 0; i < count; i++) {
         if (dev[i] == dev_to_remove)
             continue;
         VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
@@ -5623,7 +5595,7 @@ spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
     VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
     VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);

-    for (i = 0; i < count - 1; i++)
+    for (int i = 0; i < count - 1; i++)
         nvlist_free(newdev[i]);

     if (count > 1)
@@ -5848,9 +5820,8 @@ static vdev_t *
 spa_vdev_resilver_done_hunt(vdev_t *vd)
 {
     vdev_t *newvd, *oldvd;
-    int c;

-    for (c = 0; c < vd->vdev_children; c++) {
+    for (int c = 0; c < vd->vdev_children; c++) {
         oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
         if (oldvd != NULL)
             return (oldvd);
@@ -6065,8 +6036,6 @@ spa_scan(spa_t *spa, pool_scan_func_t func)
 static void
 spa_async_remove(spa_t *spa, vdev_t *vd)
 {
-    int c;
-
     if (vd->vdev_remove_wanted) {
         vd->vdev_remove_wanted = B_FALSE;
         vd->vdev_delayed_close = B_FALSE;
@@ -6085,33 +6054,29 @@ spa_async_remove(spa_t *spa, vdev_t *vd)
         vdev_state_dirty(vd->vdev_top);
     }

-    for (c = 0; c < vd->vdev_children; c++)
+    for (int c = 0; c < vd->vdev_children; c++)
         spa_async_remove(spa, vd->vdev_child[c]);
 }

 static void
 spa_async_probe(spa_t *spa, vdev_t *vd)
 {
-    int c;
-
     if (vd->vdev_probe_wanted) {
         vd->vdev_probe_wanted = B_FALSE;
         vdev_reopen(vd);    /* vdev_open() does the actual probe */
     }

-    for (c = 0; c < vd->vdev_children; c++)
+    for (int c = 0; c < vd->vdev_children; c++)
         spa_async_probe(spa, vd->vdev_child[c]);
 }

 static void
 spa_async_autoexpand(spa_t *spa, vdev_t *vd)
 {
-    int c;
-
     if (!spa->spa_autoexpand)
         return;

-    for (c = 0; c < vd->vdev_children; c++) {
+    for (int c = 0; c < vd->vdev_children; c++) {
         vdev_t *cvd = vd->vdev_child[c];
         spa_async_autoexpand(spa, cvd);
     }
@@ -6404,7 +6369,6 @@ static void
 spa_avz_build(vdev_t *vd, uint64_t avz, dmu_tx_t *tx)
 {
     spa_t *spa = vd->vdev_spa;
-    uint64_t i;

     if (vd->vdev_top_zap != 0) {
         VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
@@ -6414,7 +6378,7 @@ spa_avz_build(vdev_t *vd, uint64_t avz, dmu_tx_t *tx)
         VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
             vd->vdev_leaf_zap, tx));
     }

-    for (i = 0; i < vd->vdev_children; i++) {
+    for (uint64_t i = 0; i < vd->vdev_children; i++) {
         spa_avz_build(vd->vdev_child[i], avz, tx);
     }
 }
@@ -6441,15 +6405,15 @@ spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
         spa->spa_all_vdev_zaps != 0);

     if (spa->spa_avz_action == AVZ_ACTION_REBUILD) {
-        zap_cursor_t zc;
-        zap_attribute_t za;
-
         /* Make and build the new AVZ */
         uint64_t new_avz = zap_create(spa->spa_meta_objset,
             DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx);
         spa_avz_build(spa->spa_root_vdev, new_avz, tx);

         /* Diff old AVZ with new one */
+        zap_cursor_t zc;
+        zap_attribute_t za;
+
         for (zap_cursor_init(&zc, spa->spa_meta_objset,
             spa->spa_all_vdev_zaps);
             zap_cursor_retrieve(&zc, &za) == 0;
@@ -6783,15 +6747,12 @@ spa_sync(spa_t *spa, uint64_t txg)
     dsl_pool_t *dp = spa->spa_dsl_pool;
     objset_t *mos = spa->spa_meta_objset;
     bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
-    metaslab_class_t *mc;
     vdev_t *rvd = spa->spa_root_vdev;
     vdev_t *vd;
     dmu_tx_t *tx;
     int error;
     uint32_t max_queue_depth = zfs_vdev_async_write_max_active *
         zfs_vdev_queue_depth_pct / 100;
-    uint64_t queue_depth_total;
-    int c;

     VERIFY(spa_writeable(spa));
@@ -6866,8 +6827,8 @@ spa_sync(spa_t *spa, uint64_t txg)
      * The max queue depth will not change in the middle of syncing
      * out this txg.
      */
-    queue_depth_total = 0;
-    for (c = 0; c < rvd->vdev_children; c++) {
+    uint64_t queue_depth_total = 0;
+    for (int c = 0; c < rvd->vdev_children; c++) {
         vdev_t *tvd = rvd->vdev_child[c];
         metaslab_group_t *mg = tvd->vdev_mg;
@@ -6884,7 +6845,7 @@ spa_sync(spa_t *spa, uint64_t txg)
         mg->mg_max_alloc_queue_depth = max_queue_depth;
         queue_depth_total += mg->mg_max_alloc_queue_depth;
     }
-    mc = spa_normal_class(spa);
+    metaslab_class_t *mc = spa_normal_class(spa);
     ASSERT0(refcount_count(&mc->mc_alloc_slots));
     mc->mc_alloc_max_slots = queue_depth_total;
     mc->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
@@ -6997,7 +6958,7 @@ spa_sync(spa_t *spa, uint64_t txg)
         int children = rvd->vdev_children;
         int c0 = spa_get_random(children);

-        for (c = 0; c < children; c++) {
+        for (int c = 0; c < children; c++) {
             vd = rvd->vdev_child[(c0 + c) % children];
             if (vd->vdev_ms_array == 0 || vd->vdev_islog)
                 continue;
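
Several of the hunks above only reorder an ASSERTV(...) line relative to the ordinary declarations. ASSERTV is, roughly, a macro that keeps a variable needed only by ASSERTs out of non-debug builds; the sketch below is a simplified stand-in for the real SPL definition, which may differ in detail:

    #include <stdint.h>
    #include <stdlib.h>

    #ifdef DEBUG
    #define ASSERTV(x)      x       /* keep declarations used by ASSERTs */
    #define ASSERT(cond)    ((void)((cond) || (abort(), 0)))
    #else
    #define ASSERTV(x)              /* compile the declaration away */
    #define ASSERT(cond)    ((void)0)
    #endif

    static uint64_t
    read_guid(void *arg)
    {
        uint64_t guid = *(uint64_t *)arg;
        ASSERTV(uint64_t *newguid = arg);   /* exists only when DEBUG */

        ASSERT(guid == *newguid);
        return (guid);
    }

    int
    main(void)
    {
        uint64_t guid = 42;

        return (read_guid(&guid) == 42 ? 0 : 1);
    }

Because c99 no longer requires declarations to lead the block, the patch can move the assertion-only variable next to the other locals and match the upstream ordering without tripping c89's declaration rules.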


@@ -413,7 +413,6 @@ spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg, int getstats)
     boolean_t locked = B_FALSE;
     uint64_t split_guid;
     char *pool_name;
-    int config_gen_flags = 0;

     if (vd == NULL) {
         vd = rvd;
@@ -463,6 +462,7 @@ spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg, int getstats)
     fnvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID, hostid);
     fnvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME, utsname()->nodename);

+    int config_gen_flags = 0;
     if (vd != rvd) {
         fnvlist_add_uint64(config, ZPOOL_CONFIG_TOP_GUID,
             vd->vdev_top->vdev_guid);
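
The config_gen_flags move above is the other recurring c99 cleanup: instead of declaring a variable at the top of spa_config_generate() and leaving it idle for dozens of lines, the declaration now sits at its first use, so its scope matches where it is meaningful. A small illustrative sketch (hypothetical names, not from the tree):

    #include <stdint.h>
    #include <stdio.h>

    static void
    print_config(uint64_t hostid, int is_top_level)
    {
        printf("hostid=%llu\n", (unsigned long long)hostid);

        /* c99: declared immediately before the code that sets it */
        int gen_flags = 0;

        if (!is_top_level)
            gen_flags |= 0x1;
        printf("flags=0x%x\n", gen_flags);
    }

    int
    main(void)
    {
        print_config(1234, 0);
        return (0);
    }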


@@ -360,9 +360,7 @@ uint64_t spa_min_slop = 128 * 1024 * 1024;
 static void
 spa_config_lock_init(spa_t *spa)
 {
-    int i;
-
-    for (i = 0; i < SCL_LOCKS; i++) {
+    for (int i = 0; i < SCL_LOCKS; i++) {
         spa_config_lock_t *scl = &spa->spa_config_lock[i];
         mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
         cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
@@ -375,9 +373,7 @@ spa_config_lock_init(spa_t *spa)
 static void
 spa_config_lock_destroy(spa_t *spa)
 {
-    int i;
-
-    for (i = 0; i < SCL_LOCKS; i++) {
+    for (int i = 0; i < SCL_LOCKS; i++) {
         spa_config_lock_t *scl = &spa->spa_config_lock[i];
         mutex_destroy(&scl->scl_lock);
         cv_destroy(&scl->scl_cv);
@@ -390,9 +386,7 @@ spa_config_lock_destroy(spa_t *spa)
 int
 spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
 {
-    int i;
-
-    for (i = 0; i < SCL_LOCKS; i++) {
+    for (int i = 0; i < SCL_LOCKS; i++) {
         spa_config_lock_t *scl = &spa->spa_config_lock[i];
         if (!(locks & (1 << i)))
             continue;
@@ -424,11 +418,10 @@ void
 spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
 {
     int wlocks_held = 0;
-    int i;

     ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);

-    for (i = 0; i < SCL_LOCKS; i++) {
+    for (int i = 0; i < SCL_LOCKS; i++) {
         spa_config_lock_t *scl = &spa->spa_config_lock[i];
         if (scl->scl_writer == curthread)
             wlocks_held |= (1 << i);
@@ -457,9 +450,7 @@ spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
 void
 spa_config_exit(spa_t *spa, int locks, void *tag)
 {
-    int i;
-
-    for (i = SCL_LOCKS - 1; i >= 0; i--) {
+    for (int i = SCL_LOCKS - 1; i >= 0; i--) {
         spa_config_lock_t *scl = &spa->spa_config_lock[i];
         if (!(locks & (1 << i)))
             continue;
@@ -478,9 +469,9 @@ spa_config_exit(spa_t *spa, int locks, void *tag)
 int
 spa_config_held(spa_t *spa, int locks, krw_t rw)
 {
-    int i, locks_held = 0;
+    int locks_held = 0;

-    for (i = 0; i < SCL_LOCKS; i++) {
+    for (int i = 0; i < SCL_LOCKS; i++) {
         spa_config_lock_t *scl = &spa->spa_config_lock[i];
         if (!(locks & (1 << i)))
             continue;
@@ -562,8 +553,6 @@ spa_add(const char *name, nvlist_t *config, const char *altroot)
 {
     spa_t *spa;
     spa_config_dirent_t *dp;
-    int t;
-    int i;

     ASSERT(MUTEX_HELD(&spa_namespace_lock));
@@ -589,7 +578,7 @@ spa_add(const char *name, nvlist_t *config, const char *altroot)
     cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
     cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);

-    for (t = 0; t < TXG_SIZE; t++)
+    for (int t = 0; t < TXG_SIZE; t++)
         bplist_create(&spa->spa_free_bplist[t]);

     (void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
@@ -660,7 +649,7 @@ spa_add(const char *name, nvlist_t *config, const char *altroot)
      * setting SPA_FEATURE_DISABLED for all entries in the feature
      * refcount cache.
      */
-    for (i = 0; i < SPA_FEATURES; i++) {
+    for (int i = 0; i < SPA_FEATURES; i++) {
         spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
     }
@@ -676,7 +665,6 @@ void
 spa_remove(spa_t *spa)
 {
     spa_config_dirent_t *dp;
-    int t;

     ASSERT(MUTEX_HELD(&spa_namespace_lock));
     ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
@@ -710,7 +698,7 @@ spa_remove(spa_t *spa)
     spa_stats_destroy(spa);
     spa_config_lock_destroy(spa);

-    for (t = 0; t < TXG_SIZE; t++)
+    for (int t = 0; t < TXG_SIZE; t++)
         bplist_destroy(&spa->spa_free_bplist[t]);

     zio_checksum_templates_free(spa);
@@ -1077,9 +1065,10 @@ spa_vdev_config_enter(spa_t *spa)
 void
 spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
 {
+    ASSERT(MUTEX_HELD(&spa_namespace_lock));
+
     int config_changed = B_FALSE;

-    ASSERT(MUTEX_HELD(&spa_namespace_lock));
     ASSERT(txg > spa_last_synced_txg(spa));

     spa->spa_pending_vdev = NULL;
@@ -1803,9 +1792,8 @@ uint64_t
 bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
 {
     uint64_t dsize = 0;
-    int d;

-    for (d = 0; d < BP_GET_NDVAS(bp); d++)
+    for (int d = 0; d < BP_GET_NDVAS(bp); d++)
         dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

     return (dsize);
@@ -1815,11 +1803,10 @@ uint64_t
 bp_get_dsize(spa_t *spa, const blkptr_t *bp)
 {
     uint64_t dsize = 0;
-    int d;

     spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

-    for (d = 0; d < BP_GET_NDVAS(bp); d++)
+    for (int d = 0; d < BP_GET_NDVAS(bp); d++)
         dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

     spa_config_exit(spa, SCL_VDEV, FTAG);


@@ -144,13 +144,11 @@ space_map_histogram_clear(space_map_t *sm)
 boolean_t
 space_map_histogram_verify(space_map_t *sm, range_tree_t *rt)
 {
-    int i;
-
     /*
      * Verify that the in-core range tree does not have any
      * ranges smaller than our sm_shift size.
      */
-    for (i = 0; i < sm->sm_shift; i++) {
+    for (int i = 0; i < sm->sm_shift; i++) {
         if (rt->rt_histogram[i] != 0)
             return (B_FALSE);
     }
@@ -161,7 +159,6 @@ void
 space_map_histogram_add(space_map_t *sm, range_tree_t *rt, dmu_tx_t *tx)
 {
     int idx = 0;
-    int i;

     ASSERT(MUTEX_HELD(rt->rt_lock));
     ASSERT(dmu_tx_is_syncing(tx));
@@ -181,7 +178,7 @@ space_map_histogram_add(space_map_t *sm, range_tree_t *rt, dmu_tx_t *tx)
      * map only cares about allocatable blocks (minimum of sm_shift) we
      * can safely ignore all ranges in the range tree smaller than sm_shift.
      */
-    for (i = sm->sm_shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
+    for (int i = sm->sm_shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
         /*
          * Since the largest histogram bucket in the space map is


@@ -785,9 +785,7 @@ txg_list_empty(txg_list_t *tl, uint64_t txg)
 boolean_t
 txg_all_lists_empty(txg_list_t *tl)
 {
-    int i;
-
-    for (i = 0; i < TXG_SIZE; i++) {
+    for (int i = 0; i < TXG_SIZE; i++) {
         if (!txg_list_empty(tl, i)) {
             return (B_FALSE);
         }


@@ -97,9 +97,8 @@ vdev_default_asize(vdev_t *vd, uint64_t psize)
 {
     uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
     uint64_t csize;
-    int c;

-    for (c = 0; c < vd->vdev_children; c++) {
+    for (int c = 0; c < vd->vdev_children; c++) {
         csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
         asize = MAX(asize, csize);
     }
@@ -146,10 +145,9 @@ vdev_get_min_asize(vdev_t *vd)
 void
 vdev_set_min_asize(vdev_t *vd)
 {
-    int c;
-
     vd->vdev_min_asize = vdev_get_min_asize(vd);

-    for (c = 0; c < vd->vdev_children; c++)
+    for (int c = 0; c < vd->vdev_children; c++)
         vdev_set_min_asize(vd->vdev_child[c]);
 }
@@ -172,12 +170,11 @@ vdev_t *
 vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
 {
     vdev_t *mvd;
-    int c;

     if (vd->vdev_guid == guid)
         return (vd);

-    for (c = 0; c < vd->vdev_children; c++)
+    for (int c = 0; c < vd->vdev_children; c++)
         if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
             NULL)
             return (mvd);
@@ -189,12 +186,11 @@ static int
 vdev_count_leaves_impl(vdev_t *vd)
 {
     int n = 0;
-    int c;

     if (vd->vdev_ops->vdev_op_leaf)
         return (1);

-    for (c = 0; c < vd->vdev_children; c++)
+    for (int c = 0; c < vd->vdev_children; c++)
         n += vdev_count_leaves_impl(vd->vdev_child[c]);

     return (n);
@@ -289,17 +285,16 @@ vdev_compact_children(vdev_t *pvd)
     vdev_t **newchild, *cvd;
     int oldc = pvd->vdev_children;
     int newc;
-    int c;

     ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

-    for (c = newc = 0; c < oldc; c++)
+    for (int c = newc = 0; c < oldc; c++)
         if (pvd->vdev_child[c])
             newc++;

     newchild = kmem_zalloc(newc * sizeof (vdev_t *), KM_SLEEP);

-    for (c = newc = 0; c < oldc; c++) {
+    for (int c = newc = 0; c < oldc; c++) {
         if ((cvd = pvd->vdev_child[c]) != NULL) {
             newchild[newc] = cvd;
             cvd->vdev_id = newc++;
@@ -318,7 +313,6 @@ vdev_t *
 vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
 {
     vdev_t *vd;
-    int t;

     vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);
@@ -367,7 +361,7 @@ vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
     mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
     mutex_init(&vd->vdev_queue_lock, NULL, MUTEX_DEFAULT, NULL);

-    for (t = 0; t < DTL_TYPES; t++) {
+    for (int t = 0; t < DTL_TYPES; t++) {
         vd->vdev_dtl[t] = range_tree_create(NULL, NULL,
             &vd->vdev_dtl_lock);
     }
@@ -651,7 +645,6 @@ vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
 void
 vdev_free(vdev_t *vd)
 {
-    int c, t;
     spa_t *spa = vd->vdev_spa;

     /*
@@ -666,7 +659,7 @@ vdev_free(vdev_t *vd)
     /*
      * Free all children.
      */
-    for (c = 0; c < vd->vdev_children; c++)
+    for (int c = 0; c < vd->vdev_children; c++)
         vdev_free(vd->vdev_child[c]);

     ASSERT(vd->vdev_child == NULL);
@@ -720,7 +713,7 @@ vdev_free(vdev_t *vd)
     mutex_enter(&vd->vdev_dtl_lock);
     space_map_close(vd->vdev_dtl_sm);
-    for (t = 0; t < DTL_TYPES; t++) {
+    for (int t = 0; t < DTL_TYPES; t++) {
         range_tree_vacate(vd->vdev_dtl[t], NULL, NULL);
         range_tree_destroy(vd->vdev_dtl[t]);
     }
@@ -812,14 +805,12 @@ vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
 static void
 vdev_top_update(vdev_t *tvd, vdev_t *vd)
 {
-    int c;
-
     if (vd == NULL)
         return;

     vd->vdev_top = tvd;

-    for (c = 0; c < vd->vdev_children; c++)
+    for (int c = 0; c < vd->vdev_children; c++)
         vdev_top_update(tvd, vd->vdev_child[c]);
 }
@@ -1081,7 +1072,6 @@ vdev_probe(vdev_t *vd, zio_t *zio)
     spa_t *spa = vd->vdev_spa;
     vdev_probe_stats_t *vps = NULL;
     zio_t *pio;
-    int l;

     ASSERT(vd->vdev_ops->vdev_op_leaf);
@@ -1151,7 +1141,7 @@ vdev_probe(vdev_t *vd, zio_t *zio)
         return (NULL);
     }

-    for (l = 1; l < VDEV_LABELS; l++) {
+    for (int l = 1; l < VDEV_LABELS; l++) {
         zio_nowait(zio_read_phys(pio, vd,
             vdev_label_offset(vd->vdev_psize, l,
             offsetof(vdev_label_t, vl_pad2)), VDEV_PAD_SIZE,
@@ -1180,14 +1170,12 @@ vdev_open_child(void *arg)
 static boolean_t
 vdev_uses_zvols(vdev_t *vd)
 {
-    int c;
-
 #ifdef _KERNEL
     if (zvol_is_zvol(vd->vdev_path))
         return (B_TRUE);
 #endif

-    for (c = 0; c < vd->vdev_children; c++)
+    for (int c = 0; c < vd->vdev_children; c++)
         if (vdev_uses_zvols(vd->vdev_child[c]))
             return (B_TRUE);
@@ -1199,7 +1187,6 @@ vdev_open_children(vdev_t *vd)
 {
     taskq_t *tq;
     int children = vd->vdev_children;
-    int c;

     /*
      * in order to handle pools on top of zvols, do the opens
@@ -1208,7 +1195,7 @@ vdev_open_children(vdev_t *vd)
      */
     if (vdev_uses_zvols(vd)) {
 retry_sync:
-        for (c = 0; c < children; c++)
+        for (int c = 0; c < children; c++)
             vd->vdev_child[c]->vdev_open_error =
                 vdev_open(vd->vdev_child[c]);
     } else {
@@ -1217,7 +1204,7 @@ retry_sync:
         if (tq == NULL)
             goto retry_sync;

-        for (c = 0; c < children; c++)
+        for (int c = 0; c < children; c++)
             VERIFY(taskq_dispatch(tq, vdev_open_child,
                 vd->vdev_child[c], TQ_SLEEP) != TASKQID_INVALID);
@@ -1226,7 +1213,7 @@ retry_sync:
     vd->vdev_nonrot = B_TRUE;

-    for (c = 0; c < children; c++)
+    for (int c = 0; c < children; c++)
         vd->vdev_nonrot &= vd->vdev_child[c]->vdev_nonrot;
 }
@@ -1242,7 +1229,6 @@ vdev_open(vdev_t *vd)
     uint64_t max_osize = 0;
     uint64_t asize, max_asize, psize;
     uint64_t ashift = 0;
-    int c;

     ASSERT(vd->vdev_open_thread == curthread ||
         spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
@@ -1321,7 +1307,7 @@ vdev_open(vdev_t *vd)
     if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops)
         return (0);

-    for (c = 0; c < vd->vdev_children; c++) {
+    for (int c = 0; c < vd->vdev_children; c++) {
         if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
             vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
                 VDEV_AUX_NONE);
@@ -1479,9 +1465,8 @@ vdev_validate(vdev_t *vd, boolean_t strict)
     nvlist_t *label;
     uint64_t guid = 0, top_guid;
     uint64_t state;
-    int c;

-    for (c = 0; c < vd->vdev_children; c++)
+    for (int c = 0; c < vd->vdev_children; c++)
         if (vdev_validate(vd->vdev_child[c], strict) != 0)
             return (SET_ERROR(EBADF));
@@ -1623,13 +1608,12 @@ void
 vdev_hold(vdev_t *vd)
 {
     spa_t *spa = vd->vdev_spa;
-    int c;

     ASSERT(spa_is_root(spa));
     if (spa->spa_state == POOL_STATE_UNINITIALIZED)
         return;

-    for (c = 0; c < vd->vdev_children; c++)
+    for (int c = 0; c < vd->vdev_children; c++)
         vdev_hold(vd->vdev_child[c]);

     if (vd->vdev_ops->vdev_op_leaf)
@@ -1639,10 +1623,8 @@ vdev_hold(vdev_t *vd)
 void
 vdev_rele(vdev_t *vd)
 {
-    int c;
-
     ASSERT(spa_is_root(vd->vdev_spa));
-    for (c = 0; c < vd->vdev_children; c++)
+    for (int c = 0; c < vd->vdev_children; c++)
         vdev_rele(vd->vdev_child[c]);

     if (vd->vdev_ops->vdev_op_leaf)
@@ -1748,9 +1730,7 @@ vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
 void
 vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg)
 {
-    int c;
-
-    for (c = 0; c < vd->vdev_children; c++)
+    for (int c = 0; c < vd->vdev_children; c++)
         vdev_dirty_leaves(vd->vdev_child[c], flags, txg);

     if (vd->vdev_ops->vdev_op_leaf)
@@ -1935,11 +1915,11 @@ vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
 {
     spa_t *spa = vd->vdev_spa;
     avl_tree_t reftree;
-    int c, t, minref;
+    int minref;

     ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

-    for (c = 0; c < vd->vdev_children; c++)
+    for (int c = 0; c < vd->vdev_children; c++)
         vdev_dtl_reassess(vd->vdev_child[c], txg,
             scrub_txg, scrub_done);
@@ -2020,9 +2000,7 @@ vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
     }

     mutex_enter(&vd->vdev_dtl_lock);
-    for (t = 0; t < DTL_TYPES; t++) {
-        int c;
-
+    for (int t = 0; t < DTL_TYPES; t++) {
         /* account for child's outage in parent's missing map */
         int s = (t == DTL_MISSING) ? DTL_OUTAGE : t;
         if (t == DTL_SCRUB)
@@ -2034,7 +2012,7 @@ vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
         else
             minref = vd->vdev_children;    /* any kind of mirror */
         space_reftree_create(&reftree);
-        for (c = 0; c < vd->vdev_children; c++) {
+        for (int c = 0; c < vd->vdev_children; c++) {
             vdev_t *cvd = vd->vdev_child[c];
             mutex_enter(&cvd->vdev_dtl_lock);
             space_reftree_add_map(&reftree, cvd->vdev_dtl[s], 1);
@@ -2052,7 +2030,6 @@ vdev_dtl_load(vdev_t *vd)
     spa_t *spa = vd->vdev_spa;
     objset_t *mos = spa->spa_meta_objset;
     int error = 0;
-    int c;

     if (vd->vdev_ops->vdev_op_leaf && vd->vdev_dtl_object != 0) {
         ASSERT(!vd->vdev_ishole);
@@ -2078,7 +2055,7 @@ vdev_dtl_load(vdev_t *vd)
         return (error);
     }

-    for (c = 0; c < vd->vdev_children; c++) {
+    for (int c = 0; c < vd->vdev_children; c++) {
         error = vdev_dtl_load(vd->vdev_child[c]);
         if (error != 0)
             break;
@@ -2114,8 +2091,6 @@ vdev_create_link_zap(vdev_t *vd, dmu_tx_t *tx)
 void
 vdev_construct_zaps(vdev_t *vd, dmu_tx_t *tx)
 {
-    uint64_t i;
-
     if (vd->vdev_ops != &vdev_hole_ops &&
         vd->vdev_ops != &vdev_missing_ops &&
         vd->vdev_ops != &vdev_root_ops &&
@@ -2127,7 +2102,7 @@ vdev_construct_zaps(vdev_t *vd, dmu_tx_t *tx)
             vd->vdev_top_zap = vdev_create_link_zap(vd, tx);
         }
     }

-    for (i = 0; i < vd->vdev_children; i++) {
+    for (uint64_t i = 0; i < vd->vdev_children; i++) {
         vdev_construct_zaps(vd->vdev_child[i], tx);
     }
 }
@@ -2261,7 +2236,6 @@ vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
     boolean_t needed = B_FALSE;
     uint64_t thismin = UINT64_MAX;
     uint64_t thismax = 0;
-    int c;

     if (vd->vdev_children == 0) {
         mutex_enter(&vd->vdev_dtl_lock);
@@ -2274,7 +2248,7 @@ vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
         }
         mutex_exit(&vd->vdev_dtl_lock);
     } else {
-        for (c = 0; c < vd->vdev_children; c++) {
+        for (int c = 0; c < vd->vdev_children; c++) {
             vdev_t *cvd = vd->vdev_child[c];
             uint64_t cmin, cmax;
@@ -2296,12 +2270,10 @@ vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
 void
 vdev_load(vdev_t *vd)
 {
-    int c;
-
     /*
      * Recursively load all children.
      */
-    for (c = 0; c < vd->vdev_children; c++)
+    for (int c = 0; c < vd->vdev_children; c++)
         vdev_load(vd->vdev_child[c]);

     /*
@@ -2368,7 +2340,6 @@ vdev_remove(vdev_t *vd, uint64_t txg)
     spa_t *spa = vd->vdev_spa;
     objset_t *mos = spa->spa_meta_objset;
     dmu_tx_t *tx;
-    int m, i;

     tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
     ASSERT(vd == vd->vdev_top);
@@ -2380,7 +2351,7 @@ vdev_remove(vdev_t *vd, uint64_t txg)
         metaslab_group_histogram_verify(mg);
         metaslab_class_histogram_verify(mg->mg_class);

-        for (m = 0; m < vd->vdev_ms_count; m++) {
+        for (int m = 0; m < vd->vdev_ms_count; m++) {
             metaslab_t *msp = vd->vdev_ms[m];

             if (msp == NULL || msp->ms_sm == NULL)
@@ -2405,7 +2376,7 @@ vdev_remove(vdev_t *vd, uint64_t txg)
         metaslab_group_histogram_verify(mg);
         metaslab_class_histogram_verify(mg->mg_class);

-        for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
+        for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
             ASSERT0(mg->mg_histogram[i]);
     }
@@ -2778,7 +2749,6 @@ void
 vdev_clear(spa_t *spa, vdev_t *vd)
 {
     vdev_t *rvd = spa->spa_root_vdev;
-    int c;

     ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
@@ -2789,7 +2759,7 @@ vdev_clear(spa_t *spa, vdev_t *vd)
     vd->vdev_stat.vs_write_errors = 0;
     vd->vdev_stat.vs_checksum_errors = 0;

-    for (c = 0; c < vd->vdev_children; c++)
+    for (int c = 0; c < vd->vdev_children; c++)
         vdev_clear(spa, vd->vdev_child[c]);

     /*
@@ -2949,7 +2919,7 @@ vdev_get_child_stat_ex(vdev_t *cvd, vdev_stat_ex_t *vsx, vdev_stat_ex_t *cvsx)
 static void
 vdev_get_stats_ex_impl(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
 {
-    int c, t;
+    int t;

     /*
      * If we're getting stats on the root vdev, aggregate the I/O counts
      * over all top-level vdevs (i.e. the direct children of the root).
@@ -2962,7 +2932,7 @@ vdev_get_stats_ex_impl(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
         if (vsx)
             memset(vsx, 0, sizeof (*vsx));

-        for (c = 0; c < vd->vdev_children; c++) {
+        for (int c = 0; c < vd->vdev_children; c++) {
             vdev_t *cvd = vd->vdev_child[c];
             vdev_stat_t *cvs = &cvd->vdev_stat;
             vdev_stat_ex_t *cvsx = &cvd->vdev_stat_ex;
@@ -3049,9 +3019,8 @@ void
 vdev_scan_stat_init(vdev_t *vd)
 {
     vdev_stat_t *vs = &vd->vdev_stat;
-    int c;

-    for (c = 0; c < vd->vdev_children; c++)
+    for (int c = 0; c < vd->vdev_children; c++)
         vdev_scan_stat_init(vd->vdev_child[c]);

     mutex_enter(&vd->vdev_stat_lock);
@@ -3418,10 +3387,9 @@ vdev_propagate_state(vdev_t *vd)
     int degraded = 0, faulted = 0;
     int corrupted = 0;
     vdev_t *child;
-    int c;

     if (vd->vdev_children > 0) {
-        for (c = 0; c < vd->vdev_children; c++) {
+        for (int c = 0; c < vd->vdev_children; c++) {
child = vd->vdev_child[c]; child = vd->vdev_child[c];
/* /*
@ -3650,14 +3618,12 @@ vdev_is_bootable(vdev_t *vd)
void void
vdev_load_log_state(vdev_t *nvd, vdev_t *ovd) vdev_load_log_state(vdev_t *nvd, vdev_t *ovd)
{ {
int c;
ASSERT(nvd->vdev_top->vdev_islog); ASSERT(nvd->vdev_top->vdev_islog);
ASSERT(spa_config_held(nvd->vdev_spa, ASSERT(spa_config_held(nvd->vdev_spa,
SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
ASSERT3U(nvd->vdev_guid, ==, ovd->vdev_guid); ASSERT3U(nvd->vdev_guid, ==, ovd->vdev_guid);
for (c = 0; c < nvd->vdev_children; c++) for (int c = 0; c < nvd->vdev_children; c++)
vdev_load_log_state(nvd->vdev_child[c], ovd->vdev_child[c]); vdev_load_log_state(nvd->vdev_child[c], ovd->vdev_child[c]);
if (nvd->vdev_ops->vdev_op_leaf) { if (nvd->vdev_ops->vdev_op_leaf) {
@ -3679,13 +3645,11 @@ vdev_load_log_state(vdev_t *nvd, vdev_t *ovd)
boolean_t boolean_t
vdev_log_state_valid(vdev_t *vd) vdev_log_state_valid(vdev_t *vd)
{ {
int c;
if (vd->vdev_ops->vdev_op_leaf && !vd->vdev_faulted && if (vd->vdev_ops->vdev_op_leaf && !vd->vdev_faulted &&
!vd->vdev_removed) !vd->vdev_removed)
return (B_TRUE); return (B_TRUE);
for (c = 0; c < vd->vdev_children; c++) for (int c = 0; c < vd->vdev_children; c++)
if (vdev_log_state_valid(vd->vdev_child[c])) if (vdev_log_state_valid(vd->vdev_child[c]))
return (B_TRUE); return (B_TRUE);
@ -3729,9 +3693,7 @@ vdev_split(vdev_t *vd)
void void
vdev_deadman(vdev_t *vd) vdev_deadman(vdev_t *vd)
{ {
int c; for (int c = 0; c < vd->vdev_children; c++) {
for (c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c]; vdev_t *cvd = vd->vdev_child[c];
vdev_deadman(cvd); vdev_deadman(cvd);
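
All of the vdev.c hunks above are the same mechanical change: the loop
counter moves from a block-top "int c;" (or "uint64_t i;") into the
for-initializer, which C99 allows and which confines the counter to the
loop body. A minimal before/after sketch, using an invented node_t
rather than the real vdev_t:

#include <stddef.h>

typedef struct node {
    int           n_children;
    struct node **n_child;
} node_t;

/* C89: the counter must be declared at the top of the block. */
static void
walk_c89(node_t *n)
{
    int c;

    for (c = 0; c < n->n_children; c++)
        walk_c89(n->n_child[c]);
}

/* C99: the counter exists only inside the loop, as in the hunks above. */
static void
walk_c99(node_t *n)
{
    for (int c = 0; c < n->n_children; c++)
        walk_c99(n->n_child[c]);
}

int
main(void)
{
    node_t leaf = { 0, NULL };

    walk_c89(&leaf);
    walk_c99(&leaf);
    return (0);
}

The narrower scope also rules out accidentally reusing a stale counter
value in a later, unrelated loop in the same function.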

View File

@ -215,7 +215,6 @@ vdev_cache_fill(zio_t *fio)
vdev_cache_t *vc = &vd->vdev_cache; vdev_cache_t *vc = &vd->vdev_cache;
vdev_cache_entry_t *ve = fio->io_private; vdev_cache_entry_t *ve = fio->io_private;
zio_t *pio; zio_t *pio;
zio_link_t *zl;
ASSERT3U(fio->io_size, ==, VCBS); ASSERT3U(fio->io_size, ==, VCBS);
@ -235,7 +234,7 @@ vdev_cache_fill(zio_t *fio)
* any reads that were queued up before the missed update are still * any reads that were queued up before the missed update are still
* valid, so we can satisfy them from this line before we evict it. * valid, so we can satisfy them from this line before we evict it.
*/ */
zl = NULL; zio_link_t *zl = NULL;
while ((pio = zio_walk_parents(fio, &zl)) != NULL) while ((pio = zio_walk_parents(fio, &zl)) != NULL)
vdev_cache_hit(vc, ve, pio); vdev_cache_hit(vc, ve, pio);
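
The vdev_cache.c hunk shows the commit's other recurring pattern: a
variable (here the zio_link_t cursor) is declared and initialized at its
first use rather than at the top of the function, which C89 would reject
as a declaration after statements. A self-contained sketch of the walker
idiom, with walk_next() as an invented stand-in for zio_walk_parents():

#include <stdio.h>

typedef struct item {
    struct item *i_next;
    int          i_val;
} item_t;

/* Stand-in walker: *curp advances one element per call and the walk
 * ends when it returns NULL. */
static item_t *
walk_next(item_t *head, item_t **curp)
{
    *curp = (*curp == NULL) ? head : (*curp)->i_next;
    return (*curp);
}

int
main(void)
{
    item_t c = { NULL, 3 }, b = { &c, 2 }, a = { &b, 1 };
    item_t *it;

    printf("walking:\n");

    /* C99: the cursor is declared (and zeroed) right where the walk
     * starts, after executable statements. */
    item_t *cur = NULL;
    while ((it = walk_next(&a, &cur)) != NULL)
        printf("%d\n", it->i_val);
    return (0);
}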

View File

@ -602,7 +602,6 @@ vdev_label_read_config(vdev_t *vd, uint64_t txg)
int error = 0; int error = 0;
int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL | int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
ZIO_FLAG_SPECULATIVE; ZIO_FLAG_SPECULATIVE;
int l;
ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
@ -613,7 +612,7 @@ vdev_label_read_config(vdev_t *vd, uint64_t txg)
vp = abd_to_buf(vp_abd); vp = abd_to_buf(vp_abd);
retry: retry:
for (l = 0; l < VDEV_LABELS; l++) { for (int l = 0; l < VDEV_LABELS; l++) {
nvlist_t *label = NULL; nvlist_t *label = NULL;
zio = zio_root(spa, NULL, NULL, flags); zio = zio_root(spa, NULL, NULL, flags);
@ -803,12 +802,10 @@ vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason)
int error; int error;
uint64_t spare_guid = 0, l2cache_guid = 0; uint64_t spare_guid = 0, l2cache_guid = 0;
int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL; int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
int c, l;
vdev_t *pvd;
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
for (c = 0; c < vd->vdev_children; c++) for (int c = 0; c < vd->vdev_children; c++)
if ((error = vdev_label_init(vd->vdev_child[c], if ((error = vdev_label_init(vd->vdev_child[c],
crtxg, reason)) != 0) crtxg, reason)) != 0)
return (error); return (error);
@ -844,7 +841,7 @@ vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason)
vd->vdev_guid += guid_delta; vd->vdev_guid += guid_delta;
for (pvd = vd; pvd != NULL; pvd = pvd->vdev_parent) for (vdev_t *pvd = vd; pvd != NULL; pvd = pvd->vdev_parent)
pvd->vdev_guid_sum += guid_delta; pvd->vdev_guid_sum += guid_delta;
/* /*
@ -864,7 +861,7 @@ vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason)
vd->vdev_guid += guid_delta; vd->vdev_guid += guid_delta;
for (pvd = vd; pvd != NULL; pvd = pvd->vdev_parent) for (vdev_t *pvd = vd; pvd != NULL; pvd = pvd->vdev_parent)
pvd->vdev_guid_sum += guid_delta; pvd->vdev_guid_sum += guid_delta;
/* /*
@ -966,7 +963,7 @@ vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason)
retry: retry:
zio = zio_root(spa, NULL, NULL, flags); zio = zio_root(spa, NULL, NULL, flags);
for (l = 0; l < VDEV_LABELS; l++) { for (int l = 0; l < VDEV_LABELS; l++) {
vdev_label_write(zio, vd, l, vp_abd, vdev_label_write(zio, vd, l, vp_abd,
offsetof(vdev_label_t, vl_vdev_phys), offsetof(vdev_label_t, vl_vdev_phys),
@ -1251,11 +1248,10 @@ vdev_uberblock_sync_list(vdev_t **svd, int svdcount, uberblock_t *ub, int flags)
spa_t *spa = svd[0]->vdev_spa; spa_t *spa = svd[0]->vdev_spa;
zio_t *zio; zio_t *zio;
uint64_t good_writes = 0; uint64_t good_writes = 0;
int v;
zio = zio_root(spa, NULL, &good_writes, flags); zio = zio_root(spa, NULL, &good_writes, flags);
for (v = 0; v < svdcount; v++) for (int v = 0; v < svdcount; v++)
vdev_uberblock_sync(zio, ub, svd[v], flags); vdev_uberblock_sync(zio, ub, svd[v], flags);
(void) zio_wait(zio); (void) zio_wait(zio);
@ -1267,7 +1263,7 @@ vdev_uberblock_sync_list(vdev_t **svd, int svdcount, uberblock_t *ub, int flags)
*/ */
zio = zio_root(spa, NULL, NULL, flags); zio = zio_root(spa, NULL, NULL, flags);
for (v = 0; v < svdcount; v++) for (int v = 0; v < svdcount; v++)
zio_flush(zio, svd[v]); zio_flush(zio, svd[v]);
(void) zio_wait(zio); (void) zio_wait(zio);
@ -1321,9 +1317,8 @@ vdev_label_sync(zio_t *zio, vdev_t *vd, int l, uint64_t txg, int flags)
abd_t *vp_abd; abd_t *vp_abd;
char *buf; char *buf;
size_t buflen; size_t buflen;
int c;
for (c = 0; c < vd->vdev_children; c++) for (int c = 0; c < vd->vdev_children; c++)
vdev_label_sync(zio, vd->vdev_child[c], l, txg, flags); vdev_label_sync(zio, vd->vdev_child[c], l, txg, flags);
if (!vd->vdev_ops->vdev_op_leaf) if (!vd->vdev_ops->vdev_op_leaf)
@ -1373,12 +1368,11 @@ vdev_label_sync_list(spa_t *spa, int l, uint64_t txg, int flags)
for (vd = list_head(dl); vd != NULL; vd = list_next(dl, vd)) { for (vd = list_head(dl); vd != NULL; vd = list_next(dl, vd)) {
uint64_t *good_writes; uint64_t *good_writes;
zio_t *vio;
ASSERT(!vd->vdev_ishole); ASSERT(!vd->vdev_ishole);
good_writes = kmem_zalloc(sizeof (uint64_t), KM_SLEEP); good_writes = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
vio = zio_null(zio, spa, NULL, zio_t *vio = zio_null(zio, spa, NULL,
(vd->vdev_islog || vd->vdev_aux != NULL) ? (vd->vdev_islog || vd->vdev_aux != NULL) ?
vdev_label_sync_ignore_done : vdev_label_sync_top_done, vdev_label_sync_ignore_done : vdev_label_sync_top_done,
good_writes, flags); good_writes, flags);

View File

@ -282,7 +282,6 @@ vdev_mirror_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
{ {
int numerrors = 0; int numerrors = 0;
int lasterror = 0; int lasterror = 0;
int c;
if (vd->vdev_children == 0) { if (vd->vdev_children == 0) {
vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL; vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
@ -291,7 +290,7 @@ vdev_mirror_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
vdev_open_children(vd); vdev_open_children(vd);
for (c = 0; c < vd->vdev_children; c++) { for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c]; vdev_t *cvd = vd->vdev_child[c];
if (cvd->vdev_open_error) { if (cvd->vdev_open_error) {
@ -316,9 +315,7 @@ vdev_mirror_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
static void static void
vdev_mirror_close(vdev_t *vd) vdev_mirror_close(vdev_t *vd)
{ {
int c; for (int c = 0; c < vd->vdev_children; c++)
for (c = 0; c < vd->vdev_children; c++)
vdev_close(vd->vdev_child[c]); vdev_close(vd->vdev_child[c]);
} }
@ -538,9 +535,9 @@ vdev_mirror_io_start(zio_t *zio)
static int static int
vdev_mirror_worst_error(mirror_map_t *mm) vdev_mirror_worst_error(mirror_map_t *mm)
{ {
int c, error[2] = { 0, 0 }; int error[2] = { 0, 0 };
for (c = 0; c < mm->mm_children; c++) { for (int c = 0; c < mm->mm_children; c++) {
mirror_child_t *mc = &mm->mm_child[c]; mirror_child_t *mc = &mm->mm_child[c];
int s = mc->mc_speculative; int s = mc->mc_speculative;
error[s] = zio_worst_error(error[s], mc->mc_error); error[s] = zio_worst_error(error[s], mc->mc_error);

View File

@ -400,9 +400,8 @@ void
vdev_queue_fini(vdev_t *vd) vdev_queue_fini(vdev_t *vd)
{ {
vdev_queue_t *vq = &vd->vdev_queue; vdev_queue_t *vq = &vd->vdev_queue;
zio_priority_t p;
for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) for (zio_priority_t p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++)
avl_destroy(vdev_queue_class_tree(vq, p)); avl_destroy(vdev_queue_class_tree(vq, p));
avl_destroy(&vq->vq_active_tree); avl_destroy(&vq->vq_active_tree);
avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_READ)); avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_READ));
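
vdev_queue_fini() (and zio_checksum_templates_free() further down) show
that the loop-scoped declaration works for enum-typed counters too, not
just int. A trivial sketch with an invented priority enum whose trailing
_NUM entry supplies the bound, mirroring ZIO_PRIORITY_NUM_QUEUEABLE:

#include <stdio.h>

typedef enum prio {
    PRIO_SYNC_READ = 0,
    PRIO_SYNC_WRITE,
    PRIO_ASYNC_READ,
    PRIO_NUM
} prio_t;

int
main(void)
{
    /* C99: the enum-typed counter is scoped to the loop itself. */
    for (prio_t p = 0; p < PRIO_NUM; p++)
        printf("tearing down queue %d\n", (int)p);
    return (0);
}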

View File

@ -695,9 +695,8 @@ vdev_raidz_reconst_p_func(void *dbuf, void *sbuf, size_t size, void *private)
uint64_t *dst = dbuf; uint64_t *dst = dbuf;
uint64_t *src = sbuf; uint64_t *src = sbuf;
int cnt = size / sizeof (src[0]); int cnt = size / sizeof (src[0]);
int i;
for (i = 0; i < cnt; i++) { for (int i = 0; i < cnt; i++) {
dst[i] ^= src[i]; dst[i] ^= src[i];
} }
@ -713,9 +712,8 @@ vdev_raidz_reconst_q_pre_func(void *dbuf, void *sbuf, size_t size,
uint64_t *src = sbuf; uint64_t *src = sbuf;
uint64_t mask; uint64_t mask;
int cnt = size / sizeof (dst[0]); int cnt = size / sizeof (dst[0]);
int i;
for (i = 0; i < cnt; i++, dst++, src++) { for (int i = 0; i < cnt; i++, dst++, src++) {
VDEV_RAIDZ_64MUL_2(*dst, mask); VDEV_RAIDZ_64MUL_2(*dst, mask);
*dst ^= *src; *dst ^= *src;
} }
@ -730,9 +728,8 @@ vdev_raidz_reconst_q_pre_tail_func(void *buf, size_t size, void *private)
uint64_t *dst = buf; uint64_t *dst = buf;
uint64_t mask; uint64_t mask;
int cnt = size / sizeof (dst[0]); int cnt = size / sizeof (dst[0]);
int i;
for (i = 0; i < cnt; i++, dst++) { for (int i = 0; i < cnt; i++, dst++) {
/* same operation as vdev_raidz_reconst_q_pre_func() on dst */ /* same operation as vdev_raidz_reconst_q_pre_func() on dst */
VDEV_RAIDZ_64MUL_2(*dst, mask); VDEV_RAIDZ_64MUL_2(*dst, mask);
} }
@ -751,9 +748,8 @@ vdev_raidz_reconst_q_post_func(void *buf, size_t size, void *private)
struct reconst_q_struct *rq = private; struct reconst_q_struct *rq = private;
uint64_t *dst = buf; uint64_t *dst = buf;
int cnt = size / sizeof (dst[0]); int cnt = size / sizeof (dst[0]);
int i;
for (i = 0; i < cnt; i++, dst++, rq->q++) { for (int i = 0; i < cnt; i++, dst++, rq->q++) {
int j; int j;
uint8_t *b; uint8_t *b;
@ -781,9 +777,8 @@ vdev_raidz_reconst_pq_func(void *xbuf, void *ybuf, size_t size, void *private)
struct reconst_pq_struct *rpq = private; struct reconst_pq_struct *rpq = private;
uint8_t *xd = xbuf; uint8_t *xd = xbuf;
uint8_t *yd = ybuf; uint8_t *yd = ybuf;
int i;
for (i = 0; i < size; for (int i = 0; i < size;
i++, rpq->p++, rpq->q++, rpq->pxy++, rpq->qxy++, xd++, yd++) { i++, rpq->p++, rpq->q++, rpq->pxy++, rpq->qxy++, xd++, yd++) {
*xd = vdev_raidz_exp2(*rpq->p ^ *rpq->pxy, rpq->aexp) ^ *xd = vdev_raidz_exp2(*rpq->p ^ *rpq->pxy, rpq->aexp) ^
vdev_raidz_exp2(*rpq->q ^ *rpq->qxy, rpq->bexp); vdev_raidz_exp2(*rpq->q ^ *rpq->qxy, rpq->bexp);
@ -798,9 +793,8 @@ vdev_raidz_reconst_pq_tail_func(void *xbuf, size_t size, void *private)
{ {
struct reconst_pq_struct *rpq = private; struct reconst_pq_struct *rpq = private;
uint8_t *xd = xbuf; uint8_t *xd = xbuf;
int i;
for (i = 0; i < size; for (int i = 0; i < size;
i++, rpq->p++, rpq->q++, rpq->pxy++, rpq->qxy++, xd++) { i++, rpq->p++, rpq->q++, rpq->pxy++, rpq->qxy++, xd++) {
/* same operation as vdev_raidz_reconst_pq_func() on xd */ /* same operation as vdev_raidz_reconst_pq_func() on xd */
*xd = vdev_raidz_exp2(*rpq->p ^ *rpq->pxy, rpq->aexp) ^ *xd = vdev_raidz_exp2(*rpq->p ^ *rpq->pxy, rpq->aexp) ^
@ -852,7 +846,6 @@ vdev_raidz_reconstruct_q(raidz_map_t *rm, int *tgts, int ntgts)
int x = tgts[0]; int x = tgts[0];
int c, exp; int c, exp;
abd_t *dst, *src; abd_t *dst, *src;
struct reconst_q_struct rq;
ASSERT(ntgts == 1); ASSERT(ntgts == 1);
@ -884,9 +877,8 @@ vdev_raidz_reconstruct_q(raidz_map_t *rm, int *tgts, int ntgts)
src = rm->rm_col[VDEV_RAIDZ_Q].rc_abd; src = rm->rm_col[VDEV_RAIDZ_Q].rc_abd;
dst = rm->rm_col[x].rc_abd; dst = rm->rm_col[x].rc_abd;
exp = 255 - (rm->rm_cols - 1 - x); exp = 255 - (rm->rm_cols - 1 - x);
rq.q = abd_to_buf(src);
rq.exp = exp;
struct reconst_q_struct rq = { abd_to_buf(src), exp };
(void) abd_iterate_func(dst, 0, rm->rm_col[x].rc_size, (void) abd_iterate_func(dst, 0, rm->rm_col[x].rc_size,
vdev_raidz_reconst_q_post_func, &rq); vdev_raidz_reconst_q_post_func, &rq);
@ -902,7 +894,6 @@ vdev_raidz_reconstruct_pq(raidz_map_t *rm, int *tgts, int ntgts)
int x = tgts[0]; int x = tgts[0];
int y = tgts[1]; int y = tgts[1];
abd_t *xd, *yd; abd_t *xd, *yd;
struct reconst_pq_struct rpq;
ASSERT(ntgts == 2); ASSERT(ntgts == 2);
ASSERT(x < y); ASSERT(x < y);
@ -965,12 +956,7 @@ vdev_raidz_reconstruct_pq(raidz_map_t *rm, int *tgts, int ntgts)
bexp = vdev_raidz_log2[vdev_raidz_exp2(b, tmp)]; bexp = vdev_raidz_log2[vdev_raidz_exp2(b, tmp)];
ASSERT3U(xsize, >=, ysize); ASSERT3U(xsize, >=, ysize);
rpq.p = p; struct reconst_pq_struct rpq = { p, q, pxy, qxy, aexp, bexp };
rpq.q = q;
rpq.pxy = pxy;
rpq.qxy = qxy;
rpq.aexp = aexp;
rpq.bexp = bexp;
(void) abd_iterate_func2(xd, yd, 0, 0, ysize, (void) abd_iterate_func2(xd, yd, 0, 0, ysize,
vdev_raidz_reconst_pq_func, &rpq); vdev_raidz_reconst_pq_func, &rpq);
@ -1781,11 +1767,10 @@ raidz_checksum_verify(zio_t *zio)
{ {
zio_bad_cksum_t zbc; zio_bad_cksum_t zbc;
raidz_map_t *rm = zio->io_vsd; raidz_map_t *rm = zio->io_vsd;
int ret;
bzero(&zbc, sizeof (zio_bad_cksum_t)); bzero(&zbc, sizeof (zio_bad_cksum_t));
ret = zio_checksum_error(zio, &zbc); int ret = zio_checksum_error(zio, &zbc);
if (ret != 0 && zbc.zbc_injected != 0) if (ret != 0 && zbc.zbc_injected != 0)
rm->rm_ecksuminjected = 1; rm->rm_ecksuminjected = 1;
@ -1841,9 +1826,9 @@ raidz_parity_verify(zio_t *zio, raidz_map_t *rm)
static int static int
vdev_raidz_worst_error(raidz_map_t *rm) vdev_raidz_worst_error(raidz_map_t *rm)
{ {
int c, error = 0; int error = 0;
for (c = 0; c < rm->rm_cols; c++) for (int c = 0; c < rm->rm_cols; c++)
error = zio_worst_error(error, rm->rm_col[c].rc_error); error = zio_worst_error(error, rm->rm_col[c].rc_error);
return (error); return (error);
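
Two raidz hunks go a step beyond loop counters: the reconst_q_struct and
reconst_pq_struct locals are now built with an initializer list at the
declaration instead of field-by-field assignment. The old style was not
just taste; C89 required constant expressions in initializers for
automatic aggregates, so values like abd_to_buf(src) forced the
assignment form, a restriction C99 lifts. A sketch with an invented
struct reconst:

#include <stdint.h>
#include <stdio.h>

/* Invented stand-in for reconst_pq_struct; field order matters for the
 * positional initializer below. */
struct reconst {
    uint8_t *p, *q, *pxy, *qxy;
    int      aexp, bexp;
};

int
main(void)
{
    uint8_t pb = 1, qb = 2, pxyb = 3, qxyb = 4;

    /* C89 style: declare up top, then assign each member. */
    struct reconst r89;
    r89.p = &pb;
    r89.q = &qb;
    r89.pxy = &pxyb;
    r89.qxy = &qxyb;
    r89.aexp = 3;
    r89.bexp = 5;

    /* C99 style, as in the hunks: one initializer at the declaration.
     * The designated form { .p = &pb, .bexp = 5 } is also legal. */
    struct reconst r99 = { &pb, &qb, &pxyb, &qxyb, 3, 5 };

    printf("%d %d\n", r89.aexp, r99.bexp);
    return (0);
}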

View File

@ -59,7 +59,6 @@ vdev_root_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
{ {
int lasterror = 0; int lasterror = 0;
int numerrors = 0; int numerrors = 0;
int c;
if (vd->vdev_children == 0) { if (vd->vdev_children == 0) {
vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL; vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
@ -68,7 +67,7 @@ vdev_root_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
vdev_open_children(vd); vdev_open_children(vd);
for (c = 0; c < vd->vdev_children; c++) { for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c]; vdev_t *cvd = vd->vdev_child[c];
if (cvd->vdev_open_error && !cvd->vdev_islog) { if (cvd->vdev_open_error && !cvd->vdev_islog) {
@ -92,9 +91,7 @@ vdev_root_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
static void static void
vdev_root_close(vdev_t *vd) vdev_root_close(vdev_t *vd)
{ {
int c; for (int c = 0; c < vd->vdev_children; c++)
for (c = 0; c < vd->vdev_children; c++)
vdev_close(vd->vdev_child[c]); vdev_close(vd->vdev_child[c]);
} }

View File

@ -513,7 +513,6 @@ zap_get_leaf_byblk(zap_t *zap, uint64_t blkid, dmu_tx_t *tx, krw_t lt,
zap_leaf_t *l; zap_leaf_t *l;
int bs = FZAP_BLOCK_SHIFT(zap); int bs = FZAP_BLOCK_SHIFT(zap);
int err; int err;
dnode_t *dn;
ASSERT(RW_LOCK_HELD(&zap->zap_rwlock)); ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
@ -527,7 +526,7 @@ zap_get_leaf_byblk(zap_t *zap, uint64_t blkid, dmu_tx_t *tx, krw_t lt,
if (blkid == 0) if (blkid == 0)
return (SET_ERROR(ENOENT)); return (SET_ERROR(ENOENT));
dn = dmu_buf_dnode_enter(zap->zap_dbuf); dnode_t *dn = dmu_buf_dnode_enter(zap->zap_dbuf);
err = dmu_buf_hold_by_dnode(dn, err = dmu_buf_hold_by_dnode(dn,
blkid << bs, NULL, &db, DMU_READ_NO_PREFETCH); blkid << bs, NULL, &db, DMU_READ_NO_PREFETCH);
dmu_buf_dnode_exit(zap->zap_dbuf); dmu_buf_dnode_exit(zap->zap_dbuf);

View File

@ -541,11 +541,10 @@ zap_lockdir_impl(dmu_buf_t *db, void *tag, dmu_tx_t *tx,
zap->zap_m.zap_num_entries == zap->zap_m.zap_num_chunks) { zap->zap_m.zap_num_entries == zap->zap_m.zap_num_chunks) {
uint64_t newsz = db->db_size + SPA_MINBLOCKSIZE; uint64_t newsz = db->db_size + SPA_MINBLOCKSIZE;
if (newsz > MZAP_MAX_BLKSZ) { if (newsz > MZAP_MAX_BLKSZ) {
int err;
dprintf("upgrading obj %llu: num_entries=%u\n", dprintf("upgrading obj %llu: num_entries=%u\n",
obj, zap->zap_m.zap_num_entries); obj, zap->zap_m.zap_num_entries);
*zapp = zap; *zapp = zap;
err = mzap_upgrade(zapp, tag, tx, 0); int err = mzap_upgrade(zapp, tag, tx, 0);
if (err != 0) if (err != 0)
rw_exit(&zap->zap_rwlock); rw_exit(&zap->zap_rwlock);
return (err); return (err);

View File

@ -304,10 +304,8 @@ void
feature_sync(spa_t *spa, zfeature_info_t *feature, uint64_t refcount, feature_sync(spa_t *spa, zfeature_info_t *feature, uint64_t refcount,
dmu_tx_t *tx) dmu_tx_t *tx)
{ {
uint64_t zapobj;
ASSERT(VALID_FEATURE_OR_NONE(feature->fi_feature)); ASSERT(VALID_FEATURE_OR_NONE(feature->fi_feature));
zapobj = (feature->fi_flags & ZFEATURE_FLAG_READONLY_COMPAT) ? uint64_t zapobj = (feature->fi_flags & ZFEATURE_FLAG_READONLY_COMPAT) ?
spa->spa_feat_for_write_obj : spa->spa_feat_for_read_obj; spa->spa_feat_for_write_obj : spa->spa_feat_for_read_obj;
VERIFY0(zap_update(spa->spa_meta_objset, zapobj, feature->fi_guid, VERIFY0(zap_update(spa->spa_meta_objset, zapobj, feature->fi_guid,
sizeof (uint64_t), 1, &refcount, tx)); sizeof (uint64_t), 1, &refcount, tx));
@ -343,7 +341,6 @@ feature_enable_sync(spa_t *spa, zfeature_info_t *feature, dmu_tx_t *tx)
(feature->fi_flags & ZFEATURE_FLAG_ACTIVATE_ON_ENABLE) ? 1 : 0; (feature->fi_flags & ZFEATURE_FLAG_ACTIVATE_ON_ENABLE) ? 1 : 0;
uint64_t zapobj = (feature->fi_flags & ZFEATURE_FLAG_READONLY_COMPAT) ? uint64_t zapobj = (feature->fi_flags & ZFEATURE_FLAG_READONLY_COMPAT) ?
spa->spa_feat_for_write_obj : spa->spa_feat_for_read_obj; spa->spa_feat_for_write_obj : spa->spa_feat_for_read_obj;
int i;
ASSERT(0 != zapobj); ASSERT(0 != zapobj);
ASSERT(zfeature_is_valid_guid(feature->fi_guid)); ASSERT(zfeature_is_valid_guid(feature->fi_guid));
@ -355,7 +352,7 @@ feature_enable_sync(spa_t *spa, zfeature_info_t *feature, dmu_tx_t *tx)
if (zap_contains(spa->spa_meta_objset, zapobj, feature->fi_guid) == 0) if (zap_contains(spa->spa_meta_objset, zapobj, feature->fi_guid) == 0)
return; return;
for (i = 0; feature->fi_depends[i] != SPA_FEATURE_NONE; i++) for (int i = 0; feature->fi_depends[i] != SPA_FEATURE_NONE; i++)
spa_feature_enable(spa, feature->fi_depends[i], tx); spa_feature_enable(spa, feature->fi_depends[i], tx);
VERIFY0(zap_update(spa->spa_meta_objset, spa->spa_feat_desc_obj, VERIFY0(zap_update(spa->spa_meta_objset, spa->spa_feat_desc_obj,
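
feature_sync() applies the declare-at-first-use move to a conditional:
the zapobj selection collapses into the declaration's initializer
instead of a separate "uint64_t zapobj;" plus a later assignment. A
trivial sketch with invented flag and object names:

#include <stdint.h>
#include <stdio.h>

#define FLAG_READONLY_COMPAT (1u << 0) /* invented flag bit */

int
main(void)
{
    unsigned flags = FLAG_READONLY_COMPAT;
    uint64_t for_write_obj = 42, for_read_obj = 7;

    /* C99: initialize directly from the ternary at the declaration. */
    uint64_t zapobj = (flags & FLAG_READONLY_COMPAT) ?
        for_write_obj : for_read_obj;

    printf("%llu\n", (unsigned long long)zapobj);
    return (0);
}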

View File

@ -525,13 +525,12 @@ zei_shrink_ranges(zfs_ecksum_info_t *eip)
uint32_t end = r[idx].zr_end; uint32_t end = r[idx].zr_end;
while (idx < max - 1) { while (idx < max - 1) {
uint32_t nstart, nend, gap;
idx++; idx++;
nstart = r[idx].zr_start;
nend = r[idx].zr_end;
gap = nstart - end; uint32_t nstart = r[idx].zr_start;
uint32_t nend = r[idx].zr_end;
uint32_t gap = nstart - end;
if (gap < new_allowed_gap) { if (gap < new_allowed_gap) {
end = nend; end = nend;
continue; continue;

View File

@ -1020,9 +1020,8 @@ static int
zfs_secpolicy_bookmark(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr) zfs_secpolicy_bookmark(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{ {
int error = 0; int error = 0;
nvpair_t *pair;
for (pair = nvlist_next_nvpair(innvl, NULL); for (nvpair_t *pair = nvlist_next_nvpair(innvl, NULL);
pair != NULL; pair = nvlist_next_nvpair(innvl, pair)) { pair != NULL; pair = nvlist_next_nvpair(innvl, pair)) {
char *name = nvpair_name(pair); char *name = nvpair_name(pair);
char *hashp = strchr(name, '#'); char *hashp = strchr(name, '#');
@ -3390,7 +3389,7 @@ zfs_ioc_snapshot(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl)
nvlist_t *snaps; nvlist_t *snaps;
nvlist_t *props = NULL; nvlist_t *props = NULL;
int error, poollen; int error, poollen;
nvpair_t *pair, *pair2; nvpair_t *pair;
(void) nvlist_lookup_nvlist(innvl, "props", &props); (void) nvlist_lookup_nvlist(innvl, "props", &props);
if ((error = zfs_check_userprops(poolname, props)) != 0) if ((error = zfs_check_userprops(poolname, props)) != 0)
@ -3424,7 +3423,7 @@ zfs_ioc_snapshot(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl)
return (SET_ERROR(EXDEV)); return (SET_ERROR(EXDEV));
/* This must be the only snap of this fs. */ /* This must be the only snap of this fs. */
for (pair2 = nvlist_next_nvpair(snaps, pair); for (nvpair_t *pair2 = nvlist_next_nvpair(snaps, pair);
pair2 != NULL; pair2 = nvlist_next_nvpair(snaps, pair2)) { pair2 != NULL; pair2 = nvlist_next_nvpair(snaps, pair2)) {
if (strncmp(name, nvpair_name(pair2), cp - name + 1) if (strncmp(name, nvpair_name(pair2), cp - name + 1)
== 0) { == 0) {
@ -3582,9 +3581,7 @@ zfs_ioc_destroy_snaps(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl)
static int static int
zfs_ioc_bookmark(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl) zfs_ioc_bookmark(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl)
{ {
nvpair_t *pair, *pair2; for (nvpair_t *pair = nvlist_next_nvpair(innvl, NULL);
for (pair = nvlist_next_nvpair(innvl, NULL);
pair != NULL; pair = nvlist_next_nvpair(innvl, pair)) { pair != NULL; pair = nvlist_next_nvpair(innvl, pair)) {
char *snap_name; char *snap_name;
@ -3596,7 +3593,7 @@ zfs_ioc_bookmark(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl)
/* Verify that the keys (bookmarks) are unique */ /* Verify that the keys (bookmarks) are unique */
for (pair2 = nvlist_next_nvpair(innvl, pair); for (nvpair_t *pair2 = nvlist_next_nvpair(innvl, pair);
pair2 != NULL; pair2 = nvlist_next_nvpair(innvl, pair2)) { pair2 != NULL; pair2 = nvlist_next_nvpair(innvl, pair2)) {
if (strcmp(nvpair_name(pair), nvpair_name(pair2)) == 0) if (strcmp(nvpair_name(pair), nvpair_name(pair2)) == 0)
return (SET_ERROR(EINVAL)); return (SET_ERROR(EINVAL));
@ -3636,10 +3633,9 @@ zfs_ioc_destroy_bookmarks(const char *poolname, nvlist_t *innvl,
nvlist_t *outnvl) nvlist_t *outnvl)
{ {
int error, poollen; int error, poollen;
nvpair_t *pair;
poollen = strlen(poolname); poollen = strlen(poolname);
for (pair = nvlist_next_nvpair(innvl, NULL); for (nvpair_t *pair = nvlist_next_nvpair(innvl, NULL);
pair != NULL; pair = nvlist_next_nvpair(innvl, pair)) { pair != NULL; pair = nvlist_next_nvpair(innvl, pair)) {
const char *name = nvpair_name(pair); const char *name = nvpair_name(pair);
const char *cp = strchr(name, '#'); const char *cp = strchr(name, '#');
@ -5191,17 +5187,15 @@ zfs_ioc_userspace_many(zfs_cmd_t *zc)
{ {
zfsvfs_t *zfsvfs; zfsvfs_t *zfsvfs;
int bufsize = zc->zc_nvlist_dst_size; int bufsize = zc->zc_nvlist_dst_size;
int error;
void *buf;
if (bufsize <= 0) if (bufsize <= 0)
return (SET_ERROR(ENOMEM)); return (SET_ERROR(ENOMEM));
error = zfsvfs_hold(zc->zc_name, FTAG, &zfsvfs, B_FALSE); int error = zfsvfs_hold(zc->zc_name, FTAG, &zfsvfs, B_FALSE);
if (error != 0) if (error != 0)
return (error); return (error);
buf = vmem_alloc(bufsize, KM_SLEEP); void *buf = vmem_alloc(bufsize, KM_SLEEP);
error = zfs_userspace_many(zfsvfs, zc->zc_objset_type, &zc->zc_cookie, error = zfs_userspace_many(zfsvfs, zc->zc_objset_type, &zc->zc_cookie,
buf, &zc->zc_nvlist_dst_size); buf, &zc->zc_nvlist_dst_size);
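
The zfs_ioctl.c hunks move the nvpair cursors the same way; note how
pair2 is now declared in the inner for and starts just past the outer
cursor, the usual quadratic duplicate-key scan over a list that has no
random access. The same shape on a plain string array (contents
invented):

#include <stdio.h>
#include <string.h>

static const char *names[] = {
    "pool/fs#a", "pool/fs#b", "pool/fs#a", NULL
};

int
main(void)
{
    /* C99 keeps each cursor local to its own loop; the inner scan
     * resumes just past the outer cursor, as in zfs_ioc_bookmark(). */
    for (int i = 0; names[i] != NULL; i++)
        for (int j = i + 1; names[j] != NULL; j++)
            if (strcmp(names[i], names[j]) == 0)
                printf("duplicate: %s\n", names[i]);
    return (0);
}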

View File

@ -550,9 +550,8 @@ boolean_t
zilog_is_dirty(zilog_t *zilog) zilog_is_dirty(zilog_t *zilog)
{ {
dsl_pool_t *dp = zilog->zl_dmu_pool; dsl_pool_t *dp = zilog->zl_dmu_pool;
int t;
for (t = 0; t < TXG_SIZE; t++) { for (int t = 0; t < TXG_SIZE; t++) {
if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t)) if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t))
return (B_TRUE); return (B_TRUE);
} }
@ -1872,7 +1871,6 @@ zilog_t *
zil_alloc(objset_t *os, zil_header_t *zh_phys) zil_alloc(objset_t *os, zil_header_t *zh_phys)
{ {
zilog_t *zilog; zilog_t *zilog;
int i;
zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP); zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);
@ -1887,7 +1885,7 @@ zil_alloc(objset_t *os, zil_header_t *zh_phys)
mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL); mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);
for (i = 0; i < TXG_SIZE; i++) { for (int i = 0; i < TXG_SIZE; i++) {
mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL, mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL,
MUTEX_DEFAULT, NULL); MUTEX_DEFAULT, NULL);
} }

View File

@ -560,7 +560,6 @@ void
zio_add_child(zio_t *pio, zio_t *cio) zio_add_child(zio_t *pio, zio_t *cio)
{ {
zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP); zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);
int w;
/* /*
* Logical I/Os can have logical, gang, or vdev children. * Logical I/Os can have logical, gang, or vdev children.
@ -578,7 +577,7 @@ zio_add_child(zio_t *pio, zio_t *cio)
ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0); ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);
for (w = 0; w < ZIO_WAIT_TYPES; w++) for (int w = 0; w < ZIO_WAIT_TYPES; w++)
pio->io_children[cio->io_child_type][w] += !cio->io_state[w]; pio->io_children[cio->io_child_type][w] += !cio->io_state[w];
list_insert_head(&pio->io_child_list, zl); list_insert_head(&pio->io_child_list, zl);
@ -831,8 +830,6 @@ zio_root(spa_t *spa, zio_done_func_t *done, void *private, enum zio_flag flags)
void void
zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp) zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp)
{ {
int i;
if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) { if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) {
zfs_panic_recover("blkptr at %p has invalid TYPE %llu", zfs_panic_recover("blkptr at %p has invalid TYPE %llu",
bp, (longlong_t)BP_GET_TYPE(bp)); bp, (longlong_t)BP_GET_TYPE(bp));
@ -871,17 +868,16 @@ zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp)
* allows the birth time of log blocks (and dmu_sync()-ed blocks * allows the birth time of log blocks (and dmu_sync()-ed blocks
* that are in the log) to be arbitrarily large. * that are in the log) to be arbitrarily large.
*/ */
for (i = 0; i < BP_GET_NDVAS(bp); i++) { for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
uint64_t vdevid = DVA_GET_VDEV(&bp->blk_dva[i]); uint64_t vdevid = DVA_GET_VDEV(&bp->blk_dva[i]);
vdev_t *vd;
uint64_t offset, asize;
if (vdevid >= spa->spa_root_vdev->vdev_children) { if (vdevid >= spa->spa_root_vdev->vdev_children) {
zfs_panic_recover("blkptr at %p DVA %u has invalid " zfs_panic_recover("blkptr at %p DVA %u has invalid "
"VDEV %llu", "VDEV %llu",
bp, i, (longlong_t)vdevid); bp, i, (longlong_t)vdevid);
continue; continue;
} }
vd = spa->spa_root_vdev->vdev_child[vdevid]; vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
if (vd == NULL) { if (vd == NULL) {
zfs_panic_recover("blkptr at %p DVA %u has invalid " zfs_panic_recover("blkptr at %p DVA %u has invalid "
"VDEV %llu", "VDEV %llu",
@ -902,8 +898,8 @@ zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp)
*/ */
continue; continue;
} }
offset = DVA_GET_OFFSET(&bp->blk_dva[i]); uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
asize = DVA_GET_ASIZE(&bp->blk_dva[i]); uint64_t asize = DVA_GET_ASIZE(&bp->blk_dva[i]);
if (BP_IS_GANG(bp)) if (BP_IS_GANG(bp))
asize = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE); asize = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
if (offset + asize > vd->vdev_asize) { if (offset + asize > vd->vdev_asize) {
@ -1501,11 +1497,8 @@ zio_write_compress(zio_t *zio)
* in that we charge for the padding used to fill out * in that we charge for the padding used to fill out
* the last sector. * the last sector.
*/ */
size_t rounded;
ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT); ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
size_t rounded = (size_t)P2ROUNDUP(psize,
rounded = (size_t)P2ROUNDUP(psize,
1ULL << spa->spa_min_ashift); 1ULL << spa->spa_min_ashift);
if (rounded >= lsize) { if (rounded >= lsize) {
compress = ZIO_COMPRESS_OFF; compress = ZIO_COMPRESS_OFF;
@ -1545,8 +1538,8 @@ zio_write_compress(zio_t *zio)
if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg && if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg &&
BP_GET_PSIZE(bp) == psize && BP_GET_PSIZE(bp) == psize &&
pass >= zfs_sync_pass_rewrite) { pass >= zfs_sync_pass_rewrite) {
enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;
ASSERT(psize != 0); ASSERT(psize != 0);
enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;
zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages; zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
zio->io_flags |= ZIO_FLAG_IO_REWRITE; zio->io_flags |= ZIO_FLAG_IO_REWRITE;
} else { } else {
@ -1654,9 +1647,8 @@ zio_taskq_member(zio_t *zio, zio_taskq_type_t q)
{ {
kthread_t *executor = zio->io_executor; kthread_t *executor = zio->io_executor;
spa_t *spa = zio->io_spa; spa_t *spa = zio->io_spa;
zio_type_t t;
for (t = 0; t < ZIO_TYPES; t++) { for (zio_type_t t = 0; t < ZIO_TYPES; t++) {
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q]; spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
uint_t i; uint_t i;
for (i = 0; i < tqs->stqs_count; i++) { for (i = 0; i < tqs->stqs_count; i++) {
@ -1940,8 +1932,6 @@ static void
zio_reexecute(zio_t *pio) zio_reexecute(zio_t *pio)
{ {
zio_t *cio, *cio_next; zio_t *cio, *cio_next;
int c, w;
zio_link_t *zl = NULL;
ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL); ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN); ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN);
@ -1955,9 +1945,9 @@ zio_reexecute(zio_t *pio)
pio->io_flags |= ZIO_FLAG_REEXECUTED; pio->io_flags |= ZIO_FLAG_REEXECUTED;
pio->io_pipeline_trace = 0; pio->io_pipeline_trace = 0;
pio->io_error = 0; pio->io_error = 0;
for (w = 0; w < ZIO_WAIT_TYPES; w++) for (int w = 0; w < ZIO_WAIT_TYPES; w++)
pio->io_state[w] = 0; pio->io_state[w] = 0;
for (c = 0; c < ZIO_CHILD_TYPES; c++) for (int c = 0; c < ZIO_CHILD_TYPES; c++)
pio->io_child_error[c] = 0; pio->io_child_error[c] = 0;
if (IO_IS_ALLOCATING(pio)) if (IO_IS_ALLOCATING(pio))
@ -1970,10 +1960,11 @@ zio_reexecute(zio_t *pio)
* the remainder of pio's io_child_list, from 'cio_next' onward, * the remainder of pio's io_child_list, from 'cio_next' onward,
* cannot be affected by any side effects of reexecuting 'cio'. * cannot be affected by any side effects of reexecuting 'cio'.
*/ */
zio_link_t *zl = NULL;
for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) { for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
cio_next = zio_walk_children(pio, &zl); cio_next = zio_walk_children(pio, &zl);
mutex_enter(&pio->io_lock); mutex_enter(&pio->io_lock);
for (w = 0; w < ZIO_WAIT_TYPES; w++) for (int w = 0; w < ZIO_WAIT_TYPES; w++)
pio->io_children[cio->io_child_type][w]++; pio->io_children[cio->io_child_type][w]++;
mutex_exit(&pio->io_lock); mutex_exit(&pio->io_lock);
zio_reexecute(cio); zio_reexecute(cio);
@ -2234,9 +2225,8 @@ static void
zio_gang_node_free(zio_gang_node_t **gnpp) zio_gang_node_free(zio_gang_node_t **gnpp)
{ {
zio_gang_node_t *gn = *gnpp; zio_gang_node_t *gn = *gnpp;
int g;
for (g = 0; g < SPA_GBH_NBLKPTRS; g++) for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
ASSERT(gn->gn_child[g] == NULL); ASSERT(gn->gn_child[g] == NULL);
zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE); zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE);
@ -2248,12 +2238,11 @@ static void
zio_gang_tree_free(zio_gang_node_t **gnpp) zio_gang_tree_free(zio_gang_node_t **gnpp)
{ {
zio_gang_node_t *gn = *gnpp; zio_gang_node_t *gn = *gnpp;
int g;
if (gn == NULL) if (gn == NULL)
return; return;
for (g = 0; g < SPA_GBH_NBLKPTRS; g++) for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
zio_gang_tree_free(&gn->gn_child[g]); zio_gang_tree_free(&gn->gn_child[g]);
zio_gang_node_free(gnpp); zio_gang_node_free(gnpp);
@ -2279,7 +2268,6 @@ zio_gang_tree_assemble_done(zio_t *zio)
zio_t *gio = zio->io_gang_leader; zio_t *gio = zio->io_gang_leader;
zio_gang_node_t *gn = zio->io_private; zio_gang_node_t *gn = zio->io_private;
blkptr_t *bp = zio->io_bp; blkptr_t *bp = zio->io_bp;
int g;
ASSERT(gio == zio_unique_parent(zio)); ASSERT(gio == zio_unique_parent(zio));
ASSERT(zio->io_child_count == 0); ASSERT(zio->io_child_count == 0);
@ -2297,7 +2285,7 @@ zio_gang_tree_assemble_done(zio_t *zio)
abd_put(zio->io_abd); abd_put(zio->io_abd);
for (g = 0; g < SPA_GBH_NBLKPTRS; g++) { for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g]; blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
if (!BP_IS_GANG(gbp)) if (!BP_IS_GANG(gbp))
continue; continue;
@ -2311,7 +2299,6 @@ zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, abd_t *data,
{ {
zio_t *gio = pio->io_gang_leader; zio_t *gio = pio->io_gang_leader;
zio_t *zio; zio_t *zio;
int g;
ASSERT(BP_IS_GANG(bp) == !!gn); ASSERT(BP_IS_GANG(bp) == !!gn);
ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp)); ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp));
@ -2326,7 +2313,7 @@ zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, abd_t *data,
if (gn != NULL) { if (gn != NULL) {
ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);
for (g = 0; g < SPA_GBH_NBLKPTRS; g++) { for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g]; blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
if (BP_IS_HOLE(gbp)) if (BP_IS_HOLE(gbp))
continue; continue;
@ -2387,7 +2374,6 @@ zio_write_gang_member_ready(zio_t *zio)
dva_t *cdva = zio->io_bp->blk_dva; dva_t *cdva = zio->io_bp->blk_dva;
dva_t *pdva = pio->io_bp->blk_dva; dva_t *pdva = pio->io_bp->blk_dva;
uint64_t asize; uint64_t asize;
int d;
ASSERTV(zio_t *gio = zio->io_gang_leader); ASSERTV(zio_t *gio = zio->io_gang_leader);
if (BP_IS_HOLE(zio->io_bp)) if (BP_IS_HOLE(zio->io_bp))
@ -2402,7 +2388,7 @@ zio_write_gang_member_ready(zio_t *zio)
ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp)); ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp));
mutex_enter(&pio->io_lock); mutex_enter(&pio->io_lock);
for (d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) { for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) {
ASSERT(DVA_GET_GANG(&pdva[d])); ASSERT(DVA_GET_GANG(&pdva[d]));
asize = DVA_GET_ASIZE(&pdva[d]); asize = DVA_GET_ASIZE(&pdva[d]);
asize += DVA_GET_ASIZE(&cdva[d]); asize += DVA_GET_ASIZE(&cdva[d]);
@ -2434,8 +2420,7 @@ zio_write_gang_block(zio_t *pio)
int copies = gio->io_prop.zp_copies; int copies = gio->io_prop.zp_copies;
int gbh_copies; int gbh_copies;
zio_prop_t zp; zio_prop_t zp;
int g, error; int error;
int flags = METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER;
/* /*
* encrypted blocks need DVA[2] free so encrypted gang headers can't * encrypted blocks need DVA[2] free so encrypted gang headers can't
@ -2445,6 +2430,7 @@ zio_write_gang_block(zio_t *pio)
if (gio->io_prop.zp_encrypt && gbh_copies >= SPA_DVAS_PER_BP) if (gio->io_prop.zp_encrypt && gbh_copies >= SPA_DVAS_PER_BP)
gbh_copies = SPA_DVAS_PER_BP - 1; gbh_copies = SPA_DVAS_PER_BP - 1;
int flags = METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER;
if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(!(pio->io_flags & ZIO_FLAG_NODATA)); ASSERT(!(pio->io_flags & ZIO_FLAG_NODATA));
@ -2509,9 +2495,7 @@ zio_write_gang_block(zio_t *pio)
/* /*
* Create and nowait the gang children. * Create and nowait the gang children.
*/ */
for (g = 0; resid != 0; resid -= lsize, g++) { for (int g = 0; resid != 0; resid -= lsize, g++) {
zio_t *cio;
lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g), lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g),
SPA_MINBLOCKSIZE); SPA_MINBLOCKSIZE);
ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid); ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid);
@ -2530,7 +2514,7 @@ zio_write_gang_block(zio_t *pio)
bzero(zp.zp_iv, ZIO_DATA_IV_LEN); bzero(zp.zp_iv, ZIO_DATA_IV_LEN);
bzero(zp.zp_mac, ZIO_DATA_MAC_LEN); bzero(zp.zp_mac, ZIO_DATA_MAC_LEN);
cio = zio_write(zio, spa, txg, &gbh->zg_blkptr[g], zio_t *cio = zio_write(zio, spa, txg, &gbh->zg_blkptr[g],
abd_get_offset(pio->io_abd, pio->io_size - resid), lsize, abd_get_offset(pio->io_abd, pio->io_size - resid), lsize,
lsize, &zp, zio_write_gang_member_ready, NULL, NULL, lsize, &zp, zio_write_gang_member_ready, NULL, NULL,
zio_write_gang_done, &gn->gn_child[g], pio->io_priority, zio_write_gang_done, &gn->gn_child[g], pio->io_priority,
@ -2664,7 +2648,6 @@ static int
zio_ddt_read_start(zio_t *zio) zio_ddt_read_start(zio_t *zio)
{ {
blkptr_t *bp = zio->io_bp; blkptr_t *bp = zio->io_bp;
int p;
ASSERT(BP_GET_DEDUP(bp)); ASSERT(BP_GET_DEDUP(bp));
ASSERT(BP_GET_PSIZE(bp) == zio->io_size); ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
@ -2683,7 +2666,7 @@ zio_ddt_read_start(zio_t *zio)
if (ddp_self == NULL) if (ddp_self == NULL)
return (ZIO_PIPELINE_CONTINUE); return (ZIO_PIPELINE_CONTINUE);
for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
if (ddp->ddp_phys_birth == 0 || ddp == ddp_self) if (ddp->ddp_phys_birth == 0 || ddp == ddp_self)
continue; continue;
ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp, ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp,
@ -2746,7 +2729,6 @@ static boolean_t
zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde) zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde)
{ {
spa_t *spa = zio->io_spa; spa_t *spa = zio->io_spa;
int p;
boolean_t do_raw = !!(zio->io_flags & ZIO_FLAG_RAW); boolean_t do_raw = !!(zio->io_flags & ZIO_FLAG_RAW);
ASSERT(!(zio->io_bp_override && do_raw)); ASSERT(!(zio->io_bp_override && do_raw));
@ -2763,7 +2745,7 @@ zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde)
* loaded). * loaded).
*/ */
for (p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
zio_t *lio = dde->dde_lead_zio[p]; zio_t *lio = dde->dde_lead_zio[p];
if (lio != NULL && do_raw) { if (lio != NULL && do_raw) {
@ -2775,7 +2757,7 @@ zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde)
} }
} }
for (p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
ddt_phys_t *ddp = &dde->dde_phys[p]; ddt_phys_t *ddp = &dde->dde_phys[p];
if (ddp->ddp_phys_birth != 0 && do_raw) { if (ddp->ddp_phys_birth != 0 && do_raw) {
@ -2848,7 +2830,6 @@ zio_ddt_child_write_ready(zio_t *zio)
ddt_entry_t *dde = zio->io_private; ddt_entry_t *dde = zio->io_private;
ddt_phys_t *ddp = &dde->dde_phys[p]; ddt_phys_t *ddp = &dde->dde_phys[p];
zio_t *pio; zio_t *pio;
zio_link_t *zl;
if (zio->io_error) if (zio->io_error)
return; return;
@ -2859,7 +2840,7 @@ zio_ddt_child_write_ready(zio_t *zio)
ddt_phys_fill(ddp, zio->io_bp); ddt_phys_fill(ddp, zio->io_bp);
zl = NULL; zio_link_t *zl = NULL;
while ((pio = zio_walk_parents(zio, &zl)) != NULL) while ((pio = zio_walk_parents(zio, &zl)) != NULL)
ddt_bp_fill(ddp, pio->io_bp, zio->io_txg); ddt_bp_fill(ddp, pio->io_bp, zio->io_txg);
@ -2895,12 +2876,12 @@ static void
zio_ddt_ditto_write_done(zio_t *zio) zio_ddt_ditto_write_done(zio_t *zio)
{ {
int p = DDT_PHYS_DITTO; int p = DDT_PHYS_DITTO;
ASSERTV(zio_prop_t *zp = &zio->io_prop);
blkptr_t *bp = zio->io_bp; blkptr_t *bp = zio->io_bp;
ddt_t *ddt = ddt_select(zio->io_spa, bp); ddt_t *ddt = ddt_select(zio->io_spa, bp);
ddt_entry_t *dde = zio->io_private; ddt_entry_t *dde = zio->io_private;
ddt_phys_t *ddp = &dde->dde_phys[p]; ddt_phys_t *ddp = &dde->dde_phys[p];
ddt_key_t *ddk = &dde->dde_key; ddt_key_t *ddk = &dde->dde_key;
ASSERTV(zio_prop_t *zp = &zio->io_prop);
ddt_enter(ddt); ddt_enter(ddt);
@ -3225,8 +3206,6 @@ zio_dva_claim(zio_t *zio)
static void static void
zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp) zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
{ {
int g;
ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp)); ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp));
ASSERT(zio->io_bp_override == NULL); ASSERT(zio->io_bp_override == NULL);
@ -3234,7 +3213,7 @@ zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
metaslab_free(zio->io_spa, bp, bp->blk_birth, B_TRUE); metaslab_free(zio->io_spa, bp, bp->blk_birth, B_TRUE);
if (gn != NULL) { if (gn != NULL) {
for (g = 0; g < SPA_GBH_NBLKPTRS; g++) { for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
zio_dva_unallocate(zio, gn->gn_child[g], zio_dva_unallocate(zio, gn->gn_child[g],
&gn->gn_gbh->zg_blkptr[g]); &gn->gn_gbh->zg_blkptr[g]);
} }
@ -3955,10 +3934,10 @@ zio_ready(zio_t *zio)
static void static void
zio_dva_throttle_done(zio_t *zio) zio_dva_throttle_done(zio_t *zio)
{ {
ASSERTV(zio_t *lio = zio->io_logical);
zio_t *pio = zio_unique_parent(zio); zio_t *pio = zio_unique_parent(zio);
vdev_t *vd = zio->io_vd; vdev_t *vd = zio->io_vd;
int flags = METASLAB_ASYNC_ALLOC; int flags = METASLAB_ASYNC_ALLOC;
ASSERTV(zio_t *lio = zio->io_logical);
ASSERT3P(zio->io_bp, !=, NULL); ASSERT3P(zio->io_bp, !=, NULL);
ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE); ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
@ -4019,7 +3998,6 @@ zio_done(zio_t *zio)
*/ */
const uint64_t psize = zio->io_size; const uint64_t psize = zio->io_size;
zio_t *pio, *pio_next; zio_t *pio, *pio_next;
int c, w;
zio_link_t *zl = NULL; zio_link_t *zl = NULL;
/* /*
@ -4059,8 +4037,8 @@ zio_done(zio_t *zio)
} }
for (c = 0; c < ZIO_CHILD_TYPES; c++) for (int c = 0; c < ZIO_CHILD_TYPES; c++)
for (w = 0; w < ZIO_WAIT_TYPES; w++) for (int w = 0; w < ZIO_WAIT_TYPES; w++)
ASSERT(zio->io_children[c][w] == 0); ASSERT(zio->io_children[c][w] == 0);
if (zio->io_bp != NULL && !BP_IS_EMBEDDED(zio->io_bp)) { if (zio->io_bp != NULL && !BP_IS_EMBEDDED(zio->io_bp)) {
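
zio_reexecute() and zio_done() show the payoff when a function runs
several counting loops: each loop now owns its counter, so sibling loops
can reuse a name like "w" without sharing one block-top declaration, and
the zl cursor is declared immediately above the walk that consumes it. A
compact sketch with invented bounds standing in for ZIO_CHILD_TYPES and
ZIO_WAIT_TYPES:

#include <stdio.h>

#define CHILD_TYPES 3 /* invented, like ZIO_CHILD_TYPES */
#define WAIT_TYPES  2 /* invented, like ZIO_WAIT_TYPES */

int
main(void)
{
    int children[CHILD_TYPES][WAIT_TYPES];

    /* C99: nested counters, each scoped to its own loop. */
    for (int c = 0; c < CHILD_TYPES; c++)
        for (int w = 0; w < WAIT_TYPES; w++)
            children[c][w] = 0;

    /* The name "w" is free for reuse in a sibling loop. */
    for (int w = 0; w < WAIT_TYPES; w++)
        printf("wait type %d: %d\n", w, children[0][w]);
    return (0);
}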

View File

@ -553,9 +553,8 @@ zio_checksum_error(zio_t *zio, zio_bad_cksum_t *info)
void void
zio_checksum_templates_free(spa_t *spa) zio_checksum_templates_free(spa_t *spa)
{ {
enum zio_checksum checksum; for (enum zio_checksum checksum = 0;
for (checksum = 0; checksum < ZIO_CHECKSUM_FUNCTIONS; checksum < ZIO_CHECKSUM_FUNCTIONS; checksum++) {
checksum++) {
if (spa->spa_cksum_tmpls[checksum] != NULL) { if (spa->spa_cksum_tmpls[checksum] != NULL) {
zio_checksum_info_t *ci = &zio_checksum_table[checksum]; zio_checksum_info_t *ci = &zio_checksum_table[checksum];

View File

@ -89,9 +89,7 @@ static int
zio_compress_zeroed_cb(void *data, size_t len, void *private) zio_compress_zeroed_cb(void *data, size_t len, void *private)
{ {
uint64_t *end = (uint64_t *)((char *)data + len); uint64_t *end = (uint64_t *)((char *)data + len);
uint64_t *word; for (uint64_t *word = (uint64_t *)data; word < end; word++)
for (word = data; word < end; word++)
if (*word != 0) if (*word != 0)
return (1); return (1);
@ -103,7 +101,6 @@ zio_compress_data(enum zio_compress c, abd_t *src, void *dst, size_t s_len)
{ {
size_t c_len, d_len; size_t c_len, d_len;
zio_compress_info_t *ci = &zio_compress_table[c]; zio_compress_info_t *ci = &zio_compress_table[c];
void *tmp;
ASSERT((uint_t)c < ZIO_COMPRESS_FUNCTIONS); ASSERT((uint_t)c < ZIO_COMPRESS_FUNCTIONS);
ASSERT((uint_t)c == ZIO_COMPRESS_EMPTY || ci->ci_compress != NULL); ASSERT((uint_t)c == ZIO_COMPRESS_EMPTY || ci->ci_compress != NULL);
@ -122,7 +119,7 @@ zio_compress_data(enum zio_compress c, abd_t *src, void *dst, size_t s_len)
d_len = s_len - (s_len >> 3); d_len = s_len - (s_len >> 3);
/* No compression algorithms can read from ABDs directly */ /* No compression algorithms can read from ABDs directly */
tmp = abd_borrow_buf_copy(src, s_len); void *tmp = abd_borrow_buf_copy(src, s_len);
c_len = ci->ci_compress(tmp, dst, s_len, d_len, ci->ci_level); c_len = ci->ci_compress(tmp, dst, s_len, d_len, ci->ci_level);
abd_return_buf(src, tmp, s_len); abd_return_buf(src, tmp, s_len);
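
zio_compress_zeroed_cb() condenses to a pointer declared in the
for-initializer that scans the buffer a word at a time. A generic,
self-contained sketch of that scan outside the ABD machinery (the helper
name is invented, it returns 1 when the buffer is all zeros, and, like
the original callback, it assumes len is a multiple of the word size):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static int
is_zeroed(const void *data, size_t len)
{
    const uint64_t *end = (const uint64_t *)((const char *)data + len);

    /* C99: the word pointer lives only for the duration of the scan. */
    for (const uint64_t *word = data; word < end; word++)
        if (*word != 0)
            return (0);
    return (1);
}

int
main(void)
{
    uint64_t buf[4] = { 0, 0, 1, 0 };

    printf("%d\n", is_zeroed(buf, sizeof (buf)));
    return (0);
}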

View File

@ -472,10 +472,6 @@ zio_handle_io_delay(zio_t *zio)
vdev_t *vd = zio->io_vd; vdev_t *vd = zio->io_vd;
inject_handler_t *min_handler = NULL; inject_handler_t *min_handler = NULL;
hrtime_t min_target = 0; hrtime_t min_target = 0;
inject_handler_t *handler;
hrtime_t idle;
hrtime_t busy;
hrtime_t target;
rw_enter(&inject_lock, RW_READER); rw_enter(&inject_lock, RW_READER);
@ -528,7 +524,7 @@ zio_handle_io_delay(zio_t *zio)
*/ */
mutex_enter(&inject_delay_mtx); mutex_enter(&inject_delay_mtx);
for (handler = list_head(&inject_handlers); for (inject_handler_t *handler = list_head(&inject_handlers);
handler != NULL; handler = list_next(&inject_handlers, handler)) { handler != NULL; handler = list_next(&inject_handlers, handler)) {
if (handler->zi_record.zi_cmd != ZINJECT_DELAY_IO) if (handler->zi_record.zi_cmd != ZINJECT_DELAY_IO)
continue; continue;
@ -580,10 +576,10 @@ zio_handle_io_delay(zio_t *zio)
* each lane will become idle, we use that value to * each lane will become idle, we use that value to
* determine when this request should complete. * determine when this request should complete.
*/ */
idle = handler->zi_record.zi_timer + gethrtime(); hrtime_t idle = handler->zi_record.zi_timer + gethrtime();
busy = handler->zi_record.zi_timer + hrtime_t busy = handler->zi_record.zi_timer +
handler->zi_lanes[handler->zi_next_lane]; handler->zi_lanes[handler->zi_next_lane];
target = MAX(idle, busy); hrtime_t target = MAX(idle, busy);
if (min_handler == NULL) { if (min_handler == NULL) {
min_handler = handler; min_handler = handler;

View File

@ -157,11 +157,9 @@ zrl_exit(zrlock_t *zrl)
int int
zrl_refcount(zrlock_t *zrl) zrl_refcount(zrlock_t *zrl)
{ {
int n;
ASSERT3S(zrl->zr_refcount, >, ZRL_DESTROYED); ASSERT3S(zrl->zr_refcount, >, ZRL_DESTROYED);
n = (int)zrl->zr_refcount; int n = (int)zrl->zr_refcount;
return (n <= 0 ? 0 : n); return (n <= 0 ? 0 : n);
} }