Mirror of https://git.proxmox.com/git/mirror_zfs.git
Refactor dnode dirty context from dbuf_dirty
* Add dedicated dnode_set_dirtyctx routine.
* Add empty dirty record on destroy assertion.
* Make much more extensive use of the SET_ERROR macro.

Reviewed-by: Will Andrews <wca@FreeBSD.org>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Matthew Ahrens <mahrens@delphix.com>
Signed-off-by: Matt Macy <mmacy@FreeBSD.org>
Closes #9924
commit 28caa74b19
parent 647ff8e975
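Below is a minimal sketch of the two patterns this commit applies, assembled from the hunks that follow (illustrative only; surrounding kernel code is elided, and the SET_ERROR note reflects the usual OpenZFS convention rather than anything introduced here):

	/* New helper declared alongside the other dnode prototypes. */
	void dnode_set_dirtyctx(dnode_t *dn, dmu_tx_t *tx, void *tag);

	/*
	 * Caller side in dbuf_dirty(): the open-coded dirty-context logic is
	 * replaced by one call under dn_mtx, with the dbuf as the debug tag.
	 */
	mutex_enter(&dn->dn_mtx);
	dnode_set_dirtyctx(dn, tx, db);
	if (tx->tx_txg > dn->dn_dirty_txg)
		dn->dn_dirty_txg = tx->tx_txg;
	mutex_exit(&dn->dn_mtx);

	/*
	 * SET_ERROR(err) evaluates to err, so the return value is unchanged;
	 * wrapping it makes the failure site observable via static tracing.
	 */
	return (*objectp == 0 ? SET_ERROR(ENOTSUP) : 0);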
@@ -333,7 +333,7 @@ struct dnode {
 	uint64_t dn_dirty_txg;			/* txg dnode was last dirtied */
 	kcondvar_t dn_notxholds;
 	enum dnode_dirtycontext dn_dirtyctx;
-	uint8_t *dn_dirtyctx_firstset;		/* dbg: contents meaningless */
+	void *dn_dirtyctx_firstset;		/* dbg: contents meaningless */
 
 	/* protected by own devices */
 	zfs_refcount_t dn_tx_holds;
@@ -425,6 +425,7 @@ void dnode_rele(dnode_t *dn, void *ref);
 void dnode_rele_and_unlock(dnode_t *dn, void *tag, boolean_t evicting);
 int dnode_try_claim(objset_t *os, uint64_t object, int slots);
 void dnode_setdirty(dnode_t *dn, dmu_tx_t *tx);
+void dnode_set_dirtyctx(dnode_t *dn, dmu_tx_t *tx, void *tag);
 void dnode_sync(dnode_t *dn, dmu_tx_t *tx);
 void dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
     dmu_object_type_t bonustype, int bonuslen, int dn_slots, dmu_tx_t *tx);
@@ -990,7 +990,7 @@ zfs_check_global_label(const char *dsname, const char *hexsl)
 		if (dsl_prop_get_integer(dsname,
 		    zfs_prop_to_name(ZFS_PROP_READONLY), &rdonly, NULL))
 			return (SET_ERROR(EACCES));
-		return (rdonly ? 0 : EACCES);
+		return (rdonly ? 0 : SET_ERROR(EACCES));
 	}
 	return (SET_ERROR(EACCES));
 }
@@ -1681,8 +1681,8 @@ dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
 		DBUF_STAT_BUMP(hash_misses);
 
 		/* Skip the wait per the caller's request. */
-		mutex_enter(&db->db_mtx);
 		if ((flags & DB_RF_NEVERWAIT) == 0) {
+			mutex_enter(&db->db_mtx);
 			while (db->db_state == DB_READ ||
 			    db->db_state == DB_FILL) {
 				ASSERT(db->db_state == DB_READ ||
@@ -1693,9 +1693,9 @@ dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
 			}
 			if (db->db_state == DB_UNCACHED)
 				err = SET_ERROR(EIO);
+			mutex_exit(&db->db_mtx);
 		}
-		mutex_exit(&db->db_mtx);
 	}
 
 	return (err);
 }
@@ -1872,7 +1872,7 @@ dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
 void
 dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
 {
-	arc_buf_t *buf, *obuf;
+	arc_buf_t *buf, *old_buf;
 	dbuf_dirty_record_t *dr;
 	int osize = db->db.db_size;
 	arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
@@ -1893,15 +1893,15 @@ dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
 	buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size);
 
 	/* copy old block data to the new block */
-	obuf = db->db_buf;
-	bcopy(obuf->b_data, buf->b_data, MIN(osize, size));
+	old_buf = db->db_buf;
+	bcopy(old_buf->b_data, buf->b_data, MIN(osize, size));
 	/* zero the remainder */
 	if (size > osize)
 		bzero((uint8_t *)buf->b_data + osize, size - osize);
 
 	mutex_enter(&db->db_mtx);
 	dbuf_set_data(db, buf);
-	arc_buf_destroy(obuf, db);
+	arc_buf_destroy(old_buf, db);
 	db->db.db_size = size;
 
 	dr = list_head(&db->db_dirty_records);
@@ -2009,27 +2009,7 @@ dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
 	    db->db_state == DB_NOFILL);
 
 	mutex_enter(&dn->dn_mtx);
-	/*
-	 * Don't set dirtyctx to SYNC if we're just modifying this as we
-	 * initialize the objset.
-	 */
-	if (dn->dn_dirtyctx == DN_UNDIRTIED) {
-		if (dn->dn_objset->os_dsl_dataset != NULL) {
-			rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
-			    RW_READER, FTAG);
-		}
-		if (!BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
-			dn->dn_dirtyctx = (dmu_tx_is_syncing(tx) ?
-			    DN_DIRTY_SYNC : DN_DIRTY_OPEN);
-			ASSERT(dn->dn_dirtyctx_firstset == NULL);
-			dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP);
-		}
-		if (dn->dn_objset->os_dsl_dataset != NULL) {
-			rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
-			    FTAG);
-		}
-	}
-
+	dnode_set_dirtyctx(dn, tx, db);
 	if (tx->tx_txg > dn->dn_dirty_txg)
 		dn->dn_dirty_txg = tx->tx_txg;
 	mutex_exit(&dn->dn_mtx);
@ -2715,6 +2695,7 @@ dbuf_destroy(dmu_buf_impl_t *db)
|
||||
|
||||
ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
|
||||
ASSERT(db->db_data_pending == NULL);
|
||||
ASSERT(list_is_empty(&db->db_dirty_records));
|
||||
|
||||
db->db_state = DB_EVICTING;
|
||||
DTRACE_SET_STATE(db, "buffer eviction started");
|
||||
@@ -4603,7 +4584,7 @@ dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
 	uint64_t txg = tx->tx_txg;
 	zbookmark_phys_t zb;
 	zio_prop_t zp;
-	zio_t *zio;
+	zio_t *pio; /* parent I/O */
 	int wp_flag = 0;
 
 	ASSERT(dmu_tx_is_syncing(tx));
@@ -4640,7 +4621,7 @@ dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
 		 * our block pointer, so the parent must be released.
 		 */
 		ASSERT(arc_released(parent->db_buf));
-		zio = parent->db_data_pending->dr_zio;
+		pio = parent->db_data_pending->dr_zio;
 	} else {
 		/* Our parent is the dnode itself. */
 		ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
@@ -4649,12 +4630,12 @@ dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
 		if (db->db_blkid != DMU_SPILL_BLKID)
 			ASSERT3P(db->db_blkptr, ==,
 			    &dn->dn_phys->dn_blkptr[db->db_blkid]);
-		zio = dn->dn_zio;
+		pio = dn->dn_zio;
 	}
 
 	ASSERT(db->db_level == 0 || data == db->db_buf);
 	ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
-	ASSERT(zio);
+	ASSERT(pio);
 
 	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
 	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
@@ -4684,9 +4665,9 @@ dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
 		abd_t *contents = (data != NULL) ?
 		    abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL;
 
-		dr->dr_zio = zio_write(zio, os->os_spa, txg,
-		    &dr->dr_bp_copy, contents, db->db.db_size, db->db.db_size,
-		    &zp, dbuf_write_override_ready, NULL, NULL,
+		dr->dr_zio = zio_write(pio, os->os_spa, txg, &dr->dr_bp_copy,
+		    contents, db->db.db_size, db->db.db_size, &zp,
+		    dbuf_write_override_ready, NULL, NULL,
 		    dbuf_write_override_done,
 		    dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
 		mutex_enter(&db->db_mtx);
@@ -4697,7 +4678,7 @@ dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
 	} else if (db->db_state == DB_NOFILL) {
 		ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF ||
 		    zp.zp_checksum == ZIO_CHECKSUM_NOPARITY);
-		dr->dr_zio = zio_write(zio, os->os_spa, txg,
+		dr->dr_zio = zio_write(pio, os->os_spa, txg,
 		    &dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp,
 		    dbuf_write_nofill_ready, NULL, NULL,
 		    dbuf_write_nofill_done, db,
@@ -4715,7 +4696,7 @@ dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
 		if (db->db_level != 0)
 			children_ready_cb = dbuf_write_children_ready;
 
-		dr->dr_zio = arc_write(zio, os->os_spa, txg,
+		dr->dr_zio = arc_write(pio, os->os_spa, txg,
 		    &dr->dr_bp_copy, data, DBUF_IS_L2CACHEABLE(db),
 		    &zp, dbuf_write_ready,
 		    children_ready_cb, dbuf_write_physdone,
@@ -46,7 +46,7 @@ ddt_zap_create(objset_t *os, uint64_t *objectp, dmu_tx_t *tx, boolean_t prehash)
 	    ddt_zap_leaf_blockshift, ddt_zap_indirect_blockshift,
 	    DMU_OT_NONE, 0, tx);
 
-	return (*objectp == 0 ? ENOTSUP : 0);
+	return (*objectp == 0 ? SET_ERROR(ENOTSUP) : 0);
 }
 
 static int
@@ -2464,7 +2464,7 @@ dmu_send_impl(struct dmu_send_params *dspp)
 		err = do_dump(&dsc, range);
 		range = get_next_range(&spt_arg->q, range);
 		if (issig(JUSTLOOKING) && issig(FORREAL))
-			err = EINTR;
+			err = SET_ERROR(EINTR);
 	}
 
 	/*
@@ -1183,7 +1183,7 @@ dmu_tx_abort(dmu_tx_t *tx)
 	 * Call any registered callbacks with an error code.
 	 */
 	if (!list_is_empty(&tx->tx_callbacks))
-		dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED);
+		dmu_tx_do_callbacks(&tx->tx_callbacks, SET_ERROR(ECANCELED));
 
 	dmu_tx_destroy(tx);
 }
@@ -541,10 +541,7 @@ dnode_destroy(dnode_t *dn)
 	dn->dn_dirty_txg = 0;
 
 	dn->dn_dirtyctx = 0;
-	if (dn->dn_dirtyctx_firstset != NULL) {
-		kmem_free(dn->dn_dirtyctx_firstset, 1);
-		dn->dn_dirtyctx_firstset = NULL;
-	}
+	dn->dn_dirtyctx_firstset = NULL;
 	if (dn->dn_bonus != NULL) {
 		mutex_enter(&dn->dn_bonus->db_mtx);
 		dbuf_destroy(dn->dn_bonus);
@@ -649,10 +646,7 @@ dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
 	dn->dn_dirtyctx = 0;
 
 	dn->dn_free_txg = 0;
-	if (dn->dn_dirtyctx_firstset) {
-		kmem_free(dn->dn_dirtyctx_firstset, 1);
-		dn->dn_dirtyctx_firstset = NULL;
-	}
+	dn->dn_dirtyctx_firstset = NULL;
 
 	dn->dn_allocated_txg = tx->tx_txg;
 	dn->dn_id_flags = 0;
@@ -2007,6 +2001,32 @@ dnode_dirty_l1range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
 	mutex_exit(&dn->dn_dbufs_mtx);
 }
 
+void
+dnode_set_dirtyctx(dnode_t *dn, dmu_tx_t *tx, void *tag)
+{
+	/*
+	 * Don't set dirtyctx to SYNC if we're just modifying this as we
+	 * initialize the objset.
+	 */
+	if (dn->dn_dirtyctx == DN_UNDIRTIED) {
+		dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
+
+		if (ds != NULL) {
+			rrw_enter(&ds->ds_bp_rwlock, RW_READER, tag);
+		}
+		if (!BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
+			if (dmu_tx_is_syncing(tx))
+				dn->dn_dirtyctx = DN_DIRTY_SYNC;
+			else
+				dn->dn_dirtyctx = DN_DIRTY_OPEN;
+			dn->dn_dirtyctx_firstset = tag;
+		}
+		if (ds != NULL) {
+			rrw_exit(&ds->ds_bp_rwlock, tag);
+		}
+	}
+}
+
 void
 dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx)
 {
@@ -88,7 +88,7 @@ dsl_bookmark_lookup_impl(dsl_dataset_t *ds, const char *shortname,
 	    sizeof (*bmark_phys) / sizeof (uint64_t), bmark_phys, mt, NULL, 0,
 	    NULL);
 
-	return (err == ENOENT ? ESRCH : err);
+	return (err == ENOENT ? SET_ERROR(ESRCH) : err);
 }
 
 /*
@@ -2286,7 +2286,7 @@ get_clones_stat_impl(dsl_dataset_t *ds, nvlist_t *val)
 		    &count));
 	}
 	if (count != dsl_dataset_phys(ds)->ds_num_children - 1) {
-		return (ENOENT);
+		return (SET_ERROR(ENOENT));
 	}
 	for (zap_cursor_init(&zc, mos,
 	    dsl_dataset_phys(ds)->ds_next_clones_obj);
@@ -2660,7 +2660,7 @@ dsl_get_prev_snap(dsl_dataset_t *ds, char *snap)
 		dsl_dataset_name(ds->ds_prev, snap);
 		return (0);
 	} else {
-		return (ENOENT);
+		return (SET_ERROR(ENOENT));
 	}
 }
 
@@ -1058,7 +1058,7 @@ dsl_dir_get_filesystem_count(dsl_dir_t *dd, uint64_t *count)
 		return (zap_lookup(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT,
 		    sizeof (*count), 1, count));
 	} else {
-		return (ENOENT);
+		return (SET_ERROR(ENOENT));
 	}
 }
 
@@ -1070,7 +1070,7 @@ dsl_dir_get_snapshot_count(dsl_dir_t *dd, uint64_t *count)
 		return (zap_lookup(os, dd->dd_object, DD_FIELD_SNAPSHOT_COUNT,
 		    sizeof (*count), 1, count));
 	} else {
-		return (ENOENT);
+		return (SET_ERROR(ENOENT));
 	}
 }
 
@@ -1315,7 +1315,7 @@ top_of_function:
 
 		if (avail < quota) {
 			quota = avail;
-			retval = ENOSPC;
+			retval = SET_ERROR(ENOSPC);
 		}
 	}
 
@@ -73,7 +73,7 @@ int
 dsl_prop_get_dd(dsl_dir_t *dd, const char *propname,
     int intsz, int numints, void *buf, char *setpoint, boolean_t snapshot)
 {
-	int err = ENOENT;
+	int err;
 	dsl_dir_t *target = dd;
 	objset_t *mos = dd->dd_pool->dp_meta_objset;
 	zfs_prop_t prop;
@@ -98,8 +98,10 @@ dsl_prop_get_dd(dsl_dir_t *dd, const char *propname,
 	 */
 	for (; dd != NULL; dd = dd->dd_parent) {
 		if (dd != target || snapshot) {
-			if (!inheritable)
+			if (!inheritable) {
+				err = SET_ERROR(ENOENT);
 				break;
+			}
 			inheriting = B_TRUE;
 		}
 
@@ -807,7 +807,7 @@ dsl_scan(dsl_pool_t *dp, pool_scan_func_t func)
 		    POOL_SCRUB_NORMAL);
 		if (err == 0) {
 			spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_RESUME);
-			return (ECANCELED);
+			return (SET_ERROR(ECANCELED));
 		}
 
 		return (SET_ERROR(err));
@@ -212,7 +212,7 @@ spa_checkpoint_discard_sync_callback(space_map_entry_t *sme, void *arg)
 	uint64_t end = sme->sme_offset + sme->sme_run;
 
 	if (sdc->sdc_entry_limit == 0)
-		return (EINTR);
+		return (SET_ERROR(EINTR));
 
 	/*
 	 * Since the space map is not condensed, we know that
@@ -1702,7 +1702,7 @@ vdev_open(vdev_t *vd)
 	 */
 	vd->vdev_reopening = B_FALSE;
 	if (zio_injection_enabled && error == 0)
-		error = zio_handle_device_injection(vd, NULL, ENXIO);
+		error = zio_handle_device_injection(vd, NULL, SET_ERROR(ENXIO));
 
 	if (error) {
 		if (vd->vdev_removed &&
@@ -2306,7 +2306,7 @@ vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing)
 
 	if (error || vd->vdev_state != VDEV_STATE_HEALTHY) {
 		vdev_close(vd);
-		return (error ? error : ENXIO);
+		return (error ? error : SET_ERROR(ENXIO));
 	}
 
 	/*
@@ -2929,8 +2929,10 @@ vdev_dtl_required(vdev_t *vd)
 	vd->vdev_cant_read = cant_read;
 	vdev_dtl_reassess(tvd, 0, 0, B_FALSE);
 
-	if (!required && zio_injection_enabled)
-		required = !!zio_handle_device_injection(vd, NULL, ECHILD);
+	if (!required && zio_injection_enabled) {
+		required = !!zio_handle_device_injection(vd, NULL,
+		    SET_ERROR(ECHILD));
+	}
 
 	return (required);
 }
@@ -3333,10 +3335,10 @@ vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux)
 	spa_vdev_state_enter(spa, SCL_NONE);
 
 	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
-		return (spa_vdev_state_exit(spa, NULL, ENODEV));
+		return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
 
 	if (!vd->vdev_ops->vdev_op_leaf)
-		return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
+		return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
 
 	tvd = vd->vdev_top;
 
@@ -3415,10 +3417,10 @@ vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux)
 	spa_vdev_state_enter(spa, SCL_NONE);
 
 	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
-		return (spa_vdev_state_exit(spa, NULL, ENODEV));
+		return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
 
 	if (!vd->vdev_ops->vdev_op_leaf)
-		return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
+		return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
 
 	/*
 	 * If the vdev is already faulted, then don't do anything.
@@ -3452,10 +3454,10 @@ vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate)
 	spa_vdev_state_enter(spa, SCL_NONE);
 
 	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
-		return (spa_vdev_state_exit(spa, NULL, ENODEV));
+		return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
 
 	if (!vd->vdev_ops->vdev_op_leaf)
-		return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
+		return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
 
 	wasoffline = (vd->vdev_offline || vd->vdev_tmpoffline);
 	oldstate = vd->vdev_state;
@@ -3537,10 +3539,10 @@ top:
 	spa_vdev_state_enter(spa, SCL_ALLOC);
 
 	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
-		return (spa_vdev_state_exit(spa, NULL, ENODEV));
+		return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
 
 	if (!vd->vdev_ops->vdev_op_leaf)
-		return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
+		return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
 
 	tvd = vd->vdev_top;
 	mg = tvd->vdev_mg;
@@ -3557,7 +3559,8 @@ top:
 		 */
 		if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
 		    vdev_dtl_required(vd))
-			return (spa_vdev_state_exit(spa, NULL, EBUSY));
+			return (spa_vdev_state_exit(spa, NULL,
+			    SET_ERROR(EBUSY)));
 
 		/*
 		 * If the top-level is a slog and it has had allocations
|
||||
vdev_is_dead(tvd)) {
|
||||
vd->vdev_offline = B_FALSE;
|
||||
vdev_reopen(tvd);
|
||||
return (spa_vdev_state_exit(spa, NULL, EBUSY));
|
||||
return (spa_vdev_state_exit(spa, NULL,
|
||||
SET_ERROR(EBUSY)));
|
||||
}
|
||||
|
||||
/*
|
||||
|
@@ -467,7 +467,7 @@ zap_leaf_lookup_closest(zap_leaf_t *l,
 		}
 	}
 
-	return (bestcd == -1U ? ENOENT : 0);
+	return (bestcd == -1U ? SET_ERROR(ENOENT) : 0);
 }
 
 int
@@ -396,7 +396,7 @@ zcp_lua_to_nvlist_impl(lua_State *state, int index, nvlist_t *nvl,
 	case LUA_TTABLE: {
 		nvlist_t *value_nvl = zcp_table_to_nvlist(state, index, depth);
 		if (value_nvl == NULL)
-			return (EINVAL);
+			return (SET_ERROR(EINVAL));
 
 		fnvlist_add_nvlist(nvl, key, value_nvl);
 		fnvlist_free(value_nvl);
@@ -406,7 +406,7 @@ zcp_lua_to_nvlist_impl(lua_State *state, int index, nvlist_t *nvl,
 		(void) lua_pushfstring(state,
 		    "Invalid value type '%s' for key '%s'",
 		    lua_typename(state, lua_type(state, index)), key);
-		return (EINVAL);
+		return (SET_ERROR(EINVAL));
 	}
 
 	return (0);
@@ -585,7 +585,7 @@ zcp_nvpair_value_to_lua(lua_State *state, nvpair_t *pair,
 		    "Unhandled nvpair type %d for key '%s'",
 		    nvpair_type(pair), nvpair_name(pair));
 		}
-		return (EINVAL);
+		return (SET_ERROR(EINVAL));
 	}
 	}
 	return (err);
@@ -207,7 +207,7 @@ get_dsl_dir_prop(dsl_dataset_t *ds, zfs_prop_t zfs_prop,
 		break;
 	default:
 		mutex_exit(&dd->dd_lock);
-		return (ENOENT);
+		return (SET_ERROR(ENOENT));
 	}
 	mutex_exit(&dd->dd_lock);
 	return (0);
@@ -669,7 +669,7 @@ zcp_get_userquota_prop(lua_State *state, dsl_pool_t *dp,
 
 	if ((value == 0) && ((type == ZFS_PROP_USERQUOTA) ||
 	    (type == ZFS_PROP_GROUPQUOTA)))
-		error = ENOENT;
+		error = SET_ERROR(ENOENT);
 	if (error != 0) {
 		return (zcp_handle_error(state, dataset_name,
 		    prop_name, error));
@@ -259,7 +259,7 @@ zcp_synctask_snapshot(lua_State *state, boolean_t sync, nvlist_t *err_details)
 	 * context.
 	 */
 	if (spa_version(ri->zri_pool->dp_spa) < SPA_VERSION_FAST_SNAP) {
-		return (ENOTSUP);
+		return (SET_ERROR(ENOTSUP));
 	}
 
 	/*
@@ -2292,7 +2292,7 @@ zfs_ioc_snapshot_list_next(zfs_cmd_t *zc)
 
 	error = dmu_objset_hold(zc->zc_name, FTAG, &os);
 	if (error != 0) {
-		return (error == ENOENT ? ESRCH : error);
+		return (error == ENOENT ? SET_ERROR(ESRCH) : error);
 	}
 
 	/*
@@ -5530,9 +5530,10 @@ zfs_ioc_clear(zfs_cmd_t *zc)
 	} else {
 		vd = spa_lookup_by_guid(spa, zc->zc_guid, B_TRUE);
 		if (vd == NULL) {
-			(void) spa_vdev_state_exit(spa, NULL, ENODEV);
+			error = SET_ERROR(ENODEV);
+			(void) spa_vdev_state_exit(spa, NULL, error);
 			spa_close(spa, FTAG);
-			return (SET_ERROR(ENODEV));
+			return (error);
 		}
 	}
 