3598 want to dtrace when errors are generated in zfs
Reviewed by: Dan Kimmel <dan.kimmel@delphix.com>
Reviewed by: Adam Leventhal <ahl@delphix.com>
Reviewed by: Christopher Siden <christopher.siden@delphix.com>
Approved by: Garrett D'Amore <garrett@damore.org>

References:
  https://www.illumos.org/issues/3598
  illumos/illumos-gate@be6fd75a69

Ported-by: Richard Yao <ryao@gentoo.org>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Issue #1775

Porting notes:

1. include/sys/zfs_context.h has been modified to render some new
   macros inert until dtrace is available on Linux.

2. Linux-specific changes have been adapted to use SET_ERROR().

3. I'm NOT happy about this change.  It does nothing but ugly
   up the code under Linux.  Unfortunately we need to take it to
   avoid more merge conflicts in the future.  -Brian
Authored by Matthew Ahrens on 2013-03-08 10:41:28 -08:00; committed by Brian Behlendorf
commit 2e528b49f8 (parent 7011fb6004)
56 changed files with 830 additions and 793 deletions
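For readers unfamiliar with the illumos change: every site that originates an error code is wrapped in SET_ERROR(), which fires a tracing hook before yielding the value unchanged, so the place where an error is first generated becomes observable. The sketch below only illustrates that shape; trace_error() and example_read() are made-up names, not part of this commit, and the real hook on illumos is an SDT probe rather than an fprintf.

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in for the SDT probe that fires on illumos. */
static void
trace_error(int err)
{
	(void) fprintf(stderr, "error %d generated here\n", err);
}

/* Same shape as the macro added by this commit: hook first, then yield err. */
#define	SET_ERROR(err)	(trace_error(err), err)

static int
example_read(int uncached)
{
	if (uncached)
		return (SET_ERROR(EIO));	/* origin of EIO is now traceable */
	return (0);
}

int
main(void)
{
	return (example_read(1) == EIO ? 0 : 1);
}

On illumos the corresponding SDT probe can then be aggregated with DTrace, e.g. something like dtrace -n 'set-error { @[stack()] = count(); }' (probe name assumed from the illumos issue, not shown in this diff); under Linux the macros are currently defined away, as described in porting note 1.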

View File

@ -59,6 +59,7 @@
#include <sys/time.h>
#include <vm/seg_kmem.h>
#include <sys/zone.h>
#include <sys/sdt.h>
#include <sys/zfs_debug.h>
#include <sys/fm/fs/zfs.h>
#include <sys/sunddi.h>
@ -149,6 +150,7 @@ extern void vpanic(const char *, __va_list);
#define fm_panic panic
#ifdef __sun
/*
* DTrace SDT probes have different signatures in userland than they do in
* kernel. If they're being used in kernel code, re-define them out of
@ -157,29 +159,46 @@ extern void vpanic(const char *, __va_list);
#ifdef DTRACE_PROBE
#undef DTRACE_PROBE
#define DTRACE_PROBE(a) ((void)0)
#endif /* DTRACE_PROBE */
#define DTRACE_PROBE(a) \
ZFS_PROBE0(#a)
#ifdef DTRACE_PROBE1
#undef DTRACE_PROBE1
#define DTRACE_PROBE1(a, b, c) ((void)0)
#endif /* DTRACE_PROBE1 */
#define DTRACE_PROBE1(a, b, c) \
ZFS_PROBE1(#a, (unsigned long)c)
#ifdef DTRACE_PROBE2
#undef DTRACE_PROBE2
#define DTRACE_PROBE2(a, b, c, d, e) ((void)0)
#endif /* DTRACE_PROBE2 */
#define DTRACE_PROBE2(a, b, c, d, e) \
ZFS_PROBE2(#a, (unsigned long)c, (unsigned long)e)
#ifdef DTRACE_PROBE3
#undef DTRACE_PROBE3
#define DTRACE_PROBE3(a, b, c, d, e, f, g) ((void)0)
#endif /* DTRACE_PROBE3 */
#define DTRACE_PROBE3(a, b, c, d, e, f, g) \
ZFS_PROBE3(#a, (unsigned long)c, (unsigned long)e, (unsigned long)g)
#ifdef DTRACE_PROBE4
#undef DTRACE_PROBE4
#define DTRACE_PROBE4(a, b, c, d, e, f, g, h, i) ((void)0)
#endif /* DTRACE_PROBE4 */
#define DTRACE_PROBE4(a, b, c, d, e, f, g, h, i) \
ZFS_PROBE4(#a, (unsigned long)c, (unsigned long)e, (unsigned long)g, \
(unsigned long)i)
/*
* We use the comma operator so that this macro can be used without much
* additional code. For example, "return (EINVAL);" becomes
* "return (SET_ERROR(EINVAL));". Note that the argument will be evaluated
* twice, so it should not have side effects (e.g. something like:
* "return (SET_ERROR(log_error(EINVAL, info)));" would log the error twice).
*/
#define SET_ERROR(err) (ZFS_SET_ERROR(err), err)
#else
#define SET_ERROR(err) (err)
#endif
/*
* Threads
*/
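
The double-evaluation caveat in the comment above is easy to trip over, so here is a small standalone illustration (nothing below is from the commit; log_error() is the hypothetical side-effecting helper the comment mentions):

#include <errno.h>
#include <stdio.h>

static void
probe_error(int err)
{
	(void) fprintf(stderr, "SET_ERROR fired: %d\n", err);
}

/* The argument appears twice in the expansion, exactly as in the real macro. */
#define	SET_ERROR(err)	(probe_error(err), err)

/* Hypothetical helper with a side effect, as in the comment's example. */
static int
log_error(int err, const char *info)
{
	(void) fprintf(stderr, "logging: %s\n", info);
	return (err);
}

int
main(void)
{
	int a = SET_ERROR(EINVAL);			/* fine: a plain constant */
	int b = SET_ERROR(log_error(EINVAL, "bad arg"));	/* "logging: bad arg" prints twice */
	return (a == EINVAL && b == EINVAL ? 0 : 1);
}

Plain errno constants have no side effects, which is why the mechanical return (E...) to return (SET_ERROR(E...)) conversion throughout the rest of this commit is safe.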

View File

@ -21,7 +21,7 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2011 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
*/
@ -3764,7 +3764,7 @@ arc_memory_throttle(uint64_t reserve, uint64_t inflight_data, uint64_t txg)
if (available_memory <= zfs_write_limit_max) {
ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
DMU_TX_STAT_BUMP(dmu_tx_memory_reclaim);
return (EAGAIN);
return (SET_ERROR(EAGAIN));
}
if (inflight_data > available_memory / 4) {
@ -3802,7 +3802,7 @@ arc_tempreserve_space(uint64_t reserve, uint64_t txg)
arc_c = MIN(arc_c_max, reserve * 4);
if (reserve > arc_c) {
DMU_TX_STAT_BUMP(dmu_tx_memory_reserve);
return (ENOMEM);
return (SET_ERROR(ENOMEM));
}
/*
@ -3837,7 +3837,7 @@ arc_tempreserve_space(uint64_t reserve, uint64_t txg)
arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10,
reserve>>10, arc_c>>10);
DMU_TX_STAT_BUMP(dmu_tx_dirty_throttle);
return (ERESTART);
return (SET_ERROR(ERESTART));
}
atomic_add_64(&arc_tempreserve, reserve);
return (0);
@ -3858,7 +3858,7 @@ arc_kstat_update(kstat_t *ksp, int rw)
arc_stats_t *as = ksp->ks_data;
if (rw == KSTAT_WRITE) {
return (EACCES);
return (SET_ERROR(EACCES));
} else {
arc_kstat_update_state(arc_anon,
&as->arcstat_anon_size,
@ -4530,7 +4530,7 @@ l2arc_read_done(zio_t *zio)
if (zio->io_error != 0) {
ARCSTAT_BUMP(arcstat_l2_io_error);
} else {
zio->io_error = EIO;
zio->io_error = SET_ERROR(EIO);
}
if (!equal)
ARCSTAT_BUMP(arcstat_l2_cksum_bad);
@ -5109,7 +5109,7 @@ l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr, enum zio_compress c)
bcopy(zio->io_data, cdata, csize);
if (zio_decompress_data(c, cdata, zio->io_data, csize,
hdr->b_size) != 0)
zio->io_error = EIO;
zio->io_error = SET_ERROR(EIO);
zio_data_buf_free(cdata, csize);
}

View File

@ -21,7 +21,7 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
*/
@ -651,7 +651,7 @@ dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
ASSERT(!refcount_is_zero(&db->db_holds));
if (db->db_state == DB_NOFILL)
return (EIO);
return (SET_ERROR(EIO));
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
@ -708,7 +708,7 @@ dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
cv_wait(&db->db_changed, &db->db_mtx);
}
if (db->db_state == DB_UNCACHED)
err = EIO;
err = SET_ERROR(EIO);
}
mutex_exit(&db->db_mtx);
}
@ -1646,7 +1646,7 @@ dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
if (level >= nlevels ||
(blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
/* the buffer has no parent yet */
return (ENOENT);
return (SET_ERROR(ENOENT));
} else if (level < nlevels-1) {
/* this block is referenced from an indirect block */
int err;
@ -1909,7 +1909,7 @@ top:
&dh->dh_bp, dh);
if (dh->dh_fail_sparse) {
if (dh->dh_err == 0 && dh->dh_bp && BP_IS_HOLE(dh->dh_bp))
dh->dh_err = ENOENT;
dh->dh_err = SET_ERROR(ENOENT);
if (dh->dh_err) {
if (dh->dh_parent)
dbuf_rele(dh->dh_parent, NULL);
@ -2047,7 +2047,7 @@ dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
dnode_t *dn;
if (db->db_blkid != DMU_SPILL_BLKID)
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
if (blksz == 0)
blksz = SPA_MINBLOCKSIZE;
if (blksz > SPA_MAXBLOCKSIZE)

View File

@ -21,7 +21,7 @@
/*
* Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@ -170,7 +170,7 @@ ddt_object_lookup(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
ddt_entry_t *dde)
{
if (!ddt_object_exists(ddt, type, class))
return (ENOENT);
return (SET_ERROR(ENOENT));
return (ddt_ops[type]->ddt_op_lookup(ddt->ddt_os,
ddt->ddt_object[type][class], dde));
@ -232,7 +232,7 @@ ddt_object_info(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
dmu_object_info_t *doi)
{
if (!ddt_object_exists(ddt, type, class))
return (ENOENT);
return (SET_ERROR(ENOENT));
return (dmu_object_info(ddt->ddt_os, ddt->ddt_object[type][class],
doi));
@ -1204,7 +1204,7 @@ ddt_walk(spa_t *spa, ddt_bookmark_t *ddb, ddt_entry_t *dde)
ddb->ddb_type = 0;
} while (++ddb->ddb_class < DDT_CLASSES);
return (ENOENT);
return (SET_ERROR(ENOENT));
}
#if defined(_KERNEL) && defined(HAVE_SPL)

View File

@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
*/
@ -138,7 +138,7 @@ dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
db = dbuf_hold(dn, blkid, tag);
rw_exit(&dn->dn_struct_rwlock);
if (db == NULL) {
err = EIO;
err = SET_ERROR(EIO);
} else {
err = dbuf_read(db, NULL, db_flags);
if (err) {
@ -169,9 +169,9 @@ dmu_set_bonus(dmu_buf_t *db_fake, int newsize, dmu_tx_t *tx)
dn = DB_DNODE(db);
if (dn->dn_bonus != db) {
error = EINVAL;
error = SET_ERROR(EINVAL);
} else if (newsize < 0 || newsize > db_fake->db_size) {
error = EINVAL;
error = SET_ERROR(EINVAL);
} else {
dnode_setbonuslen(dn, newsize, tx);
error = 0;
@ -192,9 +192,9 @@ dmu_set_bonustype(dmu_buf_t *db_fake, dmu_object_type_t type, dmu_tx_t *tx)
dn = DB_DNODE(db);
if (!DMU_OT_IS_VALID(type)) {
error = EINVAL;
error = SET_ERROR(EINVAL);
} else if (dn->dn_bonus != db) {
error = EINVAL;
error = SET_ERROR(EINVAL);
} else {
dnode_setbonus_type(dn, type, tx);
error = 0;
@ -321,12 +321,12 @@ dmu_spill_hold_existing(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp)
dn = DB_DNODE(db);
if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_SA) {
err = EINVAL;
err = SET_ERROR(EINVAL);
} else {
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (!dn->dn_have_spill) {
err = ENOENT;
err = SET_ERROR(ENOENT);
} else {
err = dmu_spill_hold_by_dnode(dn,
DB_RF_HAVESTRUCT | DB_RF_CANFAIL, tag, dbp);
@ -392,7 +392,7 @@ dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
(longlong_t)dn->dn_object, dn->dn_datablksz,
(longlong_t)offset, (longlong_t)length);
rw_exit(&dn->dn_struct_rwlock);
return (EIO);
return (SET_ERROR(EIO));
}
nblks = 1;
}
@ -409,7 +409,7 @@ dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
rw_exit(&dn->dn_struct_rwlock);
dmu_buf_rele_array(dbp, nblks, tag);
zio_nowait(zio);
return (EIO);
return (SET_ERROR(EIO));
}
/* initiate async i/o */
if (read) {
@ -438,7 +438,7 @@ dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
db->db_state == DB_FILL)
cv_wait(&db->db_changed, &db->db_mtx);
if (db->db_state == DB_UNCACHED)
err = EIO;
err = SET_ERROR(EIO);
mutex_exit(&db->db_mtx);
if (err) {
dmu_buf_rele_array(dbp, nblks, tag);
@ -1519,7 +1519,8 @@ dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd,
dmu_tx_hold_space(tx, zgd->zgd_db->db_size);
if (dmu_tx_assign(tx, TXG_WAIT) != 0) {
dmu_tx_abort(tx);
return (EIO); /* Make zl_get_data do txg_waited_synced() */
/* Make zl_get_data do txg_waited_synced() */
return (SET_ERROR(EIO));
}
dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_PUSHPAGE);
@ -1605,7 +1606,7 @@ dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd)
* This txg has already synced. There's nothing to do.
*/
mutex_exit(&db->db_mtx);
return (EEXIST);
return (SET_ERROR(EEXIST));
}
if (txg <= spa_syncing_txg(os->os_spa)) {
@ -1627,7 +1628,7 @@ dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd)
* There's no need to log writes to freed blocks, so we're done.
*/
mutex_exit(&db->db_mtx);
return (ENOENT);
return (SET_ERROR(ENOENT));
}
ASSERT(dr->dr_txg == txg);
@ -1639,7 +1640,7 @@ dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd)
* have been dirtied since, or we would have cleared the state.
*/
mutex_exit(&db->db_mtx);
return (EALREADY);
return (SET_ERROR(EALREADY));
}
ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);

View File

@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/dmu.h>
@ -113,7 +113,7 @@ diff_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
int err = 0;
if (issig(JUSTLOOKING) && issig(FORREAL))
return (EINTR);
return (SET_ERROR(EINTR));
if (zb->zb_object != DMU_META_DNODE_OBJECT)
return (0);
@ -136,7 +136,7 @@ diff_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
&aflags, zb) != 0)
return (EIO);
return (SET_ERROR(EIO));
blk = abuf->b_data;
for (i = 0; i < blksz >> DNODE_SHIFT; i++) {
@ -168,7 +168,7 @@ dmu_diff(const char *tosnap_name, const char *fromsnap_name,
if (strchr(tosnap_name, '@') == NULL ||
strchr(fromsnap_name, '@') == NULL)
return (EINVAL);
return (SET_ERROR(EINVAL));
error = dsl_pool_hold(tosnap_name, FTAG, &dp);
if (error != 0)
@ -191,7 +191,7 @@ dmu_diff(const char *tosnap_name, const char *fromsnap_name,
dsl_dataset_rele(fromsnap, FTAG);
dsl_dataset_rele(tosnap, FTAG);
dsl_pool_rele(dp, FTAG);
return (EXDEV);
return (SET_ERROR(EXDEV));
}
fromtxg = fromsnap->ds_phys->ds_creation_txg;

View File

@ -20,6 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/dmu.h>
@ -90,7 +91,7 @@ dmu_object_claim(objset_t *os, uint64_t object, dmu_object_type_t ot,
int err;
if (object == DMU_META_DNODE_OBJECT && !dmu_tx_private_ok(tx))
return (EBADF);
return (SET_ERROR(EBADF));
err = dnode_hold_impl(os, object, DNODE_MUST_BE_FREE, FTAG, &dn);
if (err)
@ -112,7 +113,7 @@ dmu_object_reclaim(objset_t *os, uint64_t object, dmu_object_type_t ot,
int err;
if (object == DMU_META_DNODE_OBJECT)
return (EBADF);
return (SET_ERROR(EBADF));
err = dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED,
FTAG, &dn);

View File

@ -20,8 +20,8 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
*/
/* Portions Copyright 2010 Robert Milkowski */
@ -288,7 +288,7 @@ dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
kmem_free(os, sizeof (objset_t));
/* convert checksum errors into IO errors */
if (err == ECKSUM)
err = EIO;
err = SET_ERROR(EIO);
return (err);
}
@ -501,10 +501,10 @@ dmu_objset_own(const char *name, dmu_objset_type_t type,
dsl_dataset_disown(ds, tag);
} else if (type != DMU_OST_ANY && type != (*osp)->os_phys->os_type) {
dsl_dataset_disown(ds, tag);
return (EINVAL);
return (SET_ERROR(EINVAL));
} else if (!readonly && dsl_dataset_is_snapshot(ds)) {
dsl_dataset_disown(ds, tag);
return (EROFS);
return (SET_ERROR(EROFS));
}
return (err);
}
@ -719,14 +719,14 @@ dmu_objset_create_check(void *arg, dmu_tx_t *tx)
int error;
if (strchr(doca->doca_name, '@') != NULL)
return (EINVAL);
return (SET_ERROR(EINVAL));
error = dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail);
if (error != 0)
return (error);
if (tail == NULL) {
dsl_dir_rele(pdd, FTAG);
return (EEXIST);
return (SET_ERROR(EEXIST));
}
dsl_dir_rele(pdd, FTAG);
@ -800,19 +800,19 @@ dmu_objset_clone_check(void *arg, dmu_tx_t *tx)
dsl_pool_t *dp = dmu_tx_pool(tx);
if (strchr(doca->doca_clone, '@') != NULL)
return (EINVAL);
return (SET_ERROR(EINVAL));
error = dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail);
if (error != 0)
return (error);
if (tail == NULL) {
dsl_dir_rele(pdd, FTAG);
return (EEXIST);
return (SET_ERROR(EEXIST));
}
/* You can't clone across pools. */
if (pdd->dd_pool != dp) {
dsl_dir_rele(pdd, FTAG);
return (EXDEV);
return (SET_ERROR(EXDEV));
}
dsl_dir_rele(pdd, FTAG);
@ -823,13 +823,13 @@ dmu_objset_clone_check(void *arg, dmu_tx_t *tx)
/* You can't clone across pools. */
if (origin->ds_dir->dd_pool != dp) {
dsl_dataset_rele(origin, FTAG);
return (EXDEV);
return (SET_ERROR(EXDEV));
}
/* You can only clone snapshots, not the head datasets. */
if (!dsl_dataset_is_snapshot(origin)) {
dsl_dataset_rele(origin, FTAG);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
dsl_dataset_rele(origin, FTAG);
@ -1310,9 +1310,9 @@ dmu_objset_userspace_upgrade(objset_t *os)
if (dmu_objset_userspace_present(os))
return (0);
if (!dmu_objset_userused_enabled(os))
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
if (dmu_objset_is_snapshot(os))
return (EINVAL);
return (SET_ERROR(EINVAL));
/*
* We simply need to mark every object dirty, so that it will be
@ -1328,7 +1328,7 @@ dmu_objset_userspace_upgrade(objset_t *os)
int objerr;
if (issig(JUSTLOOKING) && issig(FORREAL))
return (EINTR);
return (SET_ERROR(EINTR));
objerr = dmu_bonus_hold(os, obj, FTAG, &db);
if (objerr != 0)
@ -1404,7 +1404,7 @@ dmu_snapshot_realname(objset_t *os, char *name, char *real, int maxlen,
uint64_t ignored;
if (ds->ds_phys->ds_snapnames_zapobj == 0)
return (ENOENT);
return (SET_ERROR(ENOENT));
return (zap_lookup_norm(ds->ds_dir->dd_pool->dp_meta_objset,
ds->ds_phys->ds_snapnames_zapobj, name, 8, 1, &ignored, MT_FIRST,
@ -1422,7 +1422,7 @@ dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
ASSERT(dsl_pool_config_held(dmu_objset_pool(os)));
if (ds->ds_phys->ds_snapnames_zapobj == 0)
return (ENOENT);
return (SET_ERROR(ENOENT));
zap_cursor_init_serialized(&cursor,
ds->ds_dir->dd_pool->dp_meta_objset,
@ -1430,12 +1430,12 @@ dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
if (zap_cursor_retrieve(&cursor, &attr) != 0) {
zap_cursor_fini(&cursor);
return (ENOENT);
return (SET_ERROR(ENOENT));
}
if (strlen(attr.za_name) + 1 > namelen) {
zap_cursor_fini(&cursor);
return (ENAMETOOLONG);
return (SET_ERROR(ENAMETOOLONG));
}
(void) strcpy(name, attr.za_name);
@ -1467,7 +1467,7 @@ dmu_dir_list_next(objset_t *os, int namelen, char *name,
/* there is no next dir on a snapshot! */
if (os->os_dsl_dataset->ds_object !=
dd->dd_phys->dd_head_dataset_obj)
return (ENOENT);
return (SET_ERROR(ENOENT));
zap_cursor_init_serialized(&cursor,
dd->dd_pool->dp_meta_objset,
@ -1475,12 +1475,12 @@ dmu_dir_list_next(objset_t *os, int namelen, char *name,
if (zap_cursor_retrieve(&cursor, &attr) != 0) {
zap_cursor_fini(&cursor);
return (ENOENT);
return (SET_ERROR(ENOENT));
}
if (strlen(attr.za_name) + 1 > namelen) {
zap_cursor_fini(&cursor);
return (ENAMETOOLONG);
return (SET_ERROR(ENAMETOOLONG));
}
(void) strcpy(name, attr.za_name);
@ -1747,9 +1747,9 @@ dmu_fsname(const char *snapname, char *buf)
{
char *atp = strchr(snapname, '@');
if (atp == NULL)
return (EINVAL);
return (SET_ERROR(EINVAL));
if (atp - snapname >= MAXNAMELEN)
return (ENAMETOOLONG);
return (SET_ERROR(ENAMETOOLONG));
(void) strlcpy(buf, snapname, atp - snapname + 1);
return (0);
}

View File

@ -22,8 +22,8 @@
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011 by Delphix. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2011 by Delphix. All rights reserved.
* Copyright (c) 2012, Joyent, Inc. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/dmu.h>
@ -123,7 +123,7 @@ dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
dsp->dsa_pending_op != PENDING_FREE) {
if (dump_bytes(dsp, dsp->dsa_drr,
sizeof (dmu_replay_record_t)) != 0)
return (EINTR);
return (SET_ERROR(EINTR));
dsp->dsa_pending_op = PENDING_NONE;
}
@ -147,7 +147,7 @@ dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
/* not a continuation. Push out pending record */
if (dump_bytes(dsp, dsp->dsa_drr,
sizeof (dmu_replay_record_t)) != 0)
return (EINTR);
return (SET_ERROR(EINTR));
dsp->dsa_pending_op = PENDING_NONE;
}
}
@ -161,7 +161,7 @@ dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
if (length == -1ULL) {
if (dump_bytes(dsp, dsp->dsa_drr,
sizeof (dmu_replay_record_t)) != 0)
return (EINTR);
return (SET_ERROR(EINTR));
} else {
dsp->dsa_pending_op = PENDING_FREE;
}
@ -185,7 +185,7 @@ dump_data(dmu_sendarg_t *dsp, dmu_object_type_t type,
if (dsp->dsa_pending_op != PENDING_NONE) {
if (dump_bytes(dsp, dsp->dsa_drr,
sizeof (dmu_replay_record_t)) != 0)
return (EINTR);
return (SET_ERROR(EINTR));
dsp->dsa_pending_op = PENDING_NONE;
}
/* write a DATA record */
@ -205,9 +205,9 @@ dump_data(dmu_sendarg_t *dsp, dmu_object_type_t type,
drrw->drr_key.ddk_cksum = bp->blk_cksum;
if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
return (EINTR);
return (SET_ERROR(EINTR));
if (dump_bytes(dsp, data, blksz) != 0)
return (EINTR);
return (SET_ERROR(EINTR));
return (0);
}
@ -219,7 +219,7 @@ dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data)
if (dsp->dsa_pending_op != PENDING_NONE) {
if (dump_bytes(dsp, dsp->dsa_drr,
sizeof (dmu_replay_record_t)) != 0)
return (EINTR);
return (SET_ERROR(EINTR));
dsp->dsa_pending_op = PENDING_NONE;
}
@ -231,9 +231,9 @@ dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data)
drrs->drr_toguid = dsp->dsa_toguid;
if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)))
return (EINTR);
return (SET_ERROR(EINTR));
if (dump_bytes(dsp, data, blksz))
return (EINTR);
return (SET_ERROR(EINTR));
return (0);
}
@ -253,7 +253,7 @@ dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
dsp->dsa_pending_op != PENDING_FREEOBJECTS) {
if (dump_bytes(dsp, dsp->dsa_drr,
sizeof (dmu_replay_record_t)) != 0)
return (EINTR);
return (SET_ERROR(EINTR));
dsp->dsa_pending_op = PENDING_NONE;
}
if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) {
@ -268,7 +268,7 @@ dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
/* can't be aggregated. Push out pending record */
if (dump_bytes(dsp, dsp->dsa_drr,
sizeof (dmu_replay_record_t)) != 0)
return (EINTR);
return (SET_ERROR(EINTR));
dsp->dsa_pending_op = PENDING_NONE;
}
}
@ -296,7 +296,7 @@ dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp)
if (dsp->dsa_pending_op != PENDING_NONE) {
if (dump_bytes(dsp, dsp->dsa_drr,
sizeof (dmu_replay_record_t)) != 0)
return (EINTR);
return (SET_ERROR(EINTR));
dsp->dsa_pending_op = PENDING_NONE;
}
@ -313,17 +313,17 @@ dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp)
drro->drr_toguid = dsp->dsa_toguid;
if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
return (EINTR);
return (SET_ERROR(EINTR));
if (dump_bytes(dsp, DN_BONUS(dnp), P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0)
return (EINTR);
return (SET_ERROR(EINTR));
/* free anything past the end of the file */
if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
(dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL))
return (EINTR);
return (SET_ERROR(EINTR));
if (dsp->dsa_err != 0)
return (EINTR);
return (SET_ERROR(EINTR));
return (0);
}
@ -341,7 +341,7 @@ backup_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
int err = 0;
if (issig(JUSTLOOKING) && issig(FORREAL))
return (EINTR);
return (SET_ERROR(EINTR));
if (zb->zb_object != DMU_META_DNODE_OBJECT &&
DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
@ -365,7 +365,7 @@ backup_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
&aflags, zb) != 0)
return (EIO);
return (SET_ERROR(EIO));
blk = abuf->b_data;
for (i = 0; i < blksz >> DNODE_SHIFT; i++) {
@ -384,7 +384,7 @@ backup_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
&aflags, zb) != 0)
return (EIO);
return (SET_ERROR(EIO));
err = dump_spill(dsp, zb->zb_object, blksz, abuf->b_data);
(void) arc_buf_remove_ref(abuf, &abuf);
@ -406,7 +406,7 @@ backup_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
ptr++)
*ptr = 0x2f5baddb10cULL;
} else {
return (EIO);
return (SET_ERROR(EIO));
}
}
@ -436,7 +436,7 @@ dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *ds,
dsl_dataset_rele(fromds, tag);
dsl_dataset_rele(ds, tag);
dsl_pool_rele(dp, tag);
return (EXDEV);
return (SET_ERROR(EXDEV));
}
err = dmu_objset_from_ds(ds, &os);
@ -463,7 +463,7 @@ dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *ds,
dsl_dataset_rele(fromds, tag);
dsl_dataset_rele(ds, tag);
dsl_pool_rele(dp, tag);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
if (version >= ZPL_VERSION_SA) {
DMU_SET_FEATUREFLAGS(
@ -521,7 +521,7 @@ dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *ds,
if (dsp->dsa_pending_op != PENDING_NONE)
if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0)
err = EINTR;
err = SET_ERROR(EINTR);
if (err != 0) {
if (err == EINTR && dsp->dsa_err != 0)
@ -594,9 +594,9 @@ dmu_send(const char *tosnap, const char *fromsnap,
int err;
if (strchr(tosnap, '@') == NULL)
return (EINVAL);
return (SET_ERROR(EINVAL));
if (fromsnap != NULL && strchr(fromsnap, '@') == NULL)
return (EINVAL);
return (SET_ERROR(EINVAL));
err = dsl_pool_hold(tosnap, FTAG, &dp);
if (err != 0)
@ -630,14 +630,14 @@ dmu_send_estimate(dsl_dataset_t *ds, dsl_dataset_t *fromds, uint64_t *sizep)
/* tosnap must be a snapshot */
if (!dsl_dataset_is_snapshot(ds))
return (EINVAL);
return (SET_ERROR(EINVAL));
/*
* fromsnap must be an earlier snapshot from the same fs as tosnap,
* or the origin's fs.
*/
if (fromds != NULL && !dsl_dataset_is_before(ds, fromds))
return (EXDEV);
return (SET_ERROR(EXDEV));
/* Get uncompressed size estimate of changed data. */
if (fromds == NULL) {
@ -695,7 +695,7 @@ recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
/* must not have any changes since most recent snapshot */
if (!drba->drba_cookie->drc_force &&
dsl_dataset_modified_since_lastsnap(ds))
return (ETXTBSY);
return (SET_ERROR(ETXTBSY));
/* temporary clone name must not exist */
error = zap_lookup(dp->dp_meta_objset,
@ -714,7 +714,7 @@ recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
if (fromguid != 0) {
/* if incremental, most recent snapshot must match fromguid */
if (ds->ds_prev == NULL)
return (ENODEV);
return (SET_ERROR(ENODEV));
/*
* most recent snapshot must match fromguid, or there are no
@ -728,10 +728,10 @@ recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
error = dsl_dataset_hold_obj(dp, obj, FTAG,
&snap);
if (error != 0)
return (ENODEV);
return (SET_ERROR(ENODEV));
if (snap->ds_phys->ds_creation_txg < birth) {
dsl_dataset_rele(snap, FTAG);
return (ENODEV);
return (SET_ERROR(ENODEV));
}
if (snap->ds_phys->ds_guid == fromguid) {
dsl_dataset_rele(snap, FTAG);
@ -741,12 +741,12 @@ recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
dsl_dataset_rele(snap, FTAG);
}
if (obj == 0)
return (ENODEV);
return (SET_ERROR(ENODEV));
}
} else {
/* if full, most recent snapshot must be $ORIGIN */
if (ds->ds_phys->ds_prev_snap_txg >= TXG_INITIAL)
return (ENODEV);
return (SET_ERROR(ENODEV));
}
return (0);
@ -772,13 +772,13 @@ dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
DMU_COMPOUNDSTREAM ||
drrb->drr_type >= DMU_OST_NUMTYPES ||
((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
return (EINVAL);
return (SET_ERROR(EINVAL));
/* Verify pool version supports SA if SA_SPILL feature set */
if ((DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
DMU_BACKUP_FEATURE_SA_SPILL) &&
spa_version(dp->dp_spa) < SPA_VERSION_SA) {
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
@ -788,7 +788,7 @@ dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
/* Can't recv a clone into an existing fs */
if (flags & DRR_FLAG_CLONE) {
dsl_dataset_rele(ds, FTAG);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
error = recv_begin_check_existing_impl(drba, ds, fromguid);
@ -802,7 +802,7 @@ dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
* target fs, so fail the recv.
*/
if (fromguid != 0 && !(flags & DRR_FLAG_CLONE))
return (ENOENT);
return (SET_ERROR(ENOENT));
/* Open the parent of tofs */
ASSERT3U(strlen(tofs), <, MAXNAMELEN);
@ -822,12 +822,12 @@ dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
if (!dsl_dataset_is_snapshot(origin)) {
dsl_dataset_rele(origin, FTAG);
dsl_dataset_rele(ds, FTAG);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
if (origin->ds_phys->ds_guid != fromguid) {
dsl_dataset_rele(origin, FTAG);
dsl_dataset_rele(ds, FTAG);
return (ENODEV);
return (SET_ERROR(ENODEV));
}
dsl_dataset_rele(origin, FTAG);
}
@ -918,7 +918,7 @@ dmu_recv_begin(char *tofs, char *tosnap, struct drr_begin *drrb,
if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
drc->drc_byteswap = B_TRUE;
else if (drrb->drr_magic != DMU_BACKUP_MAGIC)
return (EINVAL);
return (SET_ERROR(EINVAL));
drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
drr->drr_type = DRR_BEGIN;
@ -1012,7 +1012,7 @@ restore_read(struct restorearg *ra, int len)
RLIM64_INFINITY, CRED(), &resid);
if (resid == len - done)
ra->err = EINVAL;
ra->err = SET_ERROR(EINVAL);
ra->voff += len - done - resid;
done = len - resid;
if (ra->err != 0)
@ -1126,13 +1126,13 @@ restore_object(struct restorearg *ra, objset_t *os, struct drr_object *drro)
drro->drr_blksz < SPA_MINBLOCKSIZE ||
drro->drr_blksz > SPA_MAXBLOCKSIZE ||
drro->drr_bonuslen > DN_MAX_BONUSLEN) {
return (EINVAL);
return (SET_ERROR(EINVAL));
}
err = dmu_object_info(os, drro->drr_object, NULL);
if (err != 0 && err != ENOENT)
return (EINVAL);
return (SET_ERROR(EINVAL));
if (drro->drr_bonuslen) {
data = restore_read(ra, P2ROUNDUP(drro->drr_bonuslen, 8));
@ -1160,7 +1160,7 @@ restore_object(struct restorearg *ra, objset_t *os, struct drr_object *drro)
drro->drr_bonustype, drro->drr_bonuslen);
}
if (err != 0) {
return (EINVAL);
return (SET_ERROR(EINVAL));
}
tx = dmu_tx_create(os);
@ -1203,7 +1203,7 @@ restore_freeobjects(struct restorearg *ra, objset_t *os,
uint64_t obj;
if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
return (EINVAL);
return (SET_ERROR(EINVAL));
for (obj = drrfo->drr_firstobj;
obj < drrfo->drr_firstobj + drrfo->drr_numobjs;
@ -1230,14 +1230,14 @@ restore_write(struct restorearg *ra, objset_t *os,
if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset ||
!DMU_OT_IS_VALID(drrw->drr_type))
return (EINVAL);
return (SET_ERROR(EINVAL));
data = restore_read(ra, drrw->drr_length);
if (data == NULL)
return (ra->err);
if (dmu_object_info(os, drrw->drr_object, NULL) != 0)
return (EINVAL);
return (SET_ERROR(EINVAL));
tx = dmu_tx_create(os);
@ -1279,7 +1279,7 @@ restore_write_byref(struct restorearg *ra, objset_t *os,
dmu_buf_t *dbp;
if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
return (EINVAL);
return (SET_ERROR(EINVAL));
/*
* If the GUID of the referenced dataset is different from the
@ -1289,10 +1289,10 @@ restore_write_byref(struct restorearg *ra, objset_t *os,
gmesrch.guid = drrwbr->drr_refguid;
if ((gmep = avl_find(ra->guid_to_ds_map, &gmesrch,
&where)) == NULL) {
return (EINVAL);
return (SET_ERROR(EINVAL));
}
if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
return (EINVAL);
return (SET_ERROR(EINVAL));
} else {
ref_os = os;
}
@ -1328,14 +1328,14 @@ restore_spill(struct restorearg *ra, objset_t *os, struct drr_spill *drrs)
if (drrs->drr_length < SPA_MINBLOCKSIZE ||
drrs->drr_length > SPA_MAXBLOCKSIZE)
return (EINVAL);
return (SET_ERROR(EINVAL));
data = restore_read(ra, drrs->drr_length);
if (data == NULL)
return (ra->err);
if (dmu_object_info(os, drrs->drr_object, NULL) != 0)
return (EINVAL);
return (SET_ERROR(EINVAL));
VERIFY(0 == dmu_bonus_hold(os, drrs->drr_object, FTAG, &db));
if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
@ -1377,10 +1377,10 @@ restore_free(struct restorearg *ra, objset_t *os,
if (drrf->drr_length != -1ULL &&
drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
return (EINVAL);
return (SET_ERROR(EINVAL));
if (dmu_object_info(os, drrf->drr_object, NULL) != 0)
return (EINVAL);
return (SET_ERROR(EINVAL));
err = dmu_free_long_range(os, drrf->drr_object,
drrf->drr_offset, drrf->drr_length);
@ -1436,7 +1436,7 @@ dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp,
minor_t minor;
if (cleanup_fd == -1) {
ra.err = EBADF;
ra.err = SET_ERROR(EBADF);
goto out;
}
ra.err = zfs_onexit_fd_hold(cleanup_fd, &minor);
@ -1473,7 +1473,7 @@ dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp,
while (ra.err == 0 &&
NULL != (drr = restore_read(&ra, sizeof (*drr)))) {
if (issig(JUSTLOOKING) && issig(FORREAL)) {
ra.err = EINTR;
ra.err = SET_ERROR(EINTR);
goto out;
}
@ -1527,7 +1527,7 @@ dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp,
* everything before the DRR_END record.
*/
if (!ZIO_CHECKSUM_EQUAL(drre.drr_checksum, pcksum))
ra.err = ECKSUM;
ra.err = SET_ERROR(ECKSUM);
goto out;
}
case DRR_SPILL:
@ -1537,7 +1537,7 @@ dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp,
break;
}
default:
ra.err = EINVAL;
ra.err = SET_ERROR(EINVAL);
goto out;
}
pcksum = ra.cksum;

View File

@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@ -434,7 +434,7 @@ traverse_prefetcher(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
ASSERT(pfd->pd_blks_fetched >= 0);
if (pfd->pd_cancel)
return (EINTR);
return (SET_ERROR(EINTR));
if (bp == NULL || !((pfd->pd_flags & TRAVERSE_PREFETCH_DATA) ||
BP_GET_TYPE(bp) == DMU_OT_DNODE || BP_GET_LEVEL(bp) > 0) ||

View File

@ -176,7 +176,7 @@ dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
db = dbuf_hold_level(dn, level, blkid, FTAG);
rw_exit(&dn->dn_struct_rwlock);
if (db == NULL)
return (EIO);
return (SET_ERROR(EIO));
err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
dbuf_rele(db, FTAG);
return (err);
@ -387,7 +387,7 @@ dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
out:
if (txh->txh_space_towrite + txh->txh_space_tooverwrite >
2 * DMU_MAX_ACCESS)
err = EFBIG;
err = SET_ERROR(EFBIG);
if (err)
txh->txh_tx->tx_err = err;
@ -945,9 +945,9 @@ dmu_tx_try_assign(dmu_tx_t *tx, txg_how_t txg_how)
*/
if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
txg_how != TXG_WAIT)
return (EIO);
return (SET_ERROR(EIO));
return (ERESTART);
return (SET_ERROR(ERESTART));
}
tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
@ -969,7 +969,7 @@ dmu_tx_try_assign(dmu_tx_t *tx, txg_how_t txg_how)
mutex_exit(&dn->dn_mtx);
tx->tx_needassign_txh = txh;
DMU_TX_STAT_BUMP(dmu_tx_group);
return (ERESTART);
return (SET_ERROR(ERESTART));
}
if (dn->dn_assigned_txg == 0)
dn->dn_assigned_txg = tx->tx_txg;

View File

@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@ -1031,12 +1031,12 @@ dnode_hold_impl(objset_t *os, uint64_t object, int flag,
dn = (object == DMU_USERUSED_OBJECT) ?
DMU_USERUSED_DNODE(os) : DMU_GROUPUSED_DNODE(os);
if (dn == NULL)
return (ENOENT);
return (SET_ERROR(ENOENT));
type = dn->dn_type;
if ((flag & DNODE_MUST_BE_ALLOCATED) && type == DMU_OT_NONE)
return (ENOENT);
return (SET_ERROR(ENOENT));
if ((flag & DNODE_MUST_BE_FREE) && type != DMU_OT_NONE)
return (EEXIST);
return (SET_ERROR(EEXIST));
DNODE_VERIFY(dn);
(void) refcount_add(&dn->dn_holds, tag);
*dnp = dn;
@ -1044,7 +1044,7 @@ dnode_hold_impl(objset_t *os, uint64_t object, int flag,
}
if (object == 0 || object >= DN_MAX_OBJECT)
return (EINVAL);
return (SET_ERROR(EINVAL));
mdn = DMU_META_DNODE(os);
ASSERT(mdn->dn_object == DMU_META_DNODE_OBJECT);
@ -1062,7 +1062,7 @@ dnode_hold_impl(objset_t *os, uint64_t object, int flag,
if (drop_struct_lock)
rw_exit(&mdn->dn_struct_rwlock);
if (db == NULL)
return (EIO);
return (SET_ERROR(EIO));
err = dbuf_read(db, NULL, DB_RF_CANFAIL);
if (err) {
dbuf_rele(db, FTAG);
@ -1371,7 +1371,7 @@ dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx)
fail:
rw_exit(&dn->dn_struct_rwlock);
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
/* read-holding callers must not rely on the lock being continuously held */
@ -1857,7 +1857,7 @@ dnode_next_offset_level(dnode_t *dn, int flags, uint64_t *offset,
* at the pointer to this block in its parent, and its
* going to be unallocated, so we will skip over it.
*/
return (ESRCH);
return (SET_ERROR(ESRCH));
}
error = dbuf_read(db, NULL, DB_RF_CANFAIL | DB_RF_HAVESTRUCT);
if (error) {
@ -1873,7 +1873,7 @@ dnode_next_offset_level(dnode_t *dn, int flags, uint64_t *offset,
* This can only happen when we are searching up the tree
* and these conditions mean that we need to keep climbing.
*/
error = ESRCH;
error = SET_ERROR(ESRCH);
} else if (lvl == 0) {
dnode_phys_t *dnp = data;
span = DNODE_SHIFT;
@ -1886,7 +1886,7 @@ dnode_next_offset_level(dnode_t *dn, int flags, uint64_t *offset,
*offset += (1ULL << span) * inc;
}
if (i < 0 || i == blkfill)
error = ESRCH;
error = SET_ERROR(ESRCH);
} else {
blkptr_t *bp = data;
uint64_t start = *offset;
@ -1918,7 +1918,7 @@ dnode_next_offset_level(dnode_t *dn, int flags, uint64_t *offset,
*offset = start;
}
if (i < 0 || i >= epb)
error = ESRCH;
error = SET_ERROR(ESRCH);
}
if (db)
@ -1962,7 +1962,7 @@ dnode_next_offset(dnode_t *dn, int flags, uint64_t *offset,
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (dn->dn_phys->dn_nlevels == 0) {
error = ESRCH;
error = SET_ERROR(ESRCH);
goto out;
}
@ -1971,7 +1971,7 @@ dnode_next_offset(dnode_t *dn, int flags, uint64_t *offset,
if (flags & DNODE_FIND_HOLE)
*offset = dn->dn_datablksz;
} else {
error = ESRCH;
error = SET_ERROR(ESRCH);
}
goto out;
}
@ -1992,7 +1992,7 @@ dnode_next_offset(dnode_t *dn, int flags, uint64_t *offset,
if (error == 0 && (flags & DNODE_FIND_BACKWARDS ?
initial_offset < *offset : initial_offset > *offset))
error = ESRCH;
error = SET_ERROR(ESRCH);
out:
if (!(flags & DNODE_FIND_HAVELOCK))
rw_exit(&dn->dn_struct_rwlock);

View File

@ -361,7 +361,7 @@ dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
/* Make sure dsobj has the correct object type. */
dmu_object_info_from_db(dbuf, &doi);
if (doi.doi_type != DMU_OT_DSL_DATASET)
return (EINVAL);
return (SET_ERROR(EINVAL));
ds = dmu_buf_get_user(dbuf);
if (ds == NULL) {
@ -479,7 +479,7 @@ dsl_dataset_hold(dsl_pool_t *dp, const char *name,
if (obj != 0)
err = dsl_dataset_hold_obj(dp, obj, tag, dsp);
else
err = ENOENT;
err = SET_ERROR(ENOENT);
/* we may be looking for a snapshot */
if (err == 0 && snapname != NULL) {
@ -488,7 +488,7 @@ dsl_dataset_hold(dsl_pool_t *dp, const char *name,
if (*snapname++ != '@') {
dsl_dataset_rele(*dsp, tag);
dsl_dir_rele(dd, FTAG);
return (ENOENT);
return (SET_ERROR(ENOENT));
}
dprintf("looking for snapshot '%s'\n", snapname);
@ -521,7 +521,7 @@ dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj,
if (!dsl_dataset_tryown(*dsp, tag)) {
dsl_dataset_rele(*dsp, tag);
*dsp = NULL;
return (EBUSY);
return (SET_ERROR(EBUSY));
}
return (0);
}
@ -535,7 +535,7 @@ dsl_dataset_own(dsl_pool_t *dp, const char *name,
return (err);
if (!dsl_dataset_tryown(*dsp, tag)) {
dsl_dataset_rele(*dsp, tag);
return (EBUSY);
return (SET_ERROR(EBUSY));
}
return (0);
}
@ -907,7 +907,7 @@ dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
ASSERT(ds->ds_reserved == 0 || DS_UNIQUE_IS_ACCURATE(ds));
asize = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
return (ENOSPC);
return (SET_ERROR(ENOSPC));
/*
* Propagate any reserved space for this snapshot to other
@ -942,14 +942,14 @@ dsl_dataset_snapshot_check_impl(dsl_dataset_t *ds, const char *snapname,
* is already one, try again.
*/
if (ds->ds_phys->ds_prev_snap_txg >= tx->tx_txg)
return (EAGAIN);
return (SET_ERROR(EAGAIN));
/*
* Check for conflicting snapshot name.
*/
error = dsl_dataset_snap_lookup(ds, snapname, &value);
if (error == 0)
return (EEXIST);
return (SET_ERROR(EEXIST));
if (error != ENOENT)
return (error);
@ -977,11 +977,11 @@ dsl_dataset_snapshot_check(void *arg, dmu_tx_t *tx)
name = nvpair_name(pair);
if (strlen(name) >= MAXNAMELEN)
error = ENAMETOOLONG;
error = SET_ERROR(ENAMETOOLONG);
if (error == 0) {
atp = strchr(name, '@');
if (atp == NULL)
error = EINVAL;
error = SET_ERROR(EINVAL);
if (error == 0)
(void) strlcpy(dsname, name, atp - name + 1);
}
@ -1187,7 +1187,7 @@ dsl_dataset_snapshot(nvlist_t *snaps, nvlist_t *props, nvlist_t *errors)
atp = strchr(snapname, '@');
if (atp == NULL) {
error = EINVAL;
error = SET_ERROR(EINVAL);
break;
}
(void) strlcpy(fsname, snapname, atp - snapname + 1);
@ -1249,7 +1249,7 @@ dsl_dataset_snapshot_tmp_check(void *arg, dmu_tx_t *tx)
if (spa_version(dp->dp_spa) < SPA_VERSION_USERREFS) {
dsl_dataset_rele(ds, FTAG);
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
error = dsl_dataset_user_hold_check_one(NULL, ddsta->ddsta_htag,
B_TRUE, tx);
@ -1553,14 +1553,14 @@ dsl_dataset_rename_snapshot_check_impl(dsl_pool_t *dp,
/* new name should not exist */
error = dsl_dataset_snap_lookup(hds, ddrsa->ddrsa_newsnapname, &val);
if (error == 0)
error = EEXIST;
error = SET_ERROR(EEXIST);
else if (error == ENOENT)
error = 0;
/* dataset name + 1 for the "@" + the new snapshot name must fit */
if (dsl_dir_namelen(hds->ds_dir) + 1 +
strlen(ddrsa->ddrsa_newsnapname) >= MAXNAMELEN)
error = ENAMETOOLONG;
error = SET_ERROR(ENAMETOOLONG);
return (error);
}
@ -1672,18 +1672,18 @@ dsl_dataset_rollback_check(void *arg, dmu_tx_t *tx)
/* must not be a snapshot */
if (dsl_dataset_is_snapshot(ds)) {
dsl_dataset_rele(ds, FTAG);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
/* must have a most recent snapshot */
if (ds->ds_phys->ds_prev_snap_txg < TXG_INITIAL) {
dsl_dataset_rele(ds, FTAG);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
if (dsl_dataset_long_held(ds)) {
dsl_dataset_rele(ds, FTAG);
return (EBUSY);
return (SET_ERROR(EBUSY));
}
/*
@ -1693,7 +1693,7 @@ dsl_dataset_rollback_check(void *arg, dmu_tx_t *tx)
if (ds->ds_quota != 0 &&
ds->ds_prev->ds_phys->ds_referenced_bytes > ds->ds_quota) {
dsl_dataset_rele(ds, FTAG);
return (EDQUOT);
return (SET_ERROR(EDQUOT));
}
/*
@ -1710,7 +1710,7 @@ dsl_dataset_rollback_check(void *arg, dmu_tx_t *tx)
unused_refres_delta >
dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE)) {
dsl_dataset_rele(ds, FTAG);
return (ENOSPC);
return (SET_ERROR(ENOSPC));
}
dsl_dataset_rele(ds, FTAG);
@ -1786,7 +1786,7 @@ dsl_dataset_promote_check(void *arg, dmu_tx_t *tx)
if (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE) {
promote_rele(ddpa, FTAG);
return (EXDEV);
return (SET_ERROR(EXDEV));
}
/*
@ -1836,7 +1836,7 @@ dsl_dataset_promote_check(void *arg, dmu_tx_t *tx)
* the objset.
*/
if (dsl_dataset_long_held(ds)) {
err = EBUSY;
err = SET_ERROR(EBUSY);
goto out;
}
@ -1845,7 +1845,7 @@ dsl_dataset_promote_check(void *arg, dmu_tx_t *tx)
err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
if (err == 0) {
(void) strcpy(ddpa->err_ds, snap->ds->ds_snapname);
err = EEXIST;
err = SET_ERROR(EEXIST);
goto out;
}
if (err != ENOENT)
@ -2182,7 +2182,7 @@ promote_hold(dsl_dataset_promote_arg_t *ddpa, dsl_pool_t *dp, void *tag)
if (dsl_dataset_is_snapshot(ddpa->ddpa_clone) ||
!dsl_dir_is_clone(dd)) {
dsl_dataset_rele(ddpa->ddpa_clone, tag);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
error = snaplist_make(dp, 0, dd->dd_phys->dd_origin_obj,
@ -2270,30 +2270,30 @@ dsl_dataset_clone_swap_check_impl(dsl_dataset_t *clone,
/* they should both be heads */
if (dsl_dataset_is_snapshot(clone) ||
dsl_dataset_is_snapshot(origin_head))
return (EINVAL);
return (SET_ERROR(EINVAL));
/* the branch point should be just before them */
if (clone->ds_prev != origin_head->ds_prev)
return (EINVAL);
return (SET_ERROR(EINVAL));
/* clone should be the clone (unless they are unrelated) */
if (clone->ds_prev != NULL &&
clone->ds_prev != clone->ds_dir->dd_pool->dp_origin_snap &&
origin_head->ds_object !=
clone->ds_prev->ds_phys->ds_next_snap_obj)
return (EINVAL);
return (SET_ERROR(EINVAL));
/* the clone should be a child of the origin */
if (clone->ds_dir->dd_parent != origin_head->ds_dir)
return (EINVAL);
return (SET_ERROR(EINVAL));
/* origin_head shouldn't be modified unless 'force' */
if (!force && dsl_dataset_modified_since_lastsnap(origin_head))
return (ETXTBSY);
return (SET_ERROR(ETXTBSY));
/* origin_head should have no long holds (e.g. is not mounted) */
if (dsl_dataset_long_held(origin_head))
return (EBUSY);
return (SET_ERROR(EBUSY));
/* check amount of any unconsumed refreservation */
unused_refres_delta =
@ -2305,12 +2305,12 @@ dsl_dataset_clone_swap_check_impl(dsl_dataset_t *clone,
if (unused_refres_delta > 0 &&
unused_refres_delta >
dsl_dir_space_available(origin_head->ds_dir, NULL, 0, TRUE))
return (ENOSPC);
return (SET_ERROR(ENOSPC));
/* clone can't be over the head's refquota */
if (origin_head->ds_quota != 0 &&
clone->ds_phys->ds_referenced_bytes > origin_head->ds_quota)
return (EDQUOT);
return (SET_ERROR(EDQUOT));
return (0);
}
@ -2505,9 +2505,9 @@ dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
if (ds->ds_phys->ds_referenced_bytes + inflight >= ds->ds_quota) {
if (inflight > 0 ||
ds->ds_phys->ds_referenced_bytes < ds->ds_quota)
error = ERESTART;
error = SET_ERROR(ERESTART);
else
error = EDQUOT;
error = SET_ERROR(EDQUOT);
}
mutex_exit(&ds->ds_lock);
@ -2532,7 +2532,7 @@ dsl_dataset_set_refquota_check(void *arg, dmu_tx_t *tx)
uint64_t newval;
if (spa_version(dp->dp_spa) < SPA_VERSION_REFQUOTA)
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
if (error != 0)
@ -2540,7 +2540,7 @@ dsl_dataset_set_refquota_check(void *arg, dmu_tx_t *tx)
if (dsl_dataset_is_snapshot(ds)) {
dsl_dataset_rele(ds, FTAG);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
error = dsl_prop_predict(ds->ds_dir,
@ -2559,7 +2559,7 @@ dsl_dataset_set_refquota_check(void *arg, dmu_tx_t *tx)
if (newval < ds->ds_phys->ds_referenced_bytes ||
newval < ds->ds_reserved) {
dsl_dataset_rele(ds, FTAG);
return (ENOSPC);
return (SET_ERROR(ENOSPC));
}
dsl_dataset_rele(ds, FTAG);
@ -2615,7 +2615,7 @@ dsl_dataset_set_refreservation_check(void *arg, dmu_tx_t *tx)
uint64_t newval, unique;
if (spa_version(dp->dp_spa) < SPA_VERSION_REFRESERVATION)
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
if (error != 0)
@ -2623,7 +2623,7 @@ dsl_dataset_set_refreservation_check(void *arg, dmu_tx_t *tx)
if (dsl_dataset_is_snapshot(ds)) {
dsl_dataset_rele(ds, FTAG);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
error = dsl_prop_predict(ds->ds_dir,
@ -2657,7 +2657,7 @@ dsl_dataset_set_refreservation_check(void *arg, dmu_tx_t *tx)
dsl_dir_space_available(ds->ds_dir, NULL, 0, B_TRUE) ||
(ds->ds_quota > 0 && newval > ds->ds_quota)) {
dsl_dataset_rele(ds, FTAG);
return (ENOSPC);
return (SET_ERROR(ENOSPC));
}
}
@ -2802,7 +2802,7 @@ dsl_dataset_space_written(dsl_dataset_t *oldsnap, dsl_dataset_t *new,
if (snap != new)
dsl_dataset_rele(snap, FTAG);
if (snapobj == 0) {
err = EINVAL;
err = SET_ERROR(EINVAL);
break;
}
@ -2844,7 +2844,7 @@ dsl_dataset_space_wouldfree(dsl_dataset_t *firstsnap,
if (firstsnap->ds_dir != lastsnap->ds_dir ||
firstsnap->ds_phys->ds_creation_txg >
lastsnap->ds_phys->ds_creation_txg)
return (EINVAL);
return (SET_ERROR(EINVAL));
*usedp = *compp = *uncompp = 0;

View File

@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
/*
@ -107,7 +107,7 @@ dsl_deleg_can_allow(char *ddname, nvlist_t *nvp, cred_t *cr)
const char *perm = nvpair_name(permpair);
if (strcmp(perm, ZFS_DELEG_PERM_ALLOW) == 0)
return (EPERM);
return (SET_ERROR(EPERM));
if ((error = dsl_deleg_access(ddname, perm, cr)) != 0)
return (error);
@ -139,10 +139,10 @@ dsl_deleg_can_unallow(char *ddname, nvlist_t *nvp, cred_t *cr)
if (type != ZFS_DELEG_USER &&
type != ZFS_DELEG_USER_SETS)
return (EPERM);
return (SET_ERROR(EPERM));
if (strcmp(idstr, &nvpair_name(whopair)[3]) != 0)
return (EPERM);
return (SET_ERROR(EPERM));
}
return (0);
}
@ -261,7 +261,7 @@ dsl_deleg_check(void *arg, dmu_tx_t *tx)
if (spa_version(dmu_tx_pool(tx)->dp_spa) <
SPA_VERSION_DELEGATED_PERMS) {
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
error = dsl_dir_hold(dmu_tx_pool(tx), dda->dda_name, FTAG, &dd, NULL);
@ -426,7 +426,7 @@ dsl_check_access(objset_t *mos, uint64_t zapobj,
if (error == 0) {
error = zap_lookup(mos, jumpobj, perm, 8, 1, &zero);
if (error == ENOENT)
error = EPERM;
error = SET_ERROR(EPERM);
}
return (error);
}
@ -471,7 +471,7 @@ dsl_check_user_access(objset_t *mos, uint64_t zapobj, const char *perm,
return (0);
}
return (EPERM);
return (SET_ERROR(EPERM));
}
/*
@ -564,11 +564,11 @@ dsl_deleg_access_impl(dsl_dataset_t *ds, const char *perm, cred_t *cr)
mos = dp->dp_meta_objset;
if (dsl_delegation_on(mos) == B_FALSE)
return (ECANCELED);
return (SET_ERROR(ECANCELED));
if (spa_version(dmu_objset_spa(dp->dp_meta_objset)) <
SPA_VERSION_DELEGATED_PERMS)
return (EPERM);
return (SET_ERROR(EPERM));
if (dsl_dataset_is_snapshot(ds)) {
/*
@ -642,7 +642,7 @@ again:
if (error == 0)
goto success;
}
error = EPERM;
error = SET_ERROR(EPERM);
success:
cookie = NULL;

View File

@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@ -52,10 +52,10 @@ static int
dsl_destroy_snapshot_check_impl(dsl_dataset_t *ds, boolean_t defer)
{
if (!dsl_dataset_is_snapshot(ds))
return (EINVAL);
return (SET_ERROR(EINVAL));
if (dsl_dataset_long_held(ds))
return (EBUSY);
return (SET_ERROR(EBUSY));
/*
* Only allow deferred destroy on pools that support it.
@ -64,7 +64,7 @@ dsl_destroy_snapshot_check_impl(dsl_dataset_t *ds, boolean_t defer)
if (defer) {
if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
SPA_VERSION_USERREFS)
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
return (0);
}
@ -73,13 +73,13 @@ dsl_destroy_snapshot_check_impl(dsl_dataset_t *ds, boolean_t defer)
* we can't destroy it yet.
*/
if (ds->ds_userrefs > 0)
return (EBUSY);
return (SET_ERROR(EBUSY));
/*
* Can't delete a branch point.
*/
if (ds->ds_phys->ds_num_children > 1)
return (EEXIST);
return (SET_ERROR(EEXIST));
return (0);
}
@ -594,10 +594,10 @@ dsl_destroy_head_check_impl(dsl_dataset_t *ds, int expected_holds)
objset_t *mos;
if (dsl_dataset_is_snapshot(ds))
return (EINVAL);
return (SET_ERROR(EINVAL));
if (refcount_count(&ds->ds_longholds) != expected_holds)
return (EBUSY);
return (SET_ERROR(EBUSY));
mos = ds->ds_dir->dd_pool->dp_meta_objset;
@ -608,7 +608,7 @@ dsl_destroy_head_check_impl(dsl_dataset_t *ds, int expected_holds)
*/
if (ds->ds_prev != NULL &&
ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
return (EBUSY);
return (SET_ERROR(EBUSY));
/*
* Can't delete if there are children of this fs.
@ -618,14 +618,14 @@ dsl_destroy_head_check_impl(dsl_dataset_t *ds, int expected_holds)
if (error != 0)
return (error);
if (count != 0)
return (EEXIST);
return (SET_ERROR(EEXIST));
if (dsl_dir_is_clone(ds->ds_dir) && DS_IS_DEFER_DESTROY(ds->ds_prev) &&
ds->ds_prev->ds_phys->ds_num_children == 2 &&
ds->ds_prev->ds_userrefs == 0) {
/* We need to remove the origin snapshot as well. */
if (!refcount_is_zero(&ds->ds_prev->ds_longholds))
return (EBUSY);
return (SET_ERROR(EBUSY));
}
return (0);
}

View File

@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/dmu.h>
@ -249,12 +249,12 @@ getcomponent(const char *path, char *component, const char **nextp)
char *p;
if ((path == NULL) || (path[0] == '\0'))
return (ENOENT);
return (SET_ERROR(ENOENT));
/* This would be a good place to reserve some namespace... */
p = strpbrk(path, "/@");
if (p && (p[1] == '/' || p[1] == '@')) {
/* two separators in a row */
return (EINVAL);
return (SET_ERROR(EINVAL));
}
if (p == NULL || p == path) {
/*
@ -264,14 +264,14 @@ getcomponent(const char *path, char *component, const char **nextp)
*/
if (p != NULL &&
(p[0] != '@' || strpbrk(path+1, "/@") || p[1] == '\0'))
return (EINVAL);
return (SET_ERROR(EINVAL));
if (strlen(path) >= MAXNAMELEN)
return (ENAMETOOLONG);
return (SET_ERROR(ENAMETOOLONG));
(void) strcpy(component, path);
p = NULL;
} else if (p[0] == '/') {
if (p - path >= MAXNAMELEN)
return (ENAMETOOLONG);
return (SET_ERROR(ENAMETOOLONG));
(void) strncpy(component, path, p - path);
component[p - path] = '\0';
p++;
@ -281,9 +281,9 @@ getcomponent(const char *path, char *component, const char **nextp)
* any more slashes.
*/
if (strchr(path, '/'))
return (EINVAL);
return (SET_ERROR(EINVAL));
if (p - path >= MAXNAMELEN)
return (ENAMETOOLONG);
return (SET_ERROR(ENAMETOOLONG));
(void) strncpy(component, path, p - path);
component[p - path] = '\0';
} else {
@ -318,7 +318,7 @@ dsl_dir_hold(dsl_pool_t *dp, const char *name, void *tag,
/* Make sure the name is in the specified pool. */
spaname = spa_name(dp->dp_spa);
if (strcmp(buf, spaname) != 0) {
err = EINVAL;
err = SET_ERROR(EINVAL);
goto error;
}
@ -371,7 +371,7 @@ dsl_dir_hold(dsl_pool_t *dp, const char *name, void *tag,
/* bad path name */
dsl_dir_rele(dd, tag);
dprintf("next=%p (%s) tail=%p\n", next, next?next:"", tailp);
err = ENOENT;
err = SET_ERROR(ENOENT);
}
if (tailp != NULL)
*tailp = next;
@ -682,7 +682,7 @@ dsl_dir_tempreserve_impl(dsl_dir_t *dd, uint64_t asize, boolean_t netfree,
used_on_disk>>10, est_inflight>>10,
quota>>10, asize>>10, retval);
mutex_exit(&dd->dd_lock);
return (retval);
return (SET_ERROR(retval));
}
/* We need to up our estimated delta before dropping dd_lock */
@ -744,7 +744,7 @@ dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t lsize, uint64_t asize,
} else {
if (err == EAGAIN) {
txg_delay(dd->dd_pool, tx->tx_txg, 1);
err = ERESTART;
err = SET_ERROR(ERESTART);
}
dsl_pool_memory_pressure(dd->dd_pool);
}
@ -956,7 +956,7 @@ dsl_dir_set_quota_check(void *arg, dmu_tx_t *tx)
if ((dmu_tx_is_syncing(tx) || towrite == 0) &&
(newval < ds->ds_dir->dd_phys->dd_reserved ||
newval < ds->ds_dir->dd_phys->dd_used_bytes + towrite)) {
error = ENOSPC;
error = SET_ERROR(ENOSPC);
}
mutex_exit(&ds->ds_dir->dd_lock);
dsl_dataset_rele(ds, FTAG);
@ -1050,7 +1050,7 @@ dsl_dir_set_reservation_check(void *arg, dmu_tx_t *tx)
if (delta > avail ||
(dd->dd_phys->dd_quota > 0 &&
newval > dd->dd_phys->dd_quota))
error = ENOSPC;
error = SET_ERROR(ENOSPC);
}
dsl_dataset_rele(ds, FTAG);
@ -1157,7 +1157,7 @@ dsl_valid_rename(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
dsl_dataset_name(ds, namebuf);
if (strlen(namebuf) + *deltap >= MAXNAMELEN)
return (ENAMETOOLONG);
return (SET_ERROR(ENAMETOOLONG));
return (0);
}
@ -1188,14 +1188,14 @@ dsl_dir_rename_check(void *arg, dmu_tx_t *tx)
if (dd->dd_pool != newparent->dd_pool) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (ENXIO);
return (SET_ERROR(ENXIO));
}
/* new name should not already exist */
if (mynewname == NULL) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (EEXIST);
return (SET_ERROR(EEXIST));
}
/* if the name length is growing, validate child name lengths */
@ -1218,7 +1218,7 @@ dsl_dir_rename_check(void *arg, dmu_tx_t *tx)
if (closest_common_ancestor(dd, newparent) == dd) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
error = dsl_dir_transfer_possible(dd->dd_parent,
@ -1320,7 +1320,7 @@ dsl_dir_transfer_possible(dsl_dir_t *sdd, dsl_dir_t *tdd, uint64_t space)
adelta = would_change(sdd, -space, ancestor);
avail = dsl_dir_space_available(tdd, ancestor, adelta, FALSE);
if (avail < space)
return (ENOSPC);
return (SET_ERROR(ENOSPC));
return (0);
}

View File

@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/dsl_pool.h>
@ -606,7 +606,7 @@ dsl_pool_tempreserve_space(dsl_pool_t *dp, uint64_t space, dmu_tx_t *tx)
if (reserved && reserved > write_limit) {
DMU_TX_STAT_BUMP(dmu_tx_write_limit);
return (ERESTART);
return (SET_ERROR(ERESTART));
}
}
@ -890,7 +890,7 @@ dsl_pool_user_hold_rele_impl(dsl_pool_t *dp, uint64_t dsobj,
dsl_pool_user_hold_create_obj(dp, tx);
zapobj = dp->dp_tmp_userrefs_obj;
} else {
return (ENOENT);
return (SET_ERROR(ENOENT));
}
}

View File

@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@ -52,16 +52,16 @@ dodefault(const char *propname, int intsz, int numints, void *buf)
*/
if ((prop = zfs_name_to_prop(propname)) == ZPROP_INVAL ||
(zfs_prop_readonly(prop) && !zfs_prop_setonce(prop)))
return (ENOENT);
return (SET_ERROR(ENOENT));
if (zfs_prop_get_type(prop) == PROP_TYPE_STRING) {
if (intsz != 1)
return (EOVERFLOW);
return (SET_ERROR(EOVERFLOW));
(void) strncpy(buf, zfs_prop_default_string(prop),
numints);
} else {
if (intsz != 8 || numints < 1)
return (EOVERFLOW);
return (SET_ERROR(EOVERFLOW));
*(uint64_t *)buf = zfs_prop_default_numeric(prop);
}
@ -144,7 +144,7 @@ dsl_prop_get_dd(dsl_dir_t *dd, const char *propname,
* at the end of the loop (instead of at the beginning) ensures
* that err has a valid post-loop value.
*/
err = ENOENT;
err = SET_ERROR(ENOENT);
}
if (err == ENOENT)
@ -400,7 +400,7 @@ dsl_prop_unregister(dsl_dataset_t *ds, const char *propname,
if (cbr == NULL) {
mutex_exit(&dd->dd_lock);
return (ENOMSG);
return (SET_ERROR(ENOMSG));
}
list_remove(&dd->dd_prop_cbs, cbr);
@ -749,7 +749,7 @@ dsl_props_set_check(void *arg, dmu_tx_t *tx)
while ((elem = nvlist_next_nvpair(dpsa->dpsa_props, elem)) != NULL) {
if (strlen(nvpair_name(elem)) >= ZAP_MAXNAMELEN) {
dsl_dataset_rele(ds, FTAG);
return (ENAMETOOLONG);
return (SET_ERROR(ENAMETOOLONG));
}
if (nvpair_type(elem) == DATA_TYPE_STRING) {
char *valstr = fnvpair_value_string(elem);
@ -764,7 +764,7 @@ dsl_props_set_check(void *arg, dmu_tx_t *tx)
if (dsl_dataset_is_snapshot(ds) && version < SPA_VERSION_SNAP_PROPS) {
dsl_dataset_rele(ds, FTAG);
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
dsl_dataset_rele(ds, FTAG);
return (0);


@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/dsl_scan.h>
@ -155,7 +155,7 @@ dsl_scan_setup_check(void *arg, dmu_tx_t *tx)
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
if (scn->scn_phys.scn_state == DSS_SCANNING)
return (EBUSY);
return (SET_ERROR(EBUSY));
return (0);
}
@ -316,7 +316,7 @@ dsl_scan_cancel_check(void *arg, dmu_tx_t *tx)
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
if (scn->scn_phys.scn_state != DSS_SCANNING)
return (ENOENT);
return (SET_ERROR(ENOENT));
return (0);
}
@ -1339,7 +1339,7 @@ dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
if (!scn->scn_is_bptree ||
(BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)) {
if (dsl_scan_free_should_pause(scn))
return (ERESTART);
return (SET_ERROR(ERESTART));
}
zio_nowait(zio_free_sync(scn->scn_zio_root, scn->scn_dp->dp_spa,


@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/dmu.h>
@ -155,7 +155,7 @@ dsl_sync_task_sync(dsl_sync_task_t *dst, dmu_tx_t *tx)
used = dp->dp_root_dir->dd_phys->dd_used_bytes;
/* MOS space is triple-dittoed, so we multiply by 3. */
if (dst->dst_space > 0 && used + dst->dst_space * 3 > quota) {
dst->dst_error = ENOSPC;
dst->dst_error = SET_ERROR(ENOSPC);
if (dst->dst_nowaiter)
kmem_free(dst, sizeof (*dst));
return;


@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@ -67,7 +67,7 @@ dsl_dataset_user_hold_check_one(dsl_dataset_t *ds, const char *htag,
error = zap_lookup(mos, ds->ds_phys->ds_userrefs_obj,
htag, 8, 1, &value);
if (error == 0)
error = EEXIST;
error = SET_ERROR(EEXIST);
else if (error == ENOENT)
error = 0;
}
@ -86,7 +86,7 @@ dsl_dataset_user_hold_check(void *arg, dmu_tx_t *tx)
int rv = 0;
if (spa_version(dp->dp_spa) < SPA_VERSION_USERREFS)
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
for (pair = nvlist_next_nvpair(dduha->dduha_holds, NULL); pair != NULL;
pair = nvlist_next_nvpair(dduha->dduha_holds, pair)) {
@ -96,7 +96,7 @@ dsl_dataset_user_hold_check(void *arg, dmu_tx_t *tx)
/* must be a snapshot */
if (strchr(nvpair_name(pair), '@') == NULL)
error = EINVAL;
error = SET_ERROR(EINVAL);
if (error == 0)
error = nvpair_value_string(pair, &htag);
@ -218,11 +218,11 @@ dsl_dataset_user_release_check_one(dsl_dataset_t *ds,
*todelete = B_FALSE;
if (!dsl_dataset_is_snapshot(ds))
return (EINVAL);
return (SET_ERROR(EINVAL));
zapobj = ds->ds_phys->ds_userrefs_obj;
if (zapobj == 0)
return (ESRCH);
return (SET_ERROR(ESRCH));
for (pair = nvlist_next_nvpair(holds, NULL); pair != NULL;
pair = nvlist_next_nvpair(holds, pair)) {
@ -230,7 +230,7 @@ dsl_dataset_user_release_check_one(dsl_dataset_t *ds,
uint64_t tmp;
error = zap_lookup(mos, zapobj, nvpair_name(pair), 8, 1, &tmp);
if (error == ENOENT)
error = ESRCH;
error = SET_ERROR(ESRCH);
if (error != 0)
return (error);
numholds++;
@ -241,7 +241,7 @@ dsl_dataset_user_release_check_one(dsl_dataset_t *ds,
/* we need to destroy the snapshot as well */
if (dsl_dataset_long_held(ds))
return (EBUSY);
return (SET_ERROR(EBUSY));
*todelete = B_TRUE;
}
return (0);
@ -267,7 +267,7 @@ dsl_dataset_user_release_check(void *arg, dmu_tx_t *tx)
error = nvpair_value_nvlist(pair, &holds);
if (error != 0)
return (EINVAL);
return (SET_ERROR(EINVAL));
error = dsl_dataset_hold(dp, name, FTAG, &ds);
if (error == 0) {


@ -20,7 +20,8 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
*/
#include <sys/zfs_context.h>
@ -1529,7 +1530,7 @@ metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
* For testing, make some blocks above a certain size be gang blocks.
*/
if (psize >= metaslab_gang_bang && (ddi_get_lbolt() & 3) == 0)
return (ENOSPC);
return (SET_ERROR(ENOSPC));
if (flags & METASLAB_FASTWRITE)
mutex_enter(&mc->mc_fastwrite_lock);
@ -1712,7 +1713,8 @@ next:
if (flags & METASLAB_FASTWRITE)
mutex_exit(&mc->mc_fastwrite_lock);
return (ENOSPC);
return (SET_ERROR(ENOSPC));
}
/*
@ -1781,7 +1783,7 @@ metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
(offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
return (ENXIO);
return (SET_ERROR(ENXIO));
msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
@ -1794,7 +1796,7 @@ metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);
if (error == 0 && !space_map_contains(msp->ms_map, offset, size))
error = ENOENT;
error = SET_ERROR(ENOENT);
if (error || txg == 0) { /* txg == 0 indicates dry run */
mutex_exit(&msp->ms_lock);
@ -1829,7 +1831,7 @@ metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
if (mc->mc_rotor == NULL) { /* no vdevs in this class */
spa_config_exit(spa, SCL_ALLOC, FTAG);
return (ENOSPC);
return (SET_ERROR(ENOSPC));
}
ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));


@ -21,7 +21,7 @@
/*
* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@ -390,7 +390,7 @@ sa_attr_op(sa_handle_t *hdl, sa_bulk_attr_t *bulk, int count,
switch (data_op) {
case SA_LOOKUP:
if (bulk[i].sa_addr == NULL)
return (ENOENT);
return (SET_ERROR(ENOENT));
if (bulk[i].sa_data) {
SA_COPY_DATA(bulk[i].sa_data_func,
bulk[i].sa_addr, bulk[i].sa_data,
@ -522,7 +522,7 @@ sa_resize_spill(sa_handle_t *hdl, uint32_t size, dmu_tx_t *tx)
blocksize = SPA_MINBLOCKSIZE;
} else if (size > SPA_MAXBLOCKSIZE) {
ASSERT(0);
return (EFBIG);
return (SET_ERROR(EFBIG));
} else {
blocksize = P2ROUNDUP_TYPED(size, SPA_MINBLOCKSIZE, uint32_t);
}
@ -696,7 +696,7 @@ sa_build_layouts(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc, int attr_count,
SA_BONUS, &i, &used, &spilling);
if (used > SPA_MAXBLOCKSIZE)
return (EFBIG);
return (SET_ERROR(EFBIG));
VERIFY(0 == dmu_set_bonus(hdl->sa_bonus, spilling ?
MIN(DN_MAX_BONUSLEN - sizeof (blkptr_t), used + hdrsize) :
@ -720,7 +720,7 @@ sa_build_layouts(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc, int attr_count,
&spill_used, &dummy);
if (spill_used > SPA_MAXBLOCKSIZE)
return (EFBIG);
return (SET_ERROR(EFBIG));
buf_space = hdl->sa_spill->db_size - spillhdrsize;
if (BUF_SPACE_NEEDED(spill_used, spillhdrsize) >
@ -877,7 +877,7 @@ sa_attr_table_setup(objset_t *os, sa_attr_reg_t *reg_attrs, int count)
*/
if (error || (error == 0 && sa_attr_count == 0)) {
if (error == 0)
error = EINVAL;
error = SET_ERROR(EINVAL);
goto bail;
}
sa_reg_count = sa_attr_count;
@ -908,7 +908,7 @@ sa_attr_table_setup(objset_t *os, sa_attr_reg_t *reg_attrs, int count)
error = zap_lookup(os, sa->sa_reg_attr_obj,
reg_attrs[i].sa_name, 8, 1, &attr_value);
else
error = ENOENT;
error = SET_ERROR(ENOENT);
switch (error) {
case ENOENT:
sa->sa_user_table[i] = (sa_attr_type_t)sa_attr_count;
@ -1067,7 +1067,7 @@ sa_setup(objset_t *os, uint64_t sa_obj, sa_attr_reg_t *reg_attrs, int count,
*/
if (error || (error == 0 && layout_count == 0)) {
if (error == 0)
error = EINVAL;
error = SET_ERROR(EINVAL);
goto fail;
}


@ -21,7 +21,7 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
* Copyright 2013 Nexenta Systems, Inc. All rights reserved.
*/
@ -405,7 +405,7 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
switch ((int)prop) {
case ZPROP_INVAL:
if (!zpool_prop_feature(propname)) {
error = EINVAL;
error = SET_ERROR(EINVAL);
break;
}
@ -413,23 +413,23 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
* Sanitize the input.
*/
if (nvpair_type(elem) != DATA_TYPE_UINT64) {
error = EINVAL;
error = SET_ERROR(EINVAL);
break;
}
if (nvpair_value_uint64(elem, &intval) != 0) {
error = EINVAL;
error = SET_ERROR(EINVAL);
break;
}
if (intval != 0) {
error = EINVAL;
error = SET_ERROR(EINVAL);
break;
}
fname = strchr(propname, '@') + 1;
if (zfeature_lookup_name(fname, NULL) != 0) {
error = EINVAL;
error = SET_ERROR(EINVAL);
break;
}
@ -442,7 +442,7 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
(intval < spa_version(spa) ||
intval > SPA_VERSION_BEFORE_FEATURES ||
has_feature))
error = EINVAL;
error = SET_ERROR(EINVAL);
break;
case ZPOOL_PROP_DELEGATION:
@ -451,7 +451,7 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
case ZPOOL_PROP_AUTOEXPAND:
error = nvpair_value_uint64(elem, &intval);
if (!error && intval > 1)
error = EINVAL;
error = SET_ERROR(EINVAL);
break;
case ZPOOL_PROP_BOOTFS:
@ -461,7 +461,7 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
* the bootfs property cannot be set.
*/
if (spa_version(spa) < SPA_VERSION_BOOTFS) {
error = ENOTSUP;
error = SET_ERROR(ENOTSUP);
break;
}
@ -469,7 +469,7 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
* Make sure the vdev config is bootable
*/
if (!vdev_is_bootable(spa->spa_root_vdev)) {
error = ENOTSUP;
error = SET_ERROR(ENOTSUP);
break;
}
@ -493,13 +493,13 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
/* Must be ZPL and not gzip compressed. */
if (dmu_objset_type(os) != DMU_OST_ZFS) {
error = ENOTSUP;
error = SET_ERROR(ENOTSUP);
} else if ((error =
dsl_prop_get_int_ds(dmu_objset_ds(os),
zfs_prop_to_name(ZFS_PROP_COMPRESSION),
&compress)) == 0 &&
!BOOTFS_COMPRESS_VALID(compress)) {
error = ENOTSUP;
error = SET_ERROR(ENOTSUP);
} else {
objnum = dmu_objset_id(os);
}
@ -511,7 +511,7 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
error = nvpair_value_uint64(elem, &intval);
if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
intval > ZIO_FAILURE_MODE_PANIC))
error = EINVAL;
error = SET_ERROR(EINVAL);
/*
* This is a special case which only occurs when
@ -525,7 +525,7 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
*/
if (!error && spa_suspended(spa)) {
spa->spa_failmode = intval;
error = EIO;
error = SET_ERROR(EIO);
}
break;
@ -540,7 +540,7 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
break;
if (strval[0] != '/') {
error = EINVAL;
error = SET_ERROR(EINVAL);
break;
}
@ -549,7 +549,7 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
strcmp(slash, "/..") == 0)
error = EINVAL;
error = SET_ERROR(EINVAL);
break;
case ZPOOL_PROP_COMMENT:
@ -557,23 +557,23 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
break;
for (check = strval; *check != '\0'; check++) {
if (!isprint(*check)) {
error = EINVAL;
error = SET_ERROR(EINVAL);
break;
}
check++;
}
if (strlen(strval) > ZPROP_MAX_COMMENT)
error = E2BIG;
error = SET_ERROR(E2BIG);
break;
case ZPOOL_PROP_DEDUPDITTO:
if (spa_version(spa) < SPA_VERSION_DEDUP)
error = ENOTSUP;
error = SET_ERROR(ENOTSUP);
else
error = nvpair_value_uint64(elem, &intval);
if (error == 0 &&
intval != 0 && intval < ZIO_DEDUPDITTO_MIN)
error = EINVAL;
error = SET_ERROR(EINVAL);
break;
default:
@ -708,7 +708,7 @@ spa_change_guid_check(void *arg, dmu_tx_t *tx)
spa_config_exit(spa, SCL_STATE, FTAG);
if (vdev_state != VDEV_STATE_HEALTHY)
return (ENXIO);
return (SET_ERROR(ENXIO));
ASSERT3U(spa_guid(spa), !=, *newguid);
@ -1199,7 +1199,7 @@ spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
if (error) {
vdev_free(*vdp);
*vdp = NULL;
return (EINVAL);
return (SET_ERROR(EINVAL));
}
for (c = 0; c < children; c++) {
@ -1921,7 +1921,7 @@ spa_load_verify(spa_t *spa)
if (error) {
if (error != ENXIO && error != EIO)
error = EIO;
error = SET_ERROR(EIO);
return (error);
}
@ -2049,7 +2049,7 @@ spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type,
nvlist_t *nvl;
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid))
return (EINVAL);
return (SET_ERROR(EINVAL));
ASSERT(spa->spa_comment == NULL);
if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
@ -2068,7 +2068,7 @@ spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type,
if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
spa_guid_exists(pool_guid, 0)) {
error = EEXIST;
error = SET_ERROR(EEXIST);
} else {
spa->spa_config_guid = pool_guid;
@ -2135,7 +2135,7 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
spa->spa_load_state = state;
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot))
return (EINVAL);
return (SET_ERROR(EINVAL));
parse = (type == SPA_IMPORT_EXISTING ?
VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT);
@ -2195,7 +2195,7 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
return (error);
if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
return (ENXIO);
return (SET_ERROR(ENXIO));
}
/*
@ -2431,7 +2431,7 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
"See: http://zfsonlinux.org/msg/ZFS-8000-EY",
spa_name(spa), hostname,
(unsigned long)hostid);
return (EBADF);
return (SET_ERROR(EBADF));
}
}
if (nvlist_lookup_nvlist(spa->spa_config,
@ -2620,7 +2620,7 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
* more toplevel vdevs are faulted.
*/
if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
return (ENXIO);
return (SET_ERROR(ENXIO));
if (spa_check_logs(spa)) {
*ereport = FM_EREPORT_ZFS_LOG_REPLAY;
@ -2884,7 +2884,7 @@ spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
if ((spa = spa_lookup(pool)) == NULL) {
if (locked)
mutex_exit(&spa_namespace_lock);
return (ENOENT);
return (SET_ERROR(ENOENT));
}
if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
@ -2919,7 +2919,7 @@ spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
spa_remove(spa);
if (locked)
mutex_exit(&spa_namespace_lock);
return (ENOENT);
return (SET_ERROR(ENOENT));
}
if (error) {
@ -3255,14 +3255,14 @@ spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
return (0);
if (ndev == 0)
return (EINVAL);
return (SET_ERROR(EINVAL));
/*
* Make sure the pool is formatted with a version that supports this
* device type.
*/
if (spa_version(spa) < version)
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
/*
* Set the pending device list so we correctly handle device in-use
@ -3278,7 +3278,7 @@ spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
if (!vd->vdev_ops->vdev_op_leaf) {
vdev_free(vd);
error = EINVAL;
error = SET_ERROR(EINVAL);
goto out;
}
@ -3289,7 +3289,7 @@ spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
#ifdef _KERNEL
if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) &&
strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) {
error = ENOTBLK;
error = SET_ERROR(ENOTBLK);
vdev_free(vd);
goto out;
}
@ -3430,7 +3430,7 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
mutex_enter(&spa_namespace_lock);
if (spa_lookup(pool) != NULL) {
mutex_exit(&spa_namespace_lock);
return (EEXIST);
return (SET_ERROR(EEXIST));
}
/*
@ -3483,7 +3483,7 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
ASSERT(error != 0 || spa->spa_root_vdev == rvd);
if (error == 0 && !zfs_allocatable_devs(nvroot))
error = EINVAL;
error = SET_ERROR(EINVAL);
if (error == 0 &&
(error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
@ -3757,7 +3757,7 @@ spa_import_rootpool(char *devpath, char *devid)
if (config == NULL) {
cmn_err(CE_NOTE, "Cannot read the pool label from '%s'",
devpath);
return (EIO);
return (SET_ERROR(EIO));
}
VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
@ -3800,7 +3800,7 @@ spa_import_rootpool(char *devpath, char *devid)
if ((bvd = vdev_lookup_by_guid(rvd, guid)) == NULL) {
cmn_err(CE_NOTE, "Can not find the boot vdev for guid %llu",
(u_longlong_t)guid);
error = ENOENT;
error = SET_ERROR(ENOENT);
goto out;
}
@ -3812,7 +3812,7 @@ spa_import_rootpool(char *devpath, char *devid)
if (avd != bvd) {
cmn_err(CE_NOTE, "The boot device is 'degraded'. Please "
"try booting from '%s'", avd->vdev_path);
error = EINVAL;
error = SET_ERROR(EINVAL);
goto out;
}
@ -3826,7 +3826,7 @@ spa_import_rootpool(char *devpath, char *devid)
"try booting from '%s'",
bvd->vdev_parent->
vdev_child[bvd->vdev_parent->vdev_children - 1]->vdev_path);
error = EINVAL;
error = SET_ERROR(EINVAL);
goto out;
}
@ -3866,7 +3866,7 @@ spa_import(char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
mutex_enter(&spa_namespace_lock);
if (spa_lookup(pool) != NULL) {
mutex_exit(&spa_namespace_lock);
return (EEXIST);
return (SET_ERROR(EEXIST));
}
/*
@ -4143,12 +4143,12 @@ spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
*oldconfig = NULL;
if (!(spa_mode_global & FWRITE))
return (EROFS);
return (SET_ERROR(EROFS));
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(pool)) == NULL) {
mutex_exit(&spa_namespace_lock);
return (ENOENT);
return (SET_ERROR(ENOENT));
}
/*
@ -4182,7 +4182,7 @@ spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
new_state != POOL_STATE_UNINITIALIZED)) {
spa_async_resume(spa);
mutex_exit(&spa_namespace_lock);
return (EBUSY);
return (SET_ERROR(EBUSY));
}
/*
@ -4195,7 +4195,7 @@ spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
spa_has_active_shared_spare(spa)) {
spa_async_resume(spa);
mutex_exit(&spa_namespace_lock);
return (EXDEV);
return (SET_ERROR(EXDEV));
}
/*
@ -4391,12 +4391,12 @@ int
spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
{
uint64_t txg, dtl_max_txg;
ASSERTV(vdev_t *rvd = spa->spa_root_vdev;)
vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
vdev_ops_t *pvops;
char *oldvdpath, *newvdpath;
int newvd_isspare;
int error;
ASSERTV(vdev_t *rvd = spa->spa_root_vdev);
ASSERT(spa_writeable(spa));
@ -4591,13 +4591,12 @@ spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
{
uint64_t txg;
int error;
ASSERTV(vdev_t *rvd = spa->spa_root_vdev;)
vdev_t *vd, *pvd, *cvd, *tvd;
boolean_t unspare = B_FALSE;
uint64_t unspare_guid = 0;
char *vdpath;
int c, t;
ASSERTV(vdev_t *rvd = spa->spa_root_vdev);
ASSERT(spa_writeable(spa));
txg = spa_vdev_enter(spa);
@ -4905,7 +4904,7 @@ spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
spa->spa_root_vdev->vdev_child[c]->vdev_islog) {
continue;
} else {
error = EINVAL;
error = SET_ERROR(EINVAL);
break;
}
}
@ -4913,14 +4912,14 @@ spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
/* which disk is going to be split? */
if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID,
&glist[c]) != 0) {
error = EINVAL;
error = SET_ERROR(EINVAL);
break;
}
/* look it up in the spa */
vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE);
if (vml[c] == NULL) {
error = ENODEV;
error = SET_ERROR(ENODEV);
break;
}
@ -4934,12 +4933,12 @@ spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
vml[c]->vdev_children != 0 ||
vml[c]->vdev_state != VDEV_STATE_HEALTHY ||
c != spa->spa_root_vdev->vdev_child[c]->vdev_id) {
error = EINVAL;
error = SET_ERROR(EINVAL);
break;
}
if (vdev_dtl_required(vml[c])) {
error = EBUSY;
error = SET_ERROR(EBUSY);
break;
}
@ -5170,7 +5169,7 @@ spa_vdev_remove_evacuate(spa_t *spa, vdev_t *vd)
if (vd->vdev_stat.vs_alloc != 0)
error = spa_offline_log(spa);
} else {
error = ENOTSUP;
error = SET_ERROR(ENOTSUP);
}
if (error)
@ -5279,7 +5278,7 @@ spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
spa_load_spares(spa);
spa->spa_spares.sav_sync = B_TRUE;
} else {
error = EBUSY;
error = SET_ERROR(EBUSY);
}
} else if (spa->spa_l2cache.sav_vdevs != NULL &&
nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
@ -5339,12 +5338,12 @@ spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
/*
* Normal vdevs cannot be removed (yet).
*/
error = ENOTSUP;
error = SET_ERROR(ENOTSUP);
} else {
/*
* There is no vdev of any kind with the specified guid.
*/
error = ENOENT;
error = SET_ERROR(ENOENT);
}
if (!locked)
@ -5532,7 +5531,7 @@ spa_scan_stop(spa_t *spa)
{
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
if (dsl_scan_resilvering(spa->spa_dsl_pool))
return (EBUSY);
return (SET_ERROR(EBUSY));
return (dsl_scan_cancel(spa->spa_dsl_pool));
}
@ -5542,7 +5541,7 @@ spa_scan(spa_t *spa, pool_scan_func_t func)
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE)
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
/*
* If a resilver was requested, but there is no DTL on a


@ -20,6 +20,7 @@
*/
/*
* Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
/*
@ -175,7 +176,7 @@ process_error_log(spa_t *spa, uint64_t obj, void *addr, size_t *count)
if (*count == 0) {
zap_cursor_fini(&zc);
return (ENOMEM);
return (SET_ERROR(ENOMEM));
}
name_to_bookmark(za.za_name, &zb);
@ -183,7 +184,7 @@ process_error_log(spa_t *spa, uint64_t obj, void *addr, size_t *count)
if (copyout(&zb, (char *)addr +
(*count - 1) * sizeof (zbookmark_t),
sizeof (zbookmark_t)) != 0)
return (EFAULT);
return (SET_ERROR(EFAULT));
*count -= 1;
}
@ -201,12 +202,12 @@ process_error_list(avl_tree_t *list, void *addr, size_t *count)
for (se = avl_first(list); se != NULL; se = AVL_NEXT(list, se)) {
if (*count == 0)
return (ENOMEM);
return (SET_ERROR(ENOMEM));
if (copyout(&se->se_bookmark, (char *)addr +
(*count - 1) * sizeof (zbookmark_t),
sizeof (zbookmark_t)) != 0)
return (EFAULT);
return (SET_ERROR(EFAULT));
*count -= 1;
}


@ -21,7 +21,7 @@
/*
* Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/spa.h>
@ -309,7 +309,7 @@ spa_history_log_nvl(spa_t *spa, nvlist_t *nvl)
nvlist_t *nvarg;
if (spa_version(spa) < SPA_VERSION_ZPOOL_HISTORY || !spa_writeable(spa))
return (EINVAL);
return (SET_ERROR(EINVAL));
tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
err = dmu_tx_assign(tx, TXG_WAIT);
@ -353,7 +353,7 @@ spa_history_get(spa_t *spa, uint64_t *offp, uint64_t *len, char *buf)
* that's ok, just return ENOENT.
*/
if (!spa->spa_history)
return (ENOENT);
return (SET_ERROR(ENOENT));
/*
* The history is logged asynchronously, so when they request


@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
*/
@ -1758,7 +1758,7 @@ spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;
if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
return (ENOENT);
return (SET_ERROR(ENOENT));
bzero(ps, sizeof (pool_scan_stat_t));
/* data stored on disk */


@ -20,6 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@ -33,7 +34,7 @@ uberblock_verify(uberblock_t *ub)
byteswap_uint64_array(ub, sizeof (uberblock_t));
if (ub->ub_magic != UBERBLOCK_MAGIC)
return (EINVAL);
return (SET_ERROR(EINVAL));
return (0);
}


@ -22,7 +22,7 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@ -355,10 +355,10 @@ vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
return (EINVAL);
return (SET_ERROR(EINVAL));
if ((ops = vdev_getops(type)) == NULL)
return (EINVAL);
return (SET_ERROR(EINVAL));
/*
* If this is a load, get the vdev guid from the nvlist.
@ -369,26 +369,26 @@ vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
label_id != id)
return (EINVAL);
return (SET_ERROR(EINVAL));
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (EINVAL);
return (SET_ERROR(EINVAL));
} else if (alloctype == VDEV_ALLOC_SPARE) {
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (EINVAL);
return (SET_ERROR(EINVAL));
} else if (alloctype == VDEV_ALLOC_L2CACHE) {
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (EINVAL);
return (SET_ERROR(EINVAL));
} else if (alloctype == VDEV_ALLOC_ROOTPOOL) {
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (EINVAL);
return (SET_ERROR(EINVAL));
}
/*
* The first allocated vdev must be of type 'root'.
*/
if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
return (EINVAL);
return (SET_ERROR(EINVAL));
/*
* Determine whether we're a log vdev.
@ -396,10 +396,10 @@ vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
islog = 0;
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
if (ops == &vdev_hole_ops && spa_version(spa) < SPA_VERSION_HOLES)
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
/*
* Set the nparity property for RAID-Z vdevs.
@ -409,24 +409,24 @@ vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
&nparity) == 0) {
if (nparity == 0 || nparity > VDEV_RAIDZ_MAXPARITY)
return (EINVAL);
return (SET_ERROR(EINVAL));
/*
* Previous versions could only support 1 or 2 parity
* device.
*/
if (nparity > 1 &&
spa_version(spa) < SPA_VERSION_RAIDZ2)
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
if (nparity > 2 &&
spa_version(spa) < SPA_VERSION_RAIDZ3)
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
} else {
/*
* We require the parity to be specified for SPAs that
* support multiple parity levels.
*/
if (spa_version(spa) >= SPA_VERSION_RAIDZ2)
return (EINVAL);
return (SET_ERROR(EINVAL));
/*
* Otherwise, we default to 1 parity device for RAID-Z.
*/
@ -949,7 +949,7 @@ vdev_probe_done(zio_t *zio)
ASSERT(zio->io_error != 0);
zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
spa, vd, NULL, 0, 0);
zio->io_error = ENXIO;
zio->io_error = SET_ERROR(ENXIO);
}
mutex_enter(&vd->vdev_probe_lock);
@ -959,7 +959,7 @@ vdev_probe_done(zio_t *zio)
while ((pio = zio_walk_parents(zio)) != NULL)
if (!vdev_accessible(vd, pio))
pio->io_error = ENXIO;
pio->io_error = SET_ERROR(ENXIO);
kmem_free(vps, sizeof (*vps));
}
@ -1152,11 +1152,11 @@ vdev_open(vdev_t *vd)
vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
vd->vdev_label_aux);
return (ENXIO);
return (SET_ERROR(ENXIO));
} else if (vd->vdev_offline) {
ASSERT(vd->vdev_children == 0);
vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
return (ENXIO);
return (SET_ERROR(ENXIO));
}
error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize, &ashift);
@ -1191,7 +1191,7 @@ vdev_open(vdev_t *vd)
vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
vd->vdev_label_aux);
return (ENXIO);
return (SET_ERROR(ENXIO));
}
if (vd->vdev_degraded) {
@ -1223,7 +1223,7 @@ vdev_open(vdev_t *vd)
if (osize < SPA_MINDEVSIZE) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_TOO_SMALL);
return (EOVERFLOW);
return (SET_ERROR(EOVERFLOW));
}
psize = osize;
asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
@ -1234,7 +1234,7 @@ vdev_open(vdev_t *vd)
(VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_TOO_SMALL);
return (EOVERFLOW);
return (SET_ERROR(EOVERFLOW));
}
psize = 0;
asize = osize;
@ -1249,7 +1249,7 @@ vdev_open(vdev_t *vd)
if (asize < vd->vdev_min_asize) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_BAD_LABEL);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
if (vd->vdev_asize == 0) {
@ -1336,7 +1336,7 @@ vdev_validate(vdev_t *vd, boolean_t strict)
for (c = 0; c < vd->vdev_children; c++)
if (vdev_validate(vd->vdev_child[c], strict) != 0)
return (EBADF);
return (SET_ERROR(EBADF));
/*
* If the device has already failed, or was marked offline, don't do
@ -1422,7 +1422,7 @@ vdev_validate(vdev_t *vd, boolean_t strict)
if (!(spa->spa_import_flags & ZFS_IMPORT_VERBATIM) &&
spa_load_state(spa) == SPA_LOAD_OPEN &&
state != POOL_STATE_ACTIVE)
return (EBADF);
return (SET_ERROR(EBADF));
/*
* If we were able to open and validate a vdev that was


@ -22,6 +22,9 @@
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
@ -259,16 +262,16 @@ vdev_cache_read(zio_t *zio)
ASSERT(zio->io_type == ZIO_TYPE_READ);
if (zio->io_flags & ZIO_FLAG_DONT_CACHE)
return (EINVAL);
return (SET_ERROR(EINVAL));
if (zio->io_size > zfs_vdev_cache_max)
return (EOVERFLOW);
return (SET_ERROR(EOVERFLOW));
/*
* If the I/O straddles two or more cache blocks, don't cache it.
*/
if (P2BOUNDARY(zio->io_offset, zio->io_size, VCBS))
return (EXDEV);
return (SET_ERROR(EXDEV));
ASSERT(cache_phase + zio->io_size <= VCBS);
@ -282,7 +285,7 @@ vdev_cache_read(zio_t *zio)
if (ve != NULL) {
if (ve->ve_missed_update) {
mutex_exit(&vc->vc_lock);
return (ESTALE);
return (SET_ERROR(ESTALE));
}
if ((fio = ve->ve_fill_io) != NULL) {
@ -305,7 +308,7 @@ vdev_cache_read(zio_t *zio)
if (ve == NULL) {
mutex_exit(&vc->vc_lock);
return (ENOMEM);
return (SET_ERROR(ENOMEM));
}
fio = zio_vdev_delegated_io(zio->io_vd, cache_offset,


@ -23,6 +23,7 @@
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
* LLNL-CODE-403049.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@ -656,7 +657,7 @@ vdev_disk_io_start(zio_t *zio)
case ZIO_TYPE_IOCTL:
if (!vdev_readable(v)) {
zio->io_error = ENXIO;
zio->io_error = SET_ERROR(ENXIO);
return ZIO_PIPELINE_CONTINUE;
}
@ -667,7 +668,7 @@ vdev_disk_io_start(zio_t *zio)
break;
if (v->vdev_nowritecache) {
zio->io_error = ENOTSUP;
zio->io_error = SET_ERROR(ENOTSUP);
break;
}
@ -682,7 +683,7 @@ vdev_disk_io_start(zio_t *zio)
break;
default:
zio->io_error = ENOTSUP;
zio->io_error = SET_ERROR(ENOTSUP);
}
return ZIO_PIPELINE_CONTINUE;
@ -696,7 +697,7 @@ vdev_disk_io_start(zio_t *zio)
break;
default:
zio->io_error = ENOTSUP;
zio->io_error = SET_ERROR(ENOTSUP);
return ZIO_PIPELINE_CONTINUE;
}


@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@ -62,7 +62,7 @@ vdev_file_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
*/
if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') {
vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
return (EINVAL);
return (SET_ERROR(EINVAL));
}
/*
@ -100,7 +100,7 @@ vdev_file_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
*/
if (vp->v_type != VREG) {
vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
return (ENODEV);
return (SET_ERROR(ENODEV));
}
#endif
@ -154,7 +154,7 @@ vdev_file_io_strategy(void *arg)
0, RLIM64_INFINITY, kcred, &resid);
if (resid != 0 && zio->io_error == 0)
zio->io_error = ENOSPC;
zio->io_error = SET_ERROR(ENOSPC);
zio_interrupt(zio);
}
@ -169,7 +169,7 @@ vdev_file_io_start(zio_t *zio)
if (zio->io_type == ZIO_TYPE_IOCTL) {
/* XXPOLICY */
if (!vdev_readable(vd)) {
zio->io_error = ENXIO;
zio->io_error = SET_ERROR(ENXIO);
return (ZIO_PIPELINE_CONTINUE);
}
@ -179,7 +179,7 @@ vdev_file_io_start(zio_t *zio)
kcred, NULL);
break;
default:
zio->io_error = ENOTSUP;
zio->io_error = SET_ERROR(ENOTSUP);
}
return (ZIO_PIPELINE_CONTINUE);


@ -21,7 +21,7 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
/*
@ -668,14 +668,14 @@ vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason)
* Dead vdevs cannot be initialized.
*/
if (vdev_is_dead(vd))
return (EIO);
return (SET_ERROR(EIO));
/*
* Determine if the vdev is in use.
*/
if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_SPLIT &&
vdev_inuse(vd, crtxg, reason, &spare_guid, &l2cache_guid))
return (EBUSY);
return (SET_ERROR(EBUSY));
/*
* If this is a request to add or replace a spare or l2cache device
@ -1086,7 +1086,7 @@ vdev_label_sync_top_done(zio_t *zio)
uint64_t *good_writes = zio->io_private;
if (*good_writes == 0)
zio->io_error = EIO;
zio->io_error = SET_ERROR(EIO);
kmem_free(good_writes, sizeof (uint64_t));
}


@ -24,7 +24,7 @@
*/
/*
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@ -152,7 +152,7 @@ vdev_mirror_map_alloc(zio_t *zio)
continue;
if (!vdev_readable(mc->mc_vd)) {
mc->mc_error = ENXIO;
mc->mc_error = SET_ERROR(ENXIO);
mc->mc_tried = 1;
mc->mc_skipped = 1;
mc->mc_pending = INT_MAX;
@ -198,7 +198,7 @@ vdev_mirror_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
if (vd->vdev_children == 0) {
vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
return (EINVAL);
return (SET_ERROR(EINVAL));
}
vdev_open_children(vd);
@ -295,14 +295,14 @@ vdev_mirror_child_select(zio_t *zio)
if (mc->mc_tried || mc->mc_skipped)
continue;
if (!vdev_readable(mc->mc_vd)) {
mc->mc_error = ENXIO;
mc->mc_error = SET_ERROR(ENXIO);
mc->mc_tried = 1; /* don't even try */
mc->mc_skipped = 1;
continue;
}
if (!vdev_dtl_contains(mc->mc_vd, DTL_MISSING, txg, 1))
return (c);
mc->mc_error = ESTALE;
mc->mc_error = SET_ERROR(ESTALE);
mc->mc_skipped = 1;
mc->mc_speculative = 1;
}
@ -487,7 +487,7 @@ vdev_mirror_io_done(zio_t *zio)
!vdev_dtl_contains(mc->mc_vd, DTL_PARTIAL,
zio->io_txg, 1))
continue;
mc->mc_error = ESTALE;
mc->mc_error = SET_ERROR(ESTALE);
}
zio_nowait(zio_vdev_child_io(zio, zio->io_bp,


@ -24,7 +24,7 @@
*/
/*
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
/*
@ -69,7 +69,7 @@ vdev_missing_close(vdev_t *vd)
static int
vdev_missing_io_start(zio_t *zio)
{
zio->io_error = ENOTSUP;
zio->io_error = SET_ERROR(ENOTSUP);
return (ZIO_PIPELINE_CONTINUE);
}


@ -21,7 +21,7 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@ -1457,7 +1457,7 @@ vdev_raidz_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
if (nparity > VDEV_RAIDZ_MAXPARITY ||
vd->vdev_children < nparity + 1) {
vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
return (EINVAL);
return (SET_ERROR(EINVAL));
}
vdev_open_children(vd);
@ -1582,7 +1582,7 @@ vdev_raidz_io_start(zio_t *zio)
rm->rm_missingdata++;
else
rm->rm_missingparity++;
rc->rc_error = ENXIO;
rc->rc_error = SET_ERROR(ENXIO);
rc->rc_tried = 1; /* don't even try */
rc->rc_skipped = 1;
continue;
@ -1592,7 +1592,7 @@ vdev_raidz_io_start(zio_t *zio)
rm->rm_missingdata++;
else
rm->rm_missingparity++;
rc->rc_error = ESTALE;
rc->rc_error = SET_ERROR(ESTALE);
rc->rc_skipped = 1;
continue;
}
@ -1683,7 +1683,7 @@ raidz_parity_verify(zio_t *zio, raidz_map_t *rm)
continue;
if (bcmp(orig[c], rc->rc_data, rc->rc_size) != 0) {
raidz_checksum_error(zio, rc, orig[c]);
rc->rc_error = ECKSUM;
rc->rc_error = SET_ERROR(ECKSUM);
ret++;
}
zio_buf_free(orig[c], rc->rc_size);
@ -1807,7 +1807,7 @@ vdev_raidz_combrec(zio_t *zio, int total_errors, int data_errors)
if (rc->rc_tried)
raidz_checksum_error(zio, rc,
orig[i]);
rc->rc_error = ECKSUM;
rc->rc_error = SET_ERROR(ECKSUM);
}
ret = code;
@ -2083,7 +2083,7 @@ vdev_raidz_io_done(zio_t *zio)
* Start checksum ereports for all children which haven't
* failed, and the IO wasn't speculative.
*/
zio->io_error = ECKSUM;
zio->io_error = SET_ERROR(ECKSUM);
if (!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
for (c = 0; c < rm->rm_cols; c++) {


@ -24,7 +24,7 @@
*/
/*
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@ -63,7 +63,7 @@ vdev_root_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
if (vd->vdev_children == 0) {
vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
return (EINVAL);
return (SET_ERROR(EINVAL));
}
vdev_open_children(vd);


@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
/*
@ -325,7 +325,7 @@ zap_grow_ptrtbl(zap_t *zap, dmu_tx_t *tx)
* this is already an aberrant condition.
*/
if (zap->zap_f.zap_phys->zap_ptrtbl.zt_shift >= zap_hashbits(zap) - 2)
return (ENOSPC);
return (SET_ERROR(ENOSPC));
if (zap->zap_f.zap_phys->zap_ptrtbl.zt_numblks == 0) {
/*
@ -714,7 +714,7 @@ static int
fzap_checkname(zap_name_t *zn)
{
if (zn->zn_key_orig_numints * zn->zn_key_intlen > ZAP_MAXNAMELEN)
return (ENAMETOOLONG);
return (SET_ERROR(ENAMETOOLONG));
return (0);
}
@ -729,7 +729,7 @@ fzap_checksize(uint64_t integer_size, uint64_t num_integers)
case 8:
break;
default:
return (EINVAL);
return (SET_ERROR(EINVAL));
}
if (integer_size * num_integers > ZAP_MAXVALUELEN)
@ -805,7 +805,7 @@ fzap_add_cd(zap_name_t *zn,
retry:
err = zap_leaf_lookup(l, zn, &zeh);
if (err == 0) {
err = EEXIST;
err = SET_ERROR(EEXIST);
goto out;
}
if (err != ENOENT)
@ -996,7 +996,7 @@ zap_join(objset_t *os, uint64_t fromobj, uint64_t intoobj, dmu_tx_t *tx)
zap_cursor_retrieve(&zc, &za) == 0;
(void) zap_cursor_advance(&zc)) {
if (za.za_integer_length != 8 || za.za_num_integers != 1)
return (EINVAL);
return (SET_ERROR(EINVAL));
err = zap_add(os, intoobj, za.za_name,
8, 1, &za.za_first_integer, tx);
if (err)
@ -1018,7 +1018,7 @@ zap_join_key(objset_t *os, uint64_t fromobj, uint64_t intoobj,
zap_cursor_retrieve(&zc, &za) == 0;
(void) zap_cursor_advance(&zc)) {
if (za.za_integer_length != 8 || za.za_num_integers != 1)
return (EINVAL);
return (SET_ERROR(EINVAL));
err = zap_add(os, intoobj, za.za_name,
8, 1, &value, tx);
if (err)
@ -1042,7 +1042,7 @@ zap_join_increment(objset_t *os, uint64_t fromobj, uint64_t intoobj,
uint64_t delta = 0;
if (za.za_integer_length != 8 || za.za_num_integers != 1)
return (EINVAL);
return (SET_ERROR(EINVAL));
err = zap_lookup(os, intoobj, za.za_name, 8, 1, &delta);
if (err != 0 && err != ENOENT)
@ -1250,7 +1250,7 @@ fzap_cursor_move_to_key(zap_cursor_t *zc, zap_name_t *zn)
zap_entry_handle_t zeh;
if (zn->zn_key_orig_numints * zn->zn_key_intlen > ZAP_MAXNAMELEN)
return (ENAMETOOLONG);
return (SET_ERROR(ENAMETOOLONG));
err = zap_deref_leaf(zc->zc_zap, zn->zn_hash, NULL, RW_READER, &l);
if (err != 0)


@ -20,6 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
/*
@ -434,7 +435,7 @@ again:
goto again;
}
return (ENOENT);
return (SET_ERROR(ENOENT));
}
/* Return (h1,cd1 >= h2,cd2) */
@ -492,14 +493,14 @@ zap_entry_read(const zap_entry_handle_t *zeh,
ASSERT3U(le->le_type, ==, ZAP_CHUNK_ENTRY);
if (le->le_value_intlen > integer_size)
return (EINVAL);
return (SET_ERROR(EINVAL));
zap_leaf_array_read(zeh->zeh_leaf, le->le_value_chunk,
le->le_value_intlen, le->le_value_numints,
integer_size, num_integers, buf);
if (zeh->zeh_num_integers > num_integers)
return (EOVERFLOW);
return (SET_ERROR(EOVERFLOW));
return (0);
}
@ -520,7 +521,7 @@ zap_entry_read_name(zap_t *zap, const zap_entry_handle_t *zeh, uint16_t buflen,
le->le_name_numints, 1, buflen, buf);
}
if (le->le_name_numints > buflen)
return (EOVERFLOW);
return (SET_ERROR(EOVERFLOW));
return (0);
}
@ -536,7 +537,7 @@ zap_entry_update(zap_entry_handle_t *zeh,
ZAP_LEAF_ARRAY_NCHUNKS(le->le_value_numints * le->le_value_intlen);
if ((int)l->l_phys->l_hdr.lh_nfree < delta_chunks)
return (EAGAIN);
return (SET_ERROR(EAGAIN));
zap_leaf_array_free(l, &le->le_value_chunk);
le->le_value_chunk =
@ -626,7 +627,7 @@ zap_entry_create(zap_leaf_t *l, zap_name_t *zn, uint32_t cd,
}
if (l->l_phys->l_hdr.lh_nfree < numchunks)
return (EAGAIN);
return (SET_ERROR(EAGAIN));
/* make the entry */
chunk = zap_leaf_chunk_alloc(l);


@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zio.h>
@ -780,7 +780,7 @@ zap_lookup_norm(objset_t *os, uint64_t zapobj, const char *name,
zn = zap_name_alloc(zap, name, mt);
if (zn == NULL) {
zap_unlockdir(zap);
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
if (!zap->zap_ismicro) {
@ -789,12 +789,12 @@ zap_lookup_norm(objset_t *os, uint64_t zapobj, const char *name,
} else {
mze = mze_find(zn);
if (mze == NULL) {
err = ENOENT;
err = SET_ERROR(ENOENT);
} else {
if (num_integers < 1) {
err = EOVERFLOW;
err = SET_ERROR(EOVERFLOW);
} else if (integer_size != 8) {
err = EINVAL;
err = SET_ERROR(EINVAL);
} else {
*(uint64_t *)buf =
MZE_PHYS(zap, mze)->mze_value;
@ -826,7 +826,7 @@ zap_prefetch_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
zn = zap_name_alloc_uint64(zap, key, key_numints);
if (zn == NULL) {
zap_unlockdir(zap);
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
fzap_prefetch(zn);
@ -849,7 +849,7 @@ zap_lookup_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
zn = zap_name_alloc_uint64(zap, key, key_numints);
if (zn == NULL) {
zap_unlockdir(zap);
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
err = fzap_lookup(zn, integer_size, num_integers, buf,
@ -884,14 +884,14 @@ zap_length(objset_t *os, uint64_t zapobj, const char *name,
zn = zap_name_alloc(zap, name, MT_EXACT);
if (zn == NULL) {
zap_unlockdir(zap);
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
if (!zap->zap_ismicro) {
err = fzap_length(zn, integer_size, num_integers);
} else {
mze = mze_find(zn);
if (mze == NULL) {
err = ENOENT;
err = SET_ERROR(ENOENT);
} else {
if (integer_size)
*integer_size = 8;
@ -918,7 +918,7 @@ zap_length_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
zn = zap_name_alloc_uint64(zap, key, key_numints);
if (zn == NULL) {
zap_unlockdir(zap);
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
err = fzap_length(zn, integer_size, num_integers);
zap_name_free(zn);
@ -987,7 +987,7 @@ zap_add(objset_t *os, uint64_t zapobj, const char *key,
zn = zap_name_alloc(zap, key, MT_EXACT);
if (zn == NULL) {
zap_unlockdir(zap);
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
if (!zap->zap_ismicro) {
err = fzap_add(zn, integer_size, num_integers, val, tx);
@ -1001,7 +1001,7 @@ zap_add(objset_t *os, uint64_t zapobj, const char *key,
} else {
mze = mze_find(zn);
if (mze != NULL) {
err = EEXIST;
err = SET_ERROR(EEXIST);
} else {
mzap_addent(zn, *intval);
}
@ -1028,7 +1028,7 @@ zap_add_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
zn = zap_name_alloc_uint64(zap, key, key_numints);
if (zn == NULL) {
zap_unlockdir(zap);
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
err = fzap_add(zn, integer_size, num_integers, val, tx);
zap = zn->zn_zap; /* fzap_add() may change zap */
@ -1065,7 +1065,7 @@ zap_update(objset_t *os, uint64_t zapobj, const char *name,
zn = zap_name_alloc(zap, name, MT_EXACT);
if (zn == NULL) {
zap_unlockdir(zap);
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
if (!zap->zap_ismicro) {
err = fzap_update(zn, integer_size, num_integers, val, tx);
@ -1110,7 +1110,7 @@ zap_update_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
zn = zap_name_alloc_uint64(zap, key, key_numints);
if (zn == NULL) {
zap_unlockdir(zap);
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
err = fzap_update(zn, integer_size, num_integers, val, tx);
zap = zn->zn_zap; /* fzap_update() may change zap */
@ -1141,14 +1141,14 @@ zap_remove_norm(objset_t *os, uint64_t zapobj, const char *name,
zn = zap_name_alloc(zap, name, mt);
if (zn == NULL) {
zap_unlockdir(zap);
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
if (!zap->zap_ismicro) {
err = fzap_remove(zn, tx);
} else {
mze = mze_find(zn);
if (mze == NULL) {
err = ENOENT;
err = SET_ERROR(ENOENT);
} else {
zap->zap_m.zap_num_entries--;
bzero(&zap->zap_m.zap_phys->mz_chunk[mze->mze_chunkid],
@ -1175,7 +1175,7 @@ zap_remove_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
zn = zap_name_alloc_uint64(zap, key, key_numints);
if (zn == NULL) {
zap_unlockdir(zap);
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
err = fzap_remove(zn, tx);
zap_name_free(zn);
@ -1253,7 +1253,7 @@ zap_cursor_retrieve(zap_cursor_t *zc, zap_attribute_t *za)
mzap_ent_t *mze;
if (zc->zc_hash == -1ULL)
return (ENOENT);
return (SET_ERROR(ENOENT));
if (zc->zc_zap == NULL) {
int hb;
@ -1279,8 +1279,6 @@ zap_cursor_retrieve(zap_cursor_t *zc, zap_attribute_t *za)
if (!zc->zc_zap->zap_ismicro) {
err = fzap_cursor_retrieve(zc->zc_zap, zc, za);
} else {
err = ENOENT;
mze_tofind.mze_hash = zc->zc_hash;
mze_tofind.mze_cd = zc->zc_cd;
@ -1303,6 +1301,7 @@ zap_cursor_retrieve(zap_cursor_t *zc, zap_attribute_t *za)
err = 0;
} else {
zc->zc_hash = -1ULL;
err = SET_ERROR(ENOENT);
}
}
rw_exit(&zc->zc_zap->zap_rwlock);
@ -1336,7 +1335,7 @@ zap_cursor_move_to_key(zap_cursor_t *zc, const char *name, matchtype_t mt)
zn = zap_name_alloc(zc->zc_zap, name, mt);
if (zn == NULL) {
rw_exit(&zc->zc_zap->zap_rwlock);
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
if (!zc->zc_zap->zap_ismicro) {
@ -1344,7 +1343,7 @@ zap_cursor_move_to_key(zap_cursor_t *zc, const char *name, matchtype_t mt)
} else {
mze = mze_find(zn);
if (mze == NULL) {
err = ENOENT;
err = SET_ERROR(ENOENT);
goto out;
}
zc->zc_hash = mze->mze_hash;


@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@ -234,13 +234,13 @@ feature_get_refcount(objset_t *os, uint64_t read_obj, uint64_t write_obj,
* have been allocated yet. Act as though all features are disabled.
*/
if (zapobj == 0)
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
err = zap_lookup(os, zapobj, feature->fi_guid, sizeof (uint64_t), 1,
&refcount);
if (err != 0) {
if (err == ENOENT)
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
else
return (err);
}
@ -281,16 +281,16 @@ feature_do_action(objset_t *os, uint64_t read_obj, uint64_t write_obj,
break;
case FEATURE_ACTION_INCR:
if (error == ENOENT)
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
if (refcount == UINT64_MAX)
return (EOVERFLOW);
return (SET_ERROR(EOVERFLOW));
refcount++;
break;
case FEATURE_ACTION_DECR:
if (error == ENOENT)
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
if (refcount == 0)
return (EOVERFLOW);
return (SET_ERROR(EOVERFLOW));
refcount--;
break;
default:


@ -20,6 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
@ -676,7 +677,7 @@ zfs_copy_ace_2_fuid(zfs_sb_t *zsb, umode_t obj_mode, zfs_acl_t *aclp,
*/
if (zfs_ace_valid(obj_mode, aclp, aceptr->z_hdr.z_type,
aceptr->z_hdr.z_flags) != B_TRUE)
return (EINVAL);
return (SET_ERROR(EINVAL));
switch (acep->a_type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
@ -783,7 +784,7 @@ zfs_copy_ace_2_oldace(umode_t obj_mode, zfs_acl_t *aclp, ace_t *acep,
*/
if (zfs_ace_valid(obj_mode, aclp, aceptr->z_type,
aceptr->z_flags) != B_TRUE)
return (EINVAL);
return (SET_ERROR(EINVAL));
}
*size = (caddr_t)aceptr - (caddr_t)z_acl;
return (0);
@ -1117,7 +1118,7 @@ zfs_acl_node_read(znode_t *zp, boolean_t have_lock, zfs_acl_t **aclpp,
zfs_acl_node_free(aclnode);
/* convert checksum errors into IO errors */
if (error == ECKSUM)
error = EIO;
error = SET_ERROR(EIO);
goto done;
}
@ -1887,7 +1888,7 @@ zfs_getacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
VSA_ACE_ACLFLAGS | VSA_ACE_ALLTYPES);
if (mask == 0)
return (ENOSYS);
return (SET_ERROR(ENOSYS));
if ((error = zfs_zaccess(zp, ACE_READ_ACL, 0, skipaclchk, cr)))
return (error);
@ -1981,7 +1982,7 @@ zfs_vsec_2_aclp(zfs_sb_t *zsb, umode_t obj_mode,
int error;
if (vsecp->vsa_aclcnt > MAX_ACL_ENTRIES || vsecp->vsa_aclcnt <= 0)
return (EINVAL);
return (SET_ERROR(EINVAL));
aclp = zfs_acl_alloc(zfs_acl_version(zsb->z_version));
@ -2043,10 +2044,10 @@ zfs_setacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
uint64_t acl_obj;
if (mask == 0)
return (ENOSYS);
return (SET_ERROR(ENOSYS));
if (zp->z_pflags & ZFS_IMMUTABLE)
return (EPERM);
return (SET_ERROR(EPERM));
if ((error = zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr)))
return (error);
@ -2142,7 +2143,7 @@ zfs_zaccess_dataset_check(znode_t *zp, uint32_t v4_mode)
if ((v4_mode & WRITE_MASK) && (zfs_is_readonly(ZTOZSB(zp))) &&
(!S_ISDEV(ZTOI(zp)->i_mode) ||
(S_ISDEV(ZTOI(zp)->i_mode) && (v4_mode & WRITE_MASK_ATTRS)))) {
return (EROFS);
return (SET_ERROR(EROFS));
}
/*
@ -2153,17 +2154,17 @@ zfs_zaccess_dataset_check(znode_t *zp, uint32_t v4_mode)
(zp->z_pflags & (ZFS_READONLY | ZFS_IMMUTABLE))) ||
(S_ISDIR(ZTOI(zp)->i_mode) &&
(zp->z_pflags & ZFS_IMMUTABLE)))) {
return (EPERM);
return (SET_ERROR(EPERM));
}
if ((v4_mode & (ACE_DELETE | ACE_DELETE_CHILD)) &&
(zp->z_pflags & ZFS_NOUNLINK)) {
return (EPERM);
return (SET_ERROR(EPERM));
}
if (((v4_mode & (ACE_READ_DATA|ACE_EXECUTE)) &&
(zp->z_pflags & ZFS_AV_QUARANTINED))) {
return (EACCES);
return (SET_ERROR(EACCES));
}
return (0);
@ -2272,7 +2273,7 @@ zfs_zaccess_aces_check(znode_t *zp, uint32_t *working_mode,
break;
} else {
mutex_exit(&zp->z_acl_lock);
return (EIO);
return (SET_ERROR(EIO));
}
}
@ -2306,7 +2307,7 @@ zfs_zaccess_aces_check(znode_t *zp, uint32_t *working_mode,
/* Put the found 'denies' back on the working mode */
if (deny_mask) {
*working_mode |= deny_mask;
return (EACCES);
return (SET_ERROR(EACCES));
} else if (*working_mode) {
return (-1);
}
@ -2373,7 +2374,7 @@ zfs_zaccess_append(znode_t *zp, uint32_t *working_mode, boolean_t *check_privs,
cred_t *cr)
{
if (*working_mode != ACE_WRITE_DATA)
return (EACCES);
return (SET_ERROR(EACCES));
return (zfs_zaccess_common(zp, ACE_APPEND_DATA, working_mode,
check_privs, B_FALSE, cr));
@ -2389,7 +2390,7 @@ zfs_fastaccesschk_execute(znode_t *zdp, cred_t *cr)
int error;
if (zdp->z_pflags & ZFS_AV_QUARANTINED)
return (EACCES);
return (SET_ERROR(EACCES));
is_attr = ((zdp->z_pflags & ZFS_XATTR) &&
(S_ISDIR(ZTOI(zdp)->i_mode)));
@ -2601,7 +2602,7 @@ zfs_zaccess(znode_t *zp, int mode, int flags, boolean_t skipaclchk, cred_t *cr)
* for are still present. If so then return EACCES
*/
if (working_mode & ~(ZFS_CHECKED_MASKS)) {
error = EACCES;
error = SET_ERROR(EACCES);
}
}
} else if (error == 0) {
@ -2711,7 +2712,7 @@ zfs_zaccess_delete(znode_t *dzp, znode_t *zp, cred_t *cr)
*/
if (zp->z_pflags & (ZFS_IMMUTABLE | ZFS_NOUNLINK))
return (EPERM);
return (SET_ERROR(EPERM));
/*
* First row
@ -2778,7 +2779,7 @@ zfs_zaccess_rename(znode_t *sdzp, znode_t *szp, znode_t *tdzp,
int error;
if (szp->z_pflags & ZFS_AV_QUARANTINED)
return (EACCES);
return (SET_ERROR(EACCES));
add_perm = S_ISDIR(ZTOI(szp)->i_mode) ?
ACE_ADD_SUBDIRECTORY : ACE_ADD_FILE;


@ -27,6 +27,7 @@
* Rewritten for Linux by:
* Rohan Puri <rohan.puri15@gmail.com>
* Brian Behlendorf <behlendorf1@llnl.gov>
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
/*
@ -286,11 +287,11 @@ zfsctl_create(zfs_sb_t *zsb)
zsb->z_ctldir = zfsctl_inode_alloc(zsb, ZFSCTL_INO_ROOT,
&zpl_fops_root, &zpl_ops_root);
if (zsb->z_ctldir == NULL)
return (ENOENT);
return (SET_ERROR(ENOENT));
return (0);
#else
return (EOPNOTSUPP);
return (SET_ERROR(EOPNOTSUPP));
#endif /* CONFIG_64BIT */
}
@ -331,7 +332,7 @@ zfsctl_fid(struct inode *ip, fid_t *fidp)
if (fidp->fid_len < SHORT_FID_LEN) {
fidp->fid_len = SHORT_FID_LEN;
ZFS_EXIT(zsb);
return (ENOSPC);
return (SET_ERROR(ENOSPC));
}
zfid = (zfid_short_t *)fidp;
@ -355,11 +356,11 @@ zfsctl_snapshot_zname(struct inode *ip, const char *name, int len, char *zname)
objset_t *os = ITOZSB(ip)->z_os;
if (snapshot_namecheck(name, NULL, NULL) != 0)
return (EILSEQ);
return (SET_ERROR(EILSEQ));
dmu_objset_name(os, zname);
if ((strlen(zname) + 1 + strlen(name)) >= len)
return (ENAMETOOLONG);
return (SET_ERROR(ENAMETOOLONG));
(void) strcat(zname, "@");
(void) strcat(zname, name);
@ -383,7 +384,7 @@ zfsctl_snapshot_zpath(struct path *path, int len, char *zpath)
path_len = path_buffer + len - 1 - path_ptr;
if (path_len > len) {
error = EFAULT;
error = SET_ERROR(EFAULT);
goto out;
}
@ -421,7 +422,7 @@ zfsctl_root_lookup(struct inode *dip, char *name, struct inode **ipp,
}
if (*ipp == NULL)
error = ENOENT;
error = SET_ERROR(ENOENT);
ZFS_EXIT(zsb);
@ -457,7 +458,7 @@ zfsctl_snapdir_lookup(struct inode *dip, char *name, struct inode **ipp,
(*ipp)->i_flags |= S_AUTOMOUNT;
#endif /* HAVE_AUTOMOUNT */
} else {
error = ENOENT;
error = SET_ERROR(ENOENT);
}
ZFS_EXIT(zsb);
@ -529,7 +530,7 @@ zfsctl_snapdir_rename(struct inode *sdip, char *snm,
* Cannot move snapshots out of the snapdir.
*/
if (sdip != tdip) {
error = EINVAL;
error = SET_ERROR(EINVAL);
goto out;
}
@ -626,7 +627,7 @@ zfsctl_snapdir_mkdir(struct inode *dip, char *dirname, vattr_t *vap,
dsname = kmem_alloc(MAXNAMELEN, KM_SLEEP);
if (snapshot_namecheck(dirname, NULL, NULL) != 0) {
error = EILSEQ;
error = SET_ERROR(EILSEQ);
goto out;
}
@ -710,7 +711,7 @@ __zfsctl_unmount_snapshot(zfs_snapentry_t *sep, int flags)
* converted to the more sensible EBUSY.
*/
if (error)
error = EBUSY;
error = SET_ERROR(EBUSY);
/*
* This was the result of a manual unmount, cancel the delayed work
@ -746,7 +747,7 @@ zfsctl_unmount_snapshot(zfs_sb_t *zsb, char *name, int flags)
else
zfsctl_sep_free(sep);
} else {
error = ENOENT;
error = SET_ERROR(ENOENT);
}
mutex_exit(&zsb->z_ctldir_lock);
@ -840,7 +841,7 @@ zfsctl_mount_snapshot(struct path *path, int flags)
if (error) {
printk("ZFS: Unable to automount %s at %s: %d\n",
full_name, full_path, error);
error = EISDIR;
error = SET_ERROR(EISDIR);
goto error;
}
@ -948,7 +949,7 @@ zfsctl_lookup_objset(struct super_block *sb, uint64_t objsetid, zfs_sb_t **zsbp)
deactivate_super(sbp);
}
} else {
error = EINVAL;
error = SET_ERROR(EINVAL);
}
out:
mutex_exit(&zsb->z_ctldir_lock);
@ -971,7 +972,7 @@ zfsctl_shares_lookup(struct inode *dip, char *name, struct inode **ipp,
if (zsb->z_shares_dir == 0) {
ZFS_EXIT(zsb);
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
error = zfs_zget(zsb, zsb->z_shares_dir, &dzp);


@ -20,6 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
@ -171,7 +172,7 @@ zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp,
if ((name[0] == '.' &&
(name[1] == '\0' || (name[1] == '.' && name[2] == '\0'))) ||
(zfs_has_ctldir(dzp) && strcmp(name, ZFS_CTLDIR_NAME) == 0))
return (EEXIST);
return (SET_ERROR(EEXIST));
/*
* Case sensitivity and normalization preferences are set when
@ -242,7 +243,7 @@ zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp,
mutex_exit(&dzp->z_lock);
if (!(flag & ZHAVELOCK))
rw_exit(&dzp->z_name_lock);
return (ENOENT);
return (SET_ERROR(ENOENT));
}
for (dl = dzp->z_dirlocks; dl != NULL; dl = dl->dl_next) {
if ((u8_strcmp(name, dl->dl_name, 0, cmpflags,
@ -253,7 +254,7 @@ zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp,
mutex_exit(&dzp->z_lock);
if (!(flag & ZHAVELOCK))
rw_exit(&dzp->z_name_lock);
return (ENOENT);
return (SET_ERROR(ENOENT));
}
if (dl == NULL) {
/*
@ -307,19 +308,19 @@ zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp,
error = sa_lookup(dzp->z_sa_hdl, SA_ZPL_XATTR(zsb), &zoid,
sizeof (zoid));
if (error == 0)
error = (zoid == 0 ? ENOENT : 0);
error = (zoid == 0 ? SET_ERROR(ENOENT) : 0);
} else {
#ifdef HAVE_DNLC
if (update)
vp = dnlc_lookup(ZTOI(dzp), name);
if (vp == DNLC_NO_VNODE) {
iput(vp);
error = ENOENT;
error = SET_ERROR(ENOENT);
} else if (vp) {
if (flag & ZNEW) {
zfs_dirent_unlock(dl);
iput(vp);
return (EEXIST);
return (SET_ERROR(EEXIST));
}
*dlpp = dl;
*zpp = VTOZ(vp);
@ -341,7 +342,7 @@ zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp,
} else {
if (flag & ZNEW) {
zfs_dirent_unlock(dl);
return (EEXIST);
return (SET_ERROR(EEXIST));
}
error = zfs_zget(zsb, zoid, zpp);
if (error) {
@ -762,7 +763,7 @@ zfs_link_create(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag)
if (zp->z_unlinked) { /* no new links to unlinked zp */
ASSERT(!(flag & (ZNEW | ZEXISTS)));
mutex_exit(&zp->z_lock);
return (ENOENT);
return (SET_ERROR(ENOENT));
}
zp->z_links++;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb), NULL,
@ -865,7 +866,7 @@ zfs_link_destroy(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag,
if (zp_is_dir && !zfs_dirempty(zp)) {
mutex_exit(&zp->z_lock);
return (ENOTEMPTY);
return (SET_ERROR(ENOTEMPTY));
}
/*
@ -969,7 +970,7 @@ zfs_make_xattrdir(znode_t *zp, vattr_t *vap, struct inode **xipp, cred_t *cr)
return (error);
if (zfs_acl_ids_overquota(zsb, &acl_ids)) {
zfs_acl_ids_free(&acl_ids);
return (EDQUOT);
return (SET_ERROR(EDQUOT));
}
top:
@ -1051,12 +1052,12 @@ top:
if (!(flags & CREATE_XATTR_DIR)) {
zfs_dirent_unlock(dl);
return (ENOENT);
return (SET_ERROR(ENOENT));
}
if (zfs_is_readonly(zsb)) {
zfs_dirent_unlock(dl);
return (EROFS);
return (SET_ERROR(EROFS));
}
/*

File diff suppressed because it is too large Load Diff
File diff suppressed because it is too large

View File

@ -20,6 +20,7 @@
*/
/*
* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/types.h>
@ -109,7 +110,7 @@ zfs_onexit_minor_to_state(minor_t minor, zfs_onexit_t **zo)
{
*zo = zfsdev_get_state(minor, ZST_ONEXIT);
if (*zo == NULL)
return (EBADF);
return (SET_ERROR(EBADF));
return (0);
}
@ -128,7 +129,7 @@ zfs_onexit_fd_hold(int fd, minor_t *minorp)
fp = getf(fd);
if (fp == NULL)
return (EBADF);
return (SET_ERROR(EBADF));
*minorp = zfsdev_getminor(fp->f_file);
return (zfs_onexit_minor_to_state(*minorp, &zo));
@ -211,7 +212,7 @@ zfs_onexit_del_cb(minor_t minor, uint64_t action_handle, boolean_t fire)
kmem_free(ap, sizeof (zfs_onexit_action_node_t));
} else {
mutex_exit(&zo->zo_lock);
error = ENOENT;
error = SET_ERROR(ENOENT);
}
return (error);
@ -240,7 +241,7 @@ zfs_onexit_cb_data(minor_t minor, uint64_t action_handle, void **data)
if (ap != NULL)
*data = ap->za_data;
else
error = ENOENT;
error = SET_ERROR(ENOENT);
mutex_exit(&zo->zo_lock);
return (error);

View File

@ -21,6 +21,7 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 Cyril Plisko. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/types.h>
@ -73,7 +74,7 @@ zfs_init_vattr(vattr_t *vap, uint64_t mask, uint64_t mode,
static int
zfs_replay_error(zfs_sb_t *zsb, lr_t *lr, boolean_t byteswap)
{
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
static void
@ -387,7 +388,7 @@ zfs_replay_create_acl(zfs_sb_t *zsb, lr_acl_create_t *lracl, boolean_t byteswap)
&ip, kcred, vflg, &vsec);
break;
default:
error = ENOTSUP;
error = SET_ERROR(ENOTSUP);
}
bail:
@ -513,7 +514,7 @@ zfs_replay_create(zfs_sb_t *zsb, lr_create_t *lr, boolean_t byteswap)
link, &ip, kcred, vflg);
break;
default:
error = ENOTSUP;
error = SET_ERROR(ENOTSUP);
}
out:
@ -553,7 +554,7 @@ zfs_replay_remove(zfs_sb_t *zsb, lr_remove_t *lr, boolean_t byteswap)
error = zfs_rmdir(ZTOI(dzp), name, NULL, kcred, vflg);
break;
default:
error = ENOTSUP;
error = SET_ERROR(ENOTSUP);
}
iput(ZTOI(dzp));
@ -677,7 +678,7 @@ zfs_replay_write(zfs_sb_t *zsb, lr_write_t *lr, boolean_t byteswap)
if (written < 0)
error = -written;
else if (written < length)
error = EIO; /* short write */
error = SET_ERROR(EIO); /* short write */
iput(ZTOI(zp));
zsb->z_replay_eof = 0; /* safety */
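
The replay hunk above shows why tagging matters for derived errors: a short write does not hand back an errno at all, so EIO is manufactured on the spot, and that is exactly where the probe should fire. Roughly, with a hypothetical write helper standing in for the real call:

	written = replay_write_common(zp, data, length);	/* hypothetical helper */
	if (written < 0)
		error = -written;		/* callee handed back -errno; just propagate */
	else if (written < length)
		error = SET_ERROR(EIO);		/* short write: the EIO is born here */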

View File

@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
/* Portions Copyright 2010 Robert Milkowski */
@ -344,7 +344,7 @@ zfs_space_delta_cb(dmu_object_type_t bonustype, void *data,
* Is it a valid type of object to track?
*/
if (bonustype != DMU_OT_ZNODE && bonustype != DMU_OT_SA)
return (ENOENT);
return (SET_ERROR(ENOENT));
/*
* If we have a NULL data pointer
@ -353,7 +353,7 @@ zfs_space_delta_cb(dmu_object_type_t bonustype, void *data,
* use the same ids
*/
if (data == NULL)
return (EEXIST);
return (SET_ERROR(EEXIST));
if (bonustype == DMU_OT_ZNODE) {
znode_phys_t *znp = data;
@ -429,7 +429,7 @@ zfs_userquota_prop_to_obj(zfs_sb_t *zsb, zfs_userquota_prop_t type)
case ZFS_PROP_GROUPQUOTA:
return (zsb->z_groupquota_obj);
default:
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
return (0);
}
@ -445,7 +445,7 @@ zfs_userspace_many(zfs_sb_t *zsb, zfs_userquota_prop_t type,
uint64_t obj;
if (!dmu_objset_userspace_present(zsb->z_os))
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
obj = zfs_userquota_prop_to_obj(zsb, type);
if (obj == 0) {
@ -490,7 +490,7 @@ id_to_fuidstr(zfs_sb_t *zsb, const char *domain, uid_t rid,
if (domain && domain[0]) {
domainid = zfs_fuid_find_by_domain(zsb, domain, NULL, addok);
if (domainid == -1)
return (ENOENT);
return (SET_ERROR(ENOENT));
}
fuid = FUID_ENCODE(domainid, rid);
(void) sprintf(buf, "%llx", (longlong_t)fuid);
@ -508,7 +508,7 @@ zfs_userspace_one(zfs_sb_t *zsb, zfs_userquota_prop_t type,
*valp = 0;
if (!dmu_objset_userspace_present(zsb->z_os))
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
obj = zfs_userquota_prop_to_obj(zsb, type);
if (obj == 0)
@ -536,10 +536,10 @@ zfs_set_userquota(zfs_sb_t *zsb, zfs_userquota_prop_t type,
boolean_t fuid_dirtied;
if (type != ZFS_PROP_USERQUOTA && type != ZFS_PROP_GROUPQUOTA)
return (EINVAL);
return (SET_ERROR(EINVAL));
if (zsb->z_version < ZPL_VERSION_USERSPACE)
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
objp = (type == ZFS_PROP_USERQUOTA) ? &zsb->z_userquota_obj :
&zsb->z_groupquota_obj;
@ -670,7 +670,7 @@ zfs_sb_create(const char *osname, zfs_sb_t **zsbp)
"on a version %lld pool\n. Pool must be upgraded to mount "
"this file system.", (u_longlong_t)zsb->z_version,
(u_longlong_t)spa_version(dmu_objset_spa(os)));
error = ENOTSUP;
error = SET_ERROR(ENOTSUP);
goto out;
}
if ((error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &zval)) != 0)
@ -968,10 +968,10 @@ zfs_check_global_label(const char *dsname, const char *hexsl)
if (dsl_prop_get_integer(dsname,
zfs_prop_to_name(ZFS_PROP_READONLY), &rdonly, NULL))
return (EACCES);
return (SET_ERROR(EACCES));
return (rdonly ? 0 : EACCES);
}
return (EACCES);
return (SET_ERROR(EACCES));
}
EXPORT_SYMBOL(zfs_check_global_label);
#endif /* HAVE_MLSLABEL */
@ -1127,7 +1127,7 @@ zfs_sb_teardown(zfs_sb_t *zsb, boolean_t unmounting)
if (!unmounting && (zsb->z_unmounted || zsb->z_os == NULL)) {
rw_exit(&zsb->z_teardown_inactive_lock);
rrw_exit(&zsb->z_teardown_lock, FTAG);
return (EIO);
return (SET_ERROR(EIO));
}
/*
@ -1274,7 +1274,7 @@ zfs_domount(struct super_block *sb, void *data, int silent)
sb->s_root = d_make_root(root_inode);
if (sb->s_root == NULL) {
(void) zfs_umount(sb);
error = ENOMEM;
error = SET_ERROR(ENOMEM);
goto out;
}
@ -1389,7 +1389,7 @@ zfs_vget(struct super_block *sb, struct inode **ipp, fid_t *fidp)
err = zfsctl_lookup_objset(sb, objsetid, &zsb);
if (err)
return (EINVAL);
return (SET_ERROR(EINVAL));
ZFS_ENTER(zsb);
}
@ -1404,7 +1404,7 @@ zfs_vget(struct super_block *sb, struct inode **ipp, fid_t *fidp)
fid_gen |= ((uint64_t)zfid->zf_gen[i]) << (8 * i);
} else {
ZFS_EXIT(zsb);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
/* A zero fid_gen means we are in the .zfs control directories */
@ -1438,7 +1438,7 @@ zfs_vget(struct super_block *sb, struct inode **ipp, fid_t *fidp)
dprintf("znode gen (%u) != fid gen (%u)\n", zp_gen, fid_gen);
iput(ZTOI(zp));
ZFS_EXIT(zsb);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
*ipp = ZTOI(zp);
@ -1548,14 +1548,14 @@ zfs_set_version(zfs_sb_t *zsb, uint64_t newvers)
dmu_tx_t *tx;
if (newvers < ZPL_VERSION_INITIAL || newvers > ZPL_VERSION)
return (EINVAL);
return (SET_ERROR(EINVAL));
if (newvers < zsb->z_version)
return (EINVAL);
return (SET_ERROR(EINVAL));
if (zfs_spa_version_map(newvers) >
spa_version(dmu_objset_spa(zsb->z_os)))
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_FALSE, ZPL_VERSION_STR);
@ -1615,7 +1615,7 @@ int
zfs_get_zplprop(objset_t *os, zfs_prop_t prop, uint64_t *value)
{
const char *pname;
int error = ENOENT;
int error = SET_ERROR(ENOENT);
/*
* Look up the file system's value for the property. For the

View File

@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
/* Portions Copyright 2007 Jeremy Teo */
@ -192,7 +192,7 @@ zfs_open(struct inode *ip, int mode, int flag, cred_t *cr)
if ((mode & FMODE_WRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
((flag & O_APPEND) == 0)) {
ZFS_EXIT(zsb);
return (EPERM);
return (SET_ERROR(EPERM));
}
/* Virus scan eligible files on open */
@ -200,7 +200,7 @@ zfs_open(struct inode *ip, int mode, int flag, cred_t *cr)
!(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) {
if (zfs_vscan(ip, cr, 0) != 0) {
ZFS_EXIT(zsb);
return (EACCES);
return (SET_ERROR(EACCES));
}
}
@ -256,7 +256,7 @@ zfs_holey_common(struct inode *ip, int cmd, loff_t *off)
file_sz = zp->z_size;
if (noff >= file_sz) {
return (ENXIO);
return (SET_ERROR(ENXIO));
}
if (cmd == SEEK_HOLE)
@ -275,7 +275,7 @@ zfs_holey_common(struct inode *ip, int cmd, loff_t *off)
*off = file_sz;
return (0);
}
return (ENXIO);
return (SET_ERROR(ENXIO));
}
if (noff < *off)
@ -444,7 +444,7 @@ zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
if (zp->z_pflags & ZFS_AV_QUARANTINED) {
ZFS_EXIT(zsb);
return (EACCES);
return (SET_ERROR(EACCES));
}
/*
@ -452,7 +452,7 @@ zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
*/
if (uio->uio_loffset < (offset_t)0) {
ZFS_EXIT(zsb);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
/*
@ -469,7 +469,7 @@ zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
if (mandatory_lock(ip) &&
!lock_may_read(ip, uio->uio_loffset, uio->uio_resid)) {
ZFS_EXIT(zsb);
return (EAGAIN);
return (SET_ERROR(EAGAIN));
}
/*
@ -538,7 +538,7 @@ zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
if (error) {
/* convert checksum errors into IO errors */
if (error == ECKSUM)
error = EIO;
error = SET_ERROR(EIO);
break;
}
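
The ECKSUM-to-EIO conversion in this zfs_read() hunk recurs in zfs_fillpage() and zvol_read() later in the commit. Wrapping the converted value rather than the original means the probe records where a checksum failure becomes the EIO that callers actually see. A sketch, with a hypothetical read helper in place of the DMU call:

	error = read_chunk(os, object, offset, size, buf);	/* hypothetical read helper */
	if (error == ECKSUM)
		error = SET_ERROR(EIO);		/* surface checksum failures to callers as EIO */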
@ -627,7 +627,7 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) &&
(uio->uio_loffset < zp->z_size))) {
ZFS_EXIT(zsb);
return (EPERM);
return (SET_ERROR(EPERM));
}
zilog = zsb->z_log;
@ -638,7 +638,7 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
woff = ioflag & FAPPEND ? zp->z_size : uio->uio_loffset;
if (woff < 0) {
ZFS_EXIT(zsb);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
/*
@ -647,7 +647,7 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
*/
if (mandatory_lock(ip) && !lock_may_write(ip, woff, n)) {
ZFS_EXIT(zsb);
return (EAGAIN);
return (SET_ERROR(EAGAIN));
}
/*
@ -694,7 +694,7 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
if (woff >= limit) {
zfs_range_unlock(rl);
ZFS_EXIT(zsb);
return (EFBIG);
return (SET_ERROR(EFBIG));
}
if ((woff + n) > limit || woff > (limit - n))
@ -718,7 +718,7 @@ again:
zfs_owner_overquota(zsb, zp, B_TRUE)) {
if (abuf != NULL)
dmu_return_arcbuf(abuf);
error = EDQUOT;
error = SET_ERROR(EDQUOT);
break;
}
@ -987,14 +987,14 @@ zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
* Nothing to do if the file has been removed
*/
if (zfs_zget(zsb, object, &zp) != 0)
return (ENOENT);
return (SET_ERROR(ENOENT));
if (zp->z_unlinked) {
/*
* Release the vnode asynchronously as we currently have the
* txg stopped from syncing.
*/
iput_async(ZTOI(zp), dsl_pool_iput_taskq(dmu_objset_pool(os)));
return (ENOENT);
return (SET_ERROR(ENOENT));
}
zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_PUSHPAGE);
@ -1012,7 +1012,7 @@ zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
zgd->zgd_rl = zfs_range_lock(zp, offset, size, RL_READER);
/* test for truncation needs to be done while range locked */
if (offset >= zp->z_size) {
error = ENOENT;
error = SET_ERROR(ENOENT);
} else {
error = dmu_read(os, object, offset, size, buf,
DMU_READ_NO_PREFETCH);
@ -1039,10 +1039,10 @@ zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
}
/* test for truncation needs to be done while range locked */
if (lr->lr_offset >= zp->z_size)
error = ENOENT;
error = SET_ERROR(ENOENT);
#ifdef DEBUG
if (zil_fault_io) {
error = EIO;
error = SET_ERROR(EIO);
zil_fault_io = 0;
}
#endif
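
Both zfs_get_data() hunks tag ENOENT at the truncation test, and that test deliberately runs only after the range lock is held; comparing against z_size before taking the lock could race with a concurrent truncate. The ordering, condensed from the hunk above:

	zgd->zgd_rl = zfs_range_lock(zp, offset, size, RL_READER);
	/* only now is it safe to compare against z_size */
	if (offset >= zp->z_size)
		error = SET_ERROR(ENOENT);	/* block was truncated away */
	else
		error = dmu_read(os, object, offset, size, buf,
		    DMU_READ_NO_PREFETCH);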
@ -1135,9 +1135,9 @@ zfs_lookup(struct inode *dip, char *nm, struct inode **ipp, int flags,
if (!(flags & (LOOKUP_XATTR | FIGNORECASE))) {
if (!S_ISDIR(dip->i_mode)) {
return (ENOTDIR);
return (SET_ERROR(ENOTDIR));
} else if (zdp->z_sa_hdl == NULL) {
return (EIO);
return (SET_ERROR(EIO));
}
if (nm[0] == 0 || (nm[0] == '.' && nm[1] == '\0')) {
@ -1160,7 +1160,7 @@ zfs_lookup(struct inode *dip, char *nm, struct inode **ipp, int flags,
}
if (tvp == DNLC_NO_VNODE) {
iput(tvp);
return (ENOENT);
return (SET_ERROR(ENOENT));
} else {
*vpp = tvp;
return (specvp_check(vpp, cr));
@ -1182,7 +1182,7 @@ zfs_lookup(struct inode *dip, char *nm, struct inode **ipp, int flags,
*/
if (zdp->z_pflags & ZFS_XATTR) {
ZFS_EXIT(zsb);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
if ((error = zfs_get_xattrdir(zdp, ipp, cr, flags))) {
@ -1206,7 +1206,7 @@ zfs_lookup(struct inode *dip, char *nm, struct inode **ipp, int flags,
if (!S_ISDIR(dip->i_mode)) {
ZFS_EXIT(zsb);
return (ENOTDIR);
return (SET_ERROR(ENOTDIR));
}
/*
@ -1221,7 +1221,7 @@ zfs_lookup(struct inode *dip, char *nm, struct inode **ipp, int flags,
if (zsb->z_utf8 && u8_validate(nm, strlen(nm),
NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
ZFS_EXIT(zsb);
return (EILSEQ);
return (SET_ERROR(EILSEQ));
}
error = zfs_dirlook(zdp, nm, ipp, flags, direntflags, realpnp);
@ -1285,7 +1285,7 @@ zfs_create(struct inode *dip, char *name, vattr_t *vap, int excl,
if (zsb->z_use_fuids == B_FALSE &&
(vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
return (EINVAL);
return (SET_ERROR(EINVAL));
ZFS_ENTER(zsb);
ZFS_VERIFY_ZP(dzp);
@ -1295,7 +1295,7 @@ zfs_create(struct inode *dip, char *name, vattr_t *vap, int excl,
if (zsb->z_utf8 && u8_validate(name, strlen(name),
NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
ZFS_EXIT(zsb);
return (EILSEQ);
return (SET_ERROR(EILSEQ));
}
if (vap->va_mask & ATTR_XVATTR) {
@ -1329,7 +1329,7 @@ top:
if (have_acl)
zfs_acl_ids_free(&acl_ids);
if (strcmp(name, "..") == 0)
error = EISDIR;
error = SET_ERROR(EISDIR);
ZFS_EXIT(zsb);
return (error);
}
@ -1356,7 +1356,7 @@ top:
if ((dzp->z_pflags & ZFS_XATTR) && !S_ISREG(vap->va_mode)) {
if (have_acl)
zfs_acl_ids_free(&acl_ids);
error = EINVAL;
error = SET_ERROR(EINVAL);
goto out;
}
@ -1367,7 +1367,7 @@ top:
if (zfs_acl_ids_overquota(zsb, &acl_ids)) {
zfs_acl_ids_free(&acl_ids);
error = EDQUOT;
error = SET_ERROR(EDQUOT);
goto out;
}
@ -1426,14 +1426,14 @@ top:
* Can't truncate an existing file if in exclusive mode.
*/
if (excl) {
error = EEXIST;
error = SET_ERROR(EEXIST);
goto out;
}
/*
* Can't open a directory for writing.
*/
if (S_ISDIR(ZTOI(zp)->i_mode)) {
error = EISDIR;
error = SET_ERROR(EISDIR);
goto out;
}
/*
@ -1558,7 +1558,7 @@ top:
* Need to use rmdir for removing directories.
*/
if (S_ISDIR(ip->i_mode)) {
error = EPERM;
error = SET_ERROR(EPERM);
goto out;
}
@ -1716,7 +1716,7 @@ zfs_mkdir(struct inode *dip, char *dirname, vattr_t *vap, struct inode **ipp,
uid = crgetuid(cr);
if (zsb->z_use_fuids == B_FALSE &&
(vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
return (EINVAL);
return (SET_ERROR(EINVAL));
ZFS_ENTER(zsb);
ZFS_VERIFY_ZP(dzp);
@ -1724,13 +1724,13 @@ zfs_mkdir(struct inode *dip, char *dirname, vattr_t *vap, struct inode **ipp,
if (dzp->z_pflags & ZFS_XATTR) {
ZFS_EXIT(zsb);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
if (zsb->z_utf8 && u8_validate(dirname,
strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
ZFS_EXIT(zsb);
return (EILSEQ);
return (SET_ERROR(EILSEQ));
}
if (flags & FIGNORECASE)
zf |= ZCILOOK;
@ -1776,7 +1776,7 @@ top:
zfs_acl_ids_free(&acl_ids);
zfs_dirent_unlock(dl);
ZFS_EXIT(zsb);
return (EDQUOT);
return (SET_ERROR(EDQUOT));
}
/*
@ -1904,12 +1904,12 @@ top:
}
if (!S_ISDIR(ip->i_mode)) {
error = ENOTDIR;
error = SET_ERROR(ENOTDIR);
goto out;
}
if (ip == cwd) {
error = EINVAL;
error = SET_ERROR(EINVAL);
goto out;
}
@ -2094,7 +2094,7 @@ zfs_readdir(struct inode *ip, struct dir_context *ctx, cred_t *cr)
(u_longlong_t)offset,
zap.za_integer_length,
(u_longlong_t)zap.za_num_integers);
error = ENXIO;
error = SET_ERROR(ENXIO);
goto update;
}
@ -2470,17 +2470,17 @@ zfs_setattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr)
((mask & ATTR_GID) && IS_EPHEMERAL(vap->va_gid)) ||
(mask & ATTR_XVATTR))) {
ZFS_EXIT(zsb);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
if (mask & ATTR_SIZE && S_ISDIR(ip->i_mode)) {
ZFS_EXIT(zsb);
return (EISDIR);
return (SET_ERROR(EISDIR));
}
if (mask & ATTR_SIZE && !S_ISREG(ip->i_mode) && !S_ISFIFO(ip->i_mode)) {
ZFS_EXIT(zsb);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
/*
@ -3116,7 +3116,7 @@ zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp)
*zlpp = zl;
if (oidp == szp->z_id) /* We're a descendant of szp */
return (EINVAL);
return (SET_ERROR(EINVAL));
if (oidp == rootid) /* We've hit the top */
return (0);
@ -3176,7 +3176,7 @@ zfs_rename(struct inode *sdip, char *snm, struct inode *tdip, char *tnm,
if (tdip->i_sb != sdip->i_sb) {
ZFS_EXIT(zsb);
return (EXDEV);
return (SET_ERROR(EXDEV));
}
tdzp = ITOZ(tdip);
@ -3184,7 +3184,7 @@ zfs_rename(struct inode *sdip, char *snm, struct inode *tdip, char *tnm,
if (zsb->z_utf8 && u8_validate(tnm,
strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
ZFS_EXIT(zsb);
return (EILSEQ);
return (SET_ERROR(EILSEQ));
}
if (flags & FIGNORECASE)
@ -3202,7 +3202,7 @@ top:
*/
if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
ZFS_EXIT(zsb);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
/*
@ -3347,12 +3347,12 @@ top:
*/
if (S_ISDIR(ZTOI(szp)->i_mode)) {
if (!S_ISDIR(ZTOI(tzp)->i_mode)) {
error = ENOTDIR;
error = SET_ERROR(ENOTDIR);
goto out;
}
} else {
if (S_ISDIR(ZTOI(tzp)->i_mode)) {
error = EISDIR;
error = SET_ERROR(EISDIR);
goto out;
}
}
@ -3515,14 +3515,14 @@ zfs_symlink(struct inode *dip, char *name, vattr_t *vap, char *link,
if (zsb->z_utf8 && u8_validate(name, strlen(name),
NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
ZFS_EXIT(zsb);
return (EILSEQ);
return (SET_ERROR(EILSEQ));
}
if (flags & FIGNORECASE)
zflg |= ZCILOOK;
if (len > MAXPATHLEN) {
ZFS_EXIT(zsb);
return (ENAMETOOLONG);
return (SET_ERROR(ENAMETOOLONG));
}
if ((error = zfs_acl_ids_create(dzp, 0,
@ -3554,7 +3554,7 @@ top:
zfs_acl_ids_free(&acl_ids);
zfs_dirent_unlock(dl);
ZFS_EXIT(zsb);
return (EDQUOT);
return (SET_ERROR(EDQUOT));
}
tx = dmu_tx_create(zsb->z_os);
fuid_dirtied = zsb->z_fuid_dirty;
@ -3713,12 +3713,12 @@ zfs_link(struct inode *tdip, struct inode *sip, char *name, cred_t *cr)
*/
if (S_ISDIR(sip->i_mode)) {
ZFS_EXIT(zsb);
return (EPERM);
return (SET_ERROR(EPERM));
}
if (sip->i_sb != tdip->i_sb) {
ZFS_EXIT(zsb);
return (EXDEV);
return (SET_ERROR(EXDEV));
}
szp = ITOZ(sip);
@ -3733,13 +3733,13 @@ zfs_link(struct inode *tdip, struct inode *sip, char *name, cred_t *cr)
}
if (parent == zsb->z_shares_dir) {
ZFS_EXIT(zsb);
return (EPERM);
return (SET_ERROR(EPERM));
}
if (zsb->z_utf8 && u8_validate(name,
strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
ZFS_EXIT(zsb);
return (EILSEQ);
return (SET_ERROR(EILSEQ));
}
#ifdef HAVE_PN_UTILS
if (flags & FIGNORECASE)
@ -3754,13 +3754,13 @@ zfs_link(struct inode *tdip, struct inode *sip, char *name, cred_t *cr)
*/
if ((szp->z_pflags & ZFS_XATTR) != (dzp->z_pflags & ZFS_XATTR)) {
ZFS_EXIT(zsb);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
owner = zfs_fuid_map_id(zsb, szp->z_uid, cr, ZFS_OWNER);
if (owner != crgetuid(cr) && secpolicy_basic_link(cr) != 0) {
ZFS_EXIT(zsb);
return (EPERM);
return (SET_ERROR(EPERM));
}
if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
@ -4125,7 +4125,7 @@ zfs_fillpage(struct inode *ip, struct page *pl[], int nr_pages)
if (err) {
/* convert checksum errors into IO errors */
if (err == ECKSUM)
err = EIO;
err = SET_ERROR(EIO);
return (err);
}
cur_pp = pl[++page_idx];
@ -4197,18 +4197,18 @@ zfs_map(struct inode *ip, offset_t off, caddr_t *addrp, size_t len,
if ((vm_flags & VM_WRITE) && (zp->z_pflags &
(ZFS_IMMUTABLE | ZFS_READONLY | ZFS_APPENDONLY))) {
ZFS_EXIT(zsb);
return (EPERM);
return (SET_ERROR(EPERM));
}
if ((vm_flags & (VM_READ | VM_EXEC)) &&
(zp->z_pflags & ZFS_AV_QUARANTINED)) {
ZFS_EXIT(zsb);
return (EACCES);
return (SET_ERROR(EACCES));
}
if (off < 0 || len > MAXOFFSET_T - off) {
ZFS_EXIT(zsb);
return (ENXIO);
return (SET_ERROR(ENXIO));
}
ZFS_EXIT(zsb);
@ -4241,11 +4241,11 @@ convoff(struct inode *ip, flock64_t *lckdat, int whence, offset_t offset)
case 0:
break;
default:
return (EINVAL);
return (SET_ERROR(EINVAL));
}
if (lckdat->l_start < 0)
return (EINVAL);
return (SET_ERROR(EINVAL));
switch (whence) {
case 1:
@ -4257,7 +4257,7 @@ convoff(struct inode *ip, flock64_t *lckdat, int whence, offset_t offset)
case 0:
break;
default:
return (EINVAL);
return (SET_ERROR(EINVAL));
}
lckdat->l_whence = (short)whence;
@ -4298,7 +4298,7 @@ zfs_space(struct inode *ip, int cmd, flock64_t *bfp, int flag,
if (cmd != F_FREESP) {
ZFS_EXIT(zsb);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
if ((error = convoff(ip, bfp, 0, offset))) {
@ -4308,7 +4308,7 @@ zfs_space(struct inode *ip, int cmd, flock64_t *bfp, int flag,
if (bfp->l_len < 0) {
ZFS_EXIT(zsb);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
/*
@ -4359,7 +4359,7 @@ zfs_fid(struct inode *ip, fid_t *fidp)
if (fidp->fid_len < size) {
fidp->fid_len = size;
ZFS_EXIT(zsb);
return (ENOSPC);
return (SET_ERROR(ENOSPC));
}
zfid = (zfid_short_t *)fidp;
@ -4463,7 +4463,7 @@ zfs_reqzcbuf(struct inode *ip, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr)
int preamble, postamble;
if (xuio->xu_type != UIOTYPE_ZEROCOPY)
return (EINVAL);
return (SET_ERROR(EINVAL));
ZFS_ENTER(zsb);
ZFS_VERIFY_ZP(zp);
@ -4476,7 +4476,7 @@ zfs_reqzcbuf(struct inode *ip, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr)
blksz = max_blksz;
if (size < blksz || zp->z_blksz != blksz) {
ZFS_EXIT(zsb);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
/*
* Caller requests buffers for write before knowing where the
@ -4541,7 +4541,7 @@ zfs_reqzcbuf(struct inode *ip, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr)
/* avoid potential complexity of dealing with it */
if (blksz > max_blksz) {
ZFS_EXIT(zsb);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
maxsize = zp->z_size - uio->uio_loffset;
@ -4550,12 +4550,12 @@ zfs_reqzcbuf(struct inode *ip, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr)
if (size < blksz) {
ZFS_EXIT(zsb);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
break;
default:
ZFS_EXIT(zsb);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
uio->uio_extflg = UIO_XUIO;

View File

@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
/* Portions Copyright 2007 Jeremy Teo */
@ -881,7 +881,7 @@ again:
sa_buf_rele(db, NULL);
ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
iput(ip);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
hdl = dmu_buf_get_user(db);
@ -913,7 +913,7 @@ again:
mutex_enter(&zp->z_lock);
ASSERT3U(zp->z_id, ==, obj_num);
if (zp->z_unlinked) {
err = ENOENT;
err = SET_ERROR(ENOENT);
} else {
igrab(ZTOI(zp));
*zpp = zp;
@ -941,7 +941,7 @@ again:
zp = zfs_znode_alloc(zsb, db, doi.doi_data_block_size,
doi.doi_bonus_type, obj_num, NULL, NULL);
if (zp == NULL) {
err = ENOENT;
err = SET_ERROR(ENOENT);
} else {
*zpp = zp;
}
@ -997,7 +997,7 @@ zfs_rezget(znode_t *zp)
doi.doi_bonus_size < sizeof (znode_phys_t)))) {
sa_buf_rele(db, NULL);
ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
zfs_znode_sa_init(zsb, zp, db, doi.doi_bonus_type, NULL);
@ -1023,7 +1023,7 @@ zfs_rezget(znode_t *zp)
if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) {
zfs_znode_dmu_fini(zp);
ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
return (EIO);
return (SET_ERROR(EIO));
}
zp->z_mode = mode;
@ -1031,7 +1031,7 @@ zfs_rezget(znode_t *zp)
if (gen != zp->z_gen) {
zfs_znode_dmu_fini(zp);
ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
return (EIO);
return (SET_ERROR(EIO));
}
zp->z_unlinked = (zp->z_links == 0);
@ -1408,7 +1408,7 @@ zfs_freesp(znode_t *zp, uint64_t off, uint64_t len, int flag, boolean_t log)
if (ip->i_flock && mandatory_lock(ip)) {
uint64_t length = (len ? len : zp->z_size - off);
if (!lock_may_write(ip, off, length))
return (EAGAIN);
return (SET_ERROR(EAGAIN));
}
if (len == 0) {
@ -1629,7 +1629,7 @@ zfs_grab_sa_handle(objset_t *osp, uint64_t obj, sa_handle_t **hdlp,
(doi.doi_bonus_type == DMU_OT_ZNODE &&
doi.doi_bonus_size < sizeof (znode_phys_t))) {
sa_buf_rele(*db, tag);
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
error = sa_handle_get(osp, obj, NULL, SA_HDL_PRIVATE, hdlp);

View File

@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
/* Portions Copyright 2010 Robert Milkowski */
@ -164,7 +164,7 @@ zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp)
avl_index_t where;
if (avl_find(t, dva, &where) != NULL)
return (EEXIST);
return (SET_ERROR(EEXIST));
zn = kmem_alloc(sizeof (zil_bp_node_t), KM_PUSHPAGE);
zn->zn_dva = *dva;
@ -235,7 +235,7 @@ zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, blkptr_t *nbp, void *dst,
if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk)) {
error = ECKSUM;
error = SET_ERROR(ECKSUM);
} else {
bcopy(lr, dst, len);
*end = (char *)dst + len;
@ -249,7 +249,7 @@ zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, blkptr_t *nbp, void *dst,
if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) ||
(zilc->zc_nused > (size - sizeof (*zilc)))) {
error = ECKSUM;
error = SET_ERROR(ECKSUM);
} else {
bcopy(lr, dst, zilc->zc_nused);
*end = (char *)dst + zilc->zc_nused;
@ -1952,7 +1952,7 @@ zil_suspend(const char *osname, void **cookiep)
if (zh->zh_flags & ZIL_REPLAY_NEEDED) { /* unplayed log */
mutex_exit(&zilog->zl_lock);
dmu_objset_rele(os, suspend_tag);
return (EBUSY);
return (SET_ERROR(EBUSY));
}
/*
@ -2213,7 +2213,7 @@ zil_vdev_offline(const char *osname, void *arg)
error = zil_suspend(osname, NULL);
if (error != 0)
return (EEXIST);
return (SET_ERROR(EEXIST));
return (0);
}

View File

@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
* Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
*/
@ -395,7 +395,7 @@ zio_decompress(zio_t *zio, void *data, uint64_t size)
if (zio->io_error == 0 &&
zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
zio->io_data, data, zio->io_size, size) != 0)
zio->io_error = EIO;
zio->io_error = SET_ERROR(EIO);
}
/*
@ -2061,7 +2061,7 @@ zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde)
if (arc_buf_size(abuf) != zio->io_orig_size ||
bcmp(abuf->b_data, zio->io_orig_data,
zio->io_orig_size) != 0)
error = EEXIST;
error = SET_ERROR(EEXIST);
VERIFY(arc_buf_remove_ref(abuf, &abuf));
}
@ -2529,7 +2529,7 @@ zio_vdev_io_start(zio_t *zio)
return (ZIO_PIPELINE_STOP);
if (!vdev_accessible(vd, zio)) {
zio->io_error = ENXIO;
zio->io_error = SET_ERROR(ENXIO);
zio_interrupt(zio);
return (ZIO_PIPELINE_STOP);
}
@ -2566,7 +2566,7 @@ zio_vdev_io_done(zio_t *zio)
if (zio->io_error) {
if (!vdev_accessible(vd, zio)) {
zio->io_error = ENXIO;
zio->io_error = SET_ERROR(ENXIO);
} else {
unexpected_error = B_TRUE;
}
@ -2651,7 +2651,7 @@ zio_vdev_io_assess(zio_t *zio)
*/
if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf &&
!vdev_accessible(vd, zio))
zio->io_error = ENXIO;
zio->io_error = SET_ERROR(ENXIO);
/*
* If we can't write to an interior vdev (mirror or RAID-Z),

View File

@ -20,6 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@ -201,7 +202,7 @@ zio_checksum_error(zio_t *zio, zio_bad_cksum_t *info)
zio_cksum_t actual_cksum, expected_cksum, verifier;
if (checksum >= ZIO_CHECKSUM_FUNCTIONS || ci->ci_func[0] == NULL)
return (EINVAL);
return (SET_ERROR(EINVAL));
if (ci->ci_eck) {
zio_eck_t *eck;
@ -216,10 +217,10 @@ zio_checksum_error(zio_t *zio, zio_bad_cksum_t *info)
else if (eck->zec_magic == BSWAP_64(ZEC_MAGIC))
nused = BSWAP_64(zilc->zc_nused);
else
return (ECKSUM);
return (SET_ERROR(ECKSUM));
if (nused > size)
return (ECKSUM);
return (SET_ERROR(ECKSUM));
size = P2ROUNDUP_TYPED(nused, ZIL_MIN_BLKSZ, uint64_t);
} else {
@ -261,7 +262,7 @@ zio_checksum_error(zio_t *zio, zio_bad_cksum_t *info)
info->zbc_has_cksum = 1;
if (!ZIO_CHECKSUM_EQUAL(actual_cksum, expected_cksum))
return (ECKSUM);
return (SET_ERROR(ECKSUM));
if (zio_injection_enabled && !zio->io_error &&
(error = zio_handle_fault_injection(zio, ECKSUM)) != 0) {

View File

@ -27,6 +27,10 @@
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
*/
/*
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/compress.h>
#include <sys/spa.h>
@ -130,7 +134,7 @@ zio_decompress_data(enum zio_compress c, void *src, void *dst,
zio_compress_info_t *ci = &zio_compress_table[c];
if ((uint_t)c >= ZIO_COMPRESS_FUNCTIONS || ci->ci_decompress == NULL)
return (EINVAL);
return (SET_ERROR(EINVAL));
return (ci->ci_decompress(src, dst, s_len, d_len, ci->ci_level));
}
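
zio_decompress_data() is a table dispatch: the compression enum indexes zio_compress_table, and an out-of-range value or a missing handler is rejected up front. Tagging that EINVAL makes an unknown or corrupt compression setting directly traceable. A generic sketch of the same shape (the table and handler names here are made up):

	if ((uint_t)alg >= NUM_ALGORITHMS || dispatch[alg].decompress == NULL)
		return (SET_ERROR(EINVAL));	/* unknown or unsupported algorithm */
	return (dispatch[alg].decompress(src, dst, s_len, d_len));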

View File

@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
/*
@ -276,7 +276,7 @@ zio_handle_device_injection(vdev_t *vd, zio_t *zio, int error)
break;
}
if (handler->zi_record.zi_error == ENXIO) {
ret = EIO;
ret = SET_ERROR(EIO);
break;
}
}
@ -416,7 +416,7 @@ zio_inject_fault(char *name, int flags, int *id, zinject_record_t *record)
* still allowing it to be unloaded.
*/
if ((spa = spa_inject_addref(name)) == NULL)
return (ENOENT);
return (SET_ERROR(ENOENT));
handler = kmem_alloc(sizeof (inject_handler_t), KM_SLEEP);
@ -468,7 +468,7 @@ zio_inject_list_next(int *id, char *name, size_t buflen,
(void) strncpy(name, spa_name(handler->zi_spa), buflen);
ret = 0;
} else {
ret = ENOENT;
ret = SET_ERROR(ENOENT);
}
rw_exit(&inject_lock);
@ -495,7 +495,7 @@ zio_clear_fault(int id)
if (handler == NULL) {
rw_exit(&inject_lock);
return (ENOENT);
return (SET_ERROR(ENOENT));
}
list_remove(&inject_handlers, handler);

View File

@ -237,14 +237,14 @@ int
zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
{
if (volsize == 0)
return (EINVAL);
return (SET_ERROR(EINVAL));
if (volsize % blocksize != 0)
return (EINVAL);
return (SET_ERROR(EINVAL));
#ifdef _ILP32
if (volsize - 1 > MAXOFFSET_T)
return (EOVERFLOW);
return (SET_ERROR(EOVERFLOW));
#endif
return (0);
}
@ -283,7 +283,7 @@ zvol_update_volsize(zvol_state_t *zv, uint64_t volsize, objset_t *os)
bdev = bdget_disk(zv->zv_disk, 0);
if (!bdev)
return (EIO);
return (SET_ERROR(EIO));
/*
* 2.6.28 API change
* Added check_disk_size_change() helper function.
@ -320,13 +320,13 @@ zvol_set_volsize(const char *name, uint64_t volsize)
if (error != 0)
return (error);
if (readonly)
return (EROFS);
return (SET_ERROR(EROFS));
mutex_enter(&zvol_state_lock);
zv = zvol_find_by_name(name);
if (zv == NULL) {
error = ENXIO;
error = SET_ERROR(ENXIO);
goto out;
}
@ -342,12 +342,12 @@ zvol_set_volsize(const char *name, uint64_t volsize)
VERIFY(dsl_prop_get_integer(name, "readonly", &readonly, NULL) == 0);
if (readonly) {
error = EROFS;
error = SET_ERROR(EROFS);
goto out_doi;
}
if (get_disk_ro(zv->zv_disk) || (zv->zv_flags & ZVOL_RDONLY)) {
error = EROFS;
error = SET_ERROR(EROFS);
goto out_doi;
}
@ -372,7 +372,7 @@ zvol_check_volblocksize(uint64_t volblocksize)
if (volblocksize < SPA_MINBLOCKSIZE ||
volblocksize > SPA_MAXBLOCKSIZE ||
!ISP2(volblocksize))
return (EDOM);
return (SET_ERROR(EDOM));
return (0);
}
@ -391,12 +391,12 @@ zvol_set_volblocksize(const char *name, uint64_t volblocksize)
zv = zvol_find_by_name(name);
if (zv == NULL) {
error = ENXIO;
error = SET_ERROR(ENXIO);
goto out;
}
if (get_disk_ro(zv->zv_disk) || (zv->zv_flags & ZVOL_RDONLY)) {
error = EROFS;
error = SET_ERROR(EROFS);
goto out;
}
@ -409,7 +409,7 @@ zvol_set_volblocksize(const char *name, uint64_t volblocksize)
error = dmu_object_set_blocksize(zv->zv_objset, ZVOL_OBJ,
volblocksize, 0, tx);
if (error == ENOTSUP)
error = EBUSY;
error = SET_ERROR(EBUSY);
dmu_tx_commit(tx);
if (error == 0)
zv->zv_volblocksize = volblocksize;
@ -453,7 +453,7 @@ zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
static int
zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
{
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
/*
@ -705,7 +705,7 @@ zvol_read(void *arg)
/* convert checksum errors into IO errors */
if (error == ECKSUM)
error = EIO;
error = SET_ERROR(EIO);
blk_end_request(req, -error, size);
}
@ -922,7 +922,7 @@ zvol_first_open(zvol_state_t *zv)
if (!mutex_owned(&spa_namespace_lock)) {
locked = mutex_tryenter(&spa_namespace_lock);
if (!locked)
return (-ERESTARTSYS);
return (-SET_ERROR(ERESTARTSYS));
}
/* lie and say we're read-only */
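
zvol_first_open() above, and zvol_ioctl() just below, sit on the Linux block-layer boundary where callers expect a negative errno. The negation is applied outside the wrapper, as in:

	if (!locked)
		return (-SET_ERROR(ERESTARTSYS));	/* probe sees ERESTARTSYS; caller gets -ERESTARTSYS */

so the probe still observes the conventional positive value while the kernel interface receives its -errno.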
@ -1068,7 +1068,7 @@ zvol_ioctl(struct block_device *bdev, fmode_t mode,
int error = 0;
if (zv == NULL)
return (-ENXIO);
return (-SET_ERROR(ENXIO));
switch (cmd) {
case BLKFLSBUF:
@ -1313,7 +1313,7 @@ __zvol_snapdev_hidden(const char *name)
*atp = '\0';
error = dsl_prop_get_integer(parent, "snapdev", &snapdev, NULL);
if ((error == 0) && (snapdev == ZFS_SNAPDEV_HIDDEN))
error = ENODEV;
error = SET_ERROR(ENODEV);
}
kmem_free(parent, MAXPATHLEN);
return (error);
@ -1333,7 +1333,7 @@ __zvol_create_minor(const char *name, boolean_t ignore_snapdev)
zv = zvol_find_by_name(name);
if (zv) {
error = EEXIST;
error = SET_ERROR(EEXIST);
goto out;
}
@ -1363,7 +1363,7 @@ __zvol_create_minor(const char *name, boolean_t ignore_snapdev)
zv = zvol_alloc(MKDEV(zvol_major, minor), name);
if (zv == NULL) {
error = EAGAIN;
error = SET_ERROR(EAGAIN);
goto out_dmu_objset_disown;
}
@ -1439,10 +1439,10 @@ __zvol_remove_minor(const char *name)
zv = zvol_find_by_name(name);
if (zv == NULL)
return (ENXIO);
return (SET_ERROR(ENXIO));
if (zv->zv_open_count > 0)
return (EBUSY);
return (SET_ERROR(EBUSY));
zvol_remove(zv);
zvol_free(zv);
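
Once a kernel with working SDT support carries these changes, error origination becomes observable from DTrace. Something along these lines should work on illumos; the provider and probe name are my assumption based on the macro naming, not something stated in this diff:

	dtrace -n 'sdt:::set-error { @[stack(), arg0] = count(); }'

This aggregates error origination sites by kernel stack and errno value, which is the kind of question the per-site SET_ERROR() wrapping in the hunks above is meant to answer.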