Add Linux kernel device support

This branch contains the majority of the changes required to cleanly
integrate with Linux-style special devices (/dev/zfs).  Mainly this
means dropping all the Solaris-style callbacks and replacing them
with their Linux equivalents.

This patch also adds the onexit infrastructure needed to track
some minimal state between ioctls.  Under Linux it would be easy
to do this simply using file->private_data, but under Solaris the
file descriptor apparently must be passed as part of the ioctl
data, with a lookup then performed in the kernel.  Once again, to
keep code changes to a minimum I've implemented the Solaris solution.

Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
commit 325f023544 (parent 47d0ed1e6f)
Brian Behlendorf, 2010-08-26 11:44:39 -07:00
8 changed files with 798 additions and 257 deletions
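As context for the libzfs hunks below, here is a minimal, hypothetical sketch of the consumer-side pattern after this change (not part of the commit; error handling trimmed). The point is that the O_EXCL clone-open trick is gone: a plain open() of ZFS_DEV now gives each file handle its own private state and minor number, and that fd is then passed along in the ioctl payload (e.g. the zc_cleanup_fd field) for cleanup tracking.

/*
 * Hypothetical userspace sketch, not part of this commit.
 */
#include <fcntl.h>

int
example_open_cleanup_fd(void)
{
    /* Previously: open(ZFS_DEV, O_RDWR|O_EXCL) to force a clone-open. */
    int cleanup_fd = open("/dev/zfs", O_RDWR);    /* ZFS_DEV */

    if (cleanup_fd < 0)
        return (-1);

    /*
     * The fd is handed to any ioctl that may need to register
     * cleanup actions (see the zfs_send()/zfs_receive() hunks below).
     */
    return (cleanup_fd);
}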

lib/libzfs/libzfs_diff.c

@@ -726,7 +726,7 @@ setup_differ_info(zfs_handle_t *zhp, const char *fromsnap,
 {
     di->zhp = zhp;
-    di->cleanupfd = open(ZFS_DEV, O_RDWR|O_EXCL);
+    di->cleanupfd = open(ZFS_DEV, O_RDWR);
     VERIFY(di->cleanupfd >= 0);
     if (get_snapshot_names(di, fromsnap, tosnap) != 0)

lib/libzfs/libzfs_sendrecv.c

@@ -1413,7 +1413,7 @@ zfs_send(zfs_handle_t *zhp, const char *fromsnap, const char *tosnap,
     ++holdseq;
     (void) snprintf(sdd.holdtag, sizeof (sdd.holdtag),
         ".send-%d-%llu", getpid(), (u_longlong_t)holdseq);
-    sdd.cleanup_fd = open(ZFS_DEV, O_RDWR|O_EXCL);
+    sdd.cleanup_fd = open(ZFS_DEV, O_RDWR);
     if (sdd.cleanup_fd < 0) {
         err = errno;
         goto stderr_out;

@@ -2988,7 +2988,7 @@ zfs_receive(libzfs_handle_t *hdl, const char *tosnap, recvflags_t flags,
     int cleanup_fd;
     uint64_t action_handle = 0;

-    cleanup_fd = open(ZFS_DEV, O_RDWR|O_EXCL);
+    cleanup_fd = open(ZFS_DEV, O_RDWR);
     VERIFY(cleanup_fd >= 0);

     err = zfs_receive_impl(hdl, tosnap, flags, infd, NULL, NULL,

include/sys/zfs_ioctl.h

@@ -312,8 +312,6 @@ typedef struct zfs_creat {
     nvlist_t *zct_props;
 } zfs_creat_t;

-extern dev_info_t *zfs_dip;
-
 extern int zfs_secpolicy_snapshot_perms(const char *name, cred_t *cr);
 extern int zfs_secpolicy_rename_perms(const char *from,
     const char *to, cred_t *cr);

@@ -321,28 +319,24 @@ extern int zfs_secpolicy_destroy_perms(const char *name, cred_t *cr);
 extern int zfs_busy(void);
 extern int zfs_unmount_snap(const char *, void *);

-/*
- * ZFS minor numbers can refer to either a control device instance or
- * a zvol. Depending on the value of zss_type, zss_data points to either
- * a zvol_state_t or a zfs_onexit_t.
- */
-enum zfs_soft_state_type {
-    ZSST_ZVOL,
-    ZSST_CTLDEV
+enum zfsdev_state_type {
+    ZST_ONEXIT,
+    ZST_ZEVENT,
+    ZST_ALL,
 };

-typedef struct zfs_soft_state {
-    enum zfs_soft_state_type zss_type;
-    void *zss_data;
-} zfs_soft_state_t;
+typedef struct zfsdev_state {
+    list_node_t zs_next;        /* next zfsdev_state_t link */
+    struct file *zs_file;       /* associated file struct */
+    minor_t zs_minor;           /* made up minor number */
+    void *zs_onexit;            /* onexit data */
+    void *zs_zevent;            /* zevent data */
+} zfsdev_state_t;

-extern void *zfsdev_get_soft_state(minor_t minor,
-    enum zfs_soft_state_type which);
+extern void *zfsdev_get_state(minor_t minor, enum zfsdev_state_type which);
+extern minor_t zfsdev_getminor(struct file *filp);
 extern minor_t zfsdev_minor_alloc(void);

-extern void *zfsdev_state;
-extern kmutex_t zfsdev_state_lock;
-
 #endif /* _KERNEL */

 #ifdef __cplusplus

module/zfs/zfs_ioctl.c

@@ -65,20 +65,21 @@
 #include <sys/dsl_scan.h>
 #include <sharefs/share.h>
 #include <sys/dmu_objset.h>
+#include <sys/fm/util.h>
+
+#include <linux/miscdevice.h>

 #include "zfs_namecheck.h"
 #include "zfs_prop.h"
 #include "zfs_deleg.h"
 #include "zfs_comutil.h"

-extern struct modlfs zfs_modlfs;
+kmutex_t zfsdev_state_lock;
+list_t zfsdev_state_list;

 extern void zfs_init(void);
 extern void zfs_fini(void);

-ldi_ident_t zfs_li = NULL;
-dev_info_t *zfs_dip;
-
 typedef int zfs_ioc_func_t(zfs_cmd_t *);
 typedef int zfs_secpolicy_func_t(zfs_cmd_t *, cred_t *);

@@ -591,6 +592,7 @@ zfs_secpolicy_send(zfs_cmd_t *zc, cred_t *cr)
     return (error);
 }

+#ifdef HAVE_ZPL
 static int
 zfs_secpolicy_deleg_share(zfs_cmd_t *zc, cred_t *cr)
 {

@@ -614,10 +616,12 @@ zfs_secpolicy_deleg_share(zfs_cmd_t *zc, cred_t *cr)
     return (dsl_deleg_access(zc->zc_name,
         ZFS_DELEG_PERM_SHARE, cr));
 }
+#endif /* HAVE_ZPL */

 int
 zfs_secpolicy_share(zfs_cmd_t *zc, cred_t *cr)
 {
+#ifdef HAVE_ZPL
     if (!INGLOBALZONE(curproc))
         return (EPERM);

@@ -626,11 +630,15 @@ zfs_secpolicy_share(zfs_cmd_t *zc, cred_t *cr)
     } else {
         return (zfs_secpolicy_deleg_share(zc, cr));
     }
+#else
+    return (ENOTSUP);
+#endif /* HAVE_ZPL */
 }

 int
 zfs_secpolicy_smb_acl(zfs_cmd_t *zc, cred_t *cr)
 {
+#ifdef HAVE_ZPL
     if (!INGLOBALZONE(curproc))
         return (EPERM);

@@ -639,6 +647,9 @@ zfs_secpolicy_smb_acl(zfs_cmd_t *zc, cred_t *cr)
     } else {
         return (zfs_secpolicy_deleg_share(zc, cr));
     }
+#else
+    return (ENOTSUP);
+#endif /* HAVE_ZPL */
 }

 static int

@@ -839,6 +850,7 @@ zfs_secpolicy_create(zfs_cmd_t *zc, cred_t *cr)
     return (error);
 }

+#ifdef HAVE_ZPL
 static int
 zfs_secpolicy_umount(zfs_cmd_t *zc, cred_t *cr)
 {

@@ -850,6 +862,7 @@ zfs_secpolicy_umount(zfs_cmd_t *zc, cred_t *cr)
     }
     return (error);
 }
+#endif /* HAVE_ZPL */

 /*
  * Policy for pool operations - create/destroy pools, add vdevs, etc. Requires

@@ -1093,6 +1106,7 @@ put_nvlist(zfs_cmd_t *zc, nvlist_t *nvl)
     return (error);
 }

+#ifdef HAVE_ZPL
 static int
 getzfsvfs(const char *dsname, zfsvfs_t **zfvp)
 {

@@ -1118,6 +1132,7 @@ getzfsvfs(const char *dsname, zfsvfs_t **zfvp)
     dmu_objset_rele(os, FTAG);
     return (error);
 }
+#endif

 /*
  * Find a zfsvfs_t for a mounted filesystem, or create our own, in which

@@ -1126,6 +1141,7 @@ getzfsvfs(const char *dsname, zfsvfs_t **zfvp)
 static int
 zfsvfs_hold(const char *name, void *tag, zfsvfs_t **zfvp, boolean_t writer)
 {
+#ifdef HAVE_ZPL
     int error = 0;

     if (getzfsvfs(name, zfvp) != 0)

@@ -1144,11 +1160,15 @@ zfsvfs_hold(const char *name, void *tag, zfsvfs_t **zfvp, boolean_t writer)
         }
     }
     return (error);
+#else
+    return ENOTSUP;
+#endif
 }

 static void
 zfsvfs_rele(zfsvfs_t *zfsvfs, void *tag)
 {
+#ifdef HAVE_ZPL
     rrw_exit(&zfsvfs->z_teardown_lock, tag);

     if (zfsvfs->z_vfs) {

@@ -1157,6 +1177,7 @@ zfsvfs_rele(zfsvfs_t *zfsvfs, void *tag)
         dmu_objset_disown(zfsvfs->z_os, zfsvfs);
         zfsvfs_free(zfsvfs);
     }
+#endif
 }

 static int

@@ -2063,6 +2084,7 @@ top:
 static int
 zfs_prop_set_userquota(const char *dsname, nvpair_t *pair)
 {
+#ifdef HAVE_ZPL
     const char *propname = nvpair_name(pair);
     uint64_t *valary;
     unsigned int vallen;

@@ -2103,6 +2125,9 @@ zfs_prop_set_userquota(const char *dsname, nvpair_t *pair)
     }

     return (err);
+#else
+    return ENOTSUP;
+#endif
 }

 /*

@@ -2164,7 +2189,9 @@ zfs_prop_set_special(const char *dsname, zprop_source_t source,
         if ((err = zfsvfs_hold(dsname, FTAG, &zfsvfs, B_TRUE)) != 0)
             break;

+#ifdef HAVE_ZPL
         err = zfs_set_version(zfsvfs, intval);
+#endif
         zfsvfs_rele(zfsvfs, FTAG);

         if (err == 0 && intval >= ZPL_VERSION_USERSPACE) {

@@ -2696,6 +2723,7 @@ zfs_ioc_get_fsacl(zfs_cmd_t *zc)
     return (error);
 }

+#ifdef HAVE_ZPL
 /*
  * Search the vfs list for a specified resource. Returns a pointer to it
  * or NULL if no suitable entry is found. The caller of this routine

@@ -2720,6 +2748,7 @@ zfs_get_vfs(const char *resource)
     vfs_list_unlock();
     return (vfs_found);
 }
+#endif /* HAVE_ZPL */

 /* ARGSUSED */
 static void

@@ -3074,6 +3103,7 @@ out:
 int
 zfs_unmount_snap(const char *name, void *arg)
 {
+#ifdef HAVE_ZPL
     vfs_t *vfsp = NULL;

     if (arg) {

@@ -3100,6 +3130,7 @@ zfs_unmount_snap(const char *name, void *arg)
         if ((err = dounmount(vfsp, flag, kcred)) != 0)
             return (err);
     }
+#endif /* HAVE_ZPL */
     return (0);
 }

@@ -3159,6 +3190,7 @@ zfs_ioc_destroy(zfs_cmd_t *zc)
 static int
 zfs_ioc_rollback(zfs_cmd_t *zc)
 {
+#ifdef HAVE_ZPL
     dsl_dataset_t *ds, *clone;
     int error;
     zfsvfs_t *zfsvfs;

@@ -3232,6 +3264,9 @@ out:
     if (ds)
         dsl_dataset_rele(ds, FTAG);
     return (error);
+#else
+    return (ENOTSUP);
+#endif /* HAVE_ZPL */
 }

 /*

@@ -3675,6 +3710,7 @@ zfs_ioc_recv(zfs_cmd_t *zc)
         &zc->zc_action_handle);

     if (error == 0) {
+#ifdef HAVE_ZPL
         zfsvfs_t *zfsvfs = NULL;

         if (getzfsvfs(tofs, &zfsvfs) == 0) {

@@ -3694,6 +3730,9 @@ zfs_ioc_recv(zfs_cmd_t *zc)
         } else {
             error = dmu_recv_end(&drc);
         }
+#else
+        error = dmu_recv_end(&drc);
+#endif /* HAVE_ZPL */
     }

     zc->zc_cookie = off - fp->f_offset;

@@ -4017,6 +4056,7 @@ zfs_ioc_promote(zfs_cmd_t *zc)
 static int
 zfs_ioc_userspace_one(zfs_cmd_t *zc)
 {
+#ifdef HAVE_ZPL
     zfsvfs_t *zfsvfs;
     int error;

@@ -4032,6 +4072,9 @@ zfs_ioc_userspace_one(zfs_cmd_t *zc)
     zfsvfs_rele(zfsvfs, FTAG);

     return (error);
+#else
+    return (ENOTSUP);
+#endif /* HAVE_ZPL */
 }

 /*

@@ -4048,6 +4091,7 @@ zfs_ioc_userspace_one(zfs_cmd_t *zc)
 static int
 zfs_ioc_userspace_many(zfs_cmd_t *zc)
 {
+#ifdef HAVE_ZPL
     zfsvfs_t *zfsvfs;
     int bufsize = zc->zc_nvlist_dst_size;

@@ -4072,6 +4116,9 @@ zfs_ioc_userspace_many(zfs_cmd_t *zc)
     zfsvfs_rele(zfsvfs, FTAG);

     return (error);
+#else
+    return (ENOTSUP);
+#endif /* HAVE_ZPL */
 }

 /*

@@ -4084,6 +4131,7 @@ zfs_ioc_userspace_many(zfs_cmd_t *zc)
 static int
 zfs_ioc_userspace_upgrade(zfs_cmd_t *zc)
 {
+#ifdef HAVE_ZPL
     objset_t *os;
     int error = 0;
     zfsvfs_t *zfsvfs;

@@ -4113,6 +4161,9 @@ zfs_ioc_userspace_upgrade(zfs_cmd_t *zc)
     }

     return (error);
+#else
+    return (ENOTSUP);
+#endif /* HAVE_ZPL */
 }

 /*

@@ -4122,6 +4173,7 @@ zfs_ioc_userspace_upgrade(zfs_cmd_t *zc)
  * the first file system is shared.
  * Neither sharefs, nfs or smbsrv are unloadable modules.
  */
+#ifdef HAVE_ZPL
 int (*znfsexport_fs)(void *arg);
 int (*zshare_fs)(enum sharefs_sys_op, share_t *, uint32_t);
 int (*zsmbexport_fs)(void *arg, boolean_t add_share);

@@ -4153,10 +4205,12 @@ zfs_init_sharefs()
     }
     return (0);
 }
+#endif /* HAVE_ZPL */

 static int
 zfs_ioc_share(zfs_cmd_t *zc)
 {
+#ifdef HAVE_ZPL
     int error;
     int opcode;

@@ -4246,7 +4300,9 @@ zfs_ioc_share(zfs_cmd_t *zc)
         zc->zc_share.z_sharemax);

     return (error);
+#else
+    return (ENOTSUP);
+#endif /* HAVE_ZPL */
 }

 ace_t full_access[] = {

@@ -4363,6 +4419,7 @@ zfs_ioc_diff(zfs_cmd_t *zc)
 /*
  * Remove all ACL files in shares dir
  */
+#ifdef HAVE_ZPL
 static int
 zfs_smb_acl_purge(znode_t *dzp)
 {

@@ -4381,10 +4438,12 @@ zfs_smb_acl_purge(znode_t *dzp)
     zap_cursor_fini(&zc);
     return (error);
 }
+#endif /* HAVE ZPL */

 static int
 zfs_ioc_smb_acl(zfs_cmd_t *zc)
 {
+#ifdef HAVE_ZPL
     vnode_t *vp;
     znode_t *dzp;
     vnode_t *resourcevp = NULL;

@@ -4507,6 +4566,9 @@ zfs_ioc_smb_acl(zfs_cmd_t *zc)
     ZFS_EXIT(zfsvfs);

     return (error);
+#else
+    return (ENOTSUP);
+#endif /* HAVE_ZPL */
 }

 /*

@@ -4842,13 +4904,56 @@ pool_status_check(const char *name, zfs_ioc_namecheck_t type,
     return (error);
 }

+static void *
+zfsdev_get_state_impl(minor_t minor, enum zfsdev_state_type which)
+{
+    zfsdev_state_t *zs;
+
+    ASSERT(MUTEX_HELD(&zfsdev_state_lock));
+
+    for (zs = list_head(&zfsdev_state_list); zs != NULL;
+        zs = list_next(&zfsdev_state_list, zs)) {
+        if (zs->zs_minor == minor) {
+            switch (which) {
+            case ZST_ONEXIT: return (zs->zs_onexit);
+            case ZST_ZEVENT: return (zs->zs_zevent);
+            case ZST_ALL:    return (zs);
+            }
+        }
+    }
+
+    return NULL;
+}
+
+void *
+zfsdev_get_state(minor_t minor, enum zfsdev_state_type which)
+{
+    void *ptr;
+
+    mutex_enter(&zfsdev_state_lock);
+    ptr = zfsdev_get_state_impl(minor, which);
+    mutex_exit(&zfsdev_state_lock);
+
+    return ptr;
+}
+
+minor_t
+zfsdev_getminor(struct file *filp)
+{
+    ASSERT(filp != NULL);
+    ASSERT(filp->private_data != NULL);
+
+    return (((zfsdev_state_t *)filp->private_data)->zs_minor);
+}
+
 /*
- * Find a free minor number.
+ * Find a free minor number.  The zfsdev_state_list is expected to
+ * be short since it is only a list of currently open file handles.
  */
 minor_t
 zfsdev_minor_alloc(void)
 {
-    static minor_t last_minor;
+    static minor_t last_minor = 0;
     minor_t m;

     ASSERT(MUTEX_HELD(&zfsdev_state_lock));

@@ -4856,7 +4961,7 @@ zfsdev_minor_alloc(void)
     for (m = last_minor + 1; m != last_minor; m++) {
         if (m > ZFSDEV_MAX_MINOR)
             m = 1;
-        if (ddi_get_soft_state(zfsdev_state, m) == NULL) {
+        if (zfsdev_get_state_impl(m, ZST_ALL) == NULL) {
             last_minor = m;
             return (m);
         }

@@ -4866,107 +4971,85 @@ zfsdev_minor_alloc(void)
 }

 static int
-zfs_ctldev_init(dev_t *devp)
+zfsdev_state_init(struct file *filp)
 {
+    zfsdev_state_t *zs;
     minor_t minor;
-    zfs_soft_state_t *zs;

     ASSERT(MUTEX_HELD(&zfsdev_state_lock));
-    ASSERT(getminor(*devp) == 0);

     minor = zfsdev_minor_alloc();
     if (minor == 0)
         return (ENXIO);

-    if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS)
-        return (EAGAIN);
+    zs = kmem_zalloc( sizeof(zfsdev_state_t), KM_SLEEP);
+    if (zs == NULL)
+        return (ENOMEM);

-    *devp = makedevice(getemajor(*devp), minor);
+    zs->zs_file = filp;
+    zs->zs_minor = minor;
+    filp->private_data = zs;

-    zs = ddi_get_soft_state(zfsdev_state, minor);
-    zs->zss_type = ZSST_CTLDEV;
-    zfs_onexit_init((zfs_onexit_t **)&zs->zss_data);
+    zfs_onexit_init((zfs_onexit_t **)&zs->zs_onexit);
+    zfs_zevent_init((zfs_zevent_t **)&zs->zs_zevent);
+
+    list_insert_tail(&zfsdev_state_list, zs);

     return (0);
 }

-static void
-zfs_ctldev_destroy(zfs_onexit_t *zo, minor_t minor)
+static int
+zfsdev_state_destroy(struct file *filp)
 {
+    zfsdev_state_t *zs;
+
     ASSERT(MUTEX_HELD(&zfsdev_state_lock));
+    ASSERT(filp->private_data != NULL);

-    zfs_onexit_destroy(zo);
-    ddi_soft_state_free(zfsdev_state, minor);
-}
+    zs = filp->private_data;
+    zfs_onexit_destroy(zs->zs_onexit);
+    zfs_zevent_destroy(zs->zs_zevent);
+    list_remove(&zfsdev_state_list, zs);
+    kmem_free(zs, sizeof(zfsdev_state_t));

-void *
-zfsdev_get_soft_state(minor_t minor, enum zfs_soft_state_type which)
-{
-    zfs_soft_state_t *zp;
-
-    zp = ddi_get_soft_state(zfsdev_state, minor);
-    if (zp == NULL || zp->zss_type != which)
-        return (NULL);
-
-    return (zp->zss_data);
+    return 0;
 }

 static int
-zfsdev_open(dev_t *devp, int flag, int otyp, cred_t *cr)
+zfsdev_open(struct inode *ino, struct file *filp)
 {
-    int error = 0;
+    int error;

-    if (getminor(*devp) != 0)
-        return (zvol_open(devp, flag, otyp, cr));
-
-    /* This is the control device. Allocate a new minor if requested. */
-    if (flag & FEXCL) {
-        mutex_enter(&zfsdev_state_lock);
-        error = zfs_ctldev_init(devp);
-        mutex_exit(&zfsdev_state_lock);
-    }
-
-    return (error);
-}
-
-static int
-zfsdev_close(dev_t dev, int flag, int otyp, cred_t *cr)
-{
-    zfs_onexit_t *zo;
-    minor_t minor = getminor(dev);
-
-    if (minor == 0)
-        return (0);
-
     mutex_enter(&zfsdev_state_lock);
-    zo = zfsdev_get_soft_state(minor, ZSST_CTLDEV);
-    if (zo == NULL) {
-        mutex_exit(&zfsdev_state_lock);
-        return (zvol_close(dev, flag, otyp, cr));
-    }
-    zfs_ctldev_destroy(zo, minor);
+    error = zfsdev_state_init(filp);
     mutex_exit(&zfsdev_state_lock);

-    return (0);
+    return (-error);
 }

 static int
-zfsdev_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
+zfsdev_release(struct inode *ino, struct file *filp)
+{
+    int error;
+
+    mutex_enter(&zfsdev_state_lock);
+    error = zfsdev_state_destroy(filp);
+    mutex_exit(&zfsdev_state_lock);
+
+    return (-error);
+}
+
+static long
+zfsdev_ioctl(struct file *filp, unsigned cmd, unsigned long arg)
 {
     zfs_cmd_t *zc;
     uint_t vec;
-    int error, rc;
+    int error, rc, flag = 0;

-    minor_t minor = getminor(dev);
-
-    if (minor != 0 &&
-        zfsdev_get_soft_state(minor, ZSST_CTLDEV) == NULL)
-        return (zvol_ioctl(dev, cmd, arg, flag, cr, rvalp));
-
     vec = cmd - ZFS_IOC;
-    ASSERT3U(getmajor(dev), ==, ddi_driver_major(zfs_dip));

     if (vec >= sizeof (zfs_ioc_vec) / sizeof (zfs_ioc_vec[0]))
-        return (EINVAL);
+        return (-EINVAL);

     zc = kmem_zalloc(sizeof (zfs_cmd_t), KM_SLEEP);

@@ -4975,7 +5058,7 @@ zfsdev_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
         error = EFAULT;

     if ((error == 0) && !(flag & FKIOCTL))
-        error = zfs_ioc_vec[vec].zvec_secpolicy(zc, cr);
+        error = zfs_ioc_vec[vec].zvec_secpolicy(zc, NULL);

     /*
      * Ensure that all pool/dataset names are valid before we pass down to

@@ -5018,121 +5101,74 @@ zfsdev_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
     }

     kmem_free(zc, sizeof (zfs_cmd_t));
-    return (error);
-}
+    return (-error);
+}
+
+#ifdef CONFIG_COMPAT
+static long
+zfsdev_compat_ioctl(struct file *filp, unsigned cmd, unsigned long arg)
+{
+    return zfsdev_ioctl(filp, cmd, arg);
+}
+#else
+#define zfs_compat_ioctl NULL
+#endif
+
+static const struct file_operations zfsdev_fops = {
+    .open           = zfsdev_open,
+    .release        = zfsdev_release,
+    .unlocked_ioctl = zfsdev_ioctl,
+    .compat_ioctl   = zfsdev_compat_ioctl,
+    .owner          = THIS_MODULE,
+};
+
+static struct miscdevice zfs_misc = {
+    .minor          = MISC_DYNAMIC_MINOR,
+    .name           = ZFS_DRIVER,
+    .fops           = &zfsdev_fops,
+};

 static int
-zfs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
+zfs_attach(void)
 {
-    if (cmd != DDI_ATTACH)
-        return (DDI_FAILURE);
+    int error;

-    if (ddi_create_minor_node(dip, "zfs", S_IFCHR, 0,
-        DDI_PSEUDO, 0) == DDI_FAILURE)
-        return (DDI_FAILURE);
+    mutex_init(&zfsdev_state_lock, NULL, MUTEX_DEFAULT, NULL);
+    list_create(&zfsdev_state_list, sizeof (zfsdev_state_t),
+        offsetof(zfsdev_state_t, zs_next));

-    zfs_dip = dip;
+    error = misc_register(&zfs_misc);
+    if (error) {
+        printk(KERN_INFO "ZFS: misc_register() failed %d\n", error);
+        return (error);
+    }

-    ddi_report_dev(dip);
+    return (0);
+}

-    return (DDI_SUCCESS);
+static void
+zfs_detach(void)
+{
+    int error;
+
+    error = misc_deregister(&zfs_misc);
+    if (error)
+        printk(KERN_INFO "ZFS: misc_deregister() failed %d\n", error);
+
+    mutex_destroy(&zfsdev_state_lock);
+    list_destroy(&zfsdev_state_list);
 }

-static int
-zfs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
-{
-    if (spa_busy() || zfs_busy() || zvol_busy())
-        return (DDI_FAILURE);
-
-    if (cmd != DDI_DETACH)
-        return (DDI_FAILURE);
-
-    zfs_dip = NULL;
-
-    ddi_prop_remove_all(dip);
-    ddi_remove_minor_node(dip, NULL);
-
-    return (DDI_SUCCESS);
-}
-
-/*ARGSUSED*/
-static int
-zfs_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
-{
-    switch (infocmd) {
-    case DDI_INFO_DEVT2DEVINFO:
-        *result = zfs_dip;
-        return (DDI_SUCCESS);
-
-    case DDI_INFO_DEVT2INSTANCE:
-        *result = (void *)0;
-        return (DDI_SUCCESS);
-    }
-
-    return (DDI_FAILURE);
-}
-
-/*
- * OK, so this is a little weird.
- *
- * /dev/zfs is the control node, i.e. minor 0.
- * /dev/zvol/[r]dsk/pool/dataset are the zvols, minor > 0.
- *
- * /dev/zfs has basically nothing to do except serve up ioctls,
- * so most of the standard driver entry points are in zvol.c.
- */
-static struct cb_ops zfs_cb_ops = {
-    zfsdev_open,    /* open */
-    zfsdev_close,   /* close */
-    zvol_strategy,  /* strategy */
-    nodev,          /* print */
-    zvol_dump,      /* dump */
-    zvol_read,      /* read */
-    zvol_write,     /* write */
-    zfsdev_ioctl,   /* ioctl */
-    nodev,          /* devmap */
-    nodev,          /* mmap */
-    nodev,          /* segmap */
-    nochpoll,       /* poll */
-    ddi_prop_op,    /* prop_op */
-    NULL,           /* streamtab */
-    D_NEW | D_MP | D_64BIT,  /* Driver compatibility flag */
-    CB_REV,         /* version */
-    nodev,          /* async read */
-    nodev,          /* async write */
-};
-
-static struct dev_ops zfs_dev_ops = {
-    DEVO_REV,       /* version */
-    0,              /* refcnt */
-    zfs_info,       /* info */
-    nulldev,        /* identify */
-    nulldev,        /* probe */
-    zfs_attach,     /* attach */
-    zfs_detach,     /* detach */
-    nodev,          /* reset */
-    &zfs_cb_ops,    /* driver operations */
-    NULL,           /* no bus operations */
-    NULL,           /* power */
-    ddi_quiesce_not_needed,  /* quiesce */
-};
-
-static struct modldrv zfs_modldrv = {
-    &mod_driverops,
-    "ZFS storage pool",
-    &zfs_dev_ops
-};
-
-static struct modlinkage modlinkage = {
-    MODREV_1,
-    (void *)&zfs_modlfs,
-    (void *)&zfs_modldrv,
-    NULL
-};
-
+#ifdef HAVE_ZPL
 uint_t zfs_fsyncer_key;
 extern uint_t rrw_tsd_key;
+#endif
+
+#ifdef DEBUG
+#define ZFS_DEBUG_STR   " (DEBUG mode)"
+#else
+#define ZFS_DEBUG_STR   ""
+#endif

 int
 _init(void)

@@ -5141,39 +5177,44 @@ _init(void)
     spa_init(FREAD | FWRITE);
     zfs_init();
-    zvol_init();

-    if ((error = mod_install(&modlinkage)) != 0) {
-        zvol_fini();
-        zfs_fini();
-        spa_fini();
-        return (error);
-    }
+    if ((error = zvol_init()) != 0)
+        goto out1;

+    if ((error = zfs_attach()) != 0)
+        goto out2;
+
+#ifdef HAVE_ZPL
     tsd_create(&zfs_fsyncer_key, NULL);
     tsd_create(&rrw_tsd_key, NULL);

-    error = ldi_ident_from_mod(&modlinkage, &zfs_li);
-    ASSERT(error == 0);
     mutex_init(&zfs_share_lock, NULL, MUTEX_DEFAULT, NULL);
+#endif /* HAVE_ZPL */
+
+    printk(KERN_NOTICE "ZFS: Loaded ZFS Filesystem v%s%s\n",
+        ZFS_META_VERSION, ZFS_DEBUG_STR);

     return (0);
+
+out2:
+    (void) zvol_fini();
+out1:
+    zfs_fini();
+    spa_fini();
+    printk(KERN_NOTICE "ZFS: Failed to Load ZFS Filesystem v%s%s"
+        ", rc = %d\n", ZFS_META_VERSION, ZFS_DEBUG_STR, error);
+
+    return (error);
 }

 int
 _fini(void)
 {
-    int error;
+    zfs_detach();

-    if (spa_busy() || zfs_busy() || zvol_busy() || zio_injection_enabled)
-        return (EBUSY);
-
-    if ((error = mod_remove(&modlinkage)) != 0)
-        return (error);
-
     zvol_fini();
     zfs_fini();
     spa_fini();
+#ifdef HAVE_ZPL
     if (zfs_nfsshare_inited)
         (void) ddi_modclose(nfs_mod);
     if (zfs_smbshare_inited)

@@ -5181,16 +5222,21 @@ _fini(void)
     if (zfs_nfsshare_inited || zfs_smbshare_inited)
         (void) ddi_modclose(sharefs_mod);

-    tsd_destroy(&zfs_fsyncer_key);
-    ldi_ident_release(zfs_li);
-    zfs_li = NULL;
     mutex_destroy(&zfs_share_lock);
+    tsd_destroy(&zfs_fsyncer_key);
+#endif /* HAVE_ZPL */

-    return (error);
+    printk(KERN_NOTICE "ZFS: Unloaded ZFS Filesystem v%s%s\n",
+        ZFS_META_VERSION, ZFS_DEBUG_STR);
+
+    return (0);
 }

-int
-_info(struct modinfo *modinfop)
-{
-    return (mod_info(&modlinkage, modinfop));
-}
+#ifdef HAVE_SPL
+spl_module_init(_init);
+spl_module_exit(_fini);
+
+MODULE_DESCRIPTION("ZFS");
+MODULE_AUTHOR(ZFS_META_AUTHOR);
+MODULE_LICENSE(ZFS_META_LICENSE);
+#endif /* HAVE_SPL */

module/zfs/zfs_onexit.c

@@ -42,9 +42,10 @@
  *
  * These cleanup callbacks are intended to allow for the accumulation
  * of kernel state across multiple ioctls.  User processes participate
- * by opening ZFS_DEV with O_EXCL. This causes the ZFS driver to do a
- * clone-open, generating a unique minor number. The process then passes
- * along that file descriptor to each ioctl that might have a cleanup operation.
+ * simply by opening ZFS_DEV.  This causes the ZFS driver to create
+ * some private data for the file descriptor and generate a unique
+ * minor number.  The process then passes along that file descriptor
+ * to each ioctl that might have a cleanup operation.
  *
  * Consumers of the onexit routines should call zfs_onexit_fd_hold() early
  * on to validate the given fd and add a reference to its file table entry.

@@ -106,7 +107,7 @@ zfs_onexit_destroy(zfs_onexit_t *zo)
 static int
 zfs_onexit_minor_to_state(minor_t minor, zfs_onexit_t **zo)
 {
-    *zo = zfsdev_get_soft_state(minor, ZSST_CTLDEV);
+    *zo = zfsdev_get_state(minor, ZST_ONEXIT);
     if (*zo == NULL)
         return (EBADF);

@@ -129,7 +130,7 @@ zfs_onexit_fd_hold(int fd, minor_t *minorp)
     if (fp == NULL)
         return (EBADF);

-    *minorp = getminor(fp->f_vnode->v_rdev);
+    *minorp = zfsdev_getminor(fp->f_file);
     return (zfs_onexit_minor_to_state(*minorp, &zo));
 }
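The kernel-side flow the comment block above describes can be sketched roughly as follows (a hedged example, not code from this commit; my_cleanup_cb and my_data are hypothetical, while zfs_onexit_fd_hold(), zfs_onexit_add_cb(), and zfs_onexit_fd_rele() are the onexit entry points this file provides):

/* Hypothetical kernel consumer of the onexit API. */
static void
my_cleanup_cb(void *arg)
{
    /* Release whatever state was accumulated across ioctls. */
}

static int
example_register_cleanup(int fd, void *my_data)
{
    minor_t minor;
    uint64_t action_handle;
    int error;

    /* Validate the fd and hold a reference on its file table entry. */
    error = zfs_onexit_fd_hold(fd, &minor);
    if (error)
        return (error);

    /* Run my_cleanup_cb(my_data) when the fd is finally closed. */
    error = zfs_onexit_add_cb(minor, my_cleanup_cb, my_data,
        &action_handle);

    zfs_onexit_fd_rele(fd);
    return (error);
}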

scripts/common.sh (new file, 443 lines)

@@ -0,0 +1,443 @@
#!/bin/bash
#
# Common support functions for testing scripts. If a .script-config
# file is available it will be sourced so in-tree kernel modules and
# utilities will be used. If no .script-config can be found then the
# installed kernel modules and utilities will be used.
basedir="$(dirname $0)"
SCRIPT_CONFIG=.script-config
if [ -f "${basedir}/../${SCRIPT_CONFIG}" ]; then
. "${basedir}/../${SCRIPT_CONFIG}"
else
MODULES=(zlib_deflate spl splat zavl znvpair zunicode zcommon zfs)
fi
PROG="<define PROG>"
CLEANUP=
VERBOSE=
VERBOSE_FLAG=
FORCE=
FORCE_FLAG=
DUMP_LOG=
ERROR=
RAID0S=()
RAID10S=()
RAIDZS=()
RAIDZ2S=()
TESTS_RUN=${TESTS_RUN:-'*'}
TESTS_SKIP=${TESTS_SKIP:-}
prefix=/usr/local
exec_prefix=${prefix}
libexecdir=${exec_prefix}/libexec
pkglibexecdir=${libexecdir}/zfs
bindir=${exec_prefix}/bin
sbindir=${exec_prefix}/sbin
ETCDIR=${ETCDIR:-/etc}
DEVDIR=${DEVDIR:-/dev/disk/zpool}
ZPOOLDIR=${ZPOOLDIR:-${pkglibexecdir}/zpool-config}
ZPIOSDIR=${ZPIOSDIR:-${pkglibexecdir}/zpios-test}
ZPIOSPROFILEDIR=${ZPIOSPROFILEDIR:-${pkglibexecdir}/zpios-profile}
ZDB=${ZDB:-${sbindir}/zdb}
ZFS=${ZFS:-${sbindir}/zfs}
ZINJECT=${ZINJECT:-${sbindir}/zinject}
ZPOOL=${ZPOOL:-${sbindir}/zpool}
ZPOOL_ID=${ZPOOL_ID:-${bindir}/zpool_id}
ZTEST=${ZTEST:-${sbindir}/ztest}
ZPIOS=${ZPIOS:-${sbindir}/zpios}
COMMON_SH=${COMMON_SH:-${pkglibexecdir}/common.sh}
ZFS_SH=${ZFS_SH:-${pkglibexecdir}/zfs.sh}
ZPOOL_CREATE_SH=${ZPOOL_CREATE_SH:-${pkglibexecdir}/zpool-create.sh}
ZPIOS_SH=${ZPIOS_SH:-${pkglibexecdir}/zpios.sh}
ZPIOS_SURVEY_SH=${ZPIOS_SURVEY_SH:-${pkglibexecdir}/zpios-survey.sh}
LDMOD=${LDMOD:-/sbin/modprobe}
LSMOD=${LSMOD:-/sbin/lsmod}
RMMOD=${RMMOD:-/sbin/rmmod}
INFOMOD=${INFOMOD:-/sbin/modinfo}
LOSETUP=${LOSETUP:-/sbin/losetup}
SYSCTL=${SYSCTL:-/sbin/sysctl}
UDEVADM=${UDEVADM:-/sbin/udevadm}
AWK=${AWK:-/usr/bin/awk}
COLOR_BLACK="\033[0;30m"
COLOR_DK_GRAY="\033[1;30m"
COLOR_BLUE="\033[0;34m"
COLOR_LT_BLUE="\033[1;34m"
COLOR_GREEN="\033[0;32m"
COLOR_LT_GREEN="\033[1;32m"
COLOR_CYAN="\033[0;36m"
COLOR_LT_CYAN="\033[1;36m"
COLOR_RED="\033[0;31m"
COLOR_LT_RED="\033[1;31m"
COLOR_PURPLE="\033[0;35m"
COLOR_LT_PURPLE="\033[1;35m"
COLOR_BROWN="\033[0;33m"
COLOR_YELLOW="\033[1;33m"
COLOR_LT_GRAY="\033[0;37m"
COLOR_WHITE="\033[1;37m"
COLOR_RESET="\033[0m"
die() {
echo -e "${PROG}: $1" >&2
exit 1
}
msg() {
if [ ${VERBOSE} ]; then
echo "$@"
fi
}
pass() {
echo -e "${COLOR_GREEN}Pass${COLOR_RESET}"
}
fail() {
echo -e "${COLOR_RED}Fail${COLOR_RESET} ($1)"
exit $1
}
skip() {
echo -e "${COLOR_BROWN}Skip${COLOR_RESET}"
}
spl_dump_log() {
${SYSCTL} -w kernel.spl.debug.dump=1 &>/dev/null
local NAME=`dmesg | tail -n 1 | cut -f5 -d' '`
${SPLBUILD}/cmd/spl ${NAME} >${NAME}.log
echo
echo "Dumped debug log: ${NAME}.log"
tail -n1 ${NAME}.log
echo
return 0
}
check_modules() {
local LOADED_MODULES=()
local MISSING_MODULES=()
for MOD in ${MODULES[*]}; do
local NAME=`basename $MOD .ko`
if ${LSMOD} | egrep -q "^${NAME}"; then
LOADED_MODULES=(${NAME} ${LOADED_MODULES[*]})
fi
if [ ${INFOMOD} ${MOD} 2>/dev/null ]; then
MISSING_MODULES=("\t${MOD}\n" ${MISSING_MODULES[*]})
fi
done
if [ ${#LOADED_MODULES[*]} -gt 0 ]; then
ERROR="Unload these modules with '${PROG} -u':\n"
ERROR="${ERROR}${LOADED_MODULES[*]}"
return 1
fi
if [ ${#MISSING_MODULES[*]} -gt 0 ]; then
ERROR="The following modules can not be found,"
ERROR="${ERROR} ensure your source trees are built:\n"
ERROR="${ERROR}${MISSING_MODULES[*]}"
return 1
fi
return 0
}
load_module() {
local NAME=`basename $1 .ko`
if [ ${VERBOSE} ]; then
echo "Loading ${NAME} ($@)"
fi
${LDMOD} $* || ERROR="Failed to load $1" return 1
return 0
}
load_modules() {
mkdir -p /etc/zfs
for MOD in ${MODULES[*]}; do
local NAME=`basename ${MOD} .ko`
local VALUE=
for OPT in "$@"; do
OPT_NAME=`echo ${OPT} | cut -f1 -d'='`
if [ ${NAME} = "${OPT_NAME}" ]; then
VALUE=`echo ${OPT} | cut -f2- -d'='`
fi
done
load_module ${MOD} ${VALUE} || return 1
done
if [ ${VERBOSE} ]; then
echo "Successfully loaded ZFS module stack"
fi
return 0
}
unload_module() {
local NAME=`basename $1 .ko`
if [ ${VERBOSE} ]; then
echo "Unloading ${NAME} ($@)"
fi
${RMMOD} ${NAME} || ERROR="Failed to unload ${NAME}" return 1
return 0
}
unload_modules() {
local MODULES_REVERSE=( $(echo ${MODULES[@]} |
${AWK} '{for (i=NF;i>=1;i--) printf $i" "} END{print ""}') )
for MOD in ${MODULES_REVERSE[*]}; do
local NAME=`basename ${MOD} .ko`
local USE_COUNT=`${LSMOD} |
egrep "^${NAME} "| ${AWK} '{print $3}'`
if [ "${USE_COUNT}" = 0 ] ; then
if [ "${DUMP_LOG}" -a ${NAME} = "spl" ]; then
spl_dump_log
fi
unload_module ${MOD} || return 1
fi
done
if [ ${VERBOSE} ]; then
echo "Successfully unloaded ZFS module stack"
fi
return 0
}
unused_loop_device() {
for DEVICE in `ls -1 /dev/loop*`; do
${LOSETUP} ${DEVICE} &>/dev/null
if [ $? -ne 0 ]; then
echo ${DEVICE}
return
fi
done
die "Error: Unable to find unused loopback device"
}
#
# This can be slightly dangerous because the loop devices we are
# cleaning up may not be ours. However, if the devices are currently
# in use we will not be able to remove them, and we only remove
# devices which include 'zpool' in the name. So any damage we might
# do should be limited to other zfs related testing.
#
cleanup_loop_devices() {
local TMP_FILE=`mktemp`
${LOSETUP} -a | tr -d '()' >${TMP_FILE}
${AWK} -F":" -v losetup="$LOSETUP" \
'/zpool/ { system("losetup -d "$1) }' ${TMP_FILE}
${AWK} -F" " '/zpool/ { system("rm -f "$3) }' ${TMP_FILE}
rm -f ${TMP_FILE}
}
#
# The following udev helper functions assume that the provided
# udev rules file will create a /dev/disk/zpool/<CHANNEL><RANK>
# disk mapping. In this mapping each CHANNEL is represented by
# the letters a-z, and the RANK is represented by the numbers
# 1-n. A CHANNEL should identify a group of RANKS which are all
# attached to a single controller, each RANK represents a disk.
# This provides a simply mechanism to locate a specific drive
# given a known hardware configuration.
#
udev_setup() {
local SRC_PATH=$1
# When running in-tree, manually construct symlinks in tree to
# the proper devices. Symlinks are installed for all entries
# in the config file regardless of whether that device actually
# exists. When installed as a package udev can be relied on for
# this and it will only create links for devices which exist.
if [ ${INTREE} ]; then
PWD=`pwd`
mkdir -p ${DEVDIR}/
cd ${DEVDIR}/
${AWK} '!/^#/ && /./ { system( \
"ln -f -s /dev/disk/by-path/"$2" "$1";" \
"ln -f -s /dev/disk/by-path/"$2"-part1 "$1"p1;" \
"ln -f -s /dev/disk/by-path/"$2"-part9 "$1"p9;" \
) }' $SRC_PATH
cd ${PWD}
else
DST_FILE=`basename ${SRC_PATH} | cut -f1-2 -d'.'`
DST_PATH=/etc/zfs/${DST_FILE}
if [ -e ${DST_PATH} ]; then
die "Error: Config ${DST_PATH} already exists"
fi
cp ${SRC_PATH} ${DST_PATH}
if [ -f ${UDEVADM} ]; then
${UDEVADM} trigger
${UDEVADM} settle
else
/sbin/udevtrigger
/sbin/udevsettle
fi
fi
return 0
}
udev_cleanup() {
local SRC_PATH=$1
if [ ${INTREE} ]; then
PWD=`pwd`
cd ${DEVDIR}/
${AWK} '!/^#/ && /./ { system( \
"rm -f "$1" "$1"p1 "$1"p9") }' $SRC_PATH
cd ${PWD}
fi
return 0
}
udev_cr2d() {
local CHANNEL=`echo "obase=16; $1+96" | bc`
local RANK=$2
printf "\x${CHANNEL}${RANK}"
}
udev_raid0_setup() {
local RANKS=$1
local CHANNELS=$2
local IDX=0
RAID0S=()
for RANK in `seq 1 ${RANKS}`; do
for CHANNEL in `seq 1 ${CHANNELS}`; do
DISK=`udev_cr2d ${CHANNEL} ${RANK}`
RAID0S[${IDX}]="${DEVDIR}/${DISK}"
let IDX=IDX+1
done
done
return 0
}
udev_raid10_setup() {
local RANKS=$1
local CHANNELS=$2
local IDX=0
RAID10S=()
for RANK in `seq 1 ${RANKS}`; do
for CHANNEL1 in `seq 1 2 ${CHANNELS}`; do
let CHANNEL2=CHANNEL1+1
DISK1=`udev_cr2d ${CHANNEL1} ${RANK}`
DISK2=`udev_cr2d ${CHANNEL2} ${RANK}`
GROUP="${DEVDIR}/${DISK1} ${DEVDIR}/${DISK2}"
RAID10S[${IDX}]="mirror ${GROUP}"
let IDX=IDX+1
done
done
return 0
}
udev_raidz_setup() {
local RANKS=$1
local CHANNELS=$2
RAIDZS=()
for RANK in `seq 1 ${RANKS}`; do
RAIDZ=("raidz")
for CHANNEL in `seq 1 ${CHANNELS}`; do
DISK=`udev_cr2d ${CHANNEL} ${RANK}`
RAIDZ[${CHANNEL}]="${DEVDIR}/${DISK}"
done
RAIDZS[${RANK}]="${RAIDZ[*]}"
done
return 0
}
udev_raidz2_setup() {
local RANKS=$1
local CHANNELS=$2
RAIDZ2S=()
for RANK in `seq 1 ${RANKS}`; do
RAIDZ2=("raidz2")
for CHANNEL in `seq 1 ${CHANNELS}`; do
DISK=`udev_cr2d ${CHANNEL} ${RANK}`
RAIDZ2[${CHANNEL}]="${DEVDIR}/${DISK}"
done
RAIDZ2S[${RANK}]="${RAIDZ2[*]}"
done
return 0
}
run_one_test() {
local TEST_NUM=$1
local TEST_NAME=$2
printf "%-4d %-36s " ${TEST_NUM} "${TEST_NAME}"
test_${TEST_NUM}
}
skip_one_test() {
local TEST_NUM=$1
local TEST_NAME=$2
printf "%-4d %-36s " ${TEST_NUM} "${TEST_NAME}"
skip
}
run_test() {
local TEST_NUM=$1
local TEST_NAME=$2
for i in ${TESTS_SKIP[@]}; do
if [[ $i == ${TEST_NUM} ]] ; then
skip_one_test ${TEST_NUM} "${TEST_NAME}"
return 0
fi
done
if [ "${TESTS_RUN[0]}" = "*" ]; then
run_one_test ${TEST_NUM} "${TEST_NAME}"
else
for i in ${TESTS_RUN[@]}; do
if [[ $i == ${TEST_NUM} ]] ; then
run_one_test ${TEST_NUM} "${TEST_NAME}"
return 0
fi
done
skip_one_test ${TEST_NUM} "${TEST_NAME}"
fi
}

scripts/common.sh.in

@@ -26,6 +26,8 @@ RAID0S=()
 RAID10S=()
 RAIDZS=()
 RAIDZ2S=()
+TESTS_RUN=${TESTS_RUN:-'*'}
+TESTS_SKIP=${TESTS_SKIP:-}

 prefix=@prefix@
 exec_prefix=@exec_prefix@

@@ -58,6 +60,24 @@ SYSCTL=${SYSCTL:-/sbin/sysctl}
 UDEVADM=${UDEVADM:-/sbin/udevadm}
 AWK=${AWK:-/usr/bin/awk}

+COLOR_BLACK="\033[0;30m"
+COLOR_DK_GRAY="\033[1;30m"
+COLOR_BLUE="\033[0;34m"
+COLOR_LT_BLUE="\033[1;34m"
+COLOR_GREEN="\033[0;32m"
+COLOR_LT_GREEN="\033[1;32m"
+COLOR_CYAN="\033[0;36m"
+COLOR_LT_CYAN="\033[1;36m"
+COLOR_RED="\033[0;31m"
+COLOR_LT_RED="\033[1;31m"
+COLOR_PURPLE="\033[0;35m"
+COLOR_LT_PURPLE="\033[1;35m"
+COLOR_BROWN="\033[0;33m"
+COLOR_YELLOW="\033[1;33m"
+COLOR_LT_GRAY="\033[0;37m"
+COLOR_WHITE="\033[1;37m"
+COLOR_RESET="\033[0m"
+
 die() {
     echo -e "${PROG}: $1" >&2
     exit 1

@@ -70,14 +90,18 @@ msg() {
 }

 pass() {
-    echo "PASS"
+    echo -e "${COLOR_GREEN}Pass${COLOR_RESET}"
 }

 fail() {
-    echo "FAIL ($1)"
+    echo -e "${COLOR_RED}Fail${COLOR_RESET} ($1)"
     exit $1
 }

+skip() {
+    echo -e "${COLOR_BROWN}Skip${COLOR_RESET}"
+}
+
 spl_dump_log() {
     ${SYSCTL} -w kernel.spl.debug.dump=1 &>/dev/null
     local NAME=`dmesg | tail -n 1 | cut -f5 -d' '`

@@ -371,3 +395,44 @@ udev_raidz2_setup() {
     return 0
 }
+
+run_one_test() {
+    local TEST_NUM=$1
+    local TEST_NAME=$2
+
+    printf "%-4d %-36s " ${TEST_NUM} "${TEST_NAME}"
+    test_${TEST_NUM}
+}
+
+skip_one_test() {
+    local TEST_NUM=$1
+    local TEST_NAME=$2
+
+    printf "%-4d %-36s " ${TEST_NUM} "${TEST_NAME}"
+    skip
+}
+
+run_test() {
+    local TEST_NUM=$1
+    local TEST_NAME=$2
+
+    for i in ${TESTS_SKIP[@]}; do
+        if [[ $i == ${TEST_NUM} ]] ; then
+            skip_one_test ${TEST_NUM} "${TEST_NAME}"
+            return 0
+        fi
+    done
+
+    if [ "${TESTS_RUN[0]}" = "*" ]; then
+        run_one_test ${TEST_NUM} "${TEST_NAME}"
+    else
+        for i in ${TESTS_RUN[@]}; do
+            if [[ $i == ${TEST_NUM} ]] ; then
+                run_one_test ${TEST_NUM} "${TEST_NAME}"
+                return 0
+            fi
+        done
+
+        skip_one_test ${TEST_NUM} "${TEST_NAME}"
+    fi
+}

scripts/zconfig.sh

@@ -29,7 +29,7 @@ OPTIONS:
 EOF
 }

-while getopts 'hvc?' OPTION; do
+while getopts 'hvct:s:?' OPTION; do
     case $OPTION in
     h)
         usage

@@ -41,6 +41,12 @@ while getopts 'hvc?' OPTION; do
     c)
         CLEANUP=1
         ;;
+    t)
+        TESTS_RUN=($OPTARG)
+        ;;
+    s)
+        TESTS_SKIP=($OPTARG)
+        ;;
     ?)
         usage
         exit

@@ -75,14 +81,12 @@ EOF
 }

 # Validate persistent zpool.cache configuration.
-zconfig_test1() {
+test_1() {
     local POOL_NAME=test1
     local TMP_FILE1=`mktemp`
     local TMP_FILE2=`mktemp`
     local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

-    echo -n "test 1 - persistent zpool.cache: "
-
     # Create a pool save its status for comparison.
     ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
     ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2

@@ -101,17 +105,15 @@ zconfig_test1() {
     pass
 }
-zconfig_test1
+run_test 1 "persistent zpool.cache"

 # Validate ZFS disk scanning and import w/out zpool.cache configuration.
-zconfig_test2() {
+test_2() {
     local POOL_NAME=test2
     local TMP_FILE1=`mktemp`
     local TMP_FILE2=`mktemp`
     local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

-    echo -n "test 2 - scan disks for pools to import: "
-
     # Create a pool save its status for comparison.
     ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
     ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2

@@ -135,7 +137,7 @@ zconfig_test2() {
     pass
 }
-zconfig_test2
+run_test 2 "scan disks for pools to import"

 zconfig_zvol_device_stat() {
     local EXPECT=$1

@@ -175,7 +177,7 @@ zconfig_zvol_device_stat() {
 # zpool import/export device check
 # (1 volume, 2 partitions, 1 snapshot, 1 clone)
-zconfig_test3() {
+test_3() {
     local POOL_NAME=tank
     local ZVOL_NAME=volume
     local SNAP_NAME=snap

@@ -185,8 +187,6 @@ zconfig_test3() {
     local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
     local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

-    echo -n "test 3 - zpool import/export device: "
-
     # Create a pool, volume, partition, snapshot, and clone.
     ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
     ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2

@@ -225,10 +225,10 @@ zconfig_test3() {
     pass
 }
-zconfig_test3
+run_test 3 "zpool import/export device"

 # zpool insmod/rmmod device check (1 volume, 1 snapshot, 1 clone)
-zconfig_test4() {
+test_4() {
     POOL_NAME=tank
     ZVOL_NAME=volume
     SNAP_NAME=snap

@@ -238,8 +238,6 @@ zconfig_test4() {
     FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
     TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

-    echo -n "test 4 - zpool insmod/rmmod device: "
-
     # Create a pool, volume, snapshot, and clone
     ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
     ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2

@@ -278,18 +276,16 @@ zconfig_test4() {
     pass
 }
-zconfig_test4
+run_test 4 "zpool insmod/rmmod device"

 # ZVOL volume sanity check
-zconfig_test5() {
+test_5() {
     local POOL_NAME=tank
     local ZVOL_NAME=fish
     local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
     local SRC_DIR=/bin/
     local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

-    echo -n "test 5 - zvol+ext3 volume: "
-
     # Create a pool and volume.
     ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
     ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2

@@ -323,10 +319,10 @@ zconfig_test5() {
     pass
 }
-zconfig_test5
+run_test 5 "zvol+ext3 volume"

 # ZVOL snapshot sanity check
-zconfig_test6() {
+test_6() {
     local POOL_NAME=tank
     local ZVOL_NAME=fish
     local SNAP_NAME=pristine

@@ -335,8 +331,6 @@ zconfig_test6() {
     local SRC_DIR=/bin/
     local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

-    echo -n "test 6 - zvol+ext2 snapshot: "
-
     # Create a pool and volume.
     ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
     ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2

@@ -382,10 +376,10 @@ zconfig_test6() {
     pass
 }
-zconfig_test6
+run_test 6 "zvol+ext2 snapshot"

 # ZVOL clone sanity check
-zconfig_test7() {
+test_7() {
     local POOL_NAME=tank
     local ZVOL_NAME=fish
     local SNAP_NAME=pristine

@@ -396,8 +390,6 @@ zconfig_test7() {
     local SRC_DIR=/bin/
     local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

-    echo -n "test 7 - zvol+ext2 clone: "
-
     # Create a pool and volume.
     ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
     ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2

@@ -464,7 +456,7 @@ zconfig_test7() {
     pass
 }
-zconfig_test7
+run_test 7 "zvol+ext2 clone"

 # Send/Receive sanity check
 test_8() {