Align mount options handling and type/function names with OpenZFS

Refactor the handling of temporary mount options in a way that
minimizes differences with upstream.  Additionally, replace the
zfs_sb_t type with zfsvfs_t and rename several functions to be
consistent with the upstream names.

Reviewed-by: George Melikov <mail@gmelikov.ru>
Reviewed-by: Tim Chase <tim@chase2k.com>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #5876
Brian Behlendorf, 2017-03-13 15:08:40 -07:00 (committed by GitHub)
commit 09ec770c2c
24 changed files with 1807 additions and 1824 deletions
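
For orientation, here is a minimal sketch (not part of this commit) of how a mount-time caller is expected to hand options down through the new zfs_mnt_t, based only on the declarations visible in the hunks below. The function and dataset names are hypothetical, and the real zpl_mount()/zpl_fill_super() callers are not shown in this excerpt; the raw option string is carried in mnt_data so that zfs_domount() can parse it into the vfs_t emulation introduced below.

static int
example_fill_super(struct super_block *sb, void *data, int silent)
{
	zfs_mnt_t zm = {
		.mnt_osname = "pool/dataset",	/* objset name (placeholder) */
		.mnt_data = data,		/* raw "-o ..." option string */
	};

	/* zfs_domount() parses the raw options internally. */
	return (zfs_domount(sb, &zm, silent));
}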


@ -2602,7 +2602,7 @@ ztest_zil_remount(ztest_ds_t *zd, uint64_t id)
mutex_enter(&zd->zd_dirobj_lock);
(void) rw_wrlock(&zd->zd_zilog_lock);
/* zfs_sb_teardown() */
/* zfsvfs_teardown() */
zil_close(zd->zd_zilog);
/* zfsvfs_setup() */


@ -202,13 +202,13 @@ typedef struct zfs_acl_ids {
#define ZFS_ACL_PASSTHROUGH_X 5
struct znode;
struct zfs_sb;
struct zfsvfs;
#ifdef _KERNEL
int zfs_acl_ids_create(struct znode *, int, vattr_t *,
cred_t *, vsecattr_t *, zfs_acl_ids_t *);
void zfs_acl_ids_free(zfs_acl_ids_t *);
boolean_t zfs_acl_ids_overquota(struct zfs_sb *, zfs_acl_ids_t *);
boolean_t zfs_acl_ids_overquota(struct zfsvfs *, zfs_acl_ids_t *);
int zfs_getacl(struct znode *, vsecattr_t *, boolean_t, cred_t *);
int zfs_setacl(struct znode *, vsecattr_t *, boolean_t, cred_t *);
void zfs_acl_rele(void *);
@ -225,7 +225,7 @@ int zfs_zaccess_delete(struct znode *, struct znode *, cred_t *);
int zfs_zaccess_rename(struct znode *, struct znode *,
struct znode *, struct znode *, cred_t *cr);
void zfs_acl_free(zfs_acl_t *);
int zfs_vsec_2_aclp(struct zfs_sb *, umode_t, vsecattr_t *, cred_t *,
int zfs_vsec_2_aclp(struct zfsvfs *, umode_t, vsecattr_t *, cred_t *,
struct zfs_fuid_info **, zfs_acl_t **);
int zfs_aclset_common(struct znode *, zfs_acl_t *, cred_t *, dmu_tx_t *);
uint64_t zfs_external_acl(struct znode *);


@ -50,9 +50,9 @@
extern int zfs_expire_snapshot;
/* zfsctl generic functions */
extern int zfsctl_create(zfs_sb_t *zsb);
extern void zfsctl_destroy(zfs_sb_t *zsb);
extern struct inode *zfsctl_root(znode_t *zp);
extern int zfsctl_create(zfsvfs_t *);
extern void zfsctl_destroy(zfsvfs_t *);
extern struct inode *zfsctl_root(znode_t *);
extern void zfsctl_init(void);
extern void zfsctl_fini(void);
extern boolean_t zfsctl_is_node(struct inode *ip);


@ -63,7 +63,7 @@ extern void zfs_rmnode(znode_t *);
extern void zfs_dl_name_switch(zfs_dirlock_t *dl, char *new, char **old);
extern boolean_t zfs_dirempty(znode_t *);
extern void zfs_unlinked_add(znode_t *, dmu_tx_t *);
extern void zfs_unlinked_drain(zfs_sb_t *);
extern void zfs_unlinked_drain(zfsvfs_t *zfsvfs);
extern int zfs_sticky_remove_access(znode_t *, znode_t *, cred_t *cr);
extern int zfs_get_xattrdir(znode_t *, struct inode **, cred_t *, int);
extern int zfs_make_xattrdir(znode_t *, vattr_t *, struct inode **, cred_t *);


@ -33,7 +33,6 @@
#include <sys/zfs_vfsops.h>
#endif
#include <sys/avl.h>
#include <sys/list.h>
#ifdef __cplusplus
extern "C" {
@ -100,24 +99,24 @@ typedef struct zfs_fuid_info {
#ifdef _KERNEL
struct znode;
extern uid_t zfs_fuid_map_id(zfs_sb_t *, uint64_t, cred_t *, zfs_fuid_type_t);
extern uid_t zfs_fuid_map_id(zfsvfs_t *, uint64_t, cred_t *, zfs_fuid_type_t);
extern void zfs_fuid_node_add(zfs_fuid_info_t **, const char *, uint32_t,
uint64_t, uint64_t, zfs_fuid_type_t);
extern void zfs_fuid_destroy(zfs_sb_t *);
extern uint64_t zfs_fuid_create_cred(zfs_sb_t *, zfs_fuid_type_t,
extern void zfs_fuid_destroy(zfsvfs_t *);
extern uint64_t zfs_fuid_create_cred(zfsvfs_t *, zfs_fuid_type_t,
cred_t *, zfs_fuid_info_t **);
extern uint64_t zfs_fuid_create(zfs_sb_t *, uint64_t, cred_t *, zfs_fuid_type_t,
extern uint64_t zfs_fuid_create(zfsvfs_t *, uint64_t, cred_t *, zfs_fuid_type_t,
zfs_fuid_info_t **);
extern void zfs_fuid_map_ids(struct znode *zp, cred_t *cr,
uid_t *uid, uid_t *gid);
extern zfs_fuid_info_t *zfs_fuid_info_alloc(void);
extern void zfs_fuid_info_free(zfs_fuid_info_t *);
extern boolean_t zfs_groupmember(zfs_sb_t *, uint64_t, cred_t *);
void zfs_fuid_sync(zfs_sb_t *, dmu_tx_t *);
extern int zfs_fuid_find_by_domain(zfs_sb_t *, const char *domain,
extern boolean_t zfs_groupmember(zfsvfs_t *, uint64_t, cred_t *);
void zfs_fuid_sync(zfsvfs_t *, dmu_tx_t *);
extern int zfs_fuid_find_by_domain(zfsvfs_t *, const char *domain,
char **retdomain, boolean_t addok);
extern const char *zfs_fuid_find_by_idx(zfs_sb_t *zsb, uint32_t idx);
extern void zfs_fuid_txhold(zfs_sb_t *zsb, dmu_tx_t *tx);
extern const char *zfs_fuid_find_by_idx(zfsvfs_t *zfsvfs, uint32_t idx);
extern void zfs_fuid_txhold(zfsvfs_t *zfsvfs, dmu_tx_t *tx);
#endif
char *zfs_fuid_idx_domain(avl_tree_t *, uint32_t);


@ -50,7 +50,7 @@ typedef struct zfs_rlock {
avl_tree_t zr_avl; /* avl tree of range locks */
uint64_t *zr_size; /* points to znode->z_size */
uint_t *zr_blksz; /* points to znode->z_blksz */
uint64_t *zr_max_blksz; /* points to zsb->z_max_blksz */
uint64_t *zr_max_blksz; /* points to zfsvfs->z_max_blksz */
} zfs_rlock_t;
typedef struct rl {


@ -38,36 +38,46 @@
extern "C" {
#endif
struct zfs_sb;
typedef struct zfsvfs zfsvfs_t;
struct znode;
typedef struct zfs_mntopts {
char *z_osname; /* Objset name */
char *z_mntpoint; /* Primary mount point */
uint64_t z_xattr;
boolean_t z_readonly;
boolean_t z_do_readonly;
boolean_t z_setuid;
boolean_t z_do_setuid;
boolean_t z_exec;
boolean_t z_do_exec;
boolean_t z_devices;
boolean_t z_do_devices;
boolean_t z_do_xattr;
boolean_t z_atime;
boolean_t z_do_atime;
boolean_t z_relatime;
boolean_t z_do_relatime;
boolean_t z_nbmand;
boolean_t z_do_nbmand;
} zfs_mntopts_t;
/*
* This structure emulates the vfs_t from other platforms. It's purpose
* is to faciliate the handling of mount options and minimize structural
* differences between the platforms.
*/
typedef struct vfs {
struct zfsvfs *vfs_data;
char *vfs_mntpoint; /* Primary mount point */
uint64_t vfs_xattr;
boolean_t vfs_readonly;
boolean_t vfs_do_readonly;
boolean_t vfs_setuid;
boolean_t vfs_do_setuid;
boolean_t vfs_exec;
boolean_t vfs_do_exec;
boolean_t vfs_devices;
boolean_t vfs_do_devices;
boolean_t vfs_do_xattr;
boolean_t vfs_atime;
boolean_t vfs_do_atime;
boolean_t vfs_relatime;
boolean_t vfs_do_relatime;
boolean_t vfs_nbmand;
boolean_t vfs_do_nbmand;
} vfs_t;
typedef struct zfs_sb {
typedef struct zfs_mnt {
const char *mnt_osname; /* Objset name */
char *mnt_data; /* Raw mount options */
} zfs_mnt_t;
struct zfsvfs {
vfs_t *z_vfs; /* generic fs struct */
struct super_block *z_sb; /* generic super_block */
struct backing_dev_info z_bdi; /* generic backing dev info */
struct zfs_sb *z_parent; /* parent fs */
struct zfsvfs *z_parent; /* parent fs */
objset_t *z_os; /* objset reference */
zfs_mntopts_t *z_mntopts; /* passed mount options */
uint64_t z_flags; /* super_block flags */
uint64_t z_root; /* id of root znode */
uint64_t z_unlinkedobj; /* id of unlinked zapobj */
@ -117,7 +127,7 @@ typedef struct zfs_sb {
uint64_t z_hold_size; /* znode hold array size */
avl_tree_t *z_hold_trees; /* znode hold trees */
kmutex_t *z_hold_locks; /* znode hold locks */
} zfs_sb_t;
};
#define ZSB_XATTR 0x0001 /* Enable user xattrs */
@ -178,44 +188,34 @@ typedef struct zfid_long {
extern uint_t zfs_fsyncer_key;
extern int zfs_suspend_fs(zfs_sb_t *zsb);
extern int zfs_resume_fs(zfs_sb_t *zsb, struct dsl_dataset *ds);
extern int zfs_userspace_one(zfs_sb_t *zsb, zfs_userquota_prop_t type,
extern int zfs_suspend_fs(zfsvfs_t *zfsvfs);
extern int zfs_resume_fs(zfsvfs_t *zfsvfs, struct dsl_dataset *ds);
extern int zfs_userspace_one(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
const char *domain, uint64_t rid, uint64_t *valuep);
extern int zfs_userspace_many(zfs_sb_t *zsb, zfs_userquota_prop_t type,
extern int zfs_userspace_many(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
uint64_t *cookiep, void *vbuf, uint64_t *bufsizep);
extern int zfs_set_userquota(zfs_sb_t *zsb, zfs_userquota_prop_t type,
extern int zfs_set_userquota(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
const char *domain, uint64_t rid, uint64_t quota);
extern boolean_t zfs_owner_overquota(zfs_sb_t *zsb, struct znode *,
extern boolean_t zfs_owner_overquota(zfsvfs_t *zfsvfs, struct znode *,
boolean_t isgroup);
extern boolean_t zfs_fuid_overquota(zfs_sb_t *zsb, boolean_t isgroup,
extern boolean_t zfs_fuid_overquota(zfsvfs_t *zfsvfs, boolean_t isgroup,
uint64_t fuid);
extern boolean_t zfs_fuid_overobjquota(zfs_sb_t *zsb, boolean_t isgroup,
extern boolean_t zfs_fuid_overobjquota(zfsvfs_t *zfsvfs, boolean_t isgroup,
uint64_t fuid);
extern int zfs_set_version(zfs_sb_t *zsb, uint64_t newvers);
extern int zfs_get_zplprop(objset_t *os, zfs_prop_t prop,
uint64_t *value);
extern zfs_mntopts_t *zfs_mntopts_alloc(void);
extern void zfs_mntopts_free(zfs_mntopts_t *zmo);
extern int zfs_sb_create(const char *name, zfs_mntopts_t *zmo,
zfs_sb_t **zsbp);
extern int zfs_sb_setup(zfs_sb_t *zsb, boolean_t mounting);
extern void zfs_sb_free(zfs_sb_t *zsb);
extern int zfs_sb_prune(struct super_block *sb, unsigned long nr_to_scan,
int *objects);
extern int zfs_sb_teardown(zfs_sb_t *zsb, boolean_t unmounting);
extern int zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers);
extern int zfsvfs_create(const char *name, zfsvfs_t **zfvp);
extern void zfsvfs_free(zfsvfs_t *zfsvfs);
extern int zfs_check_global_label(const char *dsname, const char *hexsl);
extern boolean_t zfs_is_readonly(zfs_sb_t *zsb);
extern int zfs_register_callbacks(zfs_sb_t *zsb);
extern void zfs_unregister_callbacks(zfs_sb_t *zsb);
extern int zfs_domount(struct super_block *sb, zfs_mntopts_t *zmo, int silent);
extern boolean_t zfs_is_readonly(zfsvfs_t *zfsvfs);
extern int zfs_domount(struct super_block *sb, zfs_mnt_t *zm, int silent);
extern void zfs_preumount(struct super_block *sb);
extern int zfs_umount(struct super_block *sb);
extern int zfs_remount(struct super_block *sb, int *flags, zfs_mntopts_t *zmo);
extern int zfs_root(zfs_sb_t *zsb, struct inode **ipp);
extern int zfs_remount(struct super_block *sb, int *flags, zfs_mnt_t *zm);
extern int zfs_statvfs(struct dentry *dentry, struct kstatfs *statp);
extern int zfs_vget(struct super_block *sb, struct inode **ipp, fid_t *fidp);
extern int zfs_prune(struct super_block *sb, unsigned long nr_to_scan,
int *objects);
#ifdef __cplusplus
}
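
To make the intent of the paired vfs_*/vfs_do_* fields above concrete, here is a small illustrative helper (not code from this commit; the real consumers live in zfs_vfsops.c, which is not part of this excerpt). A vfs_do_* flag records that the option was given explicitly, so an unset option can fall back to the dataset property rather than a hard-coded default.

static boolean_t
example_effective_atime(vfs_t *vfsp, boolean_t dataset_atime_prop)
{
	if (vfsp->vfs_do_atime)
		return (vfsp->vfs_atime);	/* explicit atime/noatime */
	return (dataset_atime_prop);		/* inherit dataset property */
}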


@ -63,7 +63,6 @@ extern int zfs_rename(struct inode *sdip, char *snm, struct inode *tdip,
char *tnm, cred_t *cr, int flags);
extern int zfs_symlink(struct inode *dip, char *name, vattr_t *vap,
char *link, struct inode **ipp, cred_t *cr, int flags);
extern int zfs_follow_link(struct dentry *dentry, struct nameidata *nd);
extern int zfs_readlink(struct inode *ip, uio_t *uio, cred_t *cr);
extern int zfs_link(struct inode *tdip, struct inode *sip,
char *name, cred_t *cr, int flags);


@ -233,25 +233,25 @@ typedef struct znode_hold {
*/
#define ZTOI(znode) (&((znode)->z_inode))
#define ITOZ(inode) (container_of((inode), znode_t, z_inode))
#define ZTOZSB(znode) ((zfs_sb_t *)(ZTOI(znode)->i_sb->s_fs_info))
#define ITOZSB(inode) ((zfs_sb_t *)((inode)->i_sb->s_fs_info))
#define ZTOZSB(znode) ((zfsvfs_t *)(ZTOI(znode)->i_sb->s_fs_info))
#define ITOZSB(inode) ((zfsvfs_t *)((inode)->i_sb->s_fs_info))
#define S_ISDEV(mode) (S_ISCHR(mode) || S_ISBLK(mode) || S_ISFIFO(mode))
/* Called on entry to each ZFS vnode and vfs operation */
#define ZFS_ENTER(zsb) \
#define ZFS_ENTER(zfsvfs) \
{ \
rrm_enter_read(&(zsb)->z_teardown_lock, FTAG); \
if ((zsb)->z_unmounted) { \
ZFS_EXIT(zsb); \
rrm_enter_read(&(zfsvfs)->z_teardown_lock, FTAG); \
if ((zfsvfs)->z_unmounted) { \
ZFS_EXIT(zfsvfs); \
return (EIO); \
} \
}
/* Must be called before exiting the vop */
#define ZFS_EXIT(zsb) \
#define ZFS_EXIT(zfsvfs) \
{ \
rrm_exit(&(zsb)->z_teardown_lock, FTAG); \
rrm_exit(&(zfsvfs)->z_teardown_lock, FTAG); \
}
/* Verifies the znode is valid */
@ -266,7 +266,7 @@ typedef struct znode_hold {
*/
#define ZFS_OBJ_MTX_SZ 64
#define ZFS_OBJ_MTX_MAX (1024 * 1024)
#define ZFS_OBJ_HASH(zsb, obj) ((obj) & ((zsb->z_hold_size) - 1))
#define ZFS_OBJ_HASH(zfsvfs, obj) ((obj) & ((zfsvfs->z_hold_size) - 1))
extern unsigned int zfs_object_mutex_size;
@ -291,7 +291,7 @@ extern unsigned int zfs_object_mutex_size;
#define STATE_CHANGED (ATTR_CTIME)
#define CONTENT_MODIFIED (ATTR_MTIME | ATTR_CTIME)
extern int zfs_init_fs(zfs_sb_t *, znode_t **);
extern int zfs_init_fs(zfsvfs_t *, znode_t **);
extern void zfs_set_dataprop(objset_t *);
extern void zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *,
dmu_tx_t *tx);
@ -302,7 +302,7 @@ extern int zfs_freesp(znode_t *, uint64_t, uint64_t, int, boolean_t);
extern void zfs_znode_init(void);
extern void zfs_znode_fini(void);
extern int zfs_znode_hold_compare(const void *, const void *);
extern int zfs_zget(zfs_sb_t *, uint64_t, znode_t **);
extern int zfs_zget(zfsvfs_t *, uint64_t, znode_t **);
extern int zfs_rezget(znode_t *);
extern void zfs_zinactive(znode_t *);
extern void zfs_znode_delete(znode_t *, dmu_tx_t *);
@ -343,8 +343,8 @@ extern void zfs_log_setattr(zilog_t *zilog, dmu_tx_t *tx, int txtype,
extern void zfs_log_acl(zilog_t *zilog, dmu_tx_t *tx, znode_t *zp,
vsecattr_t *vsecp, zfs_fuid_info_t *fuidp);
extern void zfs_xvattr_set(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx);
extern void zfs_upgrade(zfs_sb_t *zsb, dmu_tx_t *tx);
extern int zfs_create_share_dir(zfs_sb_t *zsb, dmu_tx_t *tx);
extern void zfs_upgrade(zfsvfs_t *zfsvfs, dmu_tx_t *tx);
extern int zfs_create_share_dir(zfsvfs_t *zfsvfs, dmu_tx_t *tx);
#if defined(HAVE_UIO_RW)
extern caddr_t zfs_map_page(page_t *, enum seg_rw);
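
For context, this is the pattern every vnode/vfs entry point follows with the renamed macros; the function below is purely illustrative (the name is made up) and only restates how the ZFS_ENTER/ZFS_EXIT macros shown above are used.

static int
example_vnop(struct inode *ip)
{
	zfsvfs_t *zfsvfs = ITOZSB(ip);
	int error = 0;

	ZFS_ENTER(zfsvfs);	/* returns EIO if the fs is already unmounted */
	/* ... operate on zfsvfs->z_os while the teardown read lock is held ... */
	ZFS_EXIT(zfsvfs);	/* drop the teardown read lock */

	return (error);
}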


@ -371,23 +371,23 @@ static int
zfs_acl_znode_info(znode_t *zp, int *aclsize, int *aclcount,
zfs_acl_phys_t *aclphys)
{
zfs_sb_t *zsb = ZTOZSB(zp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
uint64_t acl_count;
int size;
int error;
ASSERT(MUTEX_HELD(&zp->z_acl_lock));
if (zp->z_is_sa) {
if ((error = sa_size(zp->z_sa_hdl, SA_ZPL_DACL_ACES(zsb),
if ((error = sa_size(zp->z_sa_hdl, SA_ZPL_DACL_ACES(zfsvfs),
&size)) != 0)
return (error);
*aclsize = size;
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DACL_COUNT(zsb),
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DACL_COUNT(zfsvfs),
&acl_count, sizeof (acl_count))) != 0)
return (error);
*aclcount = acl_count;
} else {
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zsb),
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zfsvfs),
aclphys, sizeof (*aclphys))) != 0)
return (error);
@ -651,7 +651,7 @@ zfs_ace_walk(void *datap, uint64_t cookie, int aclcnt,
* ACE FUIDs will be created later.
*/
int
zfs_copy_ace_2_fuid(zfs_sb_t *zsb, umode_t obj_mode, zfs_acl_t *aclp,
zfs_copy_ace_2_fuid(zfsvfs_t *zfsvfs, umode_t obj_mode, zfs_acl_t *aclp,
void *datap, zfs_ace_t *z_acl, uint64_t aclcnt, size_t *size,
zfs_fuid_info_t **fuidp, cred_t *cr)
{
@ -669,7 +669,7 @@ zfs_copy_ace_2_fuid(zfs_sb_t *zsb, umode_t obj_mode, zfs_acl_t *aclp,
entry_type = aceptr->z_hdr.z_flags & ACE_TYPE_FLAGS;
if (entry_type != ACE_OWNER && entry_type != OWNING_GROUP &&
entry_type != ACE_EVERYONE) {
aceptr->z_fuid = zfs_fuid_create(zsb, acep->a_who,
aceptr->z_fuid = zfs_fuid_create(zfsvfs, acep->a_who,
cr, (entry_type == 0) ?
ZFS_ACE_USER : ZFS_ACE_GROUP, fuidp);
}
@ -713,7 +713,7 @@ zfs_copy_ace_2_fuid(zfs_sb_t *zsb, umode_t obj_mode, zfs_acl_t *aclp,
* Copy ZFS ACEs to fixed size ace_t layout
*/
static void
zfs_copy_fuid_2_ace(zfs_sb_t *zsb, zfs_acl_t *aclp, cred_t *cr,
zfs_copy_fuid_2_ace(zfsvfs_t *zfsvfs, zfs_acl_t *aclp, cred_t *cr,
void *datap, int filter)
{
uint64_t who;
@ -756,7 +756,7 @@ zfs_copy_fuid_2_ace(zfs_sb_t *zsb, zfs_acl_t *aclp, cred_t *cr,
if ((entry_type != ACE_OWNER &&
entry_type != OWNING_GROUP &&
entry_type != ACE_EVERYONE)) {
acep->a_who = zfs_fuid_map_id(zsb, who,
acep->a_who = zfs_fuid_map_id(zfsvfs, who,
cr, (entry_type & ACE_IDENTIFIER_GROUP) ?
ZFS_ACE_GROUP : ZFS_ACE_USER);
} else {
@ -1316,7 +1316,7 @@ int
zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
{
int error;
zfs_sb_t *zsb = ZTOZSB(zp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
dmu_object_type_t otype;
zfs_acl_locator_cb_t locate = { 0 };
uint64_t mode;
@ -1330,11 +1330,11 @@ zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
KUID_TO_SUID(ZTOI(zp)->i_uid), KGID_TO_SGID(ZTOI(zp)->i_gid));
zp->z_mode = ZTOI(zp)->i_mode = mode;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL,
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
&mode, sizeof (mode));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, sizeof (zp->z_pflags));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL,
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
&ctime, sizeof (ctime));
if (zp->z_acl_cached) {
@ -1345,11 +1345,11 @@ zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
/*
* Upgrade needed?
*/
if (!zsb->z_use_fuids) {
if (!zfsvfs->z_use_fuids) {
otype = DMU_OT_OLDACL;
} else {
if ((aclp->z_version == ZFS_ACL_VERSION_INITIAL) &&
(zsb->z_version >= ZPL_VERSION_FUID))
(zfsvfs->z_version >= ZPL_VERSION_FUID))
zfs_acl_xform(zp, aclp, cr);
ASSERT(aclp->z_version >= ZFS_ACL_VERSION_FUID);
otype = DMU_OT_ACL;
@ -1362,9 +1362,9 @@ zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
if (zp->z_is_sa) { /* the easy case, just update the ACL attribute */
locate.cb_aclp = aclp;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_DACL_ACES(zsb),
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_DACL_ACES(zfsvfs),
zfs_acl_data_locator, &locate, aclp->z_acl_bytes);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_DACL_COUNT(zsb),
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_DACL_COUNT(zfsvfs),
NULL, &aclp->z_acl_count, sizeof (uint64_t));
} else { /* Painful legacy way */
zfs_acl_node_t *aclnode;
@ -1372,7 +1372,7 @@ zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
zfs_acl_phys_t acl_phys;
uint64_t aoid;
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zsb),
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zfsvfs),
&acl_phys, sizeof (acl_phys))) != 0)
return (error);
@ -1386,20 +1386,20 @@ zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
*/
if (aoid &&
aclp->z_version != acl_phys.z_acl_version) {
error = dmu_object_free(zsb->z_os, aoid, tx);
error = dmu_object_free(zfsvfs->z_os, aoid, tx);
if (error)
return (error);
aoid = 0;
}
if (aoid == 0) {
aoid = dmu_object_alloc(zsb->z_os,
aoid = dmu_object_alloc(zfsvfs->z_os,
otype, aclp->z_acl_bytes,
otype == DMU_OT_ACL ?
DMU_OT_SYSACL : DMU_OT_NONE,
otype == DMU_OT_ACL ?
DN_OLD_MAX_BONUSLEN : 0, tx);
} else {
(void) dmu_object_set_blocksize(zsb->z_os,
(void) dmu_object_set_blocksize(zfsvfs->z_os,
aoid, aclp->z_acl_bytes, 0, tx);
}
acl_phys.z_acl_extern_obj = aoid;
@ -1407,7 +1407,7 @@ zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
aclnode = list_next(&aclp->z_acl, aclnode)) {
if (aclnode->z_ace_count == 0)
continue;
dmu_write(zsb->z_os, aoid, off,
dmu_write(zfsvfs->z_os, aoid, off,
aclnode->z_size, aclnode->z_acldata, tx);
off += aclnode->z_size;
}
@ -1417,7 +1417,7 @@ zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
* Migrating back embedded?
*/
if (acl_phys.z_acl_extern_obj) {
error = dmu_object_free(zsb->z_os,
error = dmu_object_free(zfsvfs->z_os,
acl_phys.z_acl_extern_obj, tx);
if (error)
return (error);
@ -1446,7 +1446,7 @@ zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
}
acl_phys.z_acl_version = aclp->z_version;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zsb), NULL,
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
&acl_phys, sizeof (acl_phys));
}
@ -1465,7 +1465,7 @@ zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
}
static void
zfs_acl_chmod(zfs_sb_t *zsb, uint64_t mode, zfs_acl_t *aclp)
zfs_acl_chmod(zfsvfs_t *zfsvfs, uint64_t mode, zfs_acl_t *aclp)
{
void *acep = NULL;
uint64_t who;
@ -1538,7 +1538,7 @@ zfs_acl_chmod(zfs_sb_t *zsb, uint64_t mode, zfs_acl_t *aclp)
* Limit permissions to be no greater than
* group permissions
*/
if (zsb->z_acl_inherit == ZFS_ACL_RESTRICTED) {
if (zfsvfs->z_acl_inherit == ZFS_ACL_RESTRICTED) {
if (!(mode & S_IRGRP))
access_mask &= ~ACE_READ_DATA;
if (!(mode & S_IWGRP))
@ -1590,11 +1590,11 @@ zfs_acl_chmod_setattr(znode_t *zp, zfs_acl_t **aclp, uint64_t mode)
* strip off write_owner and write_acl
*/
static void
zfs_restricted_update(zfs_sb_t *zsb, zfs_acl_t *aclp, void *acep)
zfs_restricted_update(zfsvfs_t *zfsvfs, zfs_acl_t *aclp, void *acep)
{
uint32_t mask = aclp->z_ops->ace_mask_get(acep);
if ((zsb->z_acl_inherit == ZFS_ACL_RESTRICTED) &&
if ((zfsvfs->z_acl_inherit == ZFS_ACL_RESTRICTED) &&
(aclp->z_ops->ace_type_get(acep) == ALLOW)) {
mask &= ~RESTRICTED_CLEAR;
aclp->z_ops->ace_mask_set(acep, mask);
@ -1621,7 +1621,7 @@ zfs_ace_can_use(umode_t obj_mode, uint16_t acep_flags)
* inherit inheritable ACEs from parent
*/
static zfs_acl_t *
zfs_acl_inherit(zfs_sb_t *zsb, umode_t obj_mode, zfs_acl_t *paclp,
zfs_acl_inherit(zfsvfs_t *zfsvfs, umode_t obj_mode, zfs_acl_t *paclp,
uint64_t mode, boolean_t *need_chmod)
{
void *pacep;
@ -1639,16 +1639,16 @@ zfs_acl_inherit(zfs_sb_t *zsb, umode_t obj_mode, zfs_acl_t *paclp,
boolean_t passthrough, passthrough_x, noallow;
passthrough_x =
zsb->z_acl_inherit == ZFS_ACL_PASSTHROUGH_X;
zfsvfs->z_acl_inherit == ZFS_ACL_PASSTHROUGH_X;
passthrough = passthrough_x ||
zsb->z_acl_inherit == ZFS_ACL_PASSTHROUGH;
zfsvfs->z_acl_inherit == ZFS_ACL_PASSTHROUGH;
noallow =
zsb->z_acl_inherit == ZFS_ACL_NOALLOW;
zfsvfs->z_acl_inherit == ZFS_ACL_NOALLOW;
*need_chmod = B_TRUE;
pacep = NULL;
aclp = zfs_acl_alloc(paclp->z_version);
if (zsb->z_acl_inherit == ZFS_ACL_DISCARD || S_ISLNK(obj_mode))
if (zfsvfs->z_acl_inherit == ZFS_ACL_DISCARD || S_ISLNK(obj_mode))
return (aclp);
while ((pacep = zfs_acl_next_ace(paclp, pacep, &who,
&access_mask, &iflags, &type))) {
@ -1712,7 +1712,7 @@ zfs_acl_inherit(zfs_sb_t *zsb, umode_t obj_mode, zfs_acl_t *paclp,
newflags &= ~ALL_INHERIT;
aclp->z_ops->ace_flags_set(acep,
newflags|ACE_INHERITED_ACE);
zfs_restricted_update(zsb, aclp, acep);
zfs_restricted_update(zfsvfs, aclp, acep);
continue;
}
@ -1745,7 +1745,7 @@ zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr,
vsecattr_t *vsecp, zfs_acl_ids_t *acl_ids)
{
int error;
zfs_sb_t *zsb = ZTOZSB(dzp);
zfsvfs_t *zfsvfs = ZTOZSB(dzp);
zfs_acl_t *paclp;
gid_t gid = vap->va_gid;
boolean_t need_chmod = B_TRUE;
@ -1755,7 +1755,7 @@ zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr,
acl_ids->z_mode = vap->va_mode;
if (vsecp)
if ((error = zfs_vsec_2_aclp(zsb, vap->va_mode, vsecp,
if ((error = zfs_vsec_2_aclp(zfsvfs, vap->va_mode, vsecp,
cr, &acl_ids->z_fuidp, &acl_ids->z_aclp)) != 0)
return (error);
@ -1765,19 +1765,19 @@ zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr,
/*
* Determine uid and gid.
*/
if ((flag & IS_ROOT_NODE) || zsb->z_replay ||
if ((flag & IS_ROOT_NODE) || zfsvfs->z_replay ||
((flag & IS_XATTR) && (S_ISDIR(vap->va_mode)))) {
acl_ids->z_fuid = zfs_fuid_create(zsb, (uint64_t)vap->va_uid,
acl_ids->z_fuid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_uid,
cr, ZFS_OWNER, &acl_ids->z_fuidp);
acl_ids->z_fgid = zfs_fuid_create(zsb, (uint64_t)vap->va_gid,
acl_ids->z_fgid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_gid,
cr, ZFS_GROUP, &acl_ids->z_fuidp);
gid = vap->va_gid;
} else {
acl_ids->z_fuid = zfs_fuid_create_cred(zsb, ZFS_OWNER,
acl_ids->z_fuid = zfs_fuid_create_cred(zfsvfs, ZFS_OWNER,
cr, &acl_ids->z_fuidp);
acl_ids->z_fgid = 0;
if (vap->va_mask & AT_GID) {
acl_ids->z_fgid = zfs_fuid_create(zsb,
acl_ids->z_fgid = zfs_fuid_create(zfsvfs,
(uint64_t)vap->va_gid,
cr, ZFS_GROUP, &acl_ids->z_fuidp);
gid = vap->va_gid;
@ -1793,13 +1793,13 @@ zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr,
acl_ids->z_fgid = KGID_TO_SGID(
ZTOI(dzp)->i_gid);
gid = zfs_fuid_map_id(zsb, acl_ids->z_fgid,
gid = zfs_fuid_map_id(zfsvfs, acl_ids->z_fgid,
cr, ZFS_GROUP);
if (zsb->z_use_fuids &&
if (zfsvfs->z_use_fuids &&
IS_EPHEMERAL(acl_ids->z_fgid)) {
domain = zfs_fuid_idx_domain(
&zsb->z_fuid_idx,
&zfsvfs->z_fuid_idx,
FUID_INDEX(acl_ids->z_fgid));
rid = FUID_RID(acl_ids->z_fgid);
zfs_fuid_node_add(&acl_ids->z_fuidp,
@ -1808,7 +1808,7 @@ zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr,
acl_ids->z_fgid, ZFS_GROUP);
}
} else {
acl_ids->z_fgid = zfs_fuid_create_cred(zsb,
acl_ids->z_fgid = zfs_fuid_create_cred(zfsvfs,
ZFS_GROUP, cr, &acl_ids->z_fuidp);
gid = crgetgid(cr);
}
@ -1840,7 +1840,7 @@ zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr,
!(dzp->z_pflags & ZFS_XATTR)) {
VERIFY(0 == zfs_acl_node_read(dzp, B_TRUE,
&paclp, B_FALSE));
acl_ids->z_aclp = zfs_acl_inherit(zsb,
acl_ids->z_aclp = zfs_acl_inherit(zfsvfs,
vap->va_mode, paclp, acl_ids->z_mode, &need_chmod);
inherited = B_TRUE;
} else {
@ -1853,7 +1853,7 @@ zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr,
if (need_chmod) {
acl_ids->z_aclp->z_hints |= S_ISDIR(vap->va_mode) ?
ZFS_ACL_AUTO_INHERIT : 0;
zfs_acl_chmod(zsb, acl_ids->z_mode, acl_ids->z_aclp);
zfs_acl_chmod(zfsvfs, acl_ids->z_mode, acl_ids->z_aclp);
}
}
@ -1883,12 +1883,12 @@ zfs_acl_ids_free(zfs_acl_ids_t *acl_ids)
}
boolean_t
zfs_acl_ids_overquota(zfs_sb_t *zsb, zfs_acl_ids_t *acl_ids)
zfs_acl_ids_overquota(zfsvfs_t *zfsvfs, zfs_acl_ids_t *acl_ids)
{
return (zfs_fuid_overquota(zsb, B_FALSE, acl_ids->z_fuid) ||
zfs_fuid_overquota(zsb, B_TRUE, acl_ids->z_fgid) ||
zfs_fuid_overobjquota(zsb, B_FALSE, acl_ids->z_fuid) ||
zfs_fuid_overobjquota(zsb, B_TRUE, acl_ids->z_fgid));
return (zfs_fuid_overquota(zfsvfs, B_FALSE, acl_ids->z_fuid) ||
zfs_fuid_overquota(zfsvfs, B_TRUE, acl_ids->z_fgid) ||
zfs_fuid_overobjquota(zfsvfs, B_FALSE, acl_ids->z_fuid) ||
zfs_fuid_overobjquota(zfsvfs, B_TRUE, acl_ids->z_fgid));
}
/*
@ -1992,7 +1992,7 @@ zfs_getacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
}
int
zfs_vsec_2_aclp(zfs_sb_t *zsb, umode_t obj_mode,
zfs_vsec_2_aclp(zfsvfs_t *zfsvfs, umode_t obj_mode,
vsecattr_t *vsecp, cred_t *cr, zfs_fuid_info_t **fuidp, zfs_acl_t **zaclp)
{
zfs_acl_t *aclp;
@ -2003,7 +2003,7 @@ zfs_vsec_2_aclp(zfs_sb_t *zsb, umode_t obj_mode,
if (vsecp->vsa_aclcnt > MAX_ACL_ENTRIES || vsecp->vsa_aclcnt <= 0)
return (SET_ERROR(EINVAL));
aclp = zfs_acl_alloc(zfs_acl_version(zsb->z_version));
aclp = zfs_acl_alloc(zfs_acl_version(zfsvfs->z_version));
aclp->z_hints = 0;
aclnode = zfs_acl_node_alloc(aclcnt * sizeof (zfs_object_ace_t));
@ -2016,7 +2016,7 @@ zfs_vsec_2_aclp(zfs_sb_t *zsb, umode_t obj_mode,
return (error);
}
} else {
if ((error = zfs_copy_ace_2_fuid(zsb, obj_mode, aclp,
if ((error = zfs_copy_ace_2_fuid(zfsvfs, obj_mode, aclp,
vsecp->vsa_aclentp, aclnode->z_acldata, aclcnt,
&aclnode->z_size, fuidp, cr)) != 0) {
zfs_acl_free(aclp);
@ -2052,8 +2052,8 @@ zfs_vsec_2_aclp(zfs_sb_t *zsb, umode_t obj_mode,
int
zfs_setacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
{
zfs_sb_t *zsb = ZTOZSB(zp);
zilog_t *zilog = zsb->z_log;
zfsvfs_t *zfsvfs = ZTOZSB(zp);
zilog_t *zilog = zfsvfs->z_log;
ulong_t mask = vsecp->vsa_mask & (VSA_ACE | VSA_ACECNT);
dmu_tx_t *tx;
int error;
@ -2071,7 +2071,7 @@ zfs_setacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
if ((error = zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr)))
return (error);
error = zfs_vsec_2_aclp(zsb, ZTOI(zp)->i_mode, vsecp, cr, &fuidp,
error = zfs_vsec_2_aclp(zfsvfs, ZTOI(zp)->i_mode, vsecp, cr, &fuidp,
&aclp);
if (error)
return (error);
@ -2088,13 +2088,13 @@ top:
mutex_enter(&zp->z_acl_lock);
mutex_enter(&zp->z_lock);
tx = dmu_tx_create(zsb->z_os);
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
fuid_dirtied = zsb->z_fuid_dirty;
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zsb, tx);
zfs_fuid_txhold(zfsvfs, tx);
/*
* If old version and ACL won't fit in bonus and we aren't
@ -2102,7 +2102,7 @@ top:
*/
if ((acl_obj = zfs_external_acl(zp)) != 0) {
if (zsb->z_version >= ZPL_VERSION_FUID &&
if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
zfs_znode_acl_version(zp) <= ZFS_ACL_VERSION_INITIAL) {
dmu_tx_hold_free(tx, acl_obj, 0,
DMU_OBJECT_END);
@ -2137,7 +2137,7 @@ top:
zp->z_acl_cached = aclp;
if (fuid_dirtied)
zfs_fuid_sync(zsb, tx);
zfs_fuid_sync(zfsvfs, tx);
zfs_log_acl(zilog, tx, zp, vsecp, fuidp);
@ -2218,7 +2218,7 @@ static int
zfs_zaccess_aces_check(znode_t *zp, uint32_t *working_mode,
boolean_t anyaccess, cred_t *cr)
{
zfs_sb_t *zsb = ZTOZSB(zp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
zfs_acl_t *aclp;
int error;
uid_t uid = crgetuid(cr);
@ -2273,7 +2273,7 @@ zfs_zaccess_aces_check(znode_t *zp, uint32_t *working_mode,
who = gowner;
/*FALLTHROUGH*/
case ACE_IDENTIFIER_GROUP:
checkit = zfs_groupmember(zsb, who, cr);
checkit = zfs_groupmember(zfsvfs, who, cr);
break;
case ACE_EVERYONE:
checkit = B_TRUE;
@ -2284,7 +2284,7 @@ zfs_zaccess_aces_check(znode_t *zp, uint32_t *working_mode,
if (entry_type == 0) {
uid_t newid;
newid = zfs_fuid_map_id(zsb, who, cr,
newid = zfs_fuid_map_id(zfsvfs, who, cr,
ZFS_ACE_USER);
if (newid != IDMAP_WK_CREATOR_OWNER_UID &&
uid == newid)
@ -2357,7 +2357,7 @@ static int
zfs_zaccess_common(znode_t *zp, uint32_t v4_mode, uint32_t *working_mode,
boolean_t *check_privs, boolean_t skipaclchk, cred_t *cr)
{
zfs_sb_t *zsb = ZTOZSB(zp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
int err;
*working_mode = v4_mode;
@ -2366,7 +2366,7 @@ zfs_zaccess_common(znode_t *zp, uint32_t v4_mode, uint32_t *working_mode,
/*
* Short circuit empty requests
*/
if (v4_mode == 0 || zsb->z_replay) {
if (v4_mode == 0 || zfsvfs->z_replay) {
*working_mode = 0;
return (0);
}


@ -60,12 +60,12 @@
*
* The '.zfs', '.zfs/snapshot', and all directories created under
* '.zfs/snapshot' (ie: '.zfs/snapshot/<snapname>') all share the same
* share the same zfs_sb_t as the head filesystem (what '.zfs' lives under).
* share the same zfsvfs_t as the head filesystem (what '.zfs' lives under).
*
* File systems mounted on top of the '.zfs/snapshot/<snapname>' paths
* (ie: snapshots) are complete ZFS filesystems and have their own unique
* zfs_sb_t. However, the fsid reported by these mounts will be the same
* as that used by the parent zfs_sb_t to make NFS happy.
* zfsvfs_t. However, the fsid reported by these mounts will be the same
* as that used by the parent zfsvfs_t to make NFS happy.
*/
#include <sys/types.h>
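
Restated as a tiny illustrative predicate (not code from this commit), the relationship the comment above describes is: a snapshot automounted under '.zfs/snapshot' gets its own zfsvfs_t whose z_parent points back at the head filesystem, while '.zfs' itself shares the head filesystem's zfsvfs_t. The snap_zfsvfs->z_parent assignment appears in the zfsctl_snapshot_mount() hunk later in this file's diff.

static boolean_t
example_is_snapshot_of(zfsvfs_t *snap_zfsvfs, zfsvfs_t *head_zfsvfs)
{
	return (snap_zfsvfs != head_zfsvfs &&
	    snap_zfsvfs->z_parent == head_zfsvfs);
}
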
@ -448,14 +448,14 @@ zfsctl_is_snapdir(struct inode *ip)
* Allocate a new inode with the passed id and ops.
*/
static struct inode *
zfsctl_inode_alloc(zfs_sb_t *zsb, uint64_t id,
zfsctl_inode_alloc(zfsvfs_t *zfsvfs, uint64_t id,
const struct file_operations *fops, const struct inode_operations *ops)
{
struct timespec now = current_fs_time(zsb->z_sb);
struct timespec now = current_fs_time(zfsvfs->z_sb);
struct inode *ip;
znode_t *zp;
ip = new_inode(zsb->z_sb);
ip = new_inode(zfsvfs->z_sb);
if (ip == NULL)
return (NULL);
@ -498,11 +498,11 @@ zfsctl_inode_alloc(zfs_sb_t *zsb, uint64_t id,
return (NULL);
}
mutex_enter(&zsb->z_znodes_lock);
list_insert_tail(&zsb->z_all_znodes, zp);
zsb->z_nr_znodes++;
mutex_enter(&zfsvfs->z_znodes_lock);
list_insert_tail(&zfsvfs->z_all_znodes, zp);
zfsvfs->z_nr_znodes++;
membar_producer();
mutex_exit(&zsb->z_znodes_lock);
mutex_exit(&zfsvfs->z_znodes_lock);
unlock_new_inode(ip);
@ -513,18 +513,18 @@ zfsctl_inode_alloc(zfs_sb_t *zsb, uint64_t id,
* Lookup the inode with given id, it will be allocated if needed.
*/
static struct inode *
zfsctl_inode_lookup(zfs_sb_t *zsb, uint64_t id,
zfsctl_inode_lookup(zfsvfs_t *zfsvfs, uint64_t id,
const struct file_operations *fops, const struct inode_operations *ops)
{
struct inode *ip = NULL;
while (ip == NULL) {
ip = ilookup(zsb->z_sb, (unsigned long)id);
ip = ilookup(zfsvfs->z_sb, (unsigned long)id);
if (ip)
break;
/* May fail due to concurrent zfsctl_inode_alloc() */
ip = zfsctl_inode_alloc(zsb, id, fops, ops);
ip = zfsctl_inode_alloc(zfsvfs, id, fops, ops);
}
return (ip);
@ -532,7 +532,7 @@ zfsctl_inode_lookup(zfs_sb_t *zsb, uint64_t id,
/*
* Create the '.zfs' directory. This directory is cached as part of the VFS
* structure. This results in a hold on the zfs_sb_t. The code in zfs_umount()
* structure. This results in a hold on the zfsvfs_t. The code in zfs_umount()
* therefore checks against a vfs_count of 2 instead of 1. This reference
* is removed when the ctldir is destroyed in the unmount. All other entities
* under the '.zfs' directory are created dynamically as needed.
@ -541,13 +541,13 @@ zfsctl_inode_lookup(zfs_sb_t *zsb, uint64_t id,
* of 64-bit inode numbers this support must be disabled on 32-bit systems.
*/
int
zfsctl_create(zfs_sb_t *zsb)
zfsctl_create(zfsvfs_t *zfsvfs)
{
ASSERT(zsb->z_ctldir == NULL);
ASSERT(zfsvfs->z_ctldir == NULL);
zsb->z_ctldir = zfsctl_inode_alloc(zsb, ZFSCTL_INO_ROOT,
zfsvfs->z_ctldir = zfsctl_inode_alloc(zfsvfs, ZFSCTL_INO_ROOT,
&zpl_fops_root, &zpl_ops_root);
if (zsb->z_ctldir == NULL)
if (zfsvfs->z_ctldir == NULL)
return (SET_ERROR(ENOENT));
return (0);
@ -558,12 +558,12 @@ zfsctl_create(zfs_sb_t *zsb)
* Only called when the filesystem is unmounted.
*/
void
zfsctl_destroy(zfs_sb_t *zsb)
zfsctl_destroy(zfsvfs_t *zfsvfs)
{
if (zsb->z_issnap) {
if (zfsvfs->z_issnap) {
zfs_snapentry_t *se;
spa_t *spa = zsb->z_os->os_spa;
uint64_t objsetid = dmu_objset_id(zsb->z_os);
spa_t *spa = zfsvfs->z_os->os_spa;
uint64_t objsetid = dmu_objset_id(zfsvfs->z_os);
rw_enter(&zfs_snapshot_lock, RW_WRITER);
if ((se = zfsctl_snapshot_find_by_objsetid(spa, objsetid))
@ -573,9 +573,9 @@ zfsctl_destroy(zfs_sb_t *zsb)
zfsctl_snapshot_rele(se);
}
rw_exit(&zfs_snapshot_lock);
} else if (zsb->z_ctldir) {
iput(zsb->z_ctldir);
zsb->z_ctldir = NULL;
} else if (zfsvfs->z_ctldir) {
iput(zfsvfs->z_ctldir);
zfsvfs->z_ctldir = NULL;
}
}
@ -646,21 +646,21 @@ int
zfsctl_fid(struct inode *ip, fid_t *fidp)
{
znode_t *zp = ITOZ(ip);
zfs_sb_t *zsb = ITOZSB(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
uint64_t object = zp->z_id;
zfid_short_t *zfid;
int i;
ZFS_ENTER(zsb);
ZFS_ENTER(zfsvfs);
if (zfsctl_is_snapdir(ip)) {
ZFS_EXIT(zsb);
ZFS_EXIT(zfsvfs);
return (zfsctl_snapdir_fid(ip, fidp));
}
if (fidp->fid_len < SHORT_FID_LEN) {
fidp->fid_len = SHORT_FID_LEN;
ZFS_EXIT(zsb);
ZFS_EXIT(zfsvfs);
return (SET_ERROR(ENOSPC));
}
@ -675,7 +675,7 @@ zfsctl_fid(struct inode *ip, fid_t *fidp)
for (i = 0; i < sizeof (zfid->zf_gen); i++)
zfid->zf_gen[i] = 0;
ZFS_EXIT(zsb);
ZFS_EXIT(zfsvfs);
return (0);
}
@ -683,10 +683,10 @@ zfsctl_fid(struct inode *ip, fid_t *fidp)
* Construct a full dataset name in full_name: "pool/dataset@snap_name"
*/
static int
zfsctl_snapshot_name(zfs_sb_t *zsb, const char *snap_name, int len,
zfsctl_snapshot_name(zfsvfs_t *zfsvfs, const char *snap_name, int len,
char *full_name)
{
objset_t *os = zsb->z_os;
objset_t *os = zfsvfs->z_os;
if (zfs_component_namecheck(snap_name, NULL, NULL) != 0)
return (SET_ERROR(EILSEQ));
@ -736,17 +736,17 @@ out:
* Returns full path in full_path: "/pool/dataset/.zfs/snapshot/snap_name/"
*/
static int
zfsctl_snapshot_path_objset(zfs_sb_t *zsb, uint64_t objsetid,
zfsctl_snapshot_path_objset(zfsvfs_t *zfsvfs, uint64_t objsetid,
int path_len, char *full_path)
{
objset_t *os = zsb->z_os;
objset_t *os = zfsvfs->z_os;
fstrans_cookie_t cookie;
char *snapname;
boolean_t case_conflict;
uint64_t id, pos = 0;
int error = 0;
if (zsb->z_mntopts->z_mntpoint == NULL)
if (zfsvfs->z_vfs->vfs_mntpoint == NULL)
return (ENOENT);
cookie = spl_fstrans_mark();
@ -754,7 +754,7 @@ zfsctl_snapshot_path_objset(zfs_sb_t *zsb, uint64_t objsetid,
while (error == 0) {
dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
error = dmu_snapshot_list_next(zsb->z_os,
error = dmu_snapshot_list_next(zfsvfs->z_os,
ZFS_MAX_DATASET_NAME_LEN, snapname, &id, &pos,
&case_conflict);
dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
@ -767,7 +767,7 @@ zfsctl_snapshot_path_objset(zfs_sb_t *zsb, uint64_t objsetid,
memset(full_path, 0, path_len);
snprintf(full_path, path_len - 1, "%s/.zfs/snapshot/%s",
zsb->z_mntopts->z_mntpoint, snapname);
zfsvfs->z_vfs->vfs_mntpoint, snapname);
out:
kmem_free(snapname, ZFS_MAX_DATASET_NAME_LEN);
spl_fstrans_unmark(cookie);
@ -782,18 +782,18 @@ int
zfsctl_root_lookup(struct inode *dip, char *name, struct inode **ipp,
int flags, cred_t *cr, int *direntflags, pathname_t *realpnp)
{
zfs_sb_t *zsb = ITOZSB(dip);
zfsvfs_t *zfsvfs = ITOZSB(dip);
int error = 0;
ZFS_ENTER(zsb);
ZFS_ENTER(zfsvfs);
if (strcmp(name, "..") == 0) {
*ipp = dip->i_sb->s_root->d_inode;
} else if (strcmp(name, ZFS_SNAPDIR_NAME) == 0) {
*ipp = zfsctl_inode_lookup(zsb, ZFSCTL_INO_SNAPDIR,
*ipp = zfsctl_inode_lookup(zfsvfs, ZFSCTL_INO_SNAPDIR,
&zpl_fops_snapdir, &zpl_ops_snapdir);
} else if (strcmp(name, ZFS_SHAREDIR_NAME) == 0) {
*ipp = zfsctl_inode_lookup(zsb, ZFSCTL_INO_SHARES,
*ipp = zfsctl_inode_lookup(zfsvfs, ZFSCTL_INO_SHARES,
&zpl_fops_shares, &zpl_ops_shares);
} else {
*ipp = NULL;
@ -802,7 +802,7 @@ zfsctl_root_lookup(struct inode *dip, char *name, struct inode **ipp,
if (*ipp == NULL)
error = SET_ERROR(ENOENT);
ZFS_EXIT(zsb);
ZFS_EXIT(zfsvfs);
return (error);
}
@ -816,24 +816,24 @@ int
zfsctl_snapdir_lookup(struct inode *dip, char *name, struct inode **ipp,
int flags, cred_t *cr, int *direntflags, pathname_t *realpnp)
{
zfs_sb_t *zsb = ITOZSB(dip);
zfsvfs_t *zfsvfs = ITOZSB(dip);
uint64_t id;
int error;
ZFS_ENTER(zsb);
ZFS_ENTER(zfsvfs);
error = dmu_snapshot_lookup(zsb->z_os, name, &id);
error = dmu_snapshot_lookup(zfsvfs->z_os, name, &id);
if (error) {
ZFS_EXIT(zsb);
ZFS_EXIT(zfsvfs);
return (error);
}
*ipp = zfsctl_inode_lookup(zsb, ZFSCTL_INO_SNAPDIRS - id,
*ipp = zfsctl_inode_lookup(zfsvfs, ZFSCTL_INO_SNAPDIRS - id,
&simple_dir_operations, &simple_dir_inode_operations);
if (*ipp == NULL)
error = SET_ERROR(ENOENT);
ZFS_EXIT(zsb);
ZFS_EXIT(zfsvfs);
return (error);
}
@ -847,22 +847,22 @@ int
zfsctl_snapdir_rename(struct inode *sdip, char *snm,
struct inode *tdip, char *tnm, cred_t *cr, int flags)
{
zfs_sb_t *zsb = ITOZSB(sdip);
zfsvfs_t *zfsvfs = ITOZSB(sdip);
char *to, *from, *real, *fsname;
int error;
if (!zfs_admin_snapshot)
return (EACCES);
ZFS_ENTER(zsb);
ZFS_ENTER(zfsvfs);
to = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
from = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
real = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
fsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
if (zsb->z_case == ZFS_CASE_INSENSITIVE) {
error = dmu_snapshot_realname(zsb->z_os, snm, real,
if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE) {
error = dmu_snapshot_realname(zfsvfs->z_os, snm, real,
ZFS_MAX_DATASET_NAME_LEN, NULL);
if (error == 0) {
snm = real;
@ -871,7 +871,7 @@ zfsctl_snapdir_rename(struct inode *sdip, char *snm,
}
}
dmu_objset_name(zsb->z_os, fsname);
dmu_objset_name(zfsvfs->z_os, fsname);
error = zfsctl_snapshot_name(ITOZSB(sdip), snm,
ZFS_MAX_DATASET_NAME_LEN, from);
@ -912,7 +912,7 @@ out:
kmem_free(real, ZFS_MAX_DATASET_NAME_LEN);
kmem_free(fsname, ZFS_MAX_DATASET_NAME_LEN);
ZFS_EXIT(zsb);
ZFS_EXIT(zfsvfs);
return (error);
}
@ -924,20 +924,20 @@ out:
int
zfsctl_snapdir_remove(struct inode *dip, char *name, cred_t *cr, int flags)
{
zfs_sb_t *zsb = ITOZSB(dip);
zfsvfs_t *zfsvfs = ITOZSB(dip);
char *snapname, *real;
int error;
if (!zfs_admin_snapshot)
return (EACCES);
ZFS_ENTER(zsb);
ZFS_ENTER(zfsvfs);
snapname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
real = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
if (zsb->z_case == ZFS_CASE_INSENSITIVE) {
error = dmu_snapshot_realname(zsb->z_os, name, real,
if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE) {
error = dmu_snapshot_realname(zfsvfs->z_os, name, real,
ZFS_MAX_DATASET_NAME_LEN, NULL);
if (error == 0) {
name = real;
@ -960,7 +960,7 @@ out:
kmem_free(snapname, ZFS_MAX_DATASET_NAME_LEN);
kmem_free(real, ZFS_MAX_DATASET_NAME_LEN);
ZFS_EXIT(zsb);
ZFS_EXIT(zfsvfs);
return (error);
}
@ -973,7 +973,7 @@ int
zfsctl_snapdir_mkdir(struct inode *dip, char *dirname, vattr_t *vap,
struct inode **ipp, cred_t *cr, int flags)
{
zfs_sb_t *zsb = ITOZSB(dip);
zfsvfs_t *zfsvfs = ITOZSB(dip);
char *dsname;
int error;
@ -987,7 +987,7 @@ zfsctl_snapdir_mkdir(struct inode *dip, char *dirname, vattr_t *vap,
goto out;
}
dmu_objset_name(zsb->z_os, dsname);
dmu_objset_name(zfsvfs->z_os, dsname);
error = zfs_secpolicy_snapshot_perms(dsname, cr);
if (error != 0)
@ -1055,8 +1055,8 @@ zfsctl_snapshot_mount(struct path *path, int flags)
{
struct dentry *dentry = path->dentry;
struct inode *ip = dentry->d_inode;
zfs_sb_t *zsb;
zfs_sb_t *snap_zsb;
zfsvfs_t *zfsvfs;
zfsvfs_t *snap_zfsvfs;
zfs_snapentry_t *se;
char *full_name, *full_path;
char *argv[] = { "/usr/bin/env", "mount", "-t", "zfs", "-n", NULL, NULL,
@ -1068,13 +1068,13 @@ zfsctl_snapshot_mount(struct path *path, int flags)
if (ip == NULL)
return (EISDIR);
zsb = ITOZSB(ip);
ZFS_ENTER(zsb);
zfsvfs = ITOZSB(ip);
ZFS_ENTER(zfsvfs);
full_name = kmem_zalloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
full_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
error = zfsctl_snapshot_name(zsb, dname(dentry),
error = zfsctl_snapshot_name(zfsvfs, dname(dentry),
ZFS_MAX_DATASET_NAME_LEN, full_name);
if (error)
goto error;
@ -1134,14 +1134,14 @@ zfsctl_snapshot_mount(struct path *path, int flags)
spath = *path;
path_get(&spath);
if (zpl_follow_down_one(&spath)) {
snap_zsb = ITOZSB(spath.dentry->d_inode);
snap_zsb->z_parent = zsb;
snap_zfsvfs = ITOZSB(spath.dentry->d_inode);
snap_zfsvfs->z_parent = zfsvfs;
dentry = spath.dentry;
spath.mnt->mnt_flags |= MNT_SHRINKABLE;
rw_enter(&zfs_snapshot_lock, RW_WRITER);
se = zfsctl_snapshot_alloc(full_name, full_path,
snap_zsb->z_os->os_spa, dmu_objset_id(snap_zsb->z_os),
snap_zfsvfs->z_os->os_spa, dmu_objset_id(snap_zfsvfs->z_os),
dentry);
zfsctl_snapshot_add(se);
zfsctl_snapshot_unmount_delay_impl(se, zfs_expire_snapshot);
@ -1152,7 +1152,7 @@ error:
kmem_free(full_name, ZFS_MAX_DATASET_NAME_LEN);
kmem_free(full_path, MAXPATHLEN);
ZFS_EXIT(zsb);
ZFS_EXIT(zfsvfs);
return (error);
}
@ -1211,24 +1211,24 @@ int
zfsctl_shares_lookup(struct inode *dip, char *name, struct inode **ipp,
int flags, cred_t *cr, int *direntflags, pathname_t *realpnp)
{
zfs_sb_t *zsb = ITOZSB(dip);
zfsvfs_t *zfsvfs = ITOZSB(dip);
struct inode *ip;
znode_t *dzp;
int error;
ZFS_ENTER(zsb);
ZFS_ENTER(zfsvfs);
if (zsb->z_shares_dir == 0) {
ZFS_EXIT(zsb);
if (zfsvfs->z_shares_dir == 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(ENOTSUP));
}
if ((error = zfs_zget(zsb, zsb->z_shares_dir, &dzp)) == 0) {
if ((error = zfs_zget(zfsvfs, zfsvfs->z_shares_dir, &dzp)) == 0) {
error = zfs_lookup(ZTOI(dzp), name, &ip, 0, cr, NULL, NULL);
iput(ZTOI(dzp));
}
ZFS_EXIT(zsb);
ZFS_EXIT(zfsvfs);
return (error);
}


@ -25,7 +25,6 @@
* Copyright 2017 Nexenta Systems, Inc.
*/
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
@ -66,13 +65,13 @@
* of names after deciding which is the appropriate lookup interface.
*/
static int
zfs_match_find(zfs_sb_t *zsb, znode_t *dzp, char *name, matchtype_t mt,
zfs_match_find(zfsvfs_t *zfsvfs, znode_t *dzp, char *name, matchtype_t mt,
boolean_t update, int *deflags, pathname_t *rpnp, uint64_t *zoid)
{
boolean_t conflict = B_FALSE;
int error;
if (zsb->z_norm) {
if (zfsvfs->z_norm) {
size_t bufsz = 0;
char *buf = NULL;
@ -85,10 +84,10 @@ zfs_match_find(zfs_sb_t *zsb, znode_t *dzp, char *name, matchtype_t mt,
* In the non-mixed case we only expect there would ever
* be one match, but we need to use the normalizing lookup.
*/
error = zap_lookup_norm(zsb->z_os, dzp->z_id, name, 8, 1,
error = zap_lookup_norm(zfsvfs->z_os, dzp->z_id, name, 8, 1,
zoid, mt, buf, bufsz, &conflict);
} else {
error = zap_lookup(zsb->z_os, dzp->z_id, name, 8, 1, zoid);
error = zap_lookup(zfsvfs->z_os, dzp->z_id, name, 8, 1, zoid);
}
/*
@ -101,7 +100,7 @@ zfs_match_find(zfs_sb_t *zsb, znode_t *dzp, char *name, matchtype_t mt,
if (error == EOVERFLOW)
error = 0;
if (zsb->z_norm && !error && deflags)
if (zfsvfs->z_norm && !error && deflags)
*deflags = conflict ? ED_CASE_CONFLICT : 0;
*zoid = ZFS_DIRENT_OBJ(*zoid);
@ -153,7 +152,7 @@ int
zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp,
int flag, int *direntflags, pathname_t *realpnp)
{
zfs_sb_t *zsb = ZTOZSB(dzp);
zfsvfs_t *zfsvfs = ZTOZSB(dzp);
zfs_dirlock_t *dl;
boolean_t update;
matchtype_t mt = 0;
@ -178,7 +177,7 @@ zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp,
/*
* Case sensitivity and normalization preferences are set when
* the file system is created. These are stored in the
* zsb->z_case and zsb->z_norm fields. These choices
* zfsvfs->z_case and zfsvfs->z_norm fields. These choices
* affect what vnodes can be cached in the DNLC, how we
* perform zap lookups, and the "width" of our dirlocks.
*
@ -202,7 +201,7 @@ zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp,
*
* See the table above zfs_dropname().
*/
if (zsb->z_norm != 0) {
if (zfsvfs->z_norm != 0) {
mt = MT_NORMALIZE;
/*
@ -210,9 +209,9 @@ zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp,
* lookup, and if so keep track of that so that during
* normalization we don't fold case.
*/
if ((zsb->z_case == ZFS_CASE_INSENSITIVE &&
if ((zfsvfs->z_case == ZFS_CASE_INSENSITIVE &&
(flag & ZCIEXACT)) ||
(zsb->z_case == ZFS_CASE_MIXED && !(flag & ZCILOOK))) {
(zfsvfs->z_case == ZFS_CASE_MIXED && !(flag & ZCILOOK))) {
mt |= MT_MATCH_CASE;
}
}
@ -227,9 +226,9 @@ zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp,
* Maybe can add TO-UPPERed version of name to dnlc in ci-only
* case for performance improvement?
*/
update = !zsb->z_norm ||
(zsb->z_case == ZFS_CASE_MIXED &&
!(zsb->z_norm & ~U8_TEXTPREP_TOUPPER) && !(flag & ZCILOOK));
update = !zfsvfs->z_norm ||
(zfsvfs->z_case == ZFS_CASE_MIXED &&
!(zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER) && !(flag & ZCILOOK));
/*
* ZRENAMING indicates we are in a situation where we should
@ -242,7 +241,7 @@ zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp,
if (flag & ZRENAMING)
cmpflags = 0;
else
cmpflags = zsb->z_norm;
cmpflags = zfsvfs->z_norm;
/*
* Wait until there are no locks on this name.
@ -322,7 +321,7 @@ zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp,
* See if there's an object by this name; if so, put a hold on it.
*/
if (flag & ZXATTR) {
error = sa_lookup(dzp->z_sa_hdl, SA_ZPL_XATTR(zsb), &zoid,
error = sa_lookup(dzp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), &zoid,
sizeof (zoid));
if (error == 0)
error = (zoid == 0 ? SET_ERROR(ENOENT) : 0);
@ -343,11 +342,11 @@ zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp,
*zpp = VTOZ(vp);
return (0);
} else {
error = zfs_match_find(zsb, dzp, name, mt,
error = zfs_match_find(zfsvfs, dzp, name, mt,
update, direntflags, realpnp, &zoid);
}
#else
error = zfs_match_find(zsb, dzp, name, mt,
error = zfs_match_find(zfsvfs, dzp, name, mt,
update, direntflags, realpnp, &zoid);
#endif /* HAVE_DNLC */
}
@ -361,7 +360,7 @@ zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp,
zfs_dirent_unlock(dl);
return (SET_ERROR(EEXIST));
}
error = zfs_zget(zsb, zoid, zpp);
error = zfs_zget(zfsvfs, zoid, zpp);
if (error) {
zfs_dirent_unlock(dl);
return (error);
@ -430,23 +429,23 @@ zfs_dirlook(znode_t *dzp, char *name, struct inode **ipp, int flags,
*ipp = ZTOI(dzp);
igrab(*ipp);
} else if (name[0] == '.' && name[1] == '.' && name[2] == 0) {
zfs_sb_t *zsb = ZTOZSB(dzp);
zfsvfs_t *zfsvfs = ZTOZSB(dzp);
/*
* If we are a snapshot mounted under .zfs, return
* the inode pointer for the snapshot directory.
*/
if ((error = sa_lookup(dzp->z_sa_hdl,
SA_ZPL_PARENT(zsb), &parent, sizeof (parent))) != 0)
SA_ZPL_PARENT(zfsvfs), &parent, sizeof (parent))) != 0)
return (error);
if (parent == dzp->z_id && zsb->z_parent != zsb) {
error = zfsctl_root_lookup(zsb->z_parent->z_ctldir,
if (parent == dzp->z_id && zfsvfs->z_parent != zfsvfs) {
error = zfsctl_root_lookup(zfsvfs->z_parent->z_ctldir,
"snapshot", ipp, 0, kcred, NULL, NULL);
return (error);
}
rw_enter(&dzp->z_parent_lock, RW_READER);
error = zfs_zget(zsb, parent, &zp);
error = zfs_zget(zfsvfs, parent, &zp);
if (error == 0)
*ipp = ZTOI(zp);
rw_exit(&dzp->z_parent_lock);
@ -491,13 +490,13 @@ zfs_dirlook(znode_t *dzp, char *name, struct inode **ipp, int flags,
void
zfs_unlinked_add(znode_t *zp, dmu_tx_t *tx)
{
zfs_sb_t *zsb = ZTOZSB(zp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
ASSERT(zp->z_unlinked);
ASSERT(ZTOI(zp)->i_nlink == 0);
VERIFY3U(0, ==,
zap_add_int(zsb->z_os, zsb->z_unlinkedobj, zp->z_id, tx));
zap_add_int(zfsvfs->z_os, zfsvfs->z_unlinkedobj, zp->z_id, tx));
}
/*
@ -505,7 +504,7 @@ zfs_unlinked_add(znode_t *zp, dmu_tx_t *tx)
* (force) umounted the file system.
*/
void
zfs_unlinked_drain(zfs_sb_t *zsb)
zfs_unlinked_drain(zfsvfs_t *zfsvfs)
{
zap_cursor_t zc;
zap_attribute_t zap;
@ -516,7 +515,7 @@ zfs_unlinked_drain(zfs_sb_t *zsb)
/*
* Iterate over the contents of the unlinked set.
*/
for (zap_cursor_init(&zc, zsb->z_os, zsb->z_unlinkedobj);
for (zap_cursor_init(&zc, zfsvfs->z_os, zfsvfs->z_unlinkedobj);
zap_cursor_retrieve(&zc, &zap) == 0;
zap_cursor_advance(&zc)) {
@ -524,7 +523,8 @@ zfs_unlinked_drain(zfs_sb_t *zsb)
* See what kind of object we have in list
*/
error = dmu_object_info(zsb->z_os, zap.za_first_integer, &doi);
error = dmu_object_info(zfsvfs->z_os,
zap.za_first_integer, &doi);
if (error != 0)
continue;
@ -534,7 +534,7 @@ zfs_unlinked_drain(zfs_sb_t *zsb)
* We need to re-mark these list entries for deletion,
* so we pull them back into core and set zp->z_unlinked.
*/
error = zfs_zget(zsb, zap.za_first_integer, &zp);
error = zfs_zget(zfsvfs, zap.za_first_integer, &zp);
/*
* We may pick up znodes that are already marked for deletion.
@ -569,15 +569,15 @@ zfs_purgedir(znode_t *dzp)
zap_attribute_t zap;
znode_t *xzp;
dmu_tx_t *tx;
zfs_sb_t *zsb = ZTOZSB(dzp);
zfsvfs_t *zfsvfs = ZTOZSB(dzp);
zfs_dirlock_t dl;
int skipped = 0;
int error;
for (zap_cursor_init(&zc, zsb->z_os, dzp->z_id);
for (zap_cursor_init(&zc, zfsvfs->z_os, dzp->z_id);
(error = zap_cursor_retrieve(&zc, &zap)) == 0;
zap_cursor_advance(&zc)) {
error = zfs_zget(zsb,
error = zfs_zget(zfsvfs,
ZFS_DIRENT_OBJ(zap.za_first_integer), &xzp);
if (error) {
skipped += 1;
@ -587,11 +587,11 @@ zfs_purgedir(znode_t *dzp)
ASSERT(S_ISREG(ZTOI(xzp)->i_mode) ||
S_ISLNK(ZTOI(xzp)->i_mode));
tx = dmu_tx_create(zsb->z_os);
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
dmu_tx_hold_zap(tx, dzp->z_id, FALSE, zap.za_name);
dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, FALSE, NULL);
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
/* Is this really needed ? */
zfs_sa_upgrade_txholds(tx, xzp);
dmu_tx_mark_netfree(tx);
@ -622,8 +622,8 @@ zfs_purgedir(znode_t *dzp)
void
zfs_rmnode(znode_t *zp)
{
zfs_sb_t *zsb = ZTOZSB(zp);
objset_t *os = zsb->z_os;
zfsvfs_t *zfsvfs = ZTOZSB(zp);
objset_t *os = zfsvfs->z_os;
znode_t *xzp = NULL;
dmu_tx_t *tx;
uint64_t acl_obj;
@ -672,10 +672,10 @@ zfs_rmnode(znode_t *zp)
* If the file has extended attributes, we're going to unlink
* the xattr dir.
*/
error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zsb),
error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
&xattr_obj, sizeof (xattr_obj));
if (error == 0 && xattr_obj) {
error = zfs_zget(zsb, xattr_obj, &xzp);
error = zfs_zget(zfsvfs, xattr_obj, &xzp);
ASSERT(error == 0);
}
@ -686,9 +686,9 @@ zfs_rmnode(znode_t *zp)
*/
tx = dmu_tx_create(os);
dmu_tx_hold_free(tx, zp->z_id, 0, DMU_OBJECT_END);
dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, FALSE, NULL);
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
if (xzp) {
dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, TRUE, NULL);
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, TRUE, NULL);
dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
}
if (acl_obj)
@ -713,7 +713,7 @@ zfs_rmnode(znode_t *zp)
xzp->z_unlinked = B_TRUE; /* mark xzp for deletion */
clear_nlink(ZTOI(xzp)); /* no more links to it */
links = 0;
VERIFY(0 == sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zsb),
VERIFY(0 == sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
&links, sizeof (links), tx));
mutex_exit(&xzp->z_lock);
zfs_unlinked_add(xzp, tx);
@ -721,7 +721,7 @@ zfs_rmnode(znode_t *zp)
/* Remove this znode from the unlinked set */
VERIFY3U(0, ==,
zap_remove_int(zsb->z_os, zsb->z_unlinkedobj, zp->z_id, tx));
zap_remove_int(zfsvfs->z_os, zfsvfs->z_unlinkedobj, zp->z_id, tx));
zfs_znode_delete(zp, tx);
@ -748,7 +748,7 @@ int
zfs_link_create(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag)
{
znode_t *dzp = dl->dl_dzp;
zfs_sb_t *zsb = ZTOZSB(zp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
uint64_t value;
int zp_is_dir = S_ISDIR(ZTOI(zp)->i_mode);
sa_bulk_attr_t bulk[5];
@ -772,17 +772,17 @@ zfs_link_create(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag)
*/
inc_nlink(ZTOI(zp));
links = ZTOI(zp)->i_nlink;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb), NULL,
&links, sizeof (links));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs),
NULL, &links, sizeof (links));
}
}
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zsb), NULL,
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
&dzp->z_id, sizeof (dzp->z_id));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, sizeof (zp->z_pflags));
if (!(flag & ZNEW)) {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL,
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
ctime, sizeof (ctime));
zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime,
ctime);
@ -798,15 +798,15 @@ zfs_link_create(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag)
inc_nlink(ZTOI(dzp));
links = ZTOI(dzp)->i_nlink;
count = 0;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL,
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
&dzp->z_size, sizeof (dzp->z_size));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb), NULL,
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
&links, sizeof (links));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL,
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
mtime, sizeof (mtime));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL,
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
ctime, sizeof (ctime));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&dzp->z_pflags, sizeof (dzp->z_pflags));
zfs_tstamp_update_setup(dzp, CONTENT_MODIFIED, mtime, ctime);
error = sa_bulk_update(dzp->z_sa_hdl, bulk, count, tx);
@ -881,7 +881,7 @@ zfs_link_destroy(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag,
boolean_t *unlinkedp)
{
znode_t *dzp = dl->dl_dzp;
zfs_sb_t *zsb = ZTOZSB(dzp);
zfsvfs_t *zfsvfs = ZTOZSB(dzp);
int zp_is_dir = S_ISDIR(ZTOI(zp)->i_mode);
boolean_t unlinked = B_FALSE;
sa_bulk_attr_t bulk[5];
@ -925,15 +925,15 @@ zfs_link_destroy(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag,
clear_nlink(ZTOI(zp));
unlinked = B_TRUE;
} else {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb),
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs),
NULL, &ctime, sizeof (ctime));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb),
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
NULL, &zp->z_pflags, sizeof (zp->z_pflags));
zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime,
ctime);
}
links = ZTOI(zp)->i_nlink;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb),
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs),
NULL, &links, sizeof (links));
error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
count = 0;
@ -950,15 +950,15 @@ zfs_link_destroy(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag,
if (zp_is_dir)
drop_nlink(ZTOI(dzp)); /* ".." link from zp */
links = ZTOI(dzp)->i_nlink;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb),
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs),
NULL, &links, sizeof (links));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb),
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs),
NULL, &dzp->z_size, sizeof (dzp->z_size));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb),
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs),
NULL, ctime, sizeof (ctime));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb),
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs),
NULL, mtime, sizeof (mtime));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb),
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
NULL, &dzp->z_pflags, sizeof (dzp->z_pflags));
zfs_tstamp_update_setup(dzp, CONTENT_MODIFIED, mtime, ctime);
error = sa_bulk_update(dzp->z_sa_hdl, bulk, count, tx);
@ -987,7 +987,7 @@ zfs_dirempty(znode_t *dzp)
int
zfs_make_xattrdir(znode_t *zp, vattr_t *vap, struct inode **xipp, cred_t *cr)
{
zfs_sb_t *zsb = ZTOZSB(zp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
znode_t *xzp;
dmu_tx_t *tx;
int error;
@ -1005,19 +1005,19 @@ zfs_make_xattrdir(znode_t *zp, vattr_t *vap, struct inode **xipp, cred_t *cr)
if ((error = zfs_acl_ids_create(zp, IS_XATTR, vap, cr, NULL,
&acl_ids)) != 0)
return (error);
if (zfs_acl_ids_overquota(zsb, &acl_ids)) {
if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
zfs_acl_ids_free(&acl_ids);
return (SET_ERROR(EDQUOT));
}
tx = dmu_tx_create(zsb->z_os);
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
ZFS_SA_BASE_ATTR_SIZE);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
fuid_dirtied = zsb->z_fuid_dirty;
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zsb, tx);
zfs_fuid_txhold(zfsvfs, tx);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
zfs_acl_ids_free(&acl_ids);
@ -1027,19 +1027,19 @@ zfs_make_xattrdir(znode_t *zp, vattr_t *vap, struct inode **xipp, cred_t *cr)
zfs_mknode(zp, vap, tx, cr, IS_XATTR, &xzp, &acl_ids);
if (fuid_dirtied)
zfs_fuid_sync(zsb, tx);
zfs_fuid_sync(zfsvfs, tx);
#ifdef DEBUG
error = sa_lookup(xzp->z_sa_hdl, SA_ZPL_PARENT(zsb),
error = sa_lookup(xzp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
&parent, sizeof (parent));
ASSERT(error == 0 && parent == zp->z_id);
#endif
VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_XATTR(zsb), &xzp->z_id,
VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), &xzp->z_id,
sizeof (xzp->z_id), tx));
if (!zp->z_unlinked)
(void) zfs_log_create(zsb->z_log, tx, TX_MKXATTR, zp,
(void) zfs_log_create(zfsvfs->z_log, tx, TX_MKXATTR, zp,
xzp, "", NULL, acl_ids.z_fuidp, vap);
zfs_acl_ids_free(&acl_ids);
@ -1066,7 +1066,7 @@ zfs_make_xattrdir(znode_t *zp, vattr_t *vap, struct inode **xipp, cred_t *cr)
int
zfs_get_xattrdir(znode_t *zp, struct inode **xipp, cred_t *cr, int flags)
{
zfs_sb_t *zsb = ZTOZSB(zp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
znode_t *xzp;
zfs_dirlock_t *dl;
vattr_t va;
@ -1087,7 +1087,7 @@ top:
return (SET_ERROR(ENOENT));
}
if (zfs_is_readonly(zsb)) {
if (zfs_is_readonly(zfsvfs)) {
zfs_dirent_unlock(dl);
return (SET_ERROR(EROFS));
}
@ -1137,17 +1137,17 @@ zfs_sticky_remove_access(znode_t *zdp, znode_t *zp, cred_t *cr)
uid_t uid;
uid_t downer;
uid_t fowner;
zfs_sb_t *zsb = ZTOZSB(zdp);
zfsvfs_t *zfsvfs = ZTOZSB(zdp);
if (zsb->z_replay)
if (zfsvfs->z_replay)
return (0);
if ((zdp->z_mode & S_ISVTX) == 0)
return (0);
downer = zfs_fuid_map_id(zsb, KUID_TO_SUID(ZTOI(zdp)->i_uid),
downer = zfs_fuid_map_id(zfsvfs, KUID_TO_SUID(ZTOI(zdp)->i_uid),
cr, ZFS_OWNER);
fowner = zfs_fuid_map_id(zsb, KUID_TO_SUID(ZTOI(zp)->i_uid),
fowner = zfs_fuid_map_id(zfsvfs, KUID_TO_SUID(ZTOI(zp)->i_uid),
cr, ZFS_OWNER);
if ((uid = crgetuid(cr)) == downer || uid == fowner ||

View File

@ -46,7 +46,7 @@
* two AVL trees are created. One tree is keyed by the index number
* and the other by the domain string. Nodes are never removed from
* trees, but new entries may be added. If a new entry is added then
* the zsb->z_fuid_dirty flag is set to true and the caller will then
* the zfsvfs->z_fuid_dirty flag is set to true and the caller will then
* be responsible for calling zfs_fuid_sync() to sync the changes to disk.
*
*/
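
The pattern this comment describes, an append-only index that lives in memory until a caller notices the dirty flag and syncs it, is easy to see in miniature. Below is a plain C sketch of that idea only, not the ZFS implementation: it uses a flat array where the real code uses the two AVL trees, and every name in it is made up for illustration.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>

/* One entry per domain string; index 0 is reserved to mean "no domain". */
struct fuid_entry {
	char		*fe_domain;
	uint32_t	 fe_idx;
};

struct fuid_table {
	struct fuid_entry	ft_ent[64];
	uint32_t		ft_count;
	bool			ft_dirty;	/* set on add; caller must sync */
};

/* Find a domain, optionally adding it; add only sets the dirty flag. */
static int
fuid_find(struct fuid_table *ft, const char *domain, bool addok)
{
	uint32_t i;

	for (i = 0; i < ft->ft_count; i++)
		if (strcmp(ft->ft_ent[i].fe_domain, domain) == 0)
			return (ft->ft_ent[i].fe_idx);
	if (!addok || ft->ft_count == 64)
		return (-1);
	ft->ft_ent[ft->ft_count].fe_domain = strdup(domain);
	ft->ft_ent[ft->ft_count].fe_idx = ft->ft_count + 1;	/* 1-based */
	ft->ft_dirty = true;		/* entry exists only in memory for now */
	return (ft->ft_ent[ft->ft_count++].fe_idx);
}

/* Stand-in for the sync step: persist the table, then clear the flag. */
static void
fuid_sync(struct fuid_table *ft)
{
	if (!ft->ft_dirty)
		return;
	/* the real code packs an nvlist and writes it to the objset here */
	ft->ft_dirty = false;
}

int
main(void)
{
	struct fuid_table ft = { .ft_count = 0, .ft_dirty = false };
	int idx = fuid_find(&ft, "EXAMPLE.COM", true);

	printf("idx=%d dirty=%d\n", idx, (int)ft.ft_dirty);
	fuid_sync(&ft);
	printf("dirty after sync=%d\n", (int)ft.ft_dirty);
	return (0);
}
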
@ -191,34 +191,34 @@ zfs_fuid_idx_domain(avl_tree_t *idx_tree, uint32_t idx)
* Load the fuid table(s) into memory.
*/
static void
zfs_fuid_init(zfs_sb_t *zsb)
zfs_fuid_init(zfsvfs_t *zfsvfs)
{
rw_enter(&zsb->z_fuid_lock, RW_WRITER);
rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);
if (zsb->z_fuid_loaded) {
rw_exit(&zsb->z_fuid_lock);
if (zfsvfs->z_fuid_loaded) {
rw_exit(&zfsvfs->z_fuid_lock);
return;
}
zfs_fuid_avl_tree_create(&zsb->z_fuid_idx, &zsb->z_fuid_domain);
zfs_fuid_avl_tree_create(&zfsvfs->z_fuid_idx, &zfsvfs->z_fuid_domain);
(void) zap_lookup(zsb->z_os, MASTER_NODE_OBJ,
ZFS_FUID_TABLES, 8, 1, &zsb->z_fuid_obj);
if (zsb->z_fuid_obj != 0) {
zsb->z_fuid_size = zfs_fuid_table_load(zsb->z_os,
zsb->z_fuid_obj, &zsb->z_fuid_idx,
&zsb->z_fuid_domain);
(void) zap_lookup(zfsvfs->z_os, MASTER_NODE_OBJ,
ZFS_FUID_TABLES, 8, 1, &zfsvfs->z_fuid_obj);
if (zfsvfs->z_fuid_obj != 0) {
zfsvfs->z_fuid_size = zfs_fuid_table_load(zfsvfs->z_os,
zfsvfs->z_fuid_obj, &zfsvfs->z_fuid_idx,
&zfsvfs->z_fuid_domain);
}
zsb->z_fuid_loaded = B_TRUE;
rw_exit(&zsb->z_fuid_lock);
zfsvfs->z_fuid_loaded = B_TRUE;
rw_exit(&zfsvfs->z_fuid_lock);
}
/*
* sync out AVL trees to persistent storage.
*/
void
zfs_fuid_sync(zfs_sb_t *zsb, dmu_tx_t *tx)
zfs_fuid_sync(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
{
nvlist_t *nvp;
nvlist_t **fuids;
@ -229,30 +229,30 @@ zfs_fuid_sync(zfs_sb_t *zsb, dmu_tx_t *tx)
int numnodes;
int i;
if (!zsb->z_fuid_dirty) {
if (!zfsvfs->z_fuid_dirty) {
return;
}
rw_enter(&zsb->z_fuid_lock, RW_WRITER);
rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);
/*
* First see if table needs to be created?
*/
if (zsb->z_fuid_obj == 0) {
zsb->z_fuid_obj = dmu_object_alloc(zsb->z_os,
if (zfsvfs->z_fuid_obj == 0) {
zfsvfs->z_fuid_obj = dmu_object_alloc(zfsvfs->z_os,
DMU_OT_FUID, 1 << 14, DMU_OT_FUID_SIZE,
sizeof (uint64_t), tx);
VERIFY(zap_add(zsb->z_os, MASTER_NODE_OBJ,
VERIFY(zap_add(zfsvfs->z_os, MASTER_NODE_OBJ,
ZFS_FUID_TABLES, sizeof (uint64_t), 1,
&zsb->z_fuid_obj, tx) == 0);
&zfsvfs->z_fuid_obj, tx) == 0);
}
VERIFY(nvlist_alloc(&nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
numnodes = avl_numnodes(&zsb->z_fuid_idx);
numnodes = avl_numnodes(&zfsvfs->z_fuid_idx);
fuids = kmem_alloc(numnodes * sizeof (void *), KM_SLEEP);
for (i = 0, domnode = avl_first(&zsb->z_fuid_domain); domnode; i++,
domnode = AVL_NEXT(&zsb->z_fuid_domain, domnode)) {
for (i = 0, domnode = avl_first(&zfsvfs->z_fuid_domain); domnode; i++,
domnode = AVL_NEXT(&zfsvfs->z_fuid_domain, domnode)) {
VERIFY(nvlist_alloc(&fuids[i], NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_uint64(fuids[i], FUID_IDX,
domnode->f_idx) == 0);
@ -270,29 +270,30 @@ zfs_fuid_sync(zfs_sb_t *zsb, dmu_tx_t *tx)
VERIFY(nvlist_pack(nvp, &packed, &nvsize,
NV_ENCODE_XDR, KM_SLEEP) == 0);
nvlist_free(nvp);
zsb->z_fuid_size = nvsize;
dmu_write(zsb->z_os, zsb->z_fuid_obj, 0, zsb->z_fuid_size, packed, tx);
kmem_free(packed, zsb->z_fuid_size);
VERIFY(0 == dmu_bonus_hold(zsb->z_os, zsb->z_fuid_obj,
zfsvfs->z_fuid_size = nvsize;
dmu_write(zfsvfs->z_os, zfsvfs->z_fuid_obj, 0,
zfsvfs->z_fuid_size, packed, tx);
kmem_free(packed, zfsvfs->z_fuid_size);
VERIFY(0 == dmu_bonus_hold(zfsvfs->z_os, zfsvfs->z_fuid_obj,
FTAG, &db));
dmu_buf_will_dirty(db, tx);
*(uint64_t *)db->db_data = zsb->z_fuid_size;
*(uint64_t *)db->db_data = zfsvfs->z_fuid_size;
dmu_buf_rele(db, FTAG);
zsb->z_fuid_dirty = B_FALSE;
rw_exit(&zsb->z_fuid_lock);
zfsvfs->z_fuid_dirty = B_FALSE;
rw_exit(&zfsvfs->z_fuid_lock);
}
/*
* Query domain table for a given domain.
*
* If domain isn't found and addok is set, it is added to AVL trees and
* the zsb->z_fuid_dirty flag will be set to TRUE. It will then be
* the zfsvfs->z_fuid_dirty flag will be set to TRUE. It will then be
* necessary for the caller or another thread to detect the dirty table
* and sync out the changes.
*/
int
zfs_fuid_find_by_domain(zfs_sb_t *zsb, const char *domain,
zfs_fuid_find_by_domain(zfsvfs_t *zfsvfs, const char *domain,
char **retdomain, boolean_t addok)
{
fuid_domain_t searchnode, *findnode;
@ -313,23 +314,23 @@ zfs_fuid_find_by_domain(zfs_sb_t *zsb, const char *domain,
searchnode.f_ksid = ksid_lookupdomain(domain);
if (retdomain)
*retdomain = searchnode.f_ksid->kd_name;
if (!zsb->z_fuid_loaded)
zfs_fuid_init(zsb);
if (!zfsvfs->z_fuid_loaded)
zfs_fuid_init(zfsvfs);
retry:
rw_enter(&zsb->z_fuid_lock, rw);
findnode = avl_find(&zsb->z_fuid_domain, &searchnode, &loc);
rw_enter(&zfsvfs->z_fuid_lock, rw);
findnode = avl_find(&zfsvfs->z_fuid_domain, &searchnode, &loc);
if (findnode) {
rw_exit(&zsb->z_fuid_lock);
rw_exit(&zfsvfs->z_fuid_lock);
ksiddomain_rele(searchnode.f_ksid);
return (findnode->f_idx);
} else if (addok) {
fuid_domain_t *domnode;
uint64_t retidx;
if (rw == RW_READER && !rw_tryupgrade(&zsb->z_fuid_lock)) {
rw_exit(&zsb->z_fuid_lock);
if (rw == RW_READER && !rw_tryupgrade(&zfsvfs->z_fuid_lock)) {
rw_exit(&zfsvfs->z_fuid_lock);
rw = RW_WRITER;
goto retry;
}
@ -337,15 +338,15 @@ retry:
domnode = kmem_alloc(sizeof (fuid_domain_t), KM_SLEEP);
domnode->f_ksid = searchnode.f_ksid;
retidx = domnode->f_idx = avl_numnodes(&zsb->z_fuid_idx) + 1;
retidx = domnode->f_idx = avl_numnodes(&zfsvfs->z_fuid_idx) + 1;
avl_add(&zsb->z_fuid_domain, domnode);
avl_add(&zsb->z_fuid_idx, domnode);
zsb->z_fuid_dirty = B_TRUE;
rw_exit(&zsb->z_fuid_lock);
avl_add(&zfsvfs->z_fuid_domain, domnode);
avl_add(&zfsvfs->z_fuid_idx, domnode);
zfsvfs->z_fuid_dirty = B_TRUE;
rw_exit(&zfsvfs->z_fuid_lock);
return (retidx);
} else {
rw_exit(&zsb->z_fuid_lock);
rw_exit(&zfsvfs->z_fuid_lock);
return (-1);
}
}
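
The retry: logic above is a read-to-write lock upgrade: look up under the reader lock, and if an insert turns out to be necessary and rw_tryupgrade() fails, drop the lock, retake it as a writer, and search again, because another thread may have added the entry in the window. POSIX rwlocks have no upgrade primitive at all, so a userland sketch of the same shape (helper names invented here) always takes the drop-and-retake path:

#include <pthread.h>
#include <string.h>

#define	NDOMAINS	64

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
static char *domains[NDOMAINS];
static int ndomains;

/* Unlocked helper; callers must hold table_lock in some mode. */
static int
lookup_locked(const char *domain)
{
	int i;

	for (i = 0; i < ndomains; i++)
		if (strcmp(domains[i], domain) == 0)
			return (i + 1);
	return (-1);
}

/*
 * Return the index for 'domain', inserting it if needed: look up under
 * the read lock and only take the write lock when an insert is required.
 */
int
domain_to_idx(const char *domain)
{
	int idx;

	(void) pthread_rwlock_rdlock(&table_lock);
	idx = lookup_locked(domain);
	(void) pthread_rwlock_unlock(&table_lock);
	if (idx != -1)
		return (idx);

	/* "Upgrade" by dropping and retaking; must re-check after the race. */
	(void) pthread_rwlock_wrlock(&table_lock);
	idx = lookup_locked(domain);
	if (idx == -1 && ndomains < NDOMAINS) {
		domains[ndomains] = strdup(domain);
		idx = ++ndomains;	/* 1-based index, 0 means "none" */
	}
	(void) pthread_rwlock_unlock(&table_lock);
	return (idx);
}
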
@ -357,23 +358,23 @@ retry:
*
*/
const char *
zfs_fuid_find_by_idx(zfs_sb_t *zsb, uint32_t idx)
zfs_fuid_find_by_idx(zfsvfs_t *zfsvfs, uint32_t idx)
{
char *domain;
if (idx == 0 || !zsb->z_use_fuids)
if (idx == 0 || !zfsvfs->z_use_fuids)
return (NULL);
if (!zsb->z_fuid_loaded)
zfs_fuid_init(zsb);
if (!zfsvfs->z_fuid_loaded)
zfs_fuid_init(zfsvfs);
rw_enter(&zsb->z_fuid_lock, RW_READER);
rw_enter(&zfsvfs->z_fuid_lock, RW_READER);
if (zsb->z_fuid_obj || zsb->z_fuid_dirty)
domain = zfs_fuid_idx_domain(&zsb->z_fuid_idx, idx);
if (zfsvfs->z_fuid_obj || zfsvfs->z_fuid_dirty)
domain = zfs_fuid_idx_domain(&zfsvfs->z_fuid_idx, idx);
else
domain = nulldomain;
rw_exit(&zsb->z_fuid_lock);
rw_exit(&zfsvfs->z_fuid_lock);
ASSERT(domain);
return (domain);
@ -389,7 +390,7 @@ zfs_fuid_map_ids(znode_t *zp, cred_t *cr, uid_t *uidp, uid_t *gidp)
}
uid_t
zfs_fuid_map_id(zfs_sb_t *zsb, uint64_t fuid,
zfs_fuid_map_id(zfsvfs_t *zfsvfs, uint64_t fuid,
cred_t *cr, zfs_fuid_type_t type)
{
#ifdef HAVE_KSID
@ -400,7 +401,7 @@ zfs_fuid_map_id(zfs_sb_t *zsb, uint64_t fuid,
if (index == 0)
return (fuid);
domain = zfs_fuid_find_by_idx(zsb, index);
domain = zfs_fuid_find_by_idx(zfsvfs, index);
ASSERT(domain != NULL);
if (type == ZFS_OWNER || type == ZFS_ACE_USER) {
@ -495,7 +496,7 @@ zfs_fuid_node_add(zfs_fuid_info_t **fuidpp, const char *domain, uint32_t rid,
* be used if it exists.
*/
uint64_t
zfs_fuid_create_cred(zfs_sb_t *zsb, zfs_fuid_type_t type,
zfs_fuid_create_cred(zfsvfs_t *zfsvfs, zfs_fuid_type_t type,
cred_t *cr, zfs_fuid_info_t **fuidp)
{
uint64_t idx;
@ -509,7 +510,7 @@ zfs_fuid_create_cred(zfs_sb_t *zsb, zfs_fuid_type_t type,
ksid = crgetsid(cr, (type == ZFS_OWNER) ? KSID_OWNER : KSID_GROUP);
if (!zsb->z_use_fuids || (ksid == NULL)) {
if (!zfsvfs->z_use_fuids || (ksid == NULL)) {
id = (type == ZFS_OWNER) ? crgetuid(cr) : crgetgid(cr);
if (IS_EPHEMERAL(id))
@ -532,7 +533,7 @@ zfs_fuid_create_cred(zfs_sb_t *zsb, zfs_fuid_type_t type,
rid = ksid_getrid(ksid);
domain = ksid_getdomain(ksid);
idx = zfs_fuid_find_by_domain(zsb, domain, &kdomain, B_TRUE);
idx = zfs_fuid_find_by_domain(zfsvfs, domain, &kdomain, B_TRUE);
zfs_fuid_node_add(fuidp, kdomain, rid, idx, id, type);
@ -550,10 +551,10 @@ zfs_fuid_create_cred(zfs_sb_t *zsb, zfs_fuid_type_t type,
*
* During replay operations the domain+rid information is
* found in the zfs_fuid_info_t that the replay code has
* attached to the zsb of the file system.
* attached to the zfsvfs of the file system.
*/
uint64_t
zfs_fuid_create(zfs_sb_t *zsb, uint64_t id, cred_t *cr,
zfs_fuid_create(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr,
zfs_fuid_type_t type, zfs_fuid_info_t **fuidpp)
{
#ifdef HAVE_KSID
@ -574,11 +575,11 @@ zfs_fuid_create(zfs_sb_t *zsb, uint64_t id, cred_t *cr,
* chmod.
*/
if (!zsb->z_use_fuids || !IS_EPHEMERAL(id) || fuid_idx != 0)
if (!zfsvfs->z_use_fuids || !IS_EPHEMERAL(id) || fuid_idx != 0)
return (id);
if (zsb->z_replay) {
fuidp = zsb->z_fuid_replay;
if (zfsvfs->z_replay) {
fuidp = zfsvfs->z_fuid_replay;
/*
* If we are passed an ephemeral id, but no
@ -628,9 +629,9 @@ zfs_fuid_create(zfs_sb_t *zsb, uint64_t id, cred_t *cr,
}
}
idx = zfs_fuid_find_by_domain(zsb, domain, &kdomain, B_TRUE);
idx = zfs_fuid_find_by_domain(zfsvfs, domain, &kdomain, B_TRUE);
if (!zsb->z_replay)
if (!zfsvfs->z_replay)
zfs_fuid_node_add(fuidpp, kdomain,
rid, idx, id, type);
else if (zfuid != NULL) {
@ -647,15 +648,15 @@ zfs_fuid_create(zfs_sb_t *zsb, uint64_t id, cred_t *cr,
}
void
zfs_fuid_destroy(zfs_sb_t *zsb)
zfs_fuid_destroy(zfsvfs_t *zfsvfs)
{
rw_enter(&zsb->z_fuid_lock, RW_WRITER);
if (!zsb->z_fuid_loaded) {
rw_exit(&zsb->z_fuid_lock);
rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);
if (!zfsvfs->z_fuid_loaded) {
rw_exit(&zfsvfs->z_fuid_lock);
return;
}
zfs_fuid_table_destroy(&zsb->z_fuid_idx, &zsb->z_fuid_domain);
rw_exit(&zsb->z_fuid_lock);
zfs_fuid_table_destroy(&zfsvfs->z_fuid_idx, &zfsvfs->z_fuid_domain);
rw_exit(&zfsvfs->z_fuid_lock);
}
/*
@ -709,7 +710,7 @@ zfs_fuid_info_free(zfs_fuid_info_t *fuidp)
* Will use a straight FUID compare when possible.
*/
boolean_t
zfs_groupmember(zfs_sb_t *zsb, uint64_t id, cred_t *cr)
zfs_groupmember(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr)
{
#ifdef HAVE_KSID
ksid_t *ksid = crgetsid(cr, KSID_GROUP);
@ -733,7 +734,7 @@ zfs_groupmember(zfs_sb_t *zsb, uint64_t id, cred_t *cr)
} else {
const char *domain;
domain = zfs_fuid_find_by_idx(zsb, idx);
domain = zfs_fuid_find_by_idx(zfsvfs, idx);
ASSERT(domain != NULL);
if (strcmp(domain,
@ -751,7 +752,7 @@ zfs_groupmember(zfs_sb_t *zsb, uint64_t id, cred_t *cr)
/*
* Not found in ksidlist, check posix groups
*/
gid = zfs_fuid_map_id(zsb, id, cr, ZFS_GROUP);
gid = zfs_fuid_map_id(zfsvfs, id, cr, ZFS_GROUP);
return (groupmember(gid, cr));
#else
return (B_TRUE);
@ -759,17 +760,17 @@ zfs_groupmember(zfs_sb_t *zsb, uint64_t id, cred_t *cr)
}
void
zfs_fuid_txhold(zfs_sb_t *zsb, dmu_tx_t *tx)
zfs_fuid_txhold(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
{
if (zsb->z_fuid_obj == 0) {
if (zfsvfs->z_fuid_obj == 0) {
dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
FUID_SIZE_ESTIMATE(zsb));
FUID_SIZE_ESTIMATE(zfsvfs));
dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, FALSE, NULL);
} else {
dmu_tx_hold_bonus(tx, zsb->z_fuid_obj);
dmu_tx_hold_write(tx, zsb->z_fuid_obj, 0,
FUID_SIZE_ESTIMATE(zsb));
dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj);
dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0,
FUID_SIZE_ESTIMATE(zfsvfs));
}
}
#endif

View File

@ -548,7 +548,7 @@ zfs_set_slabel_policy(const char *name, char *strval, cred_t *cr)
/*
* If the existing dataset label is nondefault, check if the
* dataset is mounted (label cannot be changed while mounted).
* Get the zfs_sb_t; if there isn't one, then the dataset isn't
* Get the zfsvfs_t; if there isn't one, then the dataset isn't
* mounted (or isn't a dataset, doesn't exist, ...).
*/
if (strcasecmp(ds_hexsl, ZFS_MLSLABEL_DEFAULT) != 0) {
@ -1394,7 +1394,7 @@ put_nvlist(zfs_cmd_t *zc, nvlist_t *nvl)
}
static int
get_zfs_sb(const char *dsname, zfs_sb_t **zsbp)
getzfsvfs(const char *dsname, zfsvfs_t **zfvp)
{
objset_t *os;
int error;
@ -1408,10 +1408,10 @@ get_zfs_sb(const char *dsname, zfs_sb_t **zsbp)
}
mutex_enter(&os->os_user_ptr_lock);
*zsbp = dmu_objset_get_user(os);
*zfvp = dmu_objset_get_user(os);
/* bump s_active only when non-zero to prevent umount race */
if (*zsbp == NULL || (*zsbp)->z_sb == NULL ||
!atomic_inc_not_zero(&((*zsbp)->z_sb->s_active))) {
if (*zfvp == NULL || (*zfvp)->z_sb == NULL ||
!atomic_inc_not_zero(&((*zfvp)->z_sb->s_active))) {
error = SET_ERROR(ESRCH);
}
mutex_exit(&os->os_user_ptr_lock);
@ -1420,28 +1420,28 @@ get_zfs_sb(const char *dsname, zfs_sb_t **zsbp)
}
/*
* Find a zfs_sb_t for a mounted filesystem, or create our own, in which
* Find a zfsvfs_t for a mounted filesystem, or create our own, in which
* case its z_sb will be NULL, and it will be opened as the owner.
* If 'writer' is set, the z_teardown_lock will be held for RW_WRITER,
* which prevents all inode ops from running.
*/
static int
zfs_sb_hold(const char *name, void *tag, zfs_sb_t **zsbp, boolean_t writer)
zfsvfs_hold(const char *name, void *tag, zfsvfs_t **zfvp, boolean_t writer)
{
int error = 0;
if (get_zfs_sb(name, zsbp) != 0)
error = zfs_sb_create(name, NULL, zsbp);
if (getzfsvfs(name, zfvp) != 0)
error = zfsvfs_create(name, zfvp);
if (error == 0) {
rrm_enter(&(*zsbp)->z_teardown_lock, (writer) ? RW_WRITER :
rrm_enter(&(*zfvp)->z_teardown_lock, (writer) ? RW_WRITER :
RW_READER, tag);
if ((*zsbp)->z_unmounted) {
if ((*zfvp)->z_unmounted) {
/*
* XXX we could probably try again, since the unmounting
* thread should be just about to disassociate the
* objset from the zsb.
* objset from the zfsvfs.
*/
rrm_exit(&(*zsbp)->z_teardown_lock, tag);
rrm_exit(&(*zfvp)->z_teardown_lock, tag);
return (SET_ERROR(EBUSY));
}
}
@ -1449,15 +1449,15 @@ zfs_sb_hold(const char *name, void *tag, zfs_sb_t **zsbp, boolean_t writer)
}
static void
zfs_sb_rele(zfs_sb_t *zsb, void *tag)
zfsvfs_rele(zfsvfs_t *zfsvfs, void *tag)
{
rrm_exit(&zsb->z_teardown_lock, tag);
rrm_exit(&zfsvfs->z_teardown_lock, tag);
if (zsb->z_sb) {
deactivate_super(zsb->z_sb);
if (zfsvfs->z_sb) {
deactivate_super(zfsvfs->z_sb);
} else {
dmu_objset_disown(zsb->z_os, zsb);
zfs_sb_free(zsb);
dmu_objset_disown(zfsvfs->z_os, zfsvfs);
zfsvfs_free(zfsvfs);
}
}
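
zfsvfs_hold() and zfsvfs_rele() give the ioctl handlers a single code path whether the dataset is mounted (borrow the live zfsvfs_t and bump s_active) or not (create a private one as owner and free it on release). A stripped-down userland sketch of that borrow-or-own idea follows; the toy types and helpers are inventions for illustration, not the ZFS API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>

/* Toy "mount table": one filesystem is mounted, everything else is not. */
struct toy_fs {
	char	name[32];
	int	refs;
};

static struct toy_fs mounted = { "tank/fs", 1 };

static struct toy_fs *
find_mounted(const char *name)	/* borrow a live instance and take a ref */
{
	if (strcmp(name, mounted.name) == 0) {
		mounted.refs++;
		return (&mounted);
	}
	return (NULL);
}

struct fs_handle {
	struct toy_fs	*fh_fs;
	bool		 fh_owned;	/* private instance: free on release */
};

static int
fs_hold(const char *name, struct fs_handle *fh)
{
	fh->fh_fs = find_mounted(name);
	fh->fh_owned = false;
	if (fh->fh_fs == NULL) {	/* not mounted: own a private copy */
		fh->fh_fs = calloc(1, sizeof (*fh->fh_fs));
		if (fh->fh_fs == NULL)
			return (-1);
		snprintf(fh->fh_fs->name, sizeof (fh->fh_fs->name), "%s", name);
		fh->fh_owned = true;
	}
	return (0);
}

static void
fs_rele(struct fs_handle *fh)
{
	if (fh->fh_owned)
		free(fh->fh_fs);	/* like disown-and-free */
	else
		mounted.refs--;		/* like dropping the borrowed ref */
}

int
main(void)
{
	struct fs_handle fh;

	if (fs_hold("tank/other", &fh) == 0) {
		printf("owned=%d\n", (int)fh.fh_owned);
		fs_rele(&fh);
	}
	return (0);
}
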
@ -2324,7 +2324,7 @@ zfs_prop_set_userquota(const char *dsname, nvpair_t *pair)
zfs_userquota_prop_t type;
uint64_t rid;
uint64_t quota;
zfs_sb_t *zsb;
zfsvfs_t *zfsvfs;
int err;
if (nvpair_type(pair) == DATA_TYPE_NVLIST) {
@ -2349,10 +2349,10 @@ zfs_prop_set_userquota(const char *dsname, nvpair_t *pair)
rid = valary[1];
quota = valary[2];
err = zfs_sb_hold(dsname, FTAG, &zsb, B_FALSE);
err = zfsvfs_hold(dsname, FTAG, &zfsvfs, B_FALSE);
if (err == 0) {
err = zfs_set_userquota(zsb, type, domain, rid, quota);
zfs_sb_rele(zsb, FTAG);
err = zfs_set_userquota(zfsvfs, type, domain, rid, quota);
zfsvfs_rele(zfsvfs, FTAG);
}
return (err);
@ -2429,13 +2429,13 @@ zfs_prop_set_special(const char *dsname, zprop_source_t source,
break;
case ZFS_PROP_VERSION:
{
zfs_sb_t *zsb;
zfsvfs_t *zfsvfs;
if ((err = zfs_sb_hold(dsname, FTAG, &zsb, B_TRUE)) != 0)
if ((err = zfsvfs_hold(dsname, FTAG, &zfsvfs, B_TRUE)) != 0)
break;
err = zfs_set_version(zsb, intval);
zfs_sb_rele(zsb, FTAG);
err = zfs_set_version(zfsvfs, intval);
zfsvfs_rele(zfsvfs, FTAG);
if (err == 0 && intval >= ZPL_VERSION_USERSPACE) {
zfs_cmd_t *zc;
@ -3640,23 +3640,23 @@ zfs_ioc_destroy(zfs_cmd_t *zc)
static int
zfs_ioc_rollback(const char *fsname, nvlist_t *args, nvlist_t *outnvl)
{
zfs_sb_t *zsb;
zfsvfs_t *zfsvfs;
zvol_state_t *zv;
int error;
if (get_zfs_sb(fsname, &zsb) == 0) {
if (getzfsvfs(fsname, &zfsvfs) == 0) {
dsl_dataset_t *ds;
ds = dmu_objset_ds(zsb->z_os);
error = zfs_suspend_fs(zsb);
ds = dmu_objset_ds(zfsvfs->z_os);
error = zfs_suspend_fs(zfsvfs);
if (error == 0) {
int resume_err;
error = dsl_dataset_rollback(fsname, zsb, outnvl);
resume_err = zfs_resume_fs(zsb, ds);
error = dsl_dataset_rollback(fsname, zfsvfs, outnvl);
resume_err = zfs_resume_fs(zfsvfs, ds);
error = error ? error : resume_err;
}
deactivate_super(zsb->z_sb);
deactivate_super(zfsvfs->z_sb);
} else if ((zv = zvol_suspend(fsname)) != NULL) {
error = dsl_dataset_rollback(fsname, zvol_tag(zv), outnvl);
zvol_resume(zv);
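
The rollback path above follows a discipline that recurs in this file: suspend the mounted filesystem, run the operation, always attempt the resume, and report the first error that occurred rather than letting the resume result mask it. A compact sketch of that shape, with stub helpers standing in for the real suspend/rollback/resume calls:

#include <stdio.h>

/* Stubs standing in for the suspend, rollback, and resume steps. */
static int suspend_fs(void)  { return (0); }
static int do_rollback(void) { return (0); }
static int resume_fs(void)   { return (0); }

static int
rollback_mounted_fs(void)
{
	int error, resume_err;

	error = suspend_fs();
	if (error == 0) {
		error = do_rollback();
		/* Resume unconditionally; keep the first failure, if any. */
		resume_err = resume_fs();
		error = error ? error : resume_err;
	}
	return (error);
}

int
main(void)
{
	printf("rollback: %d\n", rollback_mounted_fs());
	return (0);
}
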
@ -4246,25 +4246,25 @@ zfs_ioc_recv_impl(char *tofs, char *tosnap, char *origin,
action_handle);
if (error == 0) {
zfs_sb_t *zsb = NULL;
zfsvfs_t *zfsvfs = NULL;
zvol_state_t *zv = NULL;
if (get_zfs_sb(tofs, &zsb) == 0) {
if (getzfsvfs(tofs, &zfsvfs) == 0) {
/* online recv */
dsl_dataset_t *ds;
int end_err;
ds = dmu_objset_ds(zsb->z_os);
error = zfs_suspend_fs(zsb);
ds = dmu_objset_ds(zfsvfs->z_os);
error = zfs_suspend_fs(zfsvfs);
/*
* If the suspend fails, then the recv_end will
* likely also fail, and clean up after itself.
*/
end_err = dmu_recv_end(&drc, zsb);
end_err = dmu_recv_end(&drc, zfsvfs);
if (error == 0)
error = zfs_resume_fs(zsb, ds);
error = zfs_resume_fs(zfsvfs, ds);
error = error ? error : end_err;
deactivate_super(zsb->z_sb);
deactivate_super(zfsvfs->z_sb);
} else if ((zv = zvol_suspend(tofs)) != NULL) {
error = dmu_recv_end(&drc, zvol_tag(zv));
zvol_resume(zv);
@ -4869,19 +4869,19 @@ zfs_ioc_promote(zfs_cmd_t *zc)
static int
zfs_ioc_userspace_one(zfs_cmd_t *zc)
{
zfs_sb_t *zsb;
zfsvfs_t *zfsvfs;
int error;
if (zc->zc_objset_type >= ZFS_NUM_USERQUOTA_PROPS)
return (SET_ERROR(EINVAL));
error = zfs_sb_hold(zc->zc_name, FTAG, &zsb, B_FALSE);
error = zfsvfs_hold(zc->zc_name, FTAG, &zfsvfs, B_FALSE);
if (error != 0)
return (error);
error = zfs_userspace_one(zsb,
error = zfs_userspace_one(zfsvfs,
zc->zc_objset_type, zc->zc_value, zc->zc_guid, &zc->zc_cookie);
zfs_sb_rele(zsb, FTAG);
zfsvfs_rele(zfsvfs, FTAG);
return (error);
}
@ -4900,7 +4900,7 @@ zfs_ioc_userspace_one(zfs_cmd_t *zc)
static int
zfs_ioc_userspace_many(zfs_cmd_t *zc)
{
zfs_sb_t *zsb;
zfsvfs_t *zfsvfs;
int bufsize = zc->zc_nvlist_dst_size;
int error;
void *buf;
@ -4908,13 +4908,13 @@ zfs_ioc_userspace_many(zfs_cmd_t *zc)
if (bufsize <= 0)
return (SET_ERROR(ENOMEM));
error = zfs_sb_hold(zc->zc_name, FTAG, &zsb, B_FALSE);
error = zfsvfs_hold(zc->zc_name, FTAG, &zfsvfs, B_FALSE);
if (error != 0)
return (error);
buf = vmem_alloc(bufsize, KM_SLEEP);
error = zfs_userspace_many(zsb, zc->zc_objset_type, &zc->zc_cookie,
error = zfs_userspace_many(zfsvfs, zc->zc_objset_type, &zc->zc_cookie,
buf, &zc->zc_nvlist_dst_size);
if (error == 0) {
@ -4923,7 +4923,7 @@ zfs_ioc_userspace_many(zfs_cmd_t *zc)
zc->zc_nvlist_dst_size);
}
vmem_free(buf, bufsize);
zfs_sb_rele(zsb, FTAG);
zfsvfs_rele(zfsvfs, FTAG);
return (error);
}
@ -4940,10 +4940,10 @@ zfs_ioc_userspace_upgrade(zfs_cmd_t *zc)
{
objset_t *os;
int error = 0;
zfs_sb_t *zsb;
zfsvfs_t *zfsvfs;
if (get_zfs_sb(zc->zc_name, &zsb) == 0) {
if (!dmu_objset_userused_enabled(zsb->z_os)) {
if (getzfsvfs(zc->zc_name, &zfsvfs) == 0) {
if (!dmu_objset_userused_enabled(zfsvfs->z_os)) {
/*
* If userused is not enabled, it may be because the
* objset needs to be closed & reopened (to grow the
@ -4951,17 +4951,17 @@ zfs_ioc_userspace_upgrade(zfs_cmd_t *zc)
*/
dsl_dataset_t *ds;
ds = dmu_objset_ds(zsb->z_os);
error = zfs_suspend_fs(zsb);
ds = dmu_objset_ds(zfsvfs->z_os);
error = zfs_suspend_fs(zfsvfs);
if (error == 0) {
dmu_objset_refresh_ownership(zsb->z_os,
zsb);
error = zfs_resume_fs(zsb, ds);
dmu_objset_refresh_ownership(zfsvfs->z_os,
zfsvfs);
error = zfs_resume_fs(zfsvfs, ds);
}
}
if (error == 0)
error = dmu_objset_userspace_upgrade(zsb->z_os);
deactivate_super(zsb->z_sb);
error = dmu_objset_userspace_upgrade(zfsvfs->z_os);
deactivate_super(zfsvfs->z_sb);
} else {
/* XXX kind of reading contents without owning */
error = dmu_objset_hold(zc->zc_name, FTAG, &os);
@ -5127,10 +5127,10 @@ zfs_smb_acl_purge(znode_t *dzp)
{
zap_cursor_t zc;
zap_attribute_t zap;
zfs_sb_t *zsb = ZTOZSB(dzp);
zfsvfs_t *zfsvfs = ZTOZSB(dzp);
int error;
for (zap_cursor_init(&zc, zsb->z_os, dzp->z_id);
for (zap_cursor_init(&zc, zfsvfs->z_os, dzp->z_id);
(error = zap_cursor_retrieve(&zc, &zap)) == 0;
zap_cursor_advance(&zc)) {
if ((error = VOP_REMOVE(ZTOV(dzp), zap.za_name, kcred,
@ -5150,7 +5150,7 @@ zfs_ioc_smb_acl(zfs_cmd_t *zc)
znode_t *dzp;
vnode_t *resourcevp = NULL;
znode_t *sharedir;
zfs_sb_t *zsb;
zfsvfs_t *zfsvfs;
nvlist_t *nvlist;
char *src, *target;
vattr_t vattr;
@ -5171,17 +5171,17 @@ zfs_ioc_smb_acl(zfs_cmd_t *zc)
}
dzp = VTOZ(vp);
zsb = ZTOZSB(dzp);
ZFS_ENTER(zsb);
zfsvfs = ZTOZSB(dzp);
ZFS_ENTER(zfsvfs);
/*
* Create share dir if its missing.
*/
mutex_enter(&zsb->z_lock);
if (zsb->z_shares_dir == 0) {
mutex_enter(&zfsvfs->z_lock);
if (zfsvfs->z_shares_dir == 0) {
dmu_tx_t *tx;
tx = dmu_tx_create(zsb->z_os);
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, TRUE,
ZFS_SHARES_DIR);
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
@ -5189,22 +5189,22 @@ zfs_ioc_smb_acl(zfs_cmd_t *zc)
if (error != 0) {
dmu_tx_abort(tx);
} else {
error = zfs_create_share_dir(zsb, tx);
error = zfs_create_share_dir(zfsvfs, tx);
dmu_tx_commit(tx);
}
if (error != 0) {
mutex_exit(&zsb->z_lock);
mutex_exit(&zfsvfs->z_lock);
VN_RELE(vp);
ZFS_EXIT(zsb);
ZFS_EXIT(zfsvfs);
return (error);
}
}
mutex_exit(&zsb->z_lock);
mutex_exit(&zfsvfs->z_lock);
ASSERT(zsb->z_shares_dir);
if ((error = zfs_zget(zsb, zsb->z_shares_dir, &sharedir)) != 0) {
ASSERT(zfsvfs->z_shares_dir);
if ((error = zfs_zget(zfsvfs, zfsvfs->z_shares_dir, &sharedir)) != 0) {
VN_RELE(vp);
ZFS_EXIT(zsb);
ZFS_EXIT(zfsvfs);
return (error);
}
@ -5236,7 +5236,7 @@ zfs_ioc_smb_acl(zfs_cmd_t *zc)
zc->zc_nvlist_src_size, zc->zc_iflags, &nvlist)) != 0) {
VN_RELE(vp);
VN_RELE(ZTOV(sharedir));
ZFS_EXIT(zsb);
ZFS_EXIT(zfsvfs);
return (error);
}
if (nvlist_lookup_string(nvlist, ZFS_SMB_ACL_SRC, &src) ||
@ -5244,7 +5244,7 @@ zfs_ioc_smb_acl(zfs_cmd_t *zc)
&target)) {
VN_RELE(vp);
VN_RELE(ZTOV(sharedir));
ZFS_EXIT(zsb);
ZFS_EXIT(zfsvfs);
nvlist_free(nvlist);
return (error);
}
@ -5265,7 +5265,7 @@ zfs_ioc_smb_acl(zfs_cmd_t *zc)
VN_RELE(vp);
VN_RELE(ZTOV(sharedir));
ZFS_EXIT(zsb);
ZFS_EXIT(zfsvfs);
return (error);
#else

View File

@ -72,7 +72,7 @@ zfs_init_vattr(vattr_t *vap, uint64_t mask, uint64_t mode,
/* ARGSUSED */
static int
zfs_replay_error(zfs_sb_t *zsb, lr_t *lr, boolean_t byteswap)
zfs_replay_error(zfsvfs_t *zfsvfs, lr_t *lr, boolean_t byteswap)
{
return (SET_ERROR(ENOTSUP));
}
@ -265,7 +265,8 @@ zfs_replay_swap_attrs(lr_attr_t *lrattr)
* as option FUID information.
*/
static int
zfs_replay_create_acl(zfs_sb_t *zsb, lr_acl_create_t *lracl, boolean_t byteswap)
zfs_replay_create_acl(zfsvfs_t *zfsvfs,
lr_acl_create_t *lracl, boolean_t byteswap)
{
char *name = NULL; /* location determined later */
lr_create_t *lr = (lr_create_t *)lracl;
@ -303,7 +304,7 @@ zfs_replay_create_acl(zfs_sb_t *zsb, lr_acl_create_t *lracl, boolean_t byteswap)
}
}
if ((error = zfs_zget(zsb, lr->lr_doid, &dzp)) != 0)
if ((error = zfs_zget(zfsvfs, lr->lr_doid, &dzp)) != 0)
return (error);
objid = LR_FOID_GET_OBJ(lr->lr_foid);
@ -325,7 +326,7 @@ zfs_replay_create_acl(zfs_sb_t *zsb, lr_acl_create_t *lracl, boolean_t byteswap)
xva.xva_vattr.va_nblocks = lr->lr_gen;
xva.xva_vattr.va_fsid = dnodesize;
error = dmu_object_info(zsb->z_os, lr->lr_foid, NULL);
error = dmu_object_info(zfsvfs->z_os, lr->lr_foid, NULL);
if (error != ENOENT)
goto bail;
@ -336,7 +337,7 @@ zfs_replay_create_acl(zfs_sb_t *zsb, lr_acl_create_t *lracl, boolean_t byteswap)
aclstart = (caddr_t)(lracl + 1);
fuidstart = (caddr_t)aclstart +
ZIL_ACE_LENGTH(lracl->lr_acl_bytes);
zsb->z_fuid_replay = zfs_replay_fuids(fuidstart,
zfsvfs->z_fuid_replay = zfs_replay_fuids(fuidstart,
(void *)&name, lracl->lr_fuidcnt, lracl->lr_domcnt,
lr->lr_uid, lr->lr_gid);
/*FALLTHROUGH*/
@ -352,10 +353,10 @@ zfs_replay_create_acl(zfs_sb_t *zsb, lr_acl_create_t *lracl, boolean_t byteswap)
vsec.vsa_aclcnt = lracl->lr_aclcnt;
vsec.vsa_aclentsz = lracl->lr_acl_bytes;
vsec.vsa_aclflags = lracl->lr_acl_flags;
if (zsb->z_fuid_replay == NULL) {
if (zfsvfs->z_fuid_replay == NULL) {
fuidstart = (caddr_t)(lracl + 1) + xvatlen +
ZIL_ACE_LENGTH(lracl->lr_acl_bytes);
zsb->z_fuid_replay =
zfsvfs->z_fuid_replay =
zfs_replay_fuids(fuidstart,
(void *)&name, lracl->lr_fuidcnt, lracl->lr_domcnt,
lr->lr_uid, lr->lr_gid);
@ -368,7 +369,7 @@ zfs_replay_create_acl(zfs_sb_t *zsb, lr_acl_create_t *lracl, boolean_t byteswap)
aclstart = (caddr_t)(lracl + 1);
fuidstart = (caddr_t)aclstart +
ZIL_ACE_LENGTH(lracl->lr_acl_bytes);
zsb->z_fuid_replay = zfs_replay_fuids(fuidstart,
zfsvfs->z_fuid_replay = zfs_replay_fuids(fuidstart,
(void *)&name, lracl->lr_fuidcnt, lracl->lr_domcnt,
lr->lr_uid, lr->lr_gid);
/*FALLTHROUGH*/
@ -383,10 +384,10 @@ zfs_replay_create_acl(zfs_sb_t *zsb, lr_acl_create_t *lracl, boolean_t byteswap)
vsec.vsa_aclcnt = lracl->lr_aclcnt;
vsec.vsa_aclentsz = lracl->lr_acl_bytes;
vsec.vsa_aclflags = lracl->lr_acl_flags;
if (zsb->z_fuid_replay == NULL) {
if (zfsvfs->z_fuid_replay == NULL) {
fuidstart = (caddr_t)(lracl + 1) + xvatlen +
ZIL_ACE_LENGTH(lracl->lr_acl_bytes);
zsb->z_fuid_replay =
zfsvfs->z_fuid_replay =
zfs_replay_fuids(fuidstart,
(void *)&name, lracl->lr_fuidcnt, lracl->lr_domcnt,
lr->lr_uid, lr->lr_gid);
@ -404,15 +405,15 @@ bail:
iput(ZTOI(dzp));
if (zsb->z_fuid_replay)
zfs_fuid_info_free(zsb->z_fuid_replay);
zsb->z_fuid_replay = NULL;
if (zfsvfs->z_fuid_replay)
zfs_fuid_info_free(zfsvfs->z_fuid_replay);
zfsvfs->z_fuid_replay = NULL;
return (error);
}
static int
zfs_replay_create(zfs_sb_t *zsb, lr_create_t *lr, boolean_t byteswap)
zfs_replay_create(zfsvfs_t *zfsvfs, lr_create_t *lr, boolean_t byteswap)
{
char *name = NULL; /* location determined later */
char *link; /* symlink content follows name */
@ -437,7 +438,7 @@ zfs_replay_create(zfs_sb_t *zsb, lr_create_t *lr, boolean_t byteswap)
}
if ((error = zfs_zget(zsb, lr->lr_doid, &dzp)) != 0)
if ((error = zfs_zget(zfsvfs, lr->lr_doid, &dzp)) != 0)
return (error);
objid = LR_FOID_GET_OBJ(lr->lr_foid);
@ -459,7 +460,7 @@ zfs_replay_create(zfs_sb_t *zsb, lr_create_t *lr, boolean_t byteswap)
xva.xva_vattr.va_nblocks = lr->lr_gen;
xva.xva_vattr.va_fsid = dnodesize;
error = dmu_object_info(zsb->z_os, objid, NULL);
error = dmu_object_info(zfsvfs->z_os, objid, NULL);
if (error != ENOENT)
goto out;
@ -476,7 +477,7 @@ zfs_replay_create(zfs_sb_t *zsb, lr_create_t *lr, boolean_t byteswap)
(int)lr->lr_common.lrc_txtype != TX_MKDIR_ATTR &&
(int)lr->lr_common.lrc_txtype != TX_CREATE_ATTR) {
start = (lr + 1);
zsb->z_fuid_replay =
zfsvfs->z_fuid_replay =
zfs_replay_fuid_domain(start, &start,
lr->lr_uid, lr->lr_gid);
}
@ -487,7 +488,7 @@ zfs_replay_create(zfs_sb_t *zsb, lr_create_t *lr, boolean_t byteswap)
xvatlen = ZIL_XVAT_SIZE(lrattr->lr_attr_masksize);
zfs_replay_xvattr((lr_attr_t *)((caddr_t)lr + lrsize), &xva);
start = (caddr_t)(lr + 1) + xvatlen;
zsb->z_fuid_replay =
zfsvfs->z_fuid_replay =
zfs_replay_fuid_domain(start, &start,
lr->lr_uid, lr->lr_gid);
name = (char *)start;
@ -505,7 +506,7 @@ zfs_replay_create(zfs_sb_t *zsb, lr_create_t *lr, boolean_t byteswap)
xvatlen = ZIL_XVAT_SIZE(lrattr->lr_attr_masksize);
zfs_replay_xvattr((lr_attr_t *)((caddr_t)lr + lrsize), &xva);
start = (caddr_t)(lr + 1) + xvatlen;
zsb->z_fuid_replay =
zfsvfs->z_fuid_replay =
zfs_replay_fuid_domain(start, &start,
lr->lr_uid, lr->lr_gid);
name = (char *)start;
@ -537,14 +538,14 @@ out:
iput(ZTOI(dzp));
if (zsb->z_fuid_replay)
zfs_fuid_info_free(zsb->z_fuid_replay);
zsb->z_fuid_replay = NULL;
if (zfsvfs->z_fuid_replay)
zfs_fuid_info_free(zfsvfs->z_fuid_replay);
zfsvfs->z_fuid_replay = NULL;
return (error);
}
static int
zfs_replay_remove(zfs_sb_t *zsb, lr_remove_t *lr, boolean_t byteswap)
zfs_replay_remove(zfsvfs_t *zfsvfs, lr_remove_t *lr, boolean_t byteswap)
{
char *name = (char *)(lr + 1); /* name follows lr_remove_t */
znode_t *dzp;
@ -554,7 +555,7 @@ zfs_replay_remove(zfs_sb_t *zsb, lr_remove_t *lr, boolean_t byteswap)
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
if ((error = zfs_zget(zsb, lr->lr_doid, &dzp)) != 0)
if ((error = zfs_zget(zfsvfs, lr->lr_doid, &dzp)) != 0)
return (error);
if (lr->lr_common.lrc_txtype & TX_CI)
@ -577,7 +578,7 @@ zfs_replay_remove(zfs_sb_t *zsb, lr_remove_t *lr, boolean_t byteswap)
}
static int
zfs_replay_link(zfs_sb_t *zsb, lr_link_t *lr, boolean_t byteswap)
zfs_replay_link(zfsvfs_t *zfsvfs, lr_link_t *lr, boolean_t byteswap)
{
char *name = (char *)(lr + 1); /* name follows lr_link_t */
znode_t *dzp, *zp;
@ -587,10 +588,10 @@ zfs_replay_link(zfs_sb_t *zsb, lr_link_t *lr, boolean_t byteswap)
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
if ((error = zfs_zget(zsb, lr->lr_doid, &dzp)) != 0)
if ((error = zfs_zget(zfsvfs, lr->lr_doid, &dzp)) != 0)
return (error);
if ((error = zfs_zget(zsb, lr->lr_link_obj, &zp)) != 0) {
if ((error = zfs_zget(zfsvfs, lr->lr_link_obj, &zp)) != 0) {
iput(ZTOI(dzp));
return (error);
}
@ -607,7 +608,7 @@ zfs_replay_link(zfs_sb_t *zsb, lr_link_t *lr, boolean_t byteswap)
}
static int
zfs_replay_rename(zfs_sb_t *zsb, lr_rename_t *lr, boolean_t byteswap)
zfs_replay_rename(zfsvfs_t *zfsvfs, lr_rename_t *lr, boolean_t byteswap)
{
char *sname = (char *)(lr + 1); /* sname and tname follow lr_rename_t */
char *tname = sname + strlen(sname) + 1;
@ -618,10 +619,10 @@ zfs_replay_rename(zfs_sb_t *zsb, lr_rename_t *lr, boolean_t byteswap)
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
if ((error = zfs_zget(zsb, lr->lr_sdoid, &sdzp)) != 0)
if ((error = zfs_zget(zfsvfs, lr->lr_sdoid, &sdzp)) != 0)
return (error);
if ((error = zfs_zget(zsb, lr->lr_tdoid, &tdzp)) != 0) {
if ((error = zfs_zget(zfsvfs, lr->lr_tdoid, &tdzp)) != 0) {
iput(ZTOI(sdzp));
return (error);
}
@ -638,7 +639,7 @@ zfs_replay_rename(zfs_sb_t *zsb, lr_rename_t *lr, boolean_t byteswap)
}
static int
zfs_replay_write(zfs_sb_t *zsb, lr_write_t *lr, boolean_t byteswap)
zfs_replay_write(zfsvfs_t *zfsvfs, lr_write_t *lr, boolean_t byteswap)
{
char *data = (char *)(lr + 1); /* data follows lr_write_t */
znode_t *zp;
@ -648,7 +649,7 @@ zfs_replay_write(zfs_sb_t *zsb, lr_write_t *lr, boolean_t byteswap)
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
if ((error = zfs_zget(zsb, lr->lr_foid, &zp)) != 0) {
if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0) {
/*
* As we can log writes out of order, it's possible the
* file has been removed. In this case just drop the write
@ -671,10 +672,10 @@ zfs_replay_write(zfs_sb_t *zsb, lr_write_t *lr, boolean_t byteswap)
* write needs to be there. So we write the whole block and
* reduce the eof. This needs to be done within the single dmu
* transaction created within vn_rdwr -> zfs_write. So a possible
* new end of file is passed through in zsb->z_replay_eof
* new end of file is passed through in zfsvfs->z_replay_eof
*/
zsb->z_replay_eof = 0; /* 0 means don't change end of file */
zfsvfs->z_replay_eof = 0; /* 0 means don't change end of file */
/* If it's a dmu_sync() block, write the whole block */
if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
@ -684,7 +685,7 @@ zfs_replay_write(zfs_sb_t *zsb, lr_write_t *lr, boolean_t byteswap)
length = blocksize;
}
if (zp->z_size < eod)
zsb->z_replay_eof = eod;
zfsvfs->z_replay_eof = eod;
}
written = zpl_write_common(ZTOI(zp), data, length, &offset,
@ -695,7 +696,7 @@ zfs_replay_write(zfs_sb_t *zsb, lr_write_t *lr, boolean_t byteswap)
error = SET_ERROR(EIO); /* short write */
iput(ZTOI(zp));
zsb->z_replay_eof = 0; /* safety */
zfsvfs->z_replay_eof = 0; /* safety */
return (error);
}
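
For a dmu_sync() record the replay path rewrites the whole block and relies on z_replay_eof to pull the file size back to where the original write left it. The arithmetic can be sketched on its own in userland; the function below is illustrative only and is not the ZFS API.

#include <stdio.h>
#include <stdint.h>

/*
 * Given a logged write and the file's block size, compute the full-block
 * region to rewrite and the end-of-file to restore afterwards (0 means
 * leave the size alone), mirroring the lr_write_t handling above.
 */
static void
replay_extent(uint64_t offset, uint64_t length, uint64_t blocksize,
    uint64_t cur_size, uint64_t *wr_off, uint64_t *wr_len, uint64_t *replay_eof)
{
	uint64_t eod = offset + length;		/* end of data in the record */

	*wr_off = (offset / blocksize) * blocksize;	/* round down */
	*wr_len = blocksize;				/* whole block */
	*replay_eof = (cur_size < eod) ? eod : 0;	/* clamp target */
}

int
main(void)
{
	uint64_t off, len, eof;

	replay_extent(130000, 512, 131072, 100000, &off, &len, &eof);
	printf("write [%llu, +%llu), eof=%llu\n",
	    (unsigned long long)off, (unsigned long long)len,
	    (unsigned long long)eof);
	return (0);
}
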
@ -707,7 +708,7 @@ zfs_replay_write(zfs_sb_t *zsb, lr_write_t *lr, boolean_t byteswap)
* the file is grown.
*/
static int
zfs_replay_write2(zfs_sb_t *zsb, lr_write_t *lr, boolean_t byteswap)
zfs_replay_write2(zfsvfs_t *zfsvfs, lr_write_t *lr, boolean_t byteswap)
{
znode_t *zp;
int error;
@ -716,13 +717,13 @@ zfs_replay_write2(zfs_sb_t *zsb, lr_write_t *lr, boolean_t byteswap)
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
if ((error = zfs_zget(zsb, lr->lr_foid, &zp)) != 0)
if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0)
return (error);
top:
end = lr->lr_offset + lr->lr_length;
if (end > zp->z_size) {
dmu_tx_t *tx = dmu_tx_create(zsb->z_os);
dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
zp->z_size = end;
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
@ -737,11 +738,11 @@ top:
dmu_tx_abort(tx);
return (error);
}
(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zsb),
(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
(void *)&zp->z_size, sizeof (uint64_t), tx);
/* Ensure the replayed seq is updated */
(void) zil_replaying(zsb->z_log, tx);
(void) zil_replaying(zfsvfs->z_log, tx);
dmu_tx_commit(tx);
}
@ -752,7 +753,7 @@ top:
}
static int
zfs_replay_truncate(zfs_sb_t *zsb, lr_truncate_t *lr, boolean_t byteswap)
zfs_replay_truncate(zfsvfs_t *zfsvfs, lr_truncate_t *lr, boolean_t byteswap)
{
znode_t *zp;
flock64_t fl;
@ -761,7 +762,7 @@ zfs_replay_truncate(zfs_sb_t *zsb, lr_truncate_t *lr, boolean_t byteswap)
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
if ((error = zfs_zget(zsb, lr->lr_foid, &zp)) != 0)
if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0)
return (error);
bzero(&fl, sizeof (fl));
@ -779,7 +780,7 @@ zfs_replay_truncate(zfs_sb_t *zsb, lr_truncate_t *lr, boolean_t byteswap)
}
static int
zfs_replay_setattr(zfs_sb_t *zsb, lr_setattr_t *lr, boolean_t byteswap)
zfs_replay_setattr(zfsvfs_t *zfsvfs, lr_setattr_t *lr, boolean_t byteswap)
{
znode_t *zp;
xvattr_t xva;
@ -792,11 +793,11 @@ zfs_replay_setattr(zfs_sb_t *zsb, lr_setattr_t *lr, boolean_t byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
if ((lr->lr_mask & ATTR_XVATTR) &&
zsb->z_version >= ZPL_VERSION_INITIAL)
zfsvfs->z_version >= ZPL_VERSION_INITIAL)
zfs_replay_swap_attrs((lr_attr_t *)(lr + 1));
}
if ((error = zfs_zget(zsb, lr->lr_foid, &zp)) != 0)
if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0)
return (error);
zfs_init_vattr(vap, lr->lr_mask, lr->lr_mode,
@ -820,20 +821,20 @@ zfs_replay_setattr(zfs_sb_t *zsb, lr_setattr_t *lr, boolean_t byteswap)
} else
xva.xva_vattr.va_mask &= ~ATTR_XVATTR;
zsb->z_fuid_replay = zfs_replay_fuid_domain(start, &start,
zfsvfs->z_fuid_replay = zfs_replay_fuid_domain(start, &start,
lr->lr_uid, lr->lr_gid);
error = zfs_setattr(ZTOI(zp), vap, 0, kcred);
zfs_fuid_info_free(zsb->z_fuid_replay);
zsb->z_fuid_replay = NULL;
zfs_fuid_info_free(zfsvfs->z_fuid_replay);
zfsvfs->z_fuid_replay = NULL;
iput(ZTOI(zp));
return (error);
}
static int
zfs_replay_acl_v0(zfs_sb_t *zsb, lr_acl_v0_t *lr, boolean_t byteswap)
zfs_replay_acl_v0(zfsvfs_t *zfsvfs, lr_acl_v0_t *lr, boolean_t byteswap)
{
ace_t *ace = (ace_t *)(lr + 1); /* ace array follows lr_acl_t */
vsecattr_t vsa;
@ -845,7 +846,7 @@ zfs_replay_acl_v0(zfs_sb_t *zsb, lr_acl_v0_t *lr, boolean_t byteswap)
zfs_oldace_byteswap(ace, lr->lr_aclcnt);
}
if ((error = zfs_zget(zsb, lr->lr_foid, &zp)) != 0)
if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0)
return (error);
bzero(&vsa, sizeof (vsa));
@ -877,7 +878,7 @@ zfs_replay_acl_v0(zfs_sb_t *zsb, lr_acl_v0_t *lr, boolean_t byteswap)
*
*/
static int
zfs_replay_acl(zfs_sb_t *zsb, lr_acl_t *lr, boolean_t byteswap)
zfs_replay_acl(zfsvfs_t *zfsvfs, lr_acl_t *lr, boolean_t byteswap)
{
ace_t *ace = (ace_t *)(lr + 1);
vsecattr_t vsa;
@ -894,7 +895,7 @@ zfs_replay_acl(zfs_sb_t *zsb, lr_acl_t *lr, boolean_t byteswap)
}
}
if ((error = zfs_zget(zsb, lr->lr_foid, &zp)) != 0)
if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0)
return (error);
bzero(&vsa, sizeof (vsa));
@ -908,17 +909,17 @@ zfs_replay_acl(zfs_sb_t *zsb, lr_acl_t *lr, boolean_t byteswap)
void *fuidstart = (caddr_t)ace +
ZIL_ACE_LENGTH(lr->lr_acl_bytes);
zsb->z_fuid_replay =
zfsvfs->z_fuid_replay =
zfs_replay_fuids(fuidstart, &fuidstart,
lr->lr_fuidcnt, lr->lr_domcnt, 0, 0);
}
error = zfs_setsecattr(ZTOI(zp), &vsa, 0, kcred);
if (zsb->z_fuid_replay)
zfs_fuid_info_free(zsb->z_fuid_replay);
if (zfsvfs->z_fuid_replay)
zfs_fuid_info_free(zfsvfs->z_fuid_replay);
zsb->z_fuid_replay = NULL;
zfsvfs->z_fuid_replay = NULL;
iput(ZTOI(zp));
return (error);

View File

@ -120,13 +120,13 @@ zfs_sa_symlink(znode_t *zp, char *link, int len, dmu_tx_t *tx)
void
zfs_sa_get_scanstamp(znode_t *zp, xvattr_t *xvap)
{
zfs_sb_t *zsb = ZTOZSB(zp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
xoptattr_t *xoap;
ASSERT(MUTEX_HELD(&zp->z_lock));
VERIFY((xoap = xva_getxoptattr(xvap)) != NULL);
if (zp->z_is_sa) {
if (sa_lookup(zp->z_sa_hdl, SA_ZPL_SCANSTAMP(zsb),
if (sa_lookup(zp->z_sa_hdl, SA_ZPL_SCANSTAMP(zfsvfs),
&xoap->xoa_av_scanstamp,
sizeof (xoap->xoa_av_scanstamp)) != 0)
return;
@ -154,13 +154,13 @@ zfs_sa_get_scanstamp(znode_t *zp, xvattr_t *xvap)
void
zfs_sa_set_scanstamp(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx)
{
zfs_sb_t *zsb = ZTOZSB(zp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
xoptattr_t *xoap;
ASSERT(MUTEX_HELD(&zp->z_lock));
VERIFY((xoap = xva_getxoptattr(xvap)) != NULL);
if (zp->z_is_sa)
VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SCANSTAMP(zsb),
VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SCANSTAMP(zfsvfs),
&xoap->xoa_av_scanstamp,
sizeof (xoap->xoa_av_scanstamp), tx));
else {
@ -177,7 +177,7 @@ zfs_sa_set_scanstamp(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx)
xoap->xoa_av_scanstamp, sizeof (xoap->xoa_av_scanstamp));
zp->z_pflags |= ZFS_BONUS_SCANSTAMP;
VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_FLAGS(zsb),
VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
&zp->z_pflags, sizeof (uint64_t), tx));
}
}
@ -185,7 +185,7 @@ zfs_sa_set_scanstamp(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx)
int
zfs_sa_get_xattr(znode_t *zp)
{
zfs_sb_t *zsb = ZTOZSB(zp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
char *obj;
int size;
int error;
@ -194,7 +194,7 @@ zfs_sa_get_xattr(znode_t *zp)
ASSERT(!zp->z_xattr_cached);
ASSERT(zp->z_is_sa);
error = sa_size(zp->z_sa_hdl, SA_ZPL_DXATTR(zsb), &size);
error = sa_size(zp->z_sa_hdl, SA_ZPL_DXATTR(zfsvfs), &size);
if (error) {
if (error == ENOENT)
return nvlist_alloc(&zp->z_xattr_cached,
@ -205,7 +205,7 @@ zfs_sa_get_xattr(znode_t *zp)
obj = vmem_alloc(size, KM_SLEEP);
error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DXATTR(zsb), obj, size);
error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DXATTR(zfsvfs), obj, size);
if (error == 0)
error = nvlist_unpack(obj, size, &zp->z_xattr_cached, KM_SLEEP);
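
zfs_sa_get_xattr() sizes the DXATTR attribute, copies the packed bytes out, and nvlist_unpack()s them into the cached xattr list, and zfs_sa_set_xattr() pushes the packed form back through sa_update(). The same nvlist calls exist in userland libnvpair, so the pack/unpack round trip can be tried outside the kernel; the key name below is made up, and the kernel's KM_SLEEP argument becomes 0 in userland.

/* build: cc sketch.c -lnvpair */
#include <stdio.h>
#include <stdlib.h>
#include <libnvpair.h>

int
main(void)
{
	nvlist_t *xattrs, *copy;
	char *packed = NULL, *val;
	size_t size = 0;

	/* Build an xattr-like list and pack it into a flat buffer. */
	if (nvlist_alloc(&xattrs, NV_UNIQUE_NAME, 0) != 0)
		return (1);
	(void) nvlist_add_string(xattrs, "user.example", "hello");
	if (nvlist_pack(xattrs, &packed, &size, NV_ENCODE_XDR, 0) != 0)
		return (1);

	/* Unpack from the flat buffer, as the getter path does. */
	if (nvlist_unpack(packed, size, &copy, 0) != 0)
		return (1);
	if (nvlist_lookup_string(copy, "user.example", &val) == 0)
		printf("unpacked %zu bytes, user.example=%s\n", size, val);

	free(packed);
	nvlist_free(xattrs);
	nvlist_free(copy);
	return (0);
}
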
@ -217,7 +217,7 @@ zfs_sa_get_xattr(znode_t *zp)
int
zfs_sa_set_xattr(znode_t *zp)
{
zfs_sb_t *zsb = ZTOZSB(zp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
dmu_tx_t *tx;
char *obj;
size_t size;
@ -240,7 +240,7 @@ zfs_sa_set_xattr(znode_t *zp)
if (error)
goto out_free;
tx = dmu_tx_create(zsb->z_os);
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa_create(tx, size);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
@ -248,7 +248,7 @@ zfs_sa_set_xattr(znode_t *zp)
if (error) {
dmu_tx_abort(tx);
} else {
VERIFY0(sa_update(zp->z_sa_hdl, SA_ZPL_DXATTR(zsb),
VERIFY0(sa_update(zp->z_sa_hdl, SA_ZPL_DXATTR(zfsvfs),
obj, size, tx));
dmu_tx_commit(tx);
}
@ -271,7 +271,7 @@ zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx)
{
dmu_buf_t *db = sa_get_db(hdl);
znode_t *zp = sa_get_userdata(hdl);
zfs_sb_t *zsb = ZTOZSB(zp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
int count = 0;
sa_bulk_attr_t *bulk, *sa_attrs;
zfs_acl_locator_cb_t locate = { 0 };
@ -309,18 +309,18 @@ zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx)
/* First do a bulk query of the attributes that aren't cached */
bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * 20, KM_SLEEP);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zsb), NULL, &atime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, &ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zsb), NULL, &crtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL, &mode, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zsb), NULL, &parent, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_XATTR(zsb), NULL, &xattr, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_RDEV(zsb), NULL, &rdev, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL, &uid, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb), NULL, &gid, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zsb), NULL, &tmp_gen, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zsb), NULL,
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL, &crtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL, &parent, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_XATTR(zfsvfs), NULL, &xattr, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_RDEV(zfsvfs), NULL, &rdev, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL, &uid, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL, &gid, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL, &tmp_gen, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
&znode_acl, 88);
if (sa_bulk_lookup_locked(hdl, bulk, count) != 0) {
@ -334,42 +334,43 @@ zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx)
*/
count = 0;
sa_attrs = kmem_zalloc(sizeof (sa_bulk_attr_t) * 20, KM_SLEEP);
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_MODE(zsb), NULL, &mode, 8);
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_SIZE(zsb), NULL,
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_SIZE(zfsvfs), NULL,
&zp->z_size, 8);
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_GEN(zsb), NULL, &tmp_gen, 8);
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_UID(zsb), NULL, &uid, 8);
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_GID(zsb), NULL, &gid, 8);
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_PARENT(zsb),
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_GEN(zfsvfs),
NULL, &tmp_gen, 8);
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_UID(zfsvfs), NULL, &uid, 8);
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_GID(zfsvfs), NULL, &gid, 8);
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_PARENT(zfsvfs),
NULL, &parent, 8);
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_FLAGS(zsb), NULL,
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, 8);
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_ATIME(zsb), NULL,
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_ATIME(zfsvfs), NULL,
&atime, 16);
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_MTIME(zsb), NULL,
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_MTIME(zfsvfs), NULL,
&mtime, 16);
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_CTIME(zsb), NULL,
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_CTIME(zfsvfs), NULL,
&ctime, 16);
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_CRTIME(zsb), NULL,
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_CRTIME(zfsvfs), NULL,
&crtime, 16);
links = ZTOI(zp)->i_nlink;
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_LINKS(zsb), NULL,
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_LINKS(zfsvfs), NULL,
&links, 8);
if (S_ISBLK(ZTOI(zp)->i_mode) || S_ISCHR(ZTOI(zp)->i_mode))
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_RDEV(zsb), NULL,
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_RDEV(zfsvfs), NULL,
&rdev, 8);
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_DACL_COUNT(zsb), NULL,
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_DACL_COUNT(zfsvfs), NULL,
&zp->z_acl_cached->z_acl_count, 8);
if (zp->z_acl_cached->z_version < ZFS_ACL_VERSION_FUID)
zfs_acl_xform(zp, zp->z_acl_cached, CRED());
locate.cb_aclp = zp->z_acl_cached;
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_DACL_ACES(zsb),
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_DACL_ACES(zfsvfs),
zfs_acl_data_locator, &locate, zp->z_acl_cached->z_acl_bytes);
if (xattr)
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_XATTR(zsb),
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_XATTR(zfsvfs),
NULL, &xattr, 8);
/* if scanstamp then add scanstamp */
@ -377,7 +378,7 @@ zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx)
if (zp->z_pflags & ZFS_BONUS_SCANSTAMP) {
bcopy((caddr_t)db->db_data + ZFS_OLD_ZNODE_PHYS_SIZE,
scanstamp, AV_SCANSTAMP_SZ);
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_SCANSTAMP(zsb),
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_SCANSTAMP(zfsvfs),
NULL, scanstamp, AV_SCANSTAMP_SZ);
zp->z_pflags &= ~ZFS_BONUS_SCANSTAMP;
}
@ -386,7 +387,7 @@ zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx)
VERIFY(sa_replace_all_by_template_locked(hdl, sa_attrs,
count, tx) == 0);
if (znode_acl.z_acl_extern_obj)
VERIFY(0 == dmu_object_free(zsb->z_os,
VERIFY(0 == dmu_object_free(zfsvfs->z_os,
znode_acl.z_acl_extern_obj, tx));
zp->z_is_sa = B_TRUE;

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -236,44 +236,44 @@ zfs_znode_hold_compare(const void *a, const void *b)
}
boolean_t
zfs_znode_held(zfs_sb_t *zsb, uint64_t obj)
zfs_znode_held(zfsvfs_t *zfsvfs, uint64_t obj)
{
znode_hold_t *zh, search;
int i = ZFS_OBJ_HASH(zsb, obj);
int i = ZFS_OBJ_HASH(zfsvfs, obj);
boolean_t held;
search.zh_obj = obj;
mutex_enter(&zsb->z_hold_locks[i]);
zh = avl_find(&zsb->z_hold_trees[i], &search, NULL);
mutex_enter(&zfsvfs->z_hold_locks[i]);
zh = avl_find(&zfsvfs->z_hold_trees[i], &search, NULL);
held = (zh && MUTEX_HELD(&zh->zh_lock)) ? B_TRUE : B_FALSE;
mutex_exit(&zsb->z_hold_locks[i]);
mutex_exit(&zfsvfs->z_hold_locks[i]);
return (held);
}
static znode_hold_t *
zfs_znode_hold_enter(zfs_sb_t *zsb, uint64_t obj)
zfs_znode_hold_enter(zfsvfs_t *zfsvfs, uint64_t obj)
{
znode_hold_t *zh, *zh_new, search;
int i = ZFS_OBJ_HASH(zsb, obj);
int i = ZFS_OBJ_HASH(zfsvfs, obj);
boolean_t found = B_FALSE;
zh_new = kmem_cache_alloc(znode_hold_cache, KM_SLEEP);
zh_new->zh_obj = obj;
search.zh_obj = obj;
mutex_enter(&zsb->z_hold_locks[i]);
zh = avl_find(&zsb->z_hold_trees[i], &search, NULL);
mutex_enter(&zfsvfs->z_hold_locks[i]);
zh = avl_find(&zfsvfs->z_hold_trees[i], &search, NULL);
if (likely(zh == NULL)) {
zh = zh_new;
avl_add(&zsb->z_hold_trees[i], zh);
avl_add(&zfsvfs->z_hold_trees[i], zh);
} else {
ASSERT3U(zh->zh_obj, ==, obj);
found = B_TRUE;
}
refcount_add(&zh->zh_refcount, NULL);
mutex_exit(&zsb->z_hold_locks[i]);
mutex_exit(&zfsvfs->z_hold_locks[i]);
if (found == B_TRUE)
kmem_cache_free(znode_hold_cache, zh_new);
@ -286,28 +286,28 @@ zfs_znode_hold_enter(zfs_sb_t *zsb, uint64_t obj)
}
static void
zfs_znode_hold_exit(zfs_sb_t *zsb, znode_hold_t *zh)
zfs_znode_hold_exit(zfsvfs_t *zfsvfs, znode_hold_t *zh)
{
int i = ZFS_OBJ_HASH(zsb, zh->zh_obj);
int i = ZFS_OBJ_HASH(zfsvfs, zh->zh_obj);
boolean_t remove = B_FALSE;
ASSERT(zfs_znode_held(zsb, zh->zh_obj));
ASSERT(zfs_znode_held(zfsvfs, zh->zh_obj));
ASSERT3S(refcount_count(&zh->zh_refcount), >, 0);
mutex_exit(&zh->zh_lock);
mutex_enter(&zsb->z_hold_locks[i]);
mutex_enter(&zfsvfs->z_hold_locks[i]);
if (refcount_remove(&zh->zh_refcount, NULL) == 0) {
avl_remove(&zsb->z_hold_trees[i], zh);
avl_remove(&zfsvfs->z_hold_trees[i], zh);
remove = B_TRUE;
}
mutex_exit(&zsb->z_hold_locks[i]);
mutex_exit(&zfsvfs->z_hold_locks[i]);
if (remove == B_TRUE)
kmem_cache_free(znode_hold_cache, zh);
}
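
zfs_znode_hold_enter() allocates its hold record before taking the hash-bucket lock, inserts it if the object has no record yet, and otherwise bumps the existing record's refcount and throws the speculative allocation away; zfs_znode_hold_exit() drops the refcount and frees the record once it hits zero. A userland sketch of that speculative-allocation pattern with one mutex-protected list (no hashing, no per-hold mutex, all names invented):

#include <stdlib.h>
#include <stdint.h>
#include <pthread.h>

struct obj_hold {
	uint64_t	 oh_obj;
	int		 oh_refs;
	struct obj_hold	*oh_next;
};

static pthread_mutex_t holds_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj_hold *holds;

struct obj_hold *
hold_enter(uint64_t obj)
{
	struct obj_hold *oh, *oh_new;

	/* Allocate before taking the lock so we never allocate under it. */
	oh_new = calloc(1, sizeof (*oh_new));
	if (oh_new == NULL)
		return (NULL);
	oh_new->oh_obj = obj;

	(void) pthread_mutex_lock(&holds_lock);
	for (oh = holds; oh != NULL; oh = oh->oh_next)
		if (oh->oh_obj == obj)
			break;
	if (oh == NULL) {		/* first hold: insert our record */
		oh = oh_new;
		oh->oh_next = holds;
		holds = oh;
		oh_new = NULL;
	}
	oh->oh_refs++;
	(void) pthread_mutex_unlock(&holds_lock);

	free(oh_new);			/* lost the race; discard the spare */
	return (oh);
}

void
hold_exit(struct obj_hold *oh)
{
	struct obj_hold **ohp;

	(void) pthread_mutex_lock(&holds_lock);
	if (--oh->oh_refs == 0) {	/* last holder unlinks and frees */
		for (ohp = &holds; *ohp != oh; ohp = &(*ohp)->oh_next)
			;
		*ohp = oh->oh_next;
	} else {
		oh = NULL;
	}
	(void) pthread_mutex_unlock(&holds_lock);
	free(oh);
}
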
int
zfs_create_share_dir(zfs_sb_t *zsb, dmu_tx_t *tx)
zfs_create_share_dir(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
{
#ifdef HAVE_SMB_SHARE
zfs_acl_ids_t acl_ids;
@ -355,17 +355,17 @@ zfs_create_share_dir(zfs_sb_t *zsb, dmu_tx_t *tx)
}
static void
zfs_znode_sa_init(zfs_sb_t *zsb, znode_t *zp,
zfs_znode_sa_init(zfsvfs_t *zfsvfs, znode_t *zp,
dmu_buf_t *db, dmu_object_type_t obj_type, sa_handle_t *sa_hdl)
{
ASSERT(zfs_znode_held(zsb, zp->z_id));
ASSERT(zfs_znode_held(zfsvfs, zp->z_id));
mutex_enter(&zp->z_lock);
ASSERT(zp->z_sa_hdl == NULL);
ASSERT(zp->z_acl_cached == NULL);
if (sa_hdl == NULL) {
VERIFY(0 == sa_handle_get_from_db(zsb->z_os, db, zp,
VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, zp,
SA_HDL_SHARED, &zp->z_sa_hdl));
} else {
zp->z_sa_hdl = sa_hdl;
@ -408,14 +408,14 @@ void
zfs_inode_destroy(struct inode *ip)
{
znode_t *zp = ITOZ(ip);
zfs_sb_t *zsb = ZTOZSB(zp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
mutex_enter(&zsb->z_znodes_lock);
mutex_enter(&zfsvfs->z_znodes_lock);
if (list_link_active(&zp->z_link_node)) {
list_remove(&zsb->z_all_znodes, zp);
zsb->z_nr_znodes--;
list_remove(&zfsvfs->z_all_znodes, zp);
zfsvfs->z_nr_znodes--;
}
mutex_exit(&zsb->z_znodes_lock);
mutex_exit(&zfsvfs->z_znodes_lock);
if (zp->z_acl_cached) {
zfs_acl_free(zp->z_acl_cached);
@ -431,7 +431,7 @@ zfs_inode_destroy(struct inode *ip)
}
static void
zfs_inode_set_ops(zfs_sb_t *zsb, struct inode *ip)
zfs_inode_set_ops(zfsvfs_t *zfsvfs, struct inode *ip)
{
uint64_t rdev = 0;
@ -457,7 +457,7 @@ zfs_inode_set_ops(zfs_sb_t *zsb, struct inode *ip)
*/
case S_IFCHR:
case S_IFBLK:
(void) sa_lookup(ITOZ(ip)->z_sa_hdl, SA_ZPL_RDEV(zsb), &rdev,
(void) sa_lookup(ITOZ(ip)->z_sa_hdl, SA_ZPL_RDEV(zfsvfs), &rdev,
sizeof (rdev));
/*FALLTHROUGH*/
case S_IFIFO:
@ -517,13 +517,13 @@ zfs_set_inode_flags(znode_t *zp, struct inode *ip)
void
zfs_inode_update(znode_t *zp)
{
zfs_sb_t *zsb;
zfsvfs_t *zfsvfs;
struct inode *ip;
uint32_t blksize;
u_longlong_t i_blocks;
ASSERT(zp != NULL);
zsb = ZTOZSB(zp);
zfsvfs = ZTOZSB(zp);
ip = ZTOI(zp);
/* Skip .zfs control nodes which do not exist on disk. */
@ -547,7 +547,7 @@ zfs_inode_update(znode_t *zp)
* return the znode
*/
static znode_t *
zfs_znode_alloc(zfs_sb_t *zsb, dmu_buf_t *db, int blksz,
zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
dmu_object_type_t obj_type, uint64_t obj, sa_handle_t *hdl)
{
znode_t *zp;
@ -561,9 +561,9 @@ zfs_znode_alloc(zfs_sb_t *zsb, dmu_buf_t *db, int blksz,
sa_bulk_attr_t bulk[11];
int count = 0;
ASSERT(zsb != NULL);
ASSERT(zfsvfs != NULL);
ip = new_inode(zsb->z_sb);
ip = new_inode(zfsvfs->z_sb);
if (ip == NULL)
return (NULL);
@ -587,21 +587,22 @@ zfs_znode_alloc(zfs_sb_t *zsb, dmu_buf_t *db, int blksz,
zp->z_range_lock.zr_blksz = &zp->z_blksz;
zp->z_range_lock.zr_max_blksz = &ZTOZSB(zp)->z_max_blksz;
zfs_znode_sa_init(zsb, zp, db, obj_type, hdl);
zfs_znode_sa_init(zfsvfs, zp, db, obj_type, hdl);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL, &mode, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zsb), NULL, &tmp_gen, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL, &zp->z_size, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb), NULL, &links, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL, &tmp_gen, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
&zp->z_size, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL, &links, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zsb), NULL,
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
&parent, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL, &z_uid, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb), NULL, &z_gid, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zsb), NULL, &atime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, &ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL, &z_uid, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL, &z_gid, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count) != 0 || tmp_gen == 0) {
if (hdl == NULL)
@ -628,7 +629,7 @@ zfs_znode_alloc(zfs_sb_t *zsb, dmu_buf_t *db, int blksz,
ip->i_ino = obj;
zfs_inode_update(zp);
zfs_inode_set_ops(zsb, ip);
zfs_inode_set_ops(zfsvfs, ip);
/*
* The only way insert_inode_locked() can fail is if the ip->i_ino
@ -640,11 +641,11 @@ zfs_znode_alloc(zfs_sb_t *zsb, dmu_buf_t *db, int blksz,
*/
VERIFY3S(insert_inode_locked(ip), ==, 0);
mutex_enter(&zsb->z_znodes_lock);
list_insert_tail(&zsb->z_all_znodes, zp);
zsb->z_nr_znodes++;
mutex_enter(&zfsvfs->z_znodes_lock);
list_insert_tail(&zfsvfs->z_all_znodes, zp);
zfsvfs->z_nr_znodes++;
membar_producer();
mutex_exit(&zsb->z_znodes_lock);
mutex_exit(&zfsvfs->z_znodes_lock);
unlock_new_inode(ip);
return (zp);
@ -661,9 +662,9 @@ error:
void
zfs_mark_inode_dirty(struct inode *ip)
{
zfs_sb_t *zsb = ITOZSB(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
if (zfs_is_readonly(zsb) || dmu_objset_is_snapshot(zsb->z_os))
if (zfs_is_readonly(zfsvfs) || dmu_objset_is_snapshot(zfsvfs->z_os))
return;
mark_inode_dirty(ip);
@ -697,7 +698,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
uint64_t mode, size, links, parent, pflags;
uint64_t dzp_pflags = 0;
uint64_t rdev = 0;
zfs_sb_t *zsb = ZTOZSB(dzp);
zfsvfs_t *zfsvfs = ZTOZSB(dzp);
dmu_buf_t *db;
timestruc_t now;
uint64_t gen, obj;
@ -710,7 +711,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
zfs_acl_locator_cb_t locate = { 0 };
znode_hold_t *zh;
if (zsb->z_replay) {
if (zfsvfs->z_replay) {
obj = vap->va_nodeid;
now = vap->va_ctime; /* see zfs_replay_create() */
gen = vap->va_nblocks; /* ditto */
@ -719,13 +720,13 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
obj = 0;
gethrestime(&now);
gen = dmu_tx_get_txg(tx);
dnodesize = dmu_objset_dnodesize(zsb->z_os);
dnodesize = dmu_objset_dnodesize(zfsvfs->z_os);
}
if (dnodesize == 0)
dnodesize = DNODE_MIN_SIZE;
obj_type = zsb->z_use_sa ? DMU_OT_SA : DMU_OT_ZNODE;
obj_type = zfsvfs->z_use_sa ? DMU_OT_SA : DMU_OT_ZNODE;
bonuslen = (obj_type == DMU_OT_SA) ?
DN_BONUS_SIZE(dnodesize) : ZFS_OLD_ZNODE_PHYS_SIZE;
@ -740,29 +741,29 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
* assertions below.
*/
if (S_ISDIR(vap->va_mode)) {
if (zsb->z_replay) {
VERIFY0(zap_create_claim_norm_dnsize(zsb->z_os, obj,
zsb->z_norm, DMU_OT_DIRECTORY_CONTENTS,
if (zfsvfs->z_replay) {
VERIFY0(zap_create_claim_norm_dnsize(zfsvfs->z_os, obj,
zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS,
obj_type, bonuslen, dnodesize, tx));
} else {
obj = zap_create_norm_dnsize(zsb->z_os,
zsb->z_norm, DMU_OT_DIRECTORY_CONTENTS,
obj = zap_create_norm_dnsize(zfsvfs->z_os,
zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS,
obj_type, bonuslen, dnodesize, tx);
}
} else {
if (zsb->z_replay) {
VERIFY0(dmu_object_claim_dnsize(zsb->z_os, obj,
if (zfsvfs->z_replay) {
VERIFY0(dmu_object_claim_dnsize(zfsvfs->z_os, obj,
DMU_OT_PLAIN_FILE_CONTENTS, 0,
obj_type, bonuslen, dnodesize, tx));
} else {
obj = dmu_object_alloc_dnsize(zsb->z_os,
obj = dmu_object_alloc_dnsize(zfsvfs->z_os,
DMU_OT_PLAIN_FILE_CONTENTS, 0,
obj_type, bonuslen, dnodesize, tx);
}
}
zh = zfs_znode_hold_enter(zsb, obj);
VERIFY(0 == sa_buf_hold(zsb->z_os, obj, NULL, &db));
zh = zfs_znode_hold_enter(zfsvfs, obj);
VERIFY(0 == sa_buf_hold(zfsvfs->z_os, obj, NULL, &db));
/*
* If this is the root, fix up the half-initialized parent pointer
@ -781,7 +782,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
flag |= IS_XATTR;
}
if (zsb->z_use_fuids)
if (zfsvfs->z_use_fuids)
pflags = ZFS_ARCHIVE | ZFS_AV_MODIFIED;
else
pflags = 0;
@ -825,7 +826,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
}
/* Now add in all of the "SA" attributes */
VERIFY(0 == sa_handle_get_from_db(zsb->z_os, db, NULL, SA_HDL_SHARED,
VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, NULL, SA_HDL_SHARED,
&sa_hdl));
/*
@ -837,74 +838,74 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
sa_attrs = kmem_alloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP);
if (obj_type == DMU_OT_ZNODE) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zsb),
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs),
NULL, &atime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zsb),
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs),
NULL, &mtime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zsb),
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs),
NULL, &ctime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zsb),
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs),
NULL, &crtime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zsb),
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs),
NULL, &gen, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zsb),
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs),
NULL, &mode, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zsb),
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs),
NULL, &size, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zsb),
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs),
NULL, &parent, 8);
} else {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zsb),
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs),
NULL, &mode, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zsb),
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs),
NULL, &size, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zsb),
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs),
NULL, &gen, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zsb),
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs),
NULL, &acl_ids->z_fuid, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zsb),
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs),
NULL, &acl_ids->z_fgid, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zsb),
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs),
NULL, &parent, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zsb),
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs),
NULL, &pflags, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zsb),
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs),
NULL, &atime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zsb),
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs),
NULL, &mtime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zsb),
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs),
NULL, &ctime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zsb),
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs),
NULL, &crtime, 16);
}
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_LINKS(zsb), NULL, &links, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_LINKS(zfsvfs), NULL, &links, 8);
if (obj_type == DMU_OT_ZNODE) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_XATTR(zsb), NULL,
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_XATTR(zfsvfs), NULL,
&empty_xattr, 8);
}
if (obj_type == DMU_OT_ZNODE ||
(S_ISBLK(vap->va_mode) || S_ISCHR(vap->va_mode))) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_RDEV(zsb),
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_RDEV(zfsvfs),
NULL, &rdev, 8);
}
if (obj_type == DMU_OT_ZNODE) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zsb),
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs),
NULL, &pflags, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zsb), NULL,
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs), NULL,
&acl_ids->z_fuid, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zsb), NULL,
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs), NULL,
&acl_ids->z_fgid, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PAD(zsb), NULL, pad,
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PAD(zfsvfs), NULL, pad,
sizeof (uint64_t) * 4);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ZNODE_ACL(zsb), NULL,
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
&acl_phys, sizeof (zfs_acl_phys_t));
} else if (acl_ids->z_aclp->z_version >= ZFS_ACL_VERSION_FUID) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_COUNT(zsb), NULL,
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_COUNT(zfsvfs), NULL,
&acl_ids->z_aclp->z_acl_count, 8);
locate.cb_aclp = acl_ids->z_aclp;
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_ACES(zsb),
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_ACES(zfsvfs),
zfs_acl_data_locator, &locate,
acl_ids->z_aclp->z_acl_bytes);
mode = zfs_mode_compute(mode, acl_ids->z_aclp, &pflags,
@ -914,7 +915,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
VERIFY(sa_replace_all_by_template(sa_hdl, sa_attrs, cnt, tx) == 0);
if (!(flag & IS_ROOT_NODE)) {
*zpp = zfs_znode_alloc(zsb, db, 0, obj_type, obj, sa_hdl);
*zpp = zfs_znode_alloc(zfsvfs, db, 0, obj_type, obj, sa_hdl);
VERIFY(*zpp != NULL);
VERIFY(dzp != NULL);
} else {
@ -936,7 +937,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
VERIFY0(zfs_aclset_common(*zpp, acl_ids->z_aclp, cr, tx));
}
kmem_free(sa_attrs, sizeof (sa_bulk_attr_t) * ZPL_END);
zfs_znode_hold_exit(zsb, zh);
zfs_znode_hold_exit(zfsvfs, zh);
}
/*
@ -1043,7 +1044,7 @@ zfs_xvattr_set(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx)
}
int
zfs_zget(zfs_sb_t *zsb, uint64_t obj_num, znode_t **zpp)
zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp)
{
dmu_object_info_t doi;
dmu_buf_t *db;
@ -1055,11 +1056,11 @@ zfs_zget(zfs_sb_t *zsb, uint64_t obj_num, znode_t **zpp)
*zpp = NULL;
again:
zh = zfs_znode_hold_enter(zsb, obj_num);
zh = zfs_znode_hold_enter(zfsvfs, obj_num);
err = sa_buf_hold(zsb->z_os, obj_num, NULL, &db);
err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
if (err) {
zfs_znode_hold_exit(zsb, zh);
zfs_znode_hold_exit(zfsvfs, zh);
return (err);
}
@ -1069,7 +1070,7 @@ again:
(doi.doi_bonus_type == DMU_OT_ZNODE &&
doi.doi_bonus_size < sizeof (znode_phys_t)))) {
sa_buf_rele(db, NULL);
zfs_znode_hold_exit(zsb, zh);
zfs_znode_hold_exit(zfsvfs, zh);
return (SET_ERROR(EINVAL));
}
@ -1105,7 +1106,7 @@ again:
if (igrab(ZTOI(zp)) == NULL) {
mutex_exit(&zp->z_lock);
sa_buf_rele(db, NULL);
zfs_znode_hold_exit(zsb, zh);
zfs_znode_hold_exit(zfsvfs, zh);
/* inode might need this to finish evict */
cond_resched();
goto again;
@ -1114,7 +1115,7 @@ again:
err = 0;
mutex_exit(&zp->z_lock);
sa_buf_rele(db, NULL);
zfs_znode_hold_exit(zsb, zh);
zfs_znode_hold_exit(zfsvfs, zh);
return (err);
}
@ -1128,21 +1129,21 @@ again:
* if zfs_znode_alloc() fails it will drop the hold on the
* bonus buffer.
*/
zp = zfs_znode_alloc(zsb, db, doi.doi_data_block_size,
zp = zfs_znode_alloc(zfsvfs, db, doi.doi_data_block_size,
doi.doi_bonus_type, obj_num, NULL);
if (zp == NULL) {
err = SET_ERROR(ENOENT);
} else {
*zpp = zp;
}
zfs_znode_hold_exit(zsb, zh);
zfs_znode_hold_exit(zfsvfs, zh);
return (err);
}
int
zfs_rezget(znode_t *zp)
{
zfs_sb_t *zsb = ZTOZSB(zp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
dmu_object_info_t doi;
dmu_buf_t *db;
uint64_t obj_num = zp->z_id;
@ -1166,7 +1167,7 @@ zfs_rezget(znode_t *zp)
if (zp->z_is_ctldir)
return (0);
zh = zfs_znode_hold_enter(zsb, obj_num);
zh = zfs_znode_hold_enter(zfsvfs, obj_num);
mutex_enter(&zp->z_acl_lock);
if (zp->z_acl_cached) {
@ -1183,9 +1184,9 @@ zfs_rezget(znode_t *zp)
rw_exit(&zp->z_xattr_lock);
ASSERT(zp->z_sa_hdl == NULL);
err = sa_buf_hold(zsb->z_os, obj_num, NULL, &db);
err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
if (err) {
zfs_znode_hold_exit(zsb, zh);
zfs_znode_hold_exit(zfsvfs, zh);
return (err);
}
@ -1195,37 +1196,37 @@ zfs_rezget(znode_t *zp)
(doi.doi_bonus_type == DMU_OT_ZNODE &&
doi.doi_bonus_size < sizeof (znode_phys_t)))) {
sa_buf_rele(db, NULL);
zfs_znode_hold_exit(zsb, zh);
zfs_znode_hold_exit(zfsvfs, zh);
return (SET_ERROR(EINVAL));
}
zfs_znode_sa_init(zsb, zp, db, doi.doi_bonus_type, NULL);
zfs_znode_sa_init(zfsvfs, zp, db, doi.doi_bonus_type, NULL);
/* reload cached values */
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zsb), NULL,
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL,
&gen, sizeof (gen));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL,
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
&zp->z_size, sizeof (zp->z_size));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb), NULL,
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
&links, sizeof (links));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, sizeof (zp->z_pflags));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL,
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
&z_uid, sizeof (z_uid));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb), NULL,
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
&z_gid, sizeof (z_gid));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL,
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
&mode, sizeof (mode));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zsb), NULL,
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
&atime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL,
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
&mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL,
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
&ctime, 16);
if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) {
zfs_znode_dmu_fini(zp);
zfs_znode_hold_exit(zsb, zh);
zfs_znode_hold_exit(zfsvfs, zh);
return (SET_ERROR(EIO));
}
@ -1239,7 +1240,7 @@ zfs_rezget(znode_t *zp)
if (gen != ZTOI(zp)->i_generation) {
zfs_znode_dmu_fini(zp);
zfs_znode_hold_exit(zsb, zh);
zfs_znode_hold_exit(zfsvfs, zh);
return (SET_ERROR(EIO));
}
@ -1251,7 +1252,7 @@ zfs_rezget(znode_t *zp)
zp->z_atime_dirty = 0;
zfs_inode_update(zp);
zfs_znode_hold_exit(zsb, zh);
zfs_znode_hold_exit(zfsvfs, zh);
return (0);
}
@ -1259,26 +1260,26 @@ zfs_rezget(znode_t *zp)
void
zfs_znode_delete(znode_t *zp, dmu_tx_t *tx)
{
zfs_sb_t *zsb = ZTOZSB(zp);
objset_t *os = zsb->z_os;
zfsvfs_t *zfsvfs = ZTOZSB(zp);
objset_t *os = zfsvfs->z_os;
uint64_t obj = zp->z_id;
uint64_t acl_obj = zfs_external_acl(zp);
znode_hold_t *zh;
zh = zfs_znode_hold_enter(zsb, obj);
zh = zfs_znode_hold_enter(zfsvfs, obj);
if (acl_obj) {
VERIFY(!zp->z_is_sa);
VERIFY(0 == dmu_object_free(os, acl_obj, tx));
}
VERIFY(0 == dmu_object_free(os, obj, tx));
zfs_znode_dmu_fini(zp);
zfs_znode_hold_exit(zsb, zh);
zfs_znode_hold_exit(zfsvfs, zh);
}
void
zfs_zinactive(znode_t *zp)
{
zfs_sb_t *zsb = ZTOZSB(zp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
uint64_t z_id = zp->z_id;
znode_hold_t *zh;
@ -1287,7 +1288,7 @@ zfs_zinactive(znode_t *zp)
/*
 * Don't allow a zfs_zget() while we're trying to release this znode.
*/
zh = zfs_znode_hold_enter(zsb, z_id);
zh = zfs_znode_hold_enter(zfsvfs, z_id);
mutex_enter(&zp->z_lock);
@ -1297,7 +1298,7 @@ zfs_zinactive(znode_t *zp)
*/
if (zp->z_unlinked) {
mutex_exit(&zp->z_lock);
zfs_znode_hold_exit(zsb, zh);
zfs_znode_hold_exit(zfsvfs, zh);
zfs_rmnode(zp);
return;
}
@ -1305,7 +1306,7 @@ zfs_zinactive(znode_t *zp)
mutex_exit(&zp->z_lock);
zfs_znode_dmu_fini(zp);
zfs_znode_hold_exit(zsb, zh);
zfs_znode_hold_exit(zfsvfs, zh);
}
static inline int
@ -1407,7 +1408,7 @@ zfs_grow_blocksize(znode_t *zp, uint64_t size, dmu_tx_t *tx)
static int
zfs_extend(znode_t *zp, uint64_t end)
{
zfs_sb_t *zsb = ZTOZSB(zp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
dmu_tx_t *tx;
rl_t *rl;
uint64_t newblksz;
@ -1425,11 +1426,11 @@ zfs_extend(znode_t *zp, uint64_t end)
zfs_range_unlock(rl);
return (0);
}
tx = dmu_tx_create(zsb->z_os);
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
if (end > zp->z_blksz &&
(!ISP2(zp->z_blksz) || zp->z_blksz < zsb->z_max_blksz)) {
(!ISP2(zp->z_blksz) || zp->z_blksz < zfsvfs->z_max_blksz)) {
/*
* We are growing the file past the current block size.
*/
@ -1526,7 +1527,7 @@ zfs_zero_partial_page(znode_t *zp, uint64_t start, uint64_t len)
static int
zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
{
zfs_sb_t *zsb = ZTOZSB(zp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
rl_t *rl;
int error;
@ -1546,7 +1547,7 @@ zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
if (off + len > zp->z_size)
len = zp->z_size - off;
error = dmu_free_long_range(zsb->z_os, zp->z_id, off, len);
error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, off, len);
/*
* Zero partial page cache entries. This must be done under a
@ -1605,7 +1606,7 @@ zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
static int
zfs_trunc(znode_t *zp, uint64_t end)
{
zfs_sb_t *zsb = ZTOZSB(zp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
dmu_tx_t *tx;
rl_t *rl;
int error;
@ -1625,12 +1626,12 @@ zfs_trunc(znode_t *zp, uint64_t end)
return (0);
}
error = dmu_free_long_range(zsb->z_os, zp->z_id, end, -1);
error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, end, -1);
if (error) {
zfs_range_unlock(rl);
return (error);
}
tx = dmu_tx_create(zsb->z_os);
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
dmu_tx_mark_netfree(tx);
@ -1642,12 +1643,12 @@ zfs_trunc(znode_t *zp, uint64_t end)
}
zp->z_size = end;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb),
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs),
NULL, &zp->z_size, sizeof (zp->z_size));
if (end == 0) {
zp->z_pflags &= ~ZFS_SPARSE;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb),
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
NULL, &zp->z_pflags, 8);
}
VERIFY(sa_bulk_update(zp->z_sa_hdl, bulk, count, tx) == 0);
@ -1674,15 +1675,15 @@ int
zfs_freesp(znode_t *zp, uint64_t off, uint64_t len, int flag, boolean_t log)
{
dmu_tx_t *tx;
zfs_sb_t *zsb = ZTOZSB(zp);
zilog_t *zilog = zsb->z_log;
zfsvfs_t *zfsvfs = ZTOZSB(zp);
zilog_t *zilog = zfsvfs->z_log;
uint64_t mode;
uint64_t mtime[2], ctime[2];
sa_bulk_attr_t bulk[3];
int count = 0;
int error;
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_MODE(zsb), &mode,
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs), &mode,
sizeof (mode))) != 0)
return (error);
@ -1703,7 +1704,7 @@ zfs_freesp(znode_t *zp, uint64_t off, uint64_t len, int flag, boolean_t log)
if (error || !log)
goto out;
log:
tx = dmu_tx_create(zsb->z_os);
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_WAIT);
@ -1712,9 +1713,9 @@ log:
goto out;
}
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb),
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
NULL, &zp->z_pflags, 8);
zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
@ -1742,7 +1743,7 @@ void
zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
{
struct super_block *sb;
zfs_sb_t *zsb;
zfsvfs_t *zfsvfs;
uint64_t moid, obj, sa_obj, version;
uint64_t sense = ZFS_CASE_SENSITIVE;
uint64_t norm = 0;
@ -1824,7 +1825,7 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
ASSERT(error == 0);
/*
* Create root znode. Create minimal znode/inode/zsb/sb
* Create root znode. Create minimal znode/inode/zfsvfs/sb
* to allow zfs_mknode to work.
*/
vattr.va_mask = ATTR_MODE|ATTR_UID|ATTR_GID;
@ -1838,21 +1839,21 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
rootzp->z_atime_dirty = 0;
rootzp->z_is_sa = USE_SA(version, os);
zsb = kmem_zalloc(sizeof (zfs_sb_t), KM_SLEEP);
zsb->z_os = os;
zsb->z_parent = zsb;
zsb->z_version = version;
zsb->z_use_fuids = USE_FUIDS(version, os);
zsb->z_use_sa = USE_SA(version, os);
zsb->z_norm = norm;
zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);
zfsvfs->z_os = os;
zfsvfs->z_parent = zfsvfs;
zfsvfs->z_version = version;
zfsvfs->z_use_fuids = USE_FUIDS(version, os);
zfsvfs->z_use_sa = USE_SA(version, os);
zfsvfs->z_norm = norm;
sb = kmem_zalloc(sizeof (struct super_block), KM_SLEEP);
sb->s_fs_info = zsb;
sb->s_fs_info = zfsvfs;
ZTOI(rootzp)->i_sb = sb;
error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
&zsb->z_attr_table);
&zfsvfs->z_attr_table);
ASSERT(error == 0);
@ -1861,20 +1862,21 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
* insensitive.
*/
if (sense == ZFS_CASE_INSENSITIVE || sense == ZFS_CASE_MIXED)
zsb->z_norm |= U8_TEXTPREP_TOUPPER;
zfsvfs->z_norm |= U8_TEXTPREP_TOUPPER;
mutex_init(&zsb->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&zsb->z_all_znodes, sizeof (znode_t),
mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
offsetof(znode_t, z_link_node));
size = MIN(1 << (highbit64(zfs_object_mutex_size)-1), ZFS_OBJ_MTX_MAX);
zsb->z_hold_size = size;
zsb->z_hold_trees = vmem_zalloc(sizeof (avl_tree_t) * size, KM_SLEEP);
zsb->z_hold_locks = vmem_zalloc(sizeof (kmutex_t) * size, KM_SLEEP);
zfsvfs->z_hold_size = size;
zfsvfs->z_hold_trees = vmem_zalloc(sizeof (avl_tree_t) * size,
KM_SLEEP);
zfsvfs->z_hold_locks = vmem_zalloc(sizeof (kmutex_t) * size, KM_SLEEP);
for (i = 0; i != size; i++) {
avl_create(&zsb->z_hold_trees[i], zfs_znode_hold_compare,
avl_create(&zfsvfs->z_hold_trees[i], zfs_znode_hold_compare,
sizeof (znode_hold_t), offsetof(znode_hold_t, zh_node));
mutex_init(&zsb->z_hold_locks[i], NULL, MUTEX_DEFAULT, NULL);
mutex_init(&zfsvfs->z_hold_locks[i], NULL, MUTEX_DEFAULT, NULL);
}
VERIFY(0 == zfs_acl_ids_create(rootzp, IS_ROOT_NODE, &vattr,
@ -1892,18 +1894,18 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
/*
* Create shares directory
*/
error = zfs_create_share_dir(zsb, tx);
error = zfs_create_share_dir(zfsvfs, tx);
ASSERT(error == 0);
for (i = 0; i != size; i++) {
avl_destroy(&zsb->z_hold_trees[i]);
mutex_destroy(&zsb->z_hold_locks[i]);
avl_destroy(&zfsvfs->z_hold_trees[i]);
mutex_destroy(&zfsvfs->z_hold_locks[i]);
}
vmem_free(zsb->z_hold_trees, sizeof (avl_tree_t) * size);
vmem_free(zsb->z_hold_locks, sizeof (kmutex_t) * size);
vmem_free(zfsvfs->z_hold_trees, sizeof (avl_tree_t) * size);
vmem_free(zfsvfs->z_hold_locks, sizeof (kmutex_t) * size);
kmem_free(sb, sizeof (struct super_block));
kmem_free(zsb, sizeof (zfs_sb_t));
kmem_free(zfsvfs, sizeof (zfsvfs_t));
}
#endif /* _KERNEL */
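Every hunk above is essentially the mechanical substitution of zfs_sb_t/zsb with zfsvfs_t/zfsvfs, so it helps to keep the conversion macros in mind while reading. The sketch below is an editor's approximation of the zfs_znode.h/zfs_vfsops.h definitions of this era, not text from this commit: the superblock's private pointer carries the zfsvfs_t, and the Linux inode is embedded in the znode.

/*
 * Illustrative approximation of the inode/znode/zfsvfs conversion macros
 * relied on throughout this diff; see zfs_znode.h for the real definitions.
 */
#define	ZTOI(znode)	(&((znode)->z_inode))	/* the znode embeds its inode */
#define	ITOZ(ip)	(container_of((ip), znode_t, z_inode))
#define	ITOZSB(ip)	((zfsvfs_t *)((ip)->i_sb->s_fs_info))
#define	ZTOZSB(zp)	ITOZSB(ZTOI(zp))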

View File

@ -52,10 +52,10 @@ zpl_common_open(struct inode *ip, struct file *filp)
static int
zpl_root_iterate(struct file *filp, struct dir_context *ctx)
{
zfs_sb_t *zsb = ITOZSB(file_inode(filp));
zfsvfs_t *zfsvfs = ITOZSB(file_inode(filp));
int error = 0;
ZFS_ENTER(zsb);
ZFS_ENTER(zfsvfs);
if (!dir_emit_dots(filp, ctx))
goto out;
@ -76,7 +76,7 @@ zpl_root_iterate(struct file *filp, struct dir_context *ctx)
ctx->pos++;
}
out:
ZFS_EXIT(zsb);
ZFS_EXIT(zfsvfs);
return (error);
}
@ -249,14 +249,14 @@ zpl_snapdir_lookup(struct inode *dip, struct dentry *dentry,
static int
zpl_snapdir_iterate(struct file *filp, struct dir_context *ctx)
{
zfs_sb_t *zsb = ITOZSB(file_inode(filp));
zfsvfs_t *zfsvfs = ITOZSB(file_inode(filp));
fstrans_cookie_t cookie;
char snapname[MAXNAMELEN];
boolean_t case_conflict;
uint64_t id, pos;
int error = 0;
ZFS_ENTER(zsb);
ZFS_ENTER(zfsvfs);
cookie = spl_fstrans_mark();
if (!dir_emit_dots(filp, ctx))
@ -264,10 +264,10 @@ zpl_snapdir_iterate(struct file *filp, struct dir_context *ctx)
pos = ctx->pos;
while (error == 0) {
dsl_pool_config_enter(dmu_objset_pool(zsb->z_os), FTAG);
error = -dmu_snapshot_list_next(zsb->z_os, MAXNAMELEN,
dsl_pool_config_enter(dmu_objset_pool(zfsvfs->z_os), FTAG);
error = -dmu_snapshot_list_next(zfsvfs->z_os, MAXNAMELEN,
snapname, &id, &pos, &case_conflict);
dsl_pool_config_exit(dmu_objset_pool(zsb->z_os), FTAG);
dsl_pool_config_exit(dmu_objset_pool(zfsvfs->z_os), FTAG);
if (error)
goto out;
@ -279,7 +279,7 @@ zpl_snapdir_iterate(struct file *filp, struct dir_context *ctx)
}
out:
spl_fstrans_unmark(cookie);
ZFS_EXIT(zsb);
ZFS_EXIT(zfsvfs);
if (error == -ENOENT)
return (0);
@ -378,15 +378,15 @@ static int
zpl_snapdir_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat)
{
zfs_sb_t *zsb = ITOZSB(dentry->d_inode);
zfsvfs_t *zfsvfs = ITOZSB(dentry->d_inode);
int error;
ZFS_ENTER(zsb);
ZFS_ENTER(zfsvfs);
error = simple_getattr(mnt, dentry, stat);
stat->nlink = stat->size = 2;
stat->ctime = stat->mtime = dmu_objset_snap_cmtime(zsb->z_os);
stat->ctime = stat->mtime = dmu_objset_snap_cmtime(zfsvfs->z_os);
stat->atime = CURRENT_TIME;
ZFS_EXIT(zsb);
ZFS_EXIT(zfsvfs);
return (error);
}
@ -464,19 +464,19 @@ zpl_shares_iterate(struct file *filp, struct dir_context *ctx)
{
fstrans_cookie_t cookie;
cred_t *cr = CRED();
zfs_sb_t *zsb = ITOZSB(file_inode(filp));
zfsvfs_t *zfsvfs = ITOZSB(file_inode(filp));
znode_t *dzp;
int error = 0;
ZFS_ENTER(zsb);
ZFS_ENTER(zfsvfs);
cookie = spl_fstrans_mark();
if (zsb->z_shares_dir == 0) {
if (zfsvfs->z_shares_dir == 0) {
dir_emit_dots(filp, ctx);
goto out;
}
error = -zfs_zget(zsb, zsb->z_shares_dir, &dzp);
error = -zfs_zget(zfsvfs, zfsvfs->z_shares_dir, &dzp);
if (error)
goto out;
@ -487,7 +487,7 @@ zpl_shares_iterate(struct file *filp, struct dir_context *ctx)
iput(ZTOI(dzp));
out:
spl_fstrans_unmark(cookie);
ZFS_EXIT(zsb);
ZFS_EXIT(zfsvfs);
ASSERT3S(error, <=, 0);
return (error);
@ -513,27 +513,27 @@ zpl_shares_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat)
{
struct inode *ip = dentry->d_inode;
zfs_sb_t *zsb = ITOZSB(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
znode_t *dzp;
int error;
ZFS_ENTER(zsb);
ZFS_ENTER(zfsvfs);
if (zsb->z_shares_dir == 0) {
if (zfsvfs->z_shares_dir == 0) {
error = simple_getattr(mnt, dentry, stat);
stat->nlink = stat->size = 2;
stat->atime = CURRENT_TIME;
ZFS_EXIT(zsb);
ZFS_EXIT(zfsvfs);
return (error);
}
error = -zfs_zget(zsb, zsb->z_shares_dir, &dzp);
error = -zfs_zget(zfsvfs, zfsvfs->z_shares_dir, &dzp);
if (error == 0) {
error = -zfs_getattr_fast(ZTOI(dzp), stat);
iput(ZTOI(dzp));
}
ZFS_EXIT(zsb);
ZFS_EXIT(zfsvfs);
ASSERT3S(error, <=, 0);
return (error);
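The zpl_ctldir.c callbacks above all bracket their work with ZFS_ENTER(zfsvfs)/ZFS_EXIT(zfsvfs), which after this change take a zfsvfs_t. As a hedged reference only (reconstructed from memory of the era's zfs_vfsops.h, so treat the details as approximate), the pair is assumed to expand roughly as follows; it is the same z_teardown_lock that zpl_xattr.c takes directly near the end of this diff.

/*
 * Approximate expansion of ZFS_ENTER/ZFS_EXIT after the rename.  The pair
 * serializes ZPL operations against unmount/rollback teardown and fails
 * fast with EIO once the zfsvfs has been torn down.
 */
#define	ZFS_ENTER(zfsvfs)						\
	{								\
		rrm_enter_read(&(zfsvfs)->z_teardown_lock, FTAG);	\
		if ((zfsvfs)->z_unmounted) {				\
			ZFS_EXIT(zfsvfs);				\
			return (SET_ERROR(EIO));			\
		}							\
	}

#define	ZFS_EXIT(zfsvfs)	rrm_exit(&(zfsvfs)->z_teardown_lock, FTAG)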

View File

@ -605,14 +605,14 @@ static int
zpl_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
znode_t *zp = ITOZ(mapping->host);
zfs_sb_t *zsb = ITOZSB(mapping->host);
zfsvfs_t *zfsvfs = ITOZSB(mapping->host);
enum writeback_sync_modes sync_mode;
int result;
ZFS_ENTER(zsb);
if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
ZFS_ENTER(zfsvfs);
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
wbc->sync_mode = WB_SYNC_ALL;
ZFS_EXIT(zsb);
ZFS_EXIT(zfsvfs);
sync_mode = wbc->sync_mode;
/*
@ -625,11 +625,11 @@ zpl_writepages(struct address_space *mapping, struct writeback_control *wbc)
wbc->sync_mode = WB_SYNC_NONE;
result = write_cache_pages(mapping, wbc, zpl_putpage, mapping);
if (sync_mode != wbc->sync_mode) {
ZFS_ENTER(zsb);
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
if (zsb->z_log != NULL)
zil_commit(zsb->z_log, zp->z_id);
ZFS_EXIT(zsb);
if (zfsvfs->z_log != NULL)
zil_commit(zfsvfs->z_log, zp->z_id);
ZFS_EXIT(zfsvfs);
/*
* We need to call write_cache_pages() again (we can't just

View File

@ -48,7 +48,7 @@ zpl_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
pathname_t *ppn = NULL;
pathname_t pn;
int zfs_flags = 0;
zfs_sb_t *zsb = dentry->d_sb->s_fs_info;
zfsvfs_t *zfsvfs = dentry->d_sb->s_fs_info;
if (dlen(dentry) >= ZAP_MAXNAMELEN)
return (ERR_PTR(-ENAMETOOLONG));
@ -57,7 +57,7 @@ zpl_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
cookie = spl_fstrans_mark();
/* If we are a case insensitive fs, we need the real name */
if (zsb->z_case == ZFS_CASE_INSENSITIVE) {
if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE) {
zfs_flags = FIGNORECASE;
pn_alloc(&pn);
ppn = &pn;
@ -259,7 +259,7 @@ zpl_unlink(struct inode *dir, struct dentry *dentry)
cred_t *cr = CRED();
int error;
fstrans_cookie_t cookie;
zfs_sb_t *zsb = dentry->d_sb->s_fs_info;
zfsvfs_t *zfsvfs = dentry->d_sb->s_fs_info;
crhold(cr);
cookie = spl_fstrans_mark();
@ -269,7 +269,7 @@ zpl_unlink(struct inode *dir, struct dentry *dentry)
* For a CI FS we must invalidate the dentry to prevent the
* creation of negative entries.
*/
if (error == 0 && zsb->z_case == ZFS_CASE_INSENSITIVE)
if (error == 0 && zfsvfs->z_case == ZFS_CASE_INSENSITIVE)
d_invalidate(dentry);
spl_fstrans_unmark(cookie);
@ -319,7 +319,7 @@ zpl_rmdir(struct inode *dir, struct dentry *dentry)
cred_t *cr = CRED();
int error;
fstrans_cookie_t cookie;
zfs_sb_t *zsb = dentry->d_sb->s_fs_info;
zfsvfs_t *zfsvfs = dentry->d_sb->s_fs_info;
crhold(cr);
cookie = spl_fstrans_mark();
@ -329,7 +329,7 @@ zpl_rmdir(struct inode *dir, struct dentry *dentry)
* For a CI FS we must invalidate the dentry to prevent the
* creation of negative entries.
*/
if (error == 0 && zsb->z_case == ZFS_CASE_INSENSITIVE)
if (error == 0 && zfsvfs->z_case == ZFS_CASE_INSENSITIVE)
d_invalidate(dentry);
spl_fstrans_unmark(cookie);
@ -658,7 +658,7 @@ zpl_revalidate(struct dentry *dentry, unsigned int flags)
{
#endif /* HAVE_D_REVALIDATE_NAMEIDATA */
/* CSTYLED */
zfs_sb_t *zsb = dentry->d_sb->s_fs_info;
zfsvfs_t *zfsvfs = dentry->d_sb->s_fs_info;
int error;
if (flags & LOOKUP_RCU)
@ -668,12 +668,12 @@ zpl_revalidate(struct dentry *dentry, unsigned int flags)
* Automounted snapshots rely on periodic dentry revalidation
* to defer snapshots from being automatically unmounted.
*/
if (zsb->z_issnap) {
if (time_after(jiffies, zsb->z_snap_defer_time +
if (zfsvfs->z_issnap) {
if (time_after(jiffies, zfsvfs->z_snap_defer_time +
MAX(zfs_expire_snapshot * HZ / 2, HZ))) {
zsb->z_snap_defer_time = jiffies;
zfsctl_snapshot_unmount_delay(zsb->z_os->os_spa,
dmu_objset_id(zsb->z_os), zfs_expire_snapshot);
zfsvfs->z_snap_defer_time = jiffies;
zfsctl_snapshot_unmount_delay(zfsvfs->z_os->os_spa,
dmu_objset_id(zfsvfs->z_os), zfs_expire_snapshot);
}
}
@ -684,7 +684,7 @@ zpl_revalidate(struct dentry *dentry, unsigned int flags)
*/
if (dentry->d_inode == NULL) {
spin_lock(&dentry->d_lock);
error = time_before(dentry->d_time, zsb->z_rollback_time);
error = time_before(dentry->d_time, zfsvfs->z_rollback_time);
spin_unlock(&dentry->d_lock);
if (error)
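The zpl_revalidate() hunk above is also where snapshot auto-unmount deferral happens: a revalidation re-arms the delayed unmount, but at most once per half expiry period. The standalone program below only reproduces that MAX() arithmetic for illustration; the HZ value is a stand-in and 300 seconds is the assumed zfs_expire_snapshot default, neither value is taken from this commit.

#include <stdio.h>

#define	HZ	250			/* stand-in tick rate, illustration only */

static int zfs_expire_snapshot = 300;	/* assumed module default, in seconds */

int
main(void)
{
	/*
	 * Minimum spacing between deferrals, as in zpl_revalidate():
	 * half the expiry period, but never less than one tick.
	 */
	long defer = zfs_expire_snapshot * HZ / 2;

	if (defer < HZ)
		defer = HZ;

	printf("defer interval: %ld jiffies (~%ld s)\n", defer, defer / HZ);
	return (0);
}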

View File

@ -184,211 +184,15 @@ zpl_statfs(struct dentry *dentry, struct kstatfs *statp)
return (error);
}
enum {
TOKEN_RO,
TOKEN_RW,
TOKEN_SETUID,
TOKEN_NOSETUID,
TOKEN_EXEC,
TOKEN_NOEXEC,
TOKEN_DEVICES,
TOKEN_NODEVICES,
TOKEN_DIRXATTR,
TOKEN_SAXATTR,
TOKEN_XATTR,
TOKEN_NOXATTR,
TOKEN_ATIME,
TOKEN_NOATIME,
TOKEN_RELATIME,
TOKEN_NORELATIME,
TOKEN_NBMAND,
TOKEN_NONBMAND,
TOKEN_MNTPOINT,
TOKEN_LAST,
};
static const match_table_t zpl_tokens = {
{ TOKEN_RO, MNTOPT_RO },
{ TOKEN_RW, MNTOPT_RW },
{ TOKEN_SETUID, MNTOPT_SETUID },
{ TOKEN_NOSETUID, MNTOPT_NOSETUID },
{ TOKEN_EXEC, MNTOPT_EXEC },
{ TOKEN_NOEXEC, MNTOPT_NOEXEC },
{ TOKEN_DEVICES, MNTOPT_DEVICES },
{ TOKEN_NODEVICES, MNTOPT_NODEVICES },
{ TOKEN_DIRXATTR, MNTOPT_DIRXATTR },
{ TOKEN_SAXATTR, MNTOPT_SAXATTR },
{ TOKEN_XATTR, MNTOPT_XATTR },
{ TOKEN_NOXATTR, MNTOPT_NOXATTR },
{ TOKEN_ATIME, MNTOPT_ATIME },
{ TOKEN_NOATIME, MNTOPT_NOATIME },
{ TOKEN_RELATIME, MNTOPT_RELATIME },
{ TOKEN_NORELATIME, MNTOPT_NORELATIME },
{ TOKEN_NBMAND, MNTOPT_NBMAND },
{ TOKEN_NONBMAND, MNTOPT_NONBMAND },
{ TOKEN_MNTPOINT, MNTOPT_MNTPOINT "=%s" },
{ TOKEN_LAST, NULL },
};
static int
zpl_parse_option(char *option, int token, substring_t *args, zfs_mntopts_t *zmo)
{
switch (token) {
case TOKEN_RO:
zmo->z_readonly = B_TRUE;
zmo->z_do_readonly = B_TRUE;
break;
case TOKEN_RW:
zmo->z_readonly = B_FALSE;
zmo->z_do_readonly = B_TRUE;
break;
case TOKEN_SETUID:
zmo->z_setuid = B_TRUE;
zmo->z_do_setuid = B_TRUE;
break;
case TOKEN_NOSETUID:
zmo->z_setuid = B_FALSE;
zmo->z_do_setuid = B_TRUE;
break;
case TOKEN_EXEC:
zmo->z_exec = B_TRUE;
zmo->z_do_exec = B_TRUE;
break;
case TOKEN_NOEXEC:
zmo->z_exec = B_FALSE;
zmo->z_do_exec = B_TRUE;
break;
case TOKEN_DEVICES:
zmo->z_devices = B_TRUE;
zmo->z_do_devices = B_TRUE;
break;
case TOKEN_NODEVICES:
zmo->z_devices = B_FALSE;
zmo->z_do_devices = B_TRUE;
break;
case TOKEN_DIRXATTR:
zmo->z_xattr = ZFS_XATTR_DIR;
zmo->z_do_xattr = B_TRUE;
break;
case TOKEN_SAXATTR:
zmo->z_xattr = ZFS_XATTR_SA;
zmo->z_do_xattr = B_TRUE;
break;
case TOKEN_XATTR:
zmo->z_xattr = ZFS_XATTR_DIR;
zmo->z_do_xattr = B_TRUE;
break;
case TOKEN_NOXATTR:
zmo->z_xattr = ZFS_XATTR_OFF;
zmo->z_do_xattr = B_TRUE;
break;
case TOKEN_ATIME:
zmo->z_atime = B_TRUE;
zmo->z_do_atime = B_TRUE;
break;
case TOKEN_NOATIME:
zmo->z_atime = B_FALSE;
zmo->z_do_atime = B_TRUE;
break;
case TOKEN_RELATIME:
zmo->z_relatime = B_TRUE;
zmo->z_do_relatime = B_TRUE;
break;
case TOKEN_NORELATIME:
zmo->z_relatime = B_FALSE;
zmo->z_do_relatime = B_TRUE;
break;
case TOKEN_NBMAND:
zmo->z_nbmand = B_TRUE;
zmo->z_do_nbmand = B_TRUE;
break;
case TOKEN_NONBMAND:
zmo->z_nbmand = B_FALSE;
zmo->z_do_nbmand = B_TRUE;
break;
case TOKEN_MNTPOINT:
zmo->z_mntpoint = match_strdup(&args[0]);
if (zmo->z_mntpoint == NULL)
return (-ENOMEM);
break;
default:
break;
}
return (0);
}
/*
* Parse the mntopts string storing the results in provided zmo argument.
* If an error occurs the zmo argument will not be modified. The caller
* needs to set isremount when recycling an existing zfs_mntopts_t.
*/
static int
zpl_parse_options(char *osname, char *mntopts, zfs_mntopts_t *zmo,
boolean_t isremount)
{
zfs_mntopts_t *tmp_zmo;
int error;
tmp_zmo = zfs_mntopts_alloc();
tmp_zmo->z_osname = strdup(osname);
if (mntopts) {
substring_t args[MAX_OPT_ARGS];
char *tmp_mntopts, *p, *t;
int token;
t = tmp_mntopts = strdup(mntopts);
while ((p = strsep(&t, ",")) != NULL) {
if (!*p)
continue;
args[0].to = args[0].from = NULL;
token = match_token(p, zpl_tokens, args);
error = zpl_parse_option(p, token, args, tmp_zmo);
if (error) {
zfs_mntopts_free(tmp_zmo);
strfree(tmp_mntopts);
return (error);
}
}
strfree(tmp_mntopts);
}
if (isremount == B_TRUE) {
if (zmo->z_osname)
strfree(zmo->z_osname);
if (zmo->z_mntpoint)
strfree(zmo->z_mntpoint);
} else {
ASSERT3P(zmo->z_osname, ==, NULL);
ASSERT3P(zmo->z_mntpoint, ==, NULL);
}
memcpy(zmo, tmp_zmo, sizeof (zfs_mntopts_t));
kmem_free(tmp_zmo, sizeof (zfs_mntopts_t));
return (0);
}
static int
zpl_remount_fs(struct super_block *sb, int *flags, char *data)
{
zfs_sb_t *zsb = sb->s_fs_info;
zfs_mnt_t zm = { .mnt_osname = NULL, .mnt_data = data };
fstrans_cookie_t cookie;
int error;
error = zpl_parse_options(zsb->z_mntopts->z_osname, data,
zsb->z_mntopts, B_TRUE);
if (error)
return (error);
cookie = spl_fstrans_mark();
error = -zfs_remount(sb, flags, zsb->z_mntopts);
error = -zfs_remount(sb, flags, &zm);
spl_fstrans_unmark(cookie);
ASSERT3S(error, <=, 0);
@ -396,12 +200,13 @@ zpl_remount_fs(struct super_block *sb, int *flags, char *data)
}
static int
__zpl_show_options(struct seq_file *seq, zfs_sb_t *zsb)
__zpl_show_options(struct seq_file *seq, zfsvfs_t *zfsvfs)
{
seq_printf(seq, ",%s", zsb->z_flags & ZSB_XATTR ? "xattr" : "noxattr");
seq_printf(seq, ",%s",
zfsvfs->z_flags & ZSB_XATTR ? "xattr" : "noxattr");
#ifdef CONFIG_FS_POSIX_ACL
switch (zsb->z_acl_type) {
switch (zfsvfs->z_acl_type) {
case ZFS_ACLTYPE_POSIXACL:
seq_puts(seq, ",posixacl");
break;
@ -431,12 +236,12 @@ zpl_show_options(struct seq_file *seq, struct vfsmount *vfsp)
static int
zpl_fill_super(struct super_block *sb, void *data, int silent)
{
zfs_mntopts_t *zmo = (zfs_mntopts_t *)data;
zfs_mnt_t *zm = (zfs_mnt_t *)data;
fstrans_cookie_t cookie;
int error;
cookie = spl_fstrans_mark();
error = -zfs_domount(sb, zmo, silent);
error = -zfs_domount(sb, zm, silent);
spl_fstrans_unmark(cookie);
ASSERT3S(error, <=, 0);
@ -448,32 +253,18 @@ static struct dentry *
zpl_mount(struct file_system_type *fs_type, int flags,
const char *osname, void *data)
{
zfs_mntopts_t *zmo = zfs_mntopts_alloc();
int error;
zfs_mnt_t zm = { .mnt_osname = osname, .mnt_data = data };
error = zpl_parse_options((char *)osname, (char *)data, zmo, B_FALSE);
if (error) {
zfs_mntopts_free(zmo);
return (ERR_PTR(error));
}
return (mount_nodev(fs_type, flags, zmo, zpl_fill_super));
return (mount_nodev(fs_type, flags, &zm, zpl_fill_super));
}
#else
static int
zpl_get_sb(struct file_system_type *fs_type, int flags,
const char *osname, void *data, struct vfsmount *mnt)
{
zfs_mntopts_t *zmo = zfs_mntopts_alloc();
int error;
zfs_mnt_t zm = { .mnt_osname = osname, .mnt_data = data };
error = zpl_parse_options((char *)osname, (char *)data, zmo, B_FALSE);
if (error) {
zfs_mntopts_free(zmo);
return (error);
}
return (get_sb_nodev(fs_type, flags, zmo, zpl_fill_super, mnt));
return (get_sb_nodev(fs_type, flags, &zm, zpl_fill_super, mnt));
}
#endif /* HAVE_MOUNT_NODEV */
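With zpl_parse_options() and its token table removed above, zpl_mount(), zpl_get_sb() and zpl_remount_fs() no longer interpret the option string at all; they wrap the dataset name and the raw options in a zfs_mnt_t and hand it down, deferring the actual parsing to the common mount code. A minimal sketch of that pass-through follows; the field layout mirrors what the new code above initializes and is otherwise an assumption about the upstream header, not text from this commit.

/*
 * Sketch of the new mount plumbing (field layout per the upstream header,
 * treated here as an assumption).  zfs_mnt_t only couriers the dataset name
 * and the unparsed option string from the Linux entry points down to
 * zfs_domount()/zfs_remount(); nothing is parsed in zpl_super.c anymore.
 */
typedef struct zfs_mnt {
	const char	*mnt_osname;	/* dataset name, NULL on remount */
	char		*mnt_data;	/* raw comma-separated mount options */
} zfs_mnt_t;

/*
 * Usage, as in the hunks above:
 *
 *   mount:    zfs_mnt_t zm = { .mnt_osname = osname, .mnt_data = data };
 *             mount_nodev(fs_type, flags, &zm, zpl_fill_super);
 *
 *   remount:  zfs_mnt_t zm = { .mnt_osname = NULL, .mnt_data = data };
 *             zfs_remount(sb, flags, &zm);
 *
 * zfs_domount() then parses zm->mnt_data while building the zfsvfs_t it
 * stores in sb->s_fs_info.
 */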
@ -494,7 +285,7 @@ zpl_prune_sb(int64_t nr_to_scan, void *arg)
struct super_block *sb = (struct super_block *)arg;
int objects = 0;
(void) -zfs_sb_prune(sb, nr_to_scan, &objects);
(void) -zfs_prune(sb, nr_to_scan, &objects);
}
#ifdef HAVE_NR_CACHED_OBJECTS

View File

@ -237,7 +237,7 @@ ssize_t
zpl_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
znode_t *zp = ITOZ(dentry->d_inode);
zfs_sb_t *zsb = ZTOZSB(zp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
xattr_filldir_t xf = { buffer_size, 0, buffer, dentry };
cred_t *cr = CRED();
fstrans_cookie_t cookie;
@ -245,10 +245,10 @@ zpl_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
crhold(cr);
cookie = spl_fstrans_mark();
rrm_enter_read(&(zsb)->z_teardown_lock, FTAG);
rrm_enter_read(&(zfsvfs)->z_teardown_lock, FTAG);
rw_enter(&zp->z_xattr_lock, RW_READER);
if (zsb->z_use_sa && zp->z_is_sa) {
if (zfsvfs->z_use_sa && zp->z_is_sa) {
error = zpl_xattr_list_sa(&xf);
if (error)
goto out;
@ -262,7 +262,7 @@ zpl_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
out:
rw_exit(&zp->z_xattr_lock);
rrm_exit(&(zsb)->z_teardown_lock, FTAG);
rrm_exit(&(zfsvfs)->z_teardown_lock, FTAG);
spl_fstrans_unmark(cookie);
crfree(cr);
@ -349,12 +349,12 @@ __zpl_xattr_get(struct inode *ip, const char *name, void *value, size_t size,
cred_t *cr)
{
znode_t *zp = ITOZ(ip);
zfs_sb_t *zsb = ZTOZSB(zp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
int error;
ASSERT(RW_LOCK_HELD(&zp->z_xattr_lock));
if (zsb->z_use_sa && zp->z_is_sa) {
if (zfsvfs->z_use_sa && zp->z_is_sa) {
error = zpl_xattr_get_sa(ip, name, value, size);
if (error != -ENOENT)
goto out;
@ -376,14 +376,14 @@ static int
__zpl_xattr_where(struct inode *ip, const char *name, int *where, cred_t *cr)
{
znode_t *zp = ITOZ(ip);
zfs_sb_t *zsb = ZTOZSB(zp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
int error;
ASSERT(where);
ASSERT(RW_LOCK_HELD(&zp->z_xattr_lock));
*where = XATTR_NOENT;
if (zsb->z_use_sa && zp->z_is_sa) {
if (zfsvfs->z_use_sa && zp->z_is_sa) {
error = zpl_xattr_get_sa(ip, name, NULL, 0);
if (error >= 0)
*where |= XATTR_IN_SA;
@ -411,18 +411,18 @@ static int
zpl_xattr_get(struct inode *ip, const char *name, void *value, size_t size)
{
znode_t *zp = ITOZ(ip);
zfs_sb_t *zsb = ZTOZSB(zp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
cred_t *cr = CRED();
fstrans_cookie_t cookie;
int error;
crhold(cr);
cookie = spl_fstrans_mark();
rrm_enter_read(&(zsb)->z_teardown_lock, FTAG);
rrm_enter_read(&(zfsvfs)->z_teardown_lock, FTAG);
rw_enter(&zp->z_xattr_lock, RW_READER);
error = __zpl_xattr_get(ip, name, value, size, cr);
rw_exit(&zp->z_xattr_lock);
rrm_exit(&(zsb)->z_teardown_lock, FTAG);
rrm_exit(&(zfsvfs)->z_teardown_lock, FTAG);
spl_fstrans_unmark(cookie);
crfree(cr);
@ -576,7 +576,7 @@ zpl_xattr_set(struct inode *ip, const char *name, const void *value,
size_t size, int flags)
{
znode_t *zp = ITOZ(ip);
zfs_sb_t *zsb = ZTOZSB(zp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
cred_t *cr = CRED();
fstrans_cookie_t cookie;
int where;
@ -584,7 +584,7 @@ zpl_xattr_set(struct inode *ip, const char *name, const void *value,
crhold(cr);
cookie = spl_fstrans_mark();
rrm_enter_read(&(zsb)->z_teardown_lock, FTAG);
rrm_enter_read(&(zfsvfs)->z_teardown_lock, FTAG);
rw_enter(&ITOZ(ip)->z_xattr_lock, RW_WRITER);
/*
@ -615,8 +615,8 @@ zpl_xattr_set(struct inode *ip, const char *name, const void *value,
}
/* Preferentially store the xattr as a SA for better performance */
if (zsb->z_use_sa && zp->z_is_sa &&
(zsb->z_xattr_sa || (value == NULL && where & XATTR_IN_SA))) {
if (zfsvfs->z_use_sa && zp->z_is_sa &&
(zfsvfs->z_xattr_sa || (value == NULL && where & XATTR_IN_SA))) {
error = zpl_xattr_set_sa(ip, name, value, size, flags, cr);
if (error == 0) {
/*
@ -637,7 +637,7 @@ zpl_xattr_set(struct inode *ip, const char *name, const void *value,
zpl_xattr_set_sa(ip, name, NULL, 0, 0, cr);
out:
rw_exit(&ITOZ(ip)->z_xattr_lock);
rrm_exit(&(zsb)->z_teardown_lock, FTAG);
rrm_exit(&(zfsvfs)->z_teardown_lock, FTAG);
spl_fstrans_unmark(cookie);
crfree(cr);
ASSERT3S(error, <=, 0);