Mirror of https://git.proxmox.com/git/mirror_zfs.git (synced 2024-12-26 03:09:34 +03:00)
Retire z_nr_znodes

Added in ab26409db7 ("Linux 3.1 compat, super_block->s_shrink"); the only
consumer which needed the count was retired in 066e825221 ("Linux compat:
Minimum kernel version 3.10"). The counter also gets in the way of
eventually not maintaining the list at all.

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Alexander Motin <mav@FreeBSD.org>
Signed-off-by: Mateusz Guzik <mjguzik@gmail.com>
Closes #15274
parent 54c6fbd378
commit f7a07d76ee
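To make the idea concrete, here is a minimal, self-contained sketch of the pattern this commit applies: drop a separately maintained element counter and derive "are any left?" from the list itself. This is plain userland C with made-up names (node_t, fs_t, fs_is_empty), not ZFS code; in the diff below the same question is answered by list_is_empty(&zfsvfs->z_all_znodes) under the existing z_all_znodes list, so z_nr_znodes no longer has to be kept in sync at every insert and remove.

#include <assert.h>
#include <stddef.h>

/* Toy circular doubly linked list standing in for z_all_znodes. */
typedef struct node {
	struct node *next;
	struct node *prev;
} node_t;

typedef struct fs {
	node_t head;		/* sentinel head of the list */
	/* uint64_t nr_nodes;	   redundant counter, retired */
} fs_t;

static void
fs_init(fs_t *fs)
{
	fs->head.next = fs->head.prev = &fs->head;
}

static void
fs_insert_tail(fs_t *fs, node_t *n)
{
	n->prev = fs->head.prev;
	n->next = &fs->head;
	fs->head.prev->next = n;
	fs->head.prev = n;
	/* No counter to bump: the list already carries this information. */
}

static void
fs_remove(node_t *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->next = n->prev = NULL;
}

/* The emptiness check the counter used to answer. */
static int
fs_is_empty(const fs_t *fs)
{
	return (fs->head.next == &fs->head);
}

int
main(void)
{
	fs_t fs;
	node_t a;

	fs_init(&fs);
	assert(fs_is_empty(&fs));
	fs_insert_tail(&fs, &a);
	assert(!fs_is_empty(&fs));
	fs_remove(&a);
	assert(fs_is_empty(&fs));
	return (0);
}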
@@ -93,7 +93,6 @@ struct zfsvfs {
 	zfs_teardown_lock_t z_teardown_lock;
 	zfs_teardown_inactive_lock_t z_teardown_inactive_lock;
 	list_t z_all_znodes; /* all vnodes in the fs */
-	uint64_t z_nr_znodes; /* number of znodes in the fs */
 	kmutex_t z_znodes_lock; /* lock for z_all_znodes */
 	struct zfsctl_root *z_ctldir; /* .zfs directory pointer */
 	boolean_t z_show_ctldir; /* expose .zfs in the root dir */
@@ -105,7 +105,6 @@ struct zfsvfs {
 	rrmlock_t z_teardown_lock;
 	krwlock_t z_teardown_inactive_lock;
 	list_t z_all_znodes; /* all znodes in the fs */
-	uint64_t z_nr_znodes; /* number of znodes in the fs */
 	unsigned long z_rollback_time; /* last online rollback time */
 	unsigned long z_snap_defer_time; /* last snapshot unmount deferral */
 	kmutex_t z_znodes_lock; /* lock for z_all_znodes */
@@ -1154,7 +1154,6 @@ zfsvfs_free(zfsvfs_t *zfsvfs)
 
 	mutex_destroy(&zfsvfs->z_znodes_lock);
 	mutex_destroy(&zfsvfs->z_lock);
-	ASSERT3U(zfsvfs->z_nr_znodes, ==, 0);
 	list_destroy(&zfsvfs->z_all_znodes);
 	ZFS_TEARDOWN_DESTROY(zfsvfs);
 	ZFS_TEARDOWN_INACTIVE_DESTROY(zfsvfs);
@@ -1558,12 +1557,11 @@ zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
 		 * may add the parents of dir-based xattrs to the taskq
 		 * so we want to wait for these.
 		 *
-		 * We can safely read z_nr_znodes without locking because the
-		 * VFS has already blocked operations which add to the
-		 * z_all_znodes list and thus increment z_nr_znodes.
+		 * We can safely check z_all_znodes for being empty because the
+		 * VFS has already blocked operations which add to it.
 		 */
 		int round = 0;
-		while (zfsvfs->z_nr_znodes > 0) {
+		while (!list_is_empty(&zfsvfs->z_all_znodes)) {
 			taskq_wait_outstanding(dsl_pool_zrele_taskq(
 			    dmu_objset_pool(zfsvfs->z_os)), 0);
 			if (++round > 1 && !unmounting)
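Assembled from the hunk above, the teardown wait now reads roughly as follows. The statement after the if lies outside the hunk, so the break shown here is an assumption about the surrounding code rather than part of the diff.

		/*
		 * We can safely check z_all_znodes for being empty because the
		 * VFS has already blocked operations which add to it.
		 */
		int round = 0;
		while (!list_is_empty(&zfsvfs->z_all_znodes)) {
			taskq_wait_outstanding(dsl_pool_zrele_taskq(
			    dmu_objset_pool(zfsvfs->z_os)), 0);
			if (++round > 1 && !unmounting)
				break;	/* assumed continuation, outside the hunk */
		}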
@@ -537,7 +537,6 @@ zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
 
 	mutex_enter(&zfsvfs->z_znodes_lock);
 	list_insert_tail(&zfsvfs->z_all_znodes, zp);
-	zfsvfs->z_nr_znodes++;
 	zp->z_zfsvfs = zfsvfs;
 	mutex_exit(&zfsvfs->z_znodes_lock);
 
@@ -1286,7 +1285,6 @@ zfs_znode_free(znode_t *zp)
 	mutex_enter(&zfsvfs->z_znodes_lock);
 	POINTER_INVALIDATE(&zp->z_zfsvfs);
 	list_remove(&zfsvfs->z_all_znodes, zp);
-	zfsvfs->z_nr_znodes--;
 	mutex_exit(&zfsvfs->z_znodes_lock);
 
 #if __FreeBSD_version >= 1300139
@@ -537,7 +537,6 @@ zfsctl_inode_alloc(zfsvfs_t *zfsvfs, uint64_t id,
 
 	mutex_enter(&zfsvfs->z_znodes_lock);
 	list_insert_tail(&zfsvfs->z_all_znodes, zp);
-	zfsvfs->z_nr_znodes++;
 	membar_producer();
 	mutex_exit(&zfsvfs->z_znodes_lock);
 
@@ -1330,12 +1330,11 @@ zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
 		 * may add the parents of dir-based xattrs to the taskq
 		 * so we want to wait for these.
 		 *
-		 * We can safely read z_nr_znodes without locking because the
-		 * VFS has already blocked operations which add to the
-		 * z_all_znodes list and thus increment z_nr_znodes.
+		 * We can safely check z_all_znodes for being empty because the
+		 * VFS has already blocked operations which add to it.
 		 */
 		int round = 0;
-		while (zfsvfs->z_nr_znodes > 0) {
+		while (!list_is_empty(&zfsvfs->z_all_znodes)) {
 			taskq_wait_outstanding(dsl_pool_zrele_taskq(
 			    dmu_objset_pool(zfsvfs->z_os)), 0);
 			if (++round > 1 && !unmounting)
@@ -390,7 +390,6 @@ zfs_inode_destroy(struct inode *ip)
 	mutex_enter(&zfsvfs->z_znodes_lock);
 	if (list_link_active(&zp->z_link_node)) {
 		list_remove(&zfsvfs->z_all_znodes, zp);
-		zfsvfs->z_nr_znodes--;
 	}
 	mutex_exit(&zfsvfs->z_znodes_lock);
 
@@ -641,7 +640,6 @@ zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
 
 	mutex_enter(&zfsvfs->z_znodes_lock);
 	list_insert_tail(&zfsvfs->z_all_znodes, zp);
-	zfsvfs->z_nr_znodes++;
 	mutex_exit(&zfsvfs->z_znodes_lock);
 
 	if (links > 0)