mirror of
https://git.proxmox.com/git/mirror_zfs.git
synced 2024-11-17 18:11:00 +03:00
ab26409db7
The Linux 3.1 kernel has introduced the concept of per-filesystem shrinkers which are directly associated with a super block. Prior to this change there was one shared global shrinker. The zfs code relied on being able to call the global shrinker when the arc_meta_limit was exceeded. This would cause the VFS to drop references on a fraction of the dentries in the dcache. The ARC could then safely reclaim the memory used by these entries and honor the arc_meta_limit.

Unfortunately, when per-filesystem shrinkers were added the old interfaces were made unavailable. This change adds support for the new per-filesystem shrinker interface so we can continue to honor the arc_meta_limit. The major benefit of the new interface is that we can now target only the zfs filesystem for dentry and inode pruning. Thus we can minimize any impact on the caching of other filesystems.

In the context of making this change several other important issues related to managing the ARC were addressed; they include:

* The dnlc_reduce_cache() function which was called by the ARC to drop dentries for the Posix layer was replaced with a generic zfs_prune_t callback. The ZPL layer now registers a callback to drop these dentries, removing a layering violation which dates back to the Solaris code. This callback can also be used by other ARC consumers such as Lustre.

    arc_add_prune_callback()
    arc_remove_prune_callback()

* The arc_reduce_dnlc_percent module option has been changed to arc_meta_prune for clarity. The dnlc functions are specific to Solaris's VFS and have already been largely eliminated. The replacement tunable now represents the number of bytes the prune callback will request when invoked.

* Less aggressively invoke the prune callback. We used to call this whenever we exceeded the arc_meta_limit; however, that's not strictly correct since it results in overzealous reclaim of dentries and inodes. It is now only called once the arc_meta_limit is exceeded and every effort has been made to evict other data from the ARC cache.

* More promptly manage exceeding the arc_meta_limit. When reading meta data into the cache, if a buffer was unable to be recycled, notify the arc_reclaim thread to invoke the required prune.

* Added an arcstat_prune kstat which is incremented when the ARC is forced to request that a consumer prune its cache. Remember this will only occur when the ARC has no other choice. If it can evict buffers safely without invoking the prune callback it will.

* This change is also expected to resolve the unexpected collapses of the ARC cache. These would occur because, when just the arc_meta_limit was exceeded, reclaim pressure would be exerted on the arc_c value via arc_shrink(). This effectively shrunk the entire cache when really we just needed to reclaim meta data.

Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #466
Closes #292
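The consumer-facing side of the new interface is small. The sketch below shows how an ARC consumer (such as the ZPL or Lustre) might register and remove a prune callback; the type and function signatures are assumptions inferred from the commit message above, not a verbatim copy of the headers this commit introduces.

/*
 * Hedged sketch: the names arc_add_prune_callback(), arc_remove_prune_callback(),
 * arc_prune_t and the callback signature are assumptions based on the commit
 * message; my_cache_* is a hypothetical consumer invented for illustration.
 */
#include <sys/arc.h>

typedef struct my_cache {
        arc_prune_t *mc_prune;          /* handle returned on registration */
} my_cache_t;

/*
 * Called by the ARC once arc_meta_limit has been exceeded and other data
 * could not be evicted; 'bytes' is roughly how much cached meta data the
 * ARC would like the consumer to release.
 */
static void
my_cache_prune(int64_t bytes, void *private)
{
        my_cache_t *mc = private;

        /* Drop approximately 'bytes' worth of cached dentries/inodes here. */
        (void) mc;
        (void) bytes;
}

static void
my_cache_init(my_cache_t *mc)
{
        /* Register the callback; each forced prune bumps arcstat_prune. */
        mc->mc_prune = arc_add_prune_callback(my_cache_prune, mc);
}

static void
my_cache_fini(my_cache_t *mc)
{
        /* Unregister before the consumer's cache goes away. */
        arc_remove_prune_callback(mc->mc_prune);
}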
319 lines | 7.7 KiB | C
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2011, Lawrence Livermore National Security, LLC.
 */


#include <sys/zfs_vfsops.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_znode.h>
#include <sys/zpl.h>


static struct inode *
zpl_inode_alloc(struct super_block *sb)
{
        struct inode *ip;

        VERIFY3S(zfs_inode_alloc(sb, &ip), ==, 0);
        ip->i_version = 1;

        return (ip);
}

static void
zpl_inode_destroy(struct inode *ip)
{
        ASSERT(atomic_read(&ip->i_count) == 0);
        zfs_inode_destroy(ip);
}
/*
 * When ->drop_inode() is called its return value indicates if the
 * inode should be evicted from the inode cache. If the inode is
 * unhashed and has no links the default policy is to evict it
 * immediately.
 *
 * Prior to 2.6.36 this eviction was accomplished by the vfs calling
 * ->delete_inode(). It was ->delete_inode()'s responsibility to
 * truncate the inode pages and call clear_inode(). The call to
 * clear_inode() synchronously invalidates all the buffers and
 * calls ->clear_inode(). It was ->clear_inode()'s responsibility
 * to clean up any filesystem specific data before freeing the inode.
 *
 * This elaborate mechanism was replaced by ->evict_inode() which
 * does the job of both ->delete_inode() and ->clear_inode(). It
 * will be called exactly once, and when it returns the inode must
 * be in a state where it can simply be freed. The ->evict_inode()
 * callback must minimally truncate the inode pages, and call
 * end_writeback() to complete all outstanding writeback for the
 * inode. After this is complete ->evict_inode() can clean up any
 * remaining filesystem specific data.
 */
#ifdef HAVE_EVICT_INODE
static void
zpl_evict_inode(struct inode *ip)
{
        truncate_setsize(ip, 0);
        end_writeback(ip);
        zfs_inactive(ip);
}

#else

static void
zpl_clear_inode(struct inode *ip)
{
        zfs_inactive(ip);
}

static void
zpl_inode_delete(struct inode *ip)
{
        truncate_setsize(ip, 0);
        clear_inode(ip);
}

#endif /* HAVE_EVICT_INODE */

static void
zpl_put_super(struct super_block *sb)
{
        int error;

        error = -zfs_umount(sb);
        ASSERT3S(error, <=, 0);
}

static int
zpl_sync_fs(struct super_block *sb, int wait)
{
        cred_t *cr = CRED();
        int error;

        crhold(cr);
        error = -zfs_sync(sb, wait, cr);
        crfree(cr);
        ASSERT3S(error, <=, 0);

        return (error);
}

static int
zpl_statfs(struct dentry *dentry, struct kstatfs *statp)
{
        int error;

        error = -zfs_statvfs(dentry, statp);
        ASSERT3S(error, <=, 0);

        return (error);
}

static int
zpl_remount_fs(struct super_block *sb, int *flags, char *data)
{
        int error;
        error = -zfs_remount(sb, flags, data);
        ASSERT3S(error, <=, 0);

        return (error);
}

static int
zpl_show_options(struct seq_file *seq, struct vfsmount *vfsp)
{
        struct super_block *sb = vfsp->mnt_sb;
        zfs_sb_t *zsb = sb->s_fs_info;

        /*
         * The Linux VFS automatically handles the following flags:
         * MNT_NOSUID, MNT_NODEV, MNT_NOEXEC, MNT_NOATIME, MNT_READONLY
         */

        seq_printf(seq, ",%s", zsb->z_flags & ZSB_XATTR ? "xattr" : "noxattr");

        return (0);
}

static int
zpl_fill_super(struct super_block *sb, void *data, int silent)
{
        int error;

        error = -zfs_domount(sb, data, silent);
        ASSERT3S(error, <=, 0);

        return (error);
}

#ifdef HAVE_MOUNT_NODEV
static struct dentry *
zpl_mount(struct file_system_type *fs_type, int flags,
    const char *osname, void *data)
{
        zpl_mount_data_t zmd = { osname, data };

        return mount_nodev(fs_type, flags, &zmd, zpl_fill_super);
}
#else
static int
zpl_get_sb(struct file_system_type *fs_type, int flags,
    const char *osname, void *data, struct vfsmount *mnt)
{
        zpl_mount_data_t zmd = { osname, data };

        return get_sb_nodev(fs_type, flags, &zmd, zpl_fill_super, mnt);
}
#endif /* HAVE_MOUNT_NODEV */

static void
zpl_kill_sb(struct super_block *sb)
{
#ifdef HAVE_SNAPSHOT
        zfs_sb_t *zsb = sb->s_fs_info;

        if (zsb && dmu_objset_is_snapshot(zsb->z_os))
                zfs_snap_destroy(zsb);
#endif /* HAVE_SNAPSHOT */

        kill_anon_super(sb);
}
#ifdef HAVE_SHRINK
/*
 * Linux 3.1 - 3.x API
 *
 * The Linux 3.1 API introduced per-sb cache shrinkers to replace the
 * global ones. This gives us a mechanism to cleanly target a specific
 * zfs file system when the dnode and inode caches grow too large.
 *
 * In addition, the 3.0 kernel added the iterate_supers_type() helper
 * function which is used to safely walk all of the zfs file systems.
 */
static void
zpl_prune_sb(struct super_block *sb, void *arg)
{
        int objects = 0;
        int error;

        error = -zfs_sb_prune(sb, *(unsigned long *)arg, &objects);
        ASSERT3S(error, <=, 0);

        return;
}

void
zpl_prune_sbs(int64_t bytes_to_scan, void *private)
{
        unsigned long nr_to_scan = (bytes_to_scan / sizeof(znode_t));

        iterate_supers_type(&zpl_fs_type, zpl_prune_sb, &nr_to_scan);
        kmem_reap();
}
#else
/*
 * Linux 2.6.x - 3.0 API
 *
 * These best effort interfaces are provided by the SPL to induce
 * the Linux VM subsystem to reclaim a fraction of both the dnode and
 * inode caches. Ideally, we want to target just the zfs file systems;
 * however, our only option is to reclaim from them all.
 */
void
zpl_prune_sbs(int64_t bytes_to_scan, void *private)
{
        unsigned long nr_to_scan = (bytes_to_scan / sizeof(znode_t));

        shrink_dcache_memory(nr_to_scan, GFP_KERNEL);
        shrink_icache_memory(nr_to_scan, GFP_KERNEL);
        kmem_reap();
}
#endif /* HAVE_SHRINK */

#ifdef HAVE_NR_CACHED_OBJECTS
static int
zpl_nr_cached_objects(struct super_block *sb)
{
        zfs_sb_t *zsb = sb->s_fs_info;
        int nr;

        mutex_enter(&zsb->z_znodes_lock);
        nr = zsb->z_nr_znodes;
        mutex_exit(&zsb->z_znodes_lock);

        return (nr);
}
#endif /* HAVE_NR_CACHED_OBJECTS */

#ifdef HAVE_FREE_CACHED_OBJECTS
/*
 * Attempt to evict some meta data from the cache. The ARC operates in
 * terms of bytes while the Linux VFS uses objects. Because this is
 * just a best effort eviction and the exact values aren't critical, we
 * extrapolate from an object count to a byte size using the znode_t size.
 */
static void
zpl_free_cached_objects(struct super_block *sb, int nr_to_scan)
{
        arc_adjust_meta(nr_to_scan * sizeof(znode_t), B_FALSE);
}
#endif /* HAVE_FREE_CACHED_OBJECTS */

const struct super_operations zpl_super_operations = {
        .alloc_inode            = zpl_inode_alloc,
        .destroy_inode          = zpl_inode_destroy,
        .dirty_inode            = NULL,
        .write_inode            = NULL,
        .drop_inode             = NULL,
#ifdef HAVE_EVICT_INODE
        .evict_inode            = zpl_evict_inode,
#else
        .clear_inode            = zpl_clear_inode,
        .delete_inode           = zpl_inode_delete,
#endif /* HAVE_EVICT_INODE */
        .put_super              = zpl_put_super,
        .write_super            = NULL,
        .sync_fs                = zpl_sync_fs,
        .statfs                 = zpl_statfs,
        .remount_fs             = zpl_remount_fs,
        .show_options           = zpl_show_options,
        .show_stats             = NULL,
#ifdef HAVE_NR_CACHED_OBJECTS
        .nr_cached_objects      = zpl_nr_cached_objects,
#endif /* HAVE_NR_CACHED_OBJECTS */
#ifdef HAVE_FREE_CACHED_OBJECTS
        .free_cached_objects    = zpl_free_cached_objects,
#endif /* HAVE_FREE_CACHED_OBJECTS */
};

struct file_system_type zpl_fs_type = {
        .owner                  = THIS_MODULE,
        .name                   = ZFS_DRIVER,
#ifdef HAVE_MOUNT_NODEV
        .mount                  = zpl_mount,
#else
        .get_sb                 = zpl_get_sb,
#endif /* HAVE_MOUNT_NODEV */
        .kill_sb                = zpl_kill_sb,
};
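For completeness, the sketch below shows how a file_system_type such as zpl_fs_type is typically hooked into the kernel at module load time using the standard register_filesystem()/unregister_filesystem() helpers. The example_zpl_init()/example_zpl_exit() names are hypothetical; ZFS performs this registration in its own module setup code elsewhere in the tree.

/*
 * Hypothetical illustration only: wiring zpl_fs_type into the VFS.
 * A real module would attach these with module_init()/module_exit().
 */
static int __init
example_zpl_init(void)
{
        return (register_filesystem(&zpl_fs_type));
}

static void __exit
example_zpl_exit(void)
{
        (void) unregister_filesystem(&zpl_fs_type);
}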