Add kmem cache accessors

Make the metaslab code platform-agnostic again by adding kmem cache
accessor functions which can be implemented by each platform.

Reviewed-by: Paul Dagnelie <pcd@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Reviewed-by: Ryan Moeller <ryan@ixsystems.com>
Signed-off-by: Matt Macy <mmacy@FreeBSD.org>
Closes #9404
Author:    Matthew Macy <mmacy@FreeBSD.org>
Date:      2019-10-10 15:45:52 -07:00
Committer: Brian Behlendorf
Commit:    6501906280
Parent:    eedb3a62b9

 3 changed files, 21 insertions(+), 4 deletions(-)
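The point of the new interface is that common code should no longer reach into the Linux SPL's skc_* fields directly. As a rough, purely illustrative sketch (not part of this commit), another platform could provide the same two accessors on top of whatever statistics its native kmem cache keeps; the kc_* field names below are hypothetical:

#include <stdint.h>

/*
 * Hypothetical platform-specific cache type; a real port would wrap
 * the bookkeeping of its own native slab/kmem allocator.
 */
typedef struct kmem_cache {
	uint64_t	kc_objects_inuse;	/* objects currently cached */
	uint64_t	kc_object_size;		/* size of one cached object */
} kmem_cache_t;

uint64_t
spl_kmem_cache_inuse(kmem_cache_t *cache)
{
	return (cache->kc_objects_inuse);
}

uint64_t
spl_kmem_cache_entry_size(kmem_cache_t *cache)
{
	return (cache->kc_object_size);
}

The Linux SPL implementation further down is the real version of this pattern, backed by skc_obj_total and skc_obj_size.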


@@ -218,6 +218,8 @@ extern void spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj);
 extern void spl_kmem_cache_set_allocflags(spl_kmem_cache_t *skc, gfp_t flags);
 extern void spl_kmem_cache_reap_now(spl_kmem_cache_t *skc, int count);
 extern void spl_kmem_reap(void);
+extern uint64_t spl_kmem_cache_inuse(kmem_cache_t *cache);
+extern uint64_t spl_kmem_cache_entry_size(kmem_cache_t *cache);
 
 #define kmem_cache_create(name, size, align, ctor, dtor, rclm, priv, vmp, fl) \
 	spl_kmem_cache_create(name, size, align, ctor, dtor, rclm, priv, vmp, fl)


@@ -257,6 +257,20 @@ spl_obj_size(spl_kmem_cache_t *skc)
 	    P2ROUNDUP_TYPED(sizeof (spl_kmem_obj_t), align, uint32_t));
 }
+
+uint64_t
+spl_kmem_cache_inuse(kmem_cache_t *cache)
+{
+	return (cache->skc_obj_total);
+}
+EXPORT_SYMBOL(spl_kmem_cache_inuse);
+
+uint64_t
+spl_kmem_cache_entry_size(kmem_cache_t *cache)
+{
+	return (cache->skc_obj_size);
+}
+EXPORT_SYMBOL(spl_kmem_cache_entry_size);
 
 /*
  * Lookup the spl_kmem_object_t for an object given that object.
  */
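With the accessors exported, platform-independent callers can estimate how much memory a cache is holding without dereferencing skc_obj_total or skc_obj_size themselves; that product is exactly what the metaslab code below computes. A minimal sketch, where kmem_cache_footprint() is a hypothetical helper name:

/*
 * Approximate bytes held by a kmem cache: the object count reported by
 * spl_kmem_cache_inuse() times the per-object size reported by
 * spl_kmem_cache_entry_size().
 */
static uint64_t
kmem_cache_footprint(kmem_cache_t *cache)
{
	return (spl_kmem_cache_inuse(cache) *
	    spl_kmem_cache_entry_size(cache));
}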


@@ -2142,8 +2142,8 @@ metaslab_potentially_evict(metaslab_class_t *mc)
 {
 #ifdef _KERNEL
 	uint64_t allmem = arc_all_memory();
-	uint64_t inuse = zfs_btree_leaf_cache->skc_obj_total;
-	uint64_t size = zfs_btree_leaf_cache->skc_obj_size;
+	uint64_t inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache);
+	uint64_t size = spl_kmem_cache_entry_size(zfs_btree_leaf_cache);
 	int tries = 0;
 	for (; allmem * zfs_metaslab_mem_limit / 100 < inuse * size &&
 	    tries < multilist_get_num_sublists(mc->mc_metaslab_txg_list) * 2;
@@ -2180,7 +2180,8 @@ metaslab_potentially_evict(metaslab_class_t *mc)
 			 */
 			if (msp->ms_loading) {
 				msp = next_msp;
-				inuse = zfs_btree_leaf_cache->skc_obj_total;
+				inuse =
+				    spl_kmem_cache_inuse(zfs_btree_leaf_cache);
 				continue;
 			}
 			/*
@@ -2202,7 +2203,7 @@ metaslab_potentially_evict(metaslab_class_t *mc)
 			}
 			mutex_exit(&msp->ms_lock);
 			msp = next_msp;
-			inuse = zfs_btree_leaf_cache->skc_obj_total;
+			inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache);
 		}
 	}
 #endif
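The loop condition in metaslab_potentially_evict() keeps evicting while the btree leaf cache's footprint (inuse * size) exceeds zfs_metaslab_mem_limit percent of total memory (allmem). A standalone sketch of that arithmetic; the 25% limit, 16 GiB of memory, and 4 KiB leaf size are illustrative values only, not necessarily the tunable's defaults:

#include <stdint.h>
#include <stdio.h>

/* Nonzero when the cache footprint exceeds the allowed share of memory. */
static int
over_metaslab_mem_limit(uint64_t allmem, uint64_t inuse, uint64_t size,
    uint64_t limit_pct)
{
	return (allmem * limit_pct / 100 < inuse * size);
}

int
main(void)
{
	uint64_t allmem = 16ULL << 30;	/* 16 GiB of system memory */
	uint64_t inuse = 3000000;	/* cached btree leaf objects */
	uint64_t size = 4096;		/* bytes per leaf object */

	/* ~11.4 GiB in the cache vs. a 4 GiB (25%) budget -> evict. */
	printf("%d\n", over_metaslab_mem_limit(allmem, inuse, size, 25));
	return (0);
}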