Throttle number of freed slabs based on nr_to_scan

Previously, the SPL tried to maintain Solaris semantics by freeing
all available (empty) slabs from its slab caches when the shrinker
was called. This is not desirable when running on Linux. To make
the SPL shrinker more Linux-friendly, the number of slabs freed
from each slab cache is now derived from nr_to_scan and
skc_slab_objs.
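
For illustration only, here is a minimal userland sketch of that
derivation (the MAX()/fls64() stand-ins and the sample values are
assumptions for the example, not the SPL code itself):

    #include <stdio.h>

    /* Userland stand-ins for the kernel's MAX() and fls64() helpers. */
    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    static int fls64(unsigned long long x)
    {
        int bit = 0;
        while (x) {
            bit++;
            x >>= 1;
        }
        return bit; /* 1-based index of the highest set bit, 0 if x == 0 */
    }

    int main(void)
    {
        unsigned long nr_to_scan = 128; /* objects the VM asked us to free */
        unsigned long slab_objs = 32;   /* skc_slab_objs: objects per slab */

        /* Scale the object request down to a slab count for this cache,
         * reaping at least one slab so the request always makes progress. */
        unsigned long count = MAX(nr_to_scan >> fls64(slab_objs), 1);

        printf("reap %lu slab(s)\n", count); /* 128 >> 6 = 2 */
        return 0;
    }

The shift by fls64(skc_slab_objs) is a cheap approximate division by the
per-slab object count, so caches packing more objects per slab give up
fewer slabs for the same nr_to_scan.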

Additionally, an accounting bug was fixed in spl_slab_reclaim()
which could cause us to reclaim one more slab than requested.
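
The off-by-one is easy to see in a stripped-down model of the loop's
exit test (the counters below are invented for the example, not the
spl_slab_reclaim() code itself):

    #include <stdio.h>

    int main(void)
    {
        int count = 2;      /* slabs requested */
        int reclaimed = 0;
        int i;

        for (i = 0; ; i++) {
            /* The old test 'i > count' let the body run for i = 0, 1
             * and 2, freeing three slabs; 'i >= count' stops after
             * exactly two. */
            if (count && i >= count)
                break;
            reclaimed++;    /* stand-in for freeing one slab */
        }

        printf("reclaimed %d of %d requested\n", reclaimed, count);
        return 0;
    }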

Signed-off-by: Prakash Surya <surya1@llnl.gov>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #101
commit cef7605c34 (parent ef6f91ce0c)
Authored by Prakash Surya on 2012-04-27 15:10:02 -07:00; committed by Brian Behlendorf
2 changed files with 11 additions and 8 deletions

@@ -418,7 +418,7 @@ extern void spl_kmem_cache_set_move(kmem_cache_t *,
 extern void spl_kmem_cache_destroy(spl_kmem_cache_t *skc);
 extern void *spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags);
 extern void spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj);
-extern void spl_kmem_cache_reap_now(spl_kmem_cache_t *skc);
+extern void spl_kmem_cache_reap_now(spl_kmem_cache_t *skc, int count);
 extern void spl_kmem_reap(void);
 
 int spl_kmem_init_kallsyms_lookup(void);
@@ -431,7 +431,8 @@ void spl_kmem_fini(void);
 #define kmem_cache_destroy(skc)         spl_kmem_cache_destroy(skc)
 #define kmem_cache_alloc(skc, flags)    spl_kmem_cache_alloc(skc, flags)
 #define kmem_cache_free(skc, obj)       spl_kmem_cache_free(skc, obj)
-#define kmem_cache_reap_now(skc)        spl_kmem_cache_reap_now(skc)
+#define kmem_cache_reap_now(skc)        \
+        spl_kmem_cache_reap_now(skc, skc->skc_reap)
 #define kmem_reap()                     spl_kmem_reap()
 #define kmem_virt(ptr)                  (((ptr) >= (void *)VMALLOC_START) && \
                                         ((ptr) < (void *)VMALLOC_END))

@@ -1087,7 +1087,7 @@ spl_slab_reclaim(spl_kmem_cache_t *skc, int count, int flag)
                  * scanning.  Additionally, stop when reaching the target
                  * reclaim 'count' if a non-zero threshold is given.
                  */
-                if ((sks->sks_ref > 0) || (count && i > count))
+                if ((sks->sks_ref > 0) || (count && i >= count))
                         break;
 
                 if (time_after(jiffies,sks->sks_age+skc->skc_delay*HZ)||flag) {
@@ -1857,8 +1857,9 @@ EXPORT_SYMBOL(spl_kmem_cache_free);
  * is called.  The shrinker should return the number of free objects
  * in the cache when called with nr_to_scan == 0 but not attempt to
  * free any objects.  When nr_to_scan > 0 it is a request that nr_to_scan
- * objects should be freed, because Solaris semantics are to free
- * all available objects we may free more objects than requested.
+ * objects should be freed, which differs from Solaris semantics.
+ * Solaris semantics are to free all available objects which may (and
+ * probably will) be more objects than the requested nr_to_scan.
  */
 static int
 __spl_kmem_cache_generic_shrinker(struct shrinker *shrink,
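
As a rough userland model of that contract (cache_free_objects and
model_shrinker are invented for the example; the real callback relies
on the SPL's own accounting):

    #include <stdio.h>

    static int cache_free_objects = 100; /* pretend free-object count */

    /* nr_to_scan == 0 is a query: report the count, free nothing.
     * nr_to_scan > 0 asks that at most that many objects be freed. */
    static int model_shrinker(unsigned long nr_to_scan)
    {
        if (nr_to_scan > 0) {
            int n = nr_to_scan < (unsigned long)cache_free_objects ?
                (int)nr_to_scan : cache_free_objects;
            cache_free_objects -= n;
        }
        return cache_free_objects; /* objects still freeable */
    }

    int main(void)
    {
        printf("query: %d free\n", model_shrinker(0));           /* 100 */
        printf("after scan(30): %d free\n", model_shrinker(30)); /* 70 */
        return 0;
    }
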
@@ -1870,7 +1871,8 @@ __spl_kmem_cache_generic_shrinker(struct shrinker *shrink,
         down_read(&spl_kmem_cache_sem);
         list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
                 if (sc->nr_to_scan)
-                        spl_kmem_cache_reap_now(skc);
+                        spl_kmem_cache_reap_now(skc,
+                           MAX(sc->nr_to_scan >> fls64(skc->skc_slab_objs), 1));
 
                 /*
                  * Presume everything alloc'ed in reclaimable, this ensures
@@ -1896,7 +1898,7 @@ SPL_SHRINKER_CALLBACK_WRAPPER(spl_kmem_cache_generic_shrinker);
  * effort and we do not want to thrash creating and destroying slabs.
  */
 void
-spl_kmem_cache_reap_now(spl_kmem_cache_t *skc)
+spl_kmem_cache_reap_now(spl_kmem_cache_t *skc, int count)
 {
         SENTRY;
 
@@ -1914,7 +1916,7 @@ spl_kmem_cache_reap_now(spl_kmem_cache_t *skc)
         if (skc->skc_reclaim)
                 skc->skc_reclaim(skc->skc_private);
 
-        spl_slab_reclaim(skc, skc->skc_reap, 0);
+        spl_slab_reclaim(skc, count, 0);
         clear_bit(KMC_BIT_REAPING, &skc->skc_flags);
         atomic_dec(&skc->skc_ref);
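
Taken together, the two call paths after this change are (lifted from
the hunks above):

    /* Direct callers keep the old behavior via the macro ... */
    kmem_cache_reap_now(skc); /* -> spl_kmem_cache_reap_now(skc, skc->skc_reap) */

    /* ... while the shrinker passes a throttled, per-cache count. */
    spl_kmem_cache_reap_now(skc,
        MAX(sc->nr_to_scan >> fls64(skc->skc_slab_objs), 1));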