Linux 3.12 compat: shrinker semantics

The new shrinker API as of Linux 3.12 modifies "struct shrinker" by
replacing the @shrink callback with the pair of @count_objects and
@scan_objects.  It also requires @scan_objects to return the number of
objects it actually freed, whereas the previous @shrink callback
returned the number of remaining freeable objects.

This patch adds support for the new @scan_objects return value semantics
and updates the splat shrinker test case appropriately.

Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Tim Chase <tim@chase2k.com>
Closes #403
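
For context, the two callback shapes look roughly like this (paraphrased
from include/linux/shrinker.h around Linux 3.12; the example instance and
its callback names below are illustrative, not part of this patch):

/* Before 3.12: a single callback.  Called with sc->nr_to_scan == 0 it
 * returns the number of freeable objects; called with nr_to_scan > 0 it
 * frees objects and returns the number still freeable, or -1 to abort. */
int (*shrink)(struct shrinker *, struct shrink_control *sc);

/* 3.12 and later: split callbacks.  count_objects() returns the number
 * of freeable objects; scan_objects() returns the number actually freed,
 * or SHRINK_STOP to abort scanning. */
unsigned long (*count_objects)(struct shrinker *, struct shrink_control *sc);
unsigned long (*scan_objects)(struct shrinker *, struct shrink_control *sc);

static struct shrinker example_shrinker = {
        .count_objects  = example_count_objects,
        .scan_objects   = example_scan_objects,
        .seeks          = DEFAULT_SEEKS,
};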
Tim Chase 2014-10-02 07:40:05 -05:00 committed by Brian Behlendorf
parent 46c936756e
commit 802a4a2ad5
3 changed files with 57 additions and 25 deletions


@@ -199,4 +199,11 @@ fn ## _scan_objects(struct shrinker *shrink, struct shrink_control *sc) \
 #error "Unknown shrinker callback"
 #endif
 
+#if defined(HAVE_SPLIT_SHRINKER_CALLBACK)
+typedef unsigned long spl_shrinker_t;
+#else
+typedef int spl_shrinker_t;
+#define SHRINK_STOP (-1)
+#endif
+
 #endif /* SPL_MM_COMPAT_H */
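
This shim lets a single callback body compile against either kernel
generation.  A minimal sketch of the intended usage, assuming hypothetical
my_cache_count() and my_cache_free() helpers (not part of SPL):

static spl_shrinker_t
my_shrinker_fn(struct shrinker *shrink, struct shrink_control *sc)
{
        unsigned long freed;

        if (sc->nr_to_scan == 0)
                return (my_cache_count());      /* query: freeable objects */

        freed = my_cache_free(sc->nr_to_scan);  /* free up to nr_to_scan */

#ifdef HAVE_SPLIT_SHRINKER_CALLBACK
        return (freed);                 /* >= 3.12: report objects freed */
#else
        return (my_cache_count());      /* older: report objects remaining */
#endif
}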


@@ -64,7 +64,7 @@ MODULE_PARM_DESC(spl_kmem_cache_expire, "By age (0x1) or low memory (0x2)");
  * setting this value to KMC_RECLAIM_ONCE limits how aggressively the cache
  * is reclaimed.  This may increase the likelihood of out of memory events.
  */
-unsigned int spl_kmem_cache_reclaim = 0;
+unsigned int spl_kmem_cache_reclaim = 0 /* KMC_RECLAIM_ONCE */;
 module_param(spl_kmem_cache_reclaim, uint, 0644);
 MODULE_PARM_DESC(spl_kmem_cache_reclaim, "Single reclaim pass (0x1)");
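
Since the parameter is registered with mode 0644, it is presumably also
writable at runtime through sysfs (e.g. via
/sys/module/spl/parameters/spl_kmem_cache_reclaim), so the single-pass
reclaim behavior can be toggled without a module reload.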
@@ -2048,14 +2048,24 @@ EXPORT_SYMBOL(spl_kmem_cache_free);
  * report that they contain unused objects.  Because of this we only
  * register one shrinker function in the shim layer for all slab caches.
  * We always attempt to shrink all caches when this generic shrinker
- * is called.  The shrinker should return the number of free objects
- * in the cache when called with nr_to_scan == 0 but not attempt to
- * free any objects.  When nr_to_scan > 0 it is a request that nr_to_scan
- * objects should be freed, which differs from Solaris semantics.
- * Solaris semantics are to free all available objects which may (and
- * probably will) be more objects than the requested nr_to_scan.
+ * is called.
+ *
+ * If sc->nr_to_scan is zero, the caller is requesting a query of the
+ * number of objects which can potentially be freed.  If it is nonzero,
+ * the request is to free that many objects.
+ *
+ * Linux kernels >= 3.12 have the count_objects and scan_objects callbacks
+ * in struct shrinker and also require the shrinker to return the number
+ * of objects freed.
+ *
+ * Older kernels require the shrinker to return the number of freeable
+ * objects following the freeing of nr_to_free.
+ *
+ * Linux semantics differ from those under Solaris, which are to
+ * free all available objects which may (and probably will) be more
+ * objects than the requested nr_to_scan.
  */
-static int
+static spl_shrinker_t
 __spl_kmem_cache_generic_shrinker(struct shrinker *shrink,
     struct shrink_control *sc)
 {
@@ -2064,17 +2074,22 @@ __spl_kmem_cache_generic_shrinker(struct shrinker *shrink,
 	down_read(&spl_kmem_cache_sem);
 	list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
-		if (sc->nr_to_scan)
+		if (sc->nr_to_scan) {
+#ifdef HAVE_SPLIT_SHRINKER_CALLBACK
+			uint64_t oldalloc = skc->skc_obj_alloc;
+			spl_kmem_cache_reap_now(skc,
+			    MAX(sc->nr_to_scan >> fls64(skc->skc_slab_objs), 1));
+			if (oldalloc > skc->skc_obj_alloc)
+				alloc += oldalloc - skc->skc_obj_alloc;
+#else
 			spl_kmem_cache_reap_now(skc,
 			    MAX(sc->nr_to_scan >> fls64(skc->skc_slab_objs), 1));
-
-		/*
-		 * Presume everything alloc'ed is reclaimable, this ensures
-		 * we are called again with nr_to_scan > 0 so can try and
-		 * reclaim.  The exact number is not important either so
-		 * we forgo taking this already highly contented lock.
-		 */
-		alloc += skc->skc_obj_alloc;
+			alloc += skc->skc_obj_alloc;
+#endif /* HAVE_SPLIT_SHRINKER_CALLBACK */
+		} else {
+			/* Request to query number of freeable objects */
+			alloc += skc->skc_obj_alloc;
+		}
 	}
 
 	up_read(&spl_kmem_cache_sem);
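
The reap count passed above scales the VM's request down per cache:
shifting by fls64(skc->skc_slab_objs) divides nr_to_scan by roughly the
number of objects per slab (rounded up to a power of two), since reaping
proceeds in whole slabs, while MAX(..., 1) guarantees forward progress.
A standalone illustration of the arithmetic (plain C; fls64_demo merely
approximates the kernel's fls64(), and the numbers are examples):

#include <stdio.h>

/* Rough stand-in for the kernel's fls64(): 1-based index of the highest
 * set bit, so fls64_demo(16) == 5.  For illustration only. */
static int fls64_demo(unsigned long long x)
{
        int bit = 0;

        while (x != 0) {
                bit++;
                x >>= 1;
        }
        return (bit);
}

#define MAX(a, b)       ((a) > (b) ? (a) : (b))

int main(void)
{
        unsigned long long nr_to_scan = 128;    /* objects the VM asked for */
        unsigned long long slab_objs = 16;      /* objects per slab (example) */

        /* Same expression as the shrinker: scale the request per cache,
         * but always reap at least one. */
        unsigned long long n = MAX(nr_to_scan >> fls64_demo(slab_objs), 1ULL);

        printf("per-cache reap count = %llu\n", n);     /* prints 4 */
        return (0);
}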
@@ -2085,7 +2100,7 @@ __spl_kmem_cache_generic_shrinker(struct shrinker *shrink,
 	 * system to thrash.
 	 */
 	if ((spl_kmem_cache_reclaim & KMC_RECLAIM_ONCE) && sc->nr_to_scan)
-		return (-1);
+		return (SHRINK_STOP);
 
 	return (MAX(alloc, 0));
 }
@@ -2196,7 +2211,7 @@ spl_kmem_reap(void)
 	sc.nr_to_scan = KMC_REAP_CHUNK;
 	sc.gfp_mask = GFP_KERNEL;
 
-	__spl_kmem_cache_generic_shrinker(NULL, &sc);
+	(void) __spl_kmem_cache_generic_shrinker(NULL, &sc);
 }
 EXPORT_SYMBOL(spl_kmem_reap);
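
Note the added (void) cast: under the new semantics the shrinker's return
value carries meaning (objects freed, or SHRINK_STOP), and spl_kmem_reap()
discards it deliberately when it drives the shrinker by hand.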


@@ -44,11 +44,13 @@ SPL_SHRINKER_DECLARE(splat_linux_shrinker, splat_linux_shrinker_fn, 1);
 static unsigned long splat_linux_shrinker_size = 0;
 static struct file *splat_linux_shrinker_file = NULL;
 
-static int
+static spl_shrinker_t
 __splat_linux_shrinker_fn(struct shrinker *shrink, struct shrink_control *sc)
 {
 	static int failsafe = 0;
 	static unsigned long last_splat_linux_shrinker_size = 0;
+	unsigned long size;
+	spl_shrinker_t count;
 
 	/*
 	 * shrinker_size can only decrease or stay the same between callbacks
@@ -61,13 +63,21 @@ __splat_linux_shrinker_fn(struct shrinker *shrink, struct shrink_control *sc)
 	last_splat_linux_shrinker_size = splat_linux_shrinker_size;
 
 	if (sc->nr_to_scan) {
-		splat_linux_shrinker_size = splat_linux_shrinker_size -
-		    MIN(sc->nr_to_scan, splat_linux_shrinker_size);
+		size = MIN(sc->nr_to_scan, splat_linux_shrinker_size);
+		splat_linux_shrinker_size -= size;
 
 		splat_vprint(splat_linux_shrinker_file, SPLAT_LINUX_TEST1_NAME,
 		    "Reclaimed %lu objects, size now %lu\n",
-		    sc->nr_to_scan, splat_linux_shrinker_size);
+		    size, splat_linux_shrinker_size);
+
+#ifdef HAVE_SPLIT_SHRINKER_CALLBACK
+		count = size;
+#else
+		count = splat_linux_shrinker_size;
+#endif /* HAVE_SPLIT_SHRINKER_CALLBACK */
 	} else {
+		count = splat_linux_shrinker_size;
 		splat_vprint(splat_linux_shrinker_file, SPLAT_LINUX_TEST1_NAME,
 		    "Cache size is %lu\n", splat_linux_shrinker_size);
 	}
@@ -77,7 +87,7 @@ __splat_linux_shrinker_fn(struct shrinker *shrink, struct shrink_control *sc)
 		splat_vprint(splat_linux_shrinker_file, SPLAT_LINUX_TEST1_NAME,
 		    "Far more calls than expected (%d), size now %lu\n",
 		    failsafe, splat_linux_shrinker_size);
-		return -1;
+		return (SHRINK_STOP);
 	} else {
 		/*
 		 * We only increment failsafe if it doesn't trigger.  This
@@ -89,7 +99,7 @@ __splat_linux_shrinker_fn(struct shrinker *shrink, struct shrink_control *sc)
 	/* Shrinker has run, so signal back to test. */
 	wake_up(&shrinker_wait);
 
-	return (int)splat_linux_shrinker_size;
+	return (count);
 }
 SPL_SHRINKER_CALLBACK_WRAPPER(splat_linux_shrinker_fn);
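
To make the two return conventions concrete, suppose the test cache holds
100 objects and the callback is invoked with sc->nr_to_scan = 30; the
numbers below are illustrative:

size = MIN(30, 100) = 30;               /* objects reclaimed this pass */
splat_linux_shrinker_size = 100 - 30;   /* 70 objects remaining */

/* HAVE_SPLIT_SHRINKER_CALLBACK (>= 3.12):  count = size = 30 (freed)   */
/* legacy @shrink callback:                 count = 70 (still freeable) */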