Add kmem_cache flag for forcing kvmalloc

Add a new KMC_KVMEM flag to enforce use of the kvmalloc allocator
in kmem_cache_create even for large blocks, which may also increase
performance in some specific cases (e.g. zstd).

Default to KMC_KVMEM instead of KMC_VMEM in spl_kmem_cache_create.

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Matt Ahrens <matt@delphix.com>
Signed-off-by: Sebastian Gottschall <s.gottschall@dd-wrt.com>
Signed-off-by: Michael Niewöhner <foss@mniewoehner.de>
Closes #9034
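
For context, a minimal usage sketch of the new flag (not part of the commit),
modeled on the spl-zlib.c hunk at the end of this diff; the cache name, object
size, and my_init() wrapper are hypothetical:

static kmem_cache_t *my_cache;

static int
my_init(void)
{
	size_t workspace_size = 1 << 20;	/* large, vmalloc-sized objects */

	/* KMC_KVMEM forces the cache slabs to be backed by kvmalloc(). */
	my_cache = kmem_cache_create("my_workspace_cache",
	    workspace_size, 0, NULL, NULL, NULL, NULL, NULL, KMC_KVMEM);
	if (my_cache == NULL)
		return (1);

	return (0);
}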


@@ -43,8 +43,9 @@ typedef enum kmc_bit {
 	KMC_BIT_QCACHE		= 4,	/* XXX: Unsupported */
 	KMC_BIT_KMEM		= 5,	/* Use kmem cache */
 	KMC_BIT_VMEM		= 6,	/* Use vmem cache */
-	KMC_BIT_SLAB		= 7,	/* Use Linux slab cache */
-	KMC_BIT_OFFSLAB		= 8,	/* Objects not on slab */
+	KMC_BIT_KVMEM		= 7,	/* Use kvmalloc linux allocator */
+	KMC_BIT_SLAB		= 8,	/* Use Linux slab cache */
+	KMC_BIT_OFFSLAB		= 9,	/* Objects not on slab */
 	KMC_BIT_DEADLOCKED	= 14,	/* Deadlock detected */
 	KMC_BIT_GROWING		= 15,	/* Growing in progress */
 	KMC_BIT_REAPING		= 16,	/* Reaping in progress */
@@ -70,6 +71,7 @@ typedef enum kmem_cbrc {
 #define	KMC_QCACHE		(1 << KMC_BIT_QCACHE)
 #define	KMC_KMEM		(1 << KMC_BIT_KMEM)
 #define	KMC_VMEM		(1 << KMC_BIT_VMEM)
+#define	KMC_KVMEM		(1 << KMC_BIT_KVMEM)
 #define	KMC_SLAB		(1 << KMC_BIT_SLAB)
 #define	KMC_OFFSLAB		(1 << KMC_BIT_OFFSLAB)
 #define	KMC_DEADLOCKED		(1 << KMC_BIT_DEADLOCKED)
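
As a compile-time sanity sketch (again not part of the commit, and assuming a
C11 _Static_assert is available), the mask macros pair with the renumbered
bits like so:

/* Each KMC_* mask is its KMC_BIT_* position shifted into the flag word. */
_Static_assert(KMC_KVMEM == (1 << KMC_BIT_KVMEM), "mask derives from its bit");
_Static_assert(KMC_KVMEM == (1 << 7), "KMC_KVMEM takes the old KMC_BIT_SLAB slot");
_Static_assert(KMC_SLAB == (1 << 8) && KMC_OFFSLAB == (1 << 9),
    "KMC_BIT_SLAB and KMC_BIT_OFFSLAB each shift up by one");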


@@ -406,6 +406,7 @@ void procfs_list_add(procfs_list_t *procfs_list, void *p);
 #define	KMC_NODEBUG		UMC_NODEBUG
 #define	KMC_KMEM		0x0
 #define	KMC_VMEM		0x0
+#define	KMC_KVMEM		0x0
 #define	kmem_alloc(_s, _f)	umem_alloc(_s, _f)
 #define	kmem_zalloc(_s, _f)	umem_zalloc(_s, _f)
 #define	kmem_free(_b, _s)	umem_free(_b, _s)


@@ -202,6 +202,8 @@ kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
 	if (skc->skc_flags & KMC_KMEM) {
 		ASSERT(ISP2(size));
 		ptr = (void *)__get_free_pages(lflags, get_order(size));
+	} else if (skc->skc_flags & KMC_KVMEM) {
+		ptr = spl_kvmalloc(size, lflags);
 	} else {
 		/*
 		 * GFP_KERNEL allocations can safely use kvmalloc which may
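
For readers unfamiliar with the allocator: kvmalloc-style allocation first
tries physically contiguous kmalloc() memory and falls back to vmalloc() when
that fails. A simplified sketch of the pattern follows; it is not the actual
spl_kvmalloc() implementation, and it assumes the pre-5.8 three-argument
__vmalloc():

#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *
kvmalloc_sketch(size_t size, gfp_t lflags)
{
	/*
	 * Try physically contiguous memory first, without retrying hard
	 * or warning when higher-order pages are unavailable.
	 */
	void *ptr = kmalloc(size, lflags | __GFP_NOWARN | __GFP_NORETRY);

	if (ptr != NULL)
		return (ptr);

	/* Fall back to virtually contiguous memory. */
	return (__vmalloc(size, lflags, PAGE_KERNEL));
}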
@@ -890,6 +892,7 @@ spl_magazine_destroy(spl_kmem_cache_t *skc)
  * flags
  *	KMC_KMEM	Force SPL kmem backed cache
  *	KMC_VMEM	Force SPL vmem backed cache
+ *	KMC_KVMEM	Force kvmem backed cache
  *	KMC_SLAB	Force Linux slab backed cache
  *	KMC_OFFSLAB	Locate objects off the slab
  *	KMC_NOTOUCH	Disable cache object aging (unsupported)
@@ -977,8 +980,7 @@ spl_kmem_cache_create(char *name, size_t size, size_t align,
 	 * linuxslab) then select a cache type based on the object size
 	 * and default tunables.
 	 */
-	if (!(skc->skc_flags & (KMC_KMEM | KMC_VMEM |
-	    KMC_SLAB))) {
+	if (!(skc->skc_flags & (KMC_KMEM | KMC_VMEM | KMC_SLAB | KMC_KVMEM))) {
 		if (spl_kmem_cache_slab_limit &&
 		    size <= (size_t)spl_kmem_cache_slab_limit) {
 			/*
@@ -996,16 +998,16 @@ spl_kmem_cache_create(char *name, size_t size, size_t align,
 		} else {
 			/*
 			 * All other objects are considered large and are
-			 * placed on vmem backed slabs.
+			 * placed on kvmem backed slabs.
 			 */
-			skc->skc_flags |= KMC_VMEM;
+			skc->skc_flags |= KMC_KVMEM;
 		}
 	}
 
 	/*
 	 * Given the type of slab allocate the required resources.
 	 */
-	if (skc->skc_flags & (KMC_KMEM | KMC_VMEM)) {
+	if (skc->skc_flags & (KMC_KMEM | KMC_VMEM | KMC_KVMEM)) {
 		rc = spl_slab_size(skc,
 		    &skc->skc_slab_objs, &skc->skc_slab_size);
 		if (rc)
@@ -1089,7 +1091,7 @@ spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
 	taskqid_t id;
 
 	ASSERT(skc->skc_magic == SKC_MAGIC);
-	ASSERT(skc->skc_flags & (KMC_KMEM | KMC_VMEM | KMC_SLAB));
+	ASSERT(skc->skc_flags & (KMC_KMEM | KMC_VMEM | KMC_KVMEM | KMC_SLAB));
 
 	down_write(&spl_kmem_cache_sem);
 	list_del_init(&skc->skc_list);
@@ -1111,7 +1113,7 @@ spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
 	 */
 	wait_event(wq, atomic_read(&skc->skc_ref) == 0);
 
-	if (skc->skc_flags & (KMC_KMEM | KMC_VMEM)) {
+	if (skc->skc_flags & (KMC_KMEM | KMC_VMEM | KMC_KVMEM)) {
 		spl_magazine_destroy(skc);
 		spl_slab_reclaim(skc);
 	} else {
@@ -1267,7 +1269,7 @@ spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
 	 * However, this can't be applied to KVM_VMEM due to a bug that
 	 * __vmalloc() doesn't honor gfp flags in page table allocation.
 	 */
-	if (!(skc->skc_flags & KMC_VMEM)) {
+	if (!(skc->skc_flags & KMC_VMEM) && !(skc->skc_flags & KMC_KVMEM)) {
 		rc = __spl_cache_grow(skc, flags | KM_NOSLEEP);
 		if (rc == 0)
 			return (0);
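
One caller-visible consequence of this hunk: the KM_NOSLEEP pre-grow attempt
is now skipped for kvmem-backed caches just as for vmem-backed ones, so
non-sleeping allocations can fail and must be checked. A small sketch, reusing
the hypothetical my_cache from the example near the top:

	void *obj = kmem_cache_alloc(my_cache, KM_NOSLEEP);

	if (obj != NULL) {
		/* ... use the object ... */
		kmem_cache_free(my_cache, obj);
	}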


@@ -662,6 +662,33 @@ static struct ctl_table spl_kmem_table[] = {
 	.mode = 0444,
 	.proc_handler = &proc_doslab,
 	},
+	{
+	.procname = "slab_kvmem_total",
+	.data = (void *)(KMC_KVMEM | KMC_TOTAL),
+	.maxlen = sizeof (unsigned long),
+	.extra1 = &table_min,
+	.extra2 = &table_max,
+	.mode = 0444,
+	.proc_handler = &proc_doslab,
+	},
+	{
+	.procname = "slab_kvmem_alloc",
+	.data = (void *)(KMC_KVMEM | KMC_ALLOC),
+	.maxlen = sizeof (unsigned long),
+	.extra1 = &table_min,
+	.extra2 = &table_max,
+	.mode = 0444,
+	.proc_handler = &proc_doslab,
+	},
+	{
+	.procname = "slab_kvmem_max",
+	.data = (void *)(KMC_KVMEM | KMC_MAX),
+	.maxlen = sizeof (unsigned long),
+	.extra1 = &table_min,
+	.extra2 = &table_max,
+	.mode = 0444,
+	.proc_handler = &proc_doslab,
+	},
 	{},
 };
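
The new counters should then be readable from userspace. A minimal reader
sketch follows; the /proc/sys/spl/kmem/ path is an assumption based on where
the existing slab_* entries appear, not something this diff confirms:

#include <stdio.h>

int
main(void)
{
	unsigned long total;
	FILE *f = fopen("/proc/sys/spl/kmem/slab_kvmem_total", "r");

	if (f == NULL)
		return (1);
	if (fscanf(f, "%lu", &total) == 1)
		printf("kvmem-backed slab total: %lu bytes\n", total);
	(void) fclose(f);
	return (0);
}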


@@ -202,7 +202,7 @@ spl_zlib_init(void)
 	zlib_workspace_cache = kmem_cache_create(
 	    "spl_zlib_workspace_cache",
 	    size, 0, NULL, NULL, NULL, NULL, NULL,
-	    KMC_VMEM);
+	    KMC_KVMEM);
 	if (!zlib_workspace_cache)
 		return (1);
 