Use spl_fstrans_mark instead of memalloc_noio_save

For earlier versions of the kernel, memalloc_noio_save() only turns
off __GFP_IO but leaves __GFP_FS untouched during direct reclaim. This
allows a thread to direct reclaim back into ZFS and deadlock.

Instead, we should stick to using spl_fstrans_mark(). Since it
explicitly turns off both __GFP_IO and __GFP_FS before allocation, it
will work on every version of the kernel.
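
As an illustration, the approach amounts to flagging the current thread
and having the allocation paths strip both flags themselves. Below is a
minimal sketch of the idea, not the actual SPL code: it assumes a
PF_FSTRANS-style task flag, and kmem_flags_mask() is a hypothetical
helper name.

    /* Sketch only: flag the task, then let allocators test for it. */
    typedef unsigned int fstrans_cookie_t;

    static inline fstrans_cookie_t
    spl_fstrans_mark(void)
    {
            fstrans_cookie_t cookie = current->flags & PF_FSTRANS;

            current->flags |= PF_FSTRANS;
            return (cookie);
    }

    static inline int
    spl_fstrans_check(void)
    {
            return (current->flags & PF_FSTRANS);
    }

    /*
     * Strip both __GFP_IO and __GFP_FS once the thread is marked,
     * independent of the kernel's PF_MEMALLOC_NOIO semantics.
     */
    static inline gfp_t
    kmem_flags_mask(gfp_t lflags)
    {
            if (spl_fstrans_check())
                    lflags &= ~(__GFP_IO | __GFP_FS);
            return (lflags);
    }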

This impacts kernel versions 3.9-3.17, see upstream kernel commit
torvalds/linux@934f307 for reference.
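
Before that change, the kernel's masking looked roughly like the
following (an approximate reconstruction, for illustration); because
only __GFP_IO is cleared, __GFP_FS remains set and direct reclaim can
still re-enter filesystem code:

    static inline gfp_t memalloc_noio_flags(gfp_t flags)
    {
            if (unlikely(current->flags & PF_MEMALLOC_NOIO))
                    flags &= ~__GFP_IO;
            return flags;
    }

The upstream fix widens that mask to ~(__GFP_IO | __GFP_FS).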

Signed-off-by: Chunwei Chen <david.chen@osnexus.com>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Tim Chase <tim@chase2k.com>
Closes #515
Issue zfsonlinux/zfs#4111
Authored by Chunwei Chen on 2015-12-17 18:31:58 -08:00; committed by Brian Behlendorf
parent 200366f23f
commit b4ad50ac5f
5 changed files with 5 additions and 38 deletions

include/sys/vmem.h

@@ -36,7 +36,6 @@ extern vmem_t *zio_alloc_arena;
 extern vmem_t *zio_arena;
 
 extern size_t vmem_size(vmem_t *vmp, int typemask);
-extern void *spl_vmalloc(unsigned long size, gfp_t lflags, pgprot_t prot);
 
 /*
  * Memory allocation interfaces
module/spl/spl-kmem-cache.c

@@ -201,7 +201,7 @@ kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
 		ASSERT(ISP2(size));
 		ptr = (void *)__get_free_pages(lflags, get_order(size));
 	} else {
-		ptr = spl_vmalloc(size, lflags | __GFP_HIGHMEM, PAGE_KERNEL);
+		ptr = __vmalloc(size, lflags | __GFP_HIGHMEM, PAGE_KERNEL);
 	}
 
 	/* Resulting allocated memory will be page aligned */
@@ -1156,15 +1156,10 @@ spl_cache_grow_work(void *data)
 	spl_kmem_cache_t *skc = ska->ska_cache;
 	spl_kmem_slab_t *sks;
-#if defined(PF_MEMALLOC_NOIO)
-	unsigned noio_flag = memalloc_noio_save();
-	sks = spl_slab_alloc(skc, ska->ska_flags);
-	memalloc_noio_restore(noio_flag);
-#else
 	fstrans_cookie_t cookie = spl_fstrans_mark();
+
 	sks = spl_slab_alloc(skc, ska->ska_flags);
 	spl_fstrans_unmark(cookie);
-#endif
 
 	spin_lock(&skc->skc_lock);
 	if (sks) {
 		skc->skc_slab_total++;

module/spl/spl-kmem.c

@@ -185,7 +185,7 @@ spl_kmem_alloc_impl(size_t size, int flags, int node)
 	 */
 	if ((size > spl_kmem_alloc_max) || use_vmem) {
 		if (flags & KM_VMEM) {
-			ptr = spl_vmalloc(size, lflags, PAGE_KERNEL);
+			ptr = __vmalloc(size, lflags, PAGE_KERNEL);
 		} else {
 			return (NULL);
 		}
@@ -198,7 +198,7 @@ spl_kmem_alloc_impl(size_t size, int flags, int node)
 
 	/*
 	 * For vmem_alloc() and vmem_zalloc() callers retry immediately
-	 * using spl_vmalloc() which is unlikely to fail.
+	 * using __vmalloc() which is unlikely to fail.
 	 */
 	if ((flags & KM_VMEM) && (use_vmem == 0)) {
 		use_vmem = 1;

module/spl/spl-taskq.c

@@ -849,9 +849,7 @@ taskq_thread(void *args)
 	tq = tqt->tqt_tq;
 	current->flags |= PF_NOFREEZE;
 
-#if defined(PF_MEMALLOC_NOIO)
-	(void) memalloc_noio_save();
-#endif
+	(void) spl_fstrans_mark();
 
 	sigfillset(&blocked);
 	sigprocmask(SIG_BLOCK, &blocked, NULL);

module/spl/spl-vmem.c

@@ -97,31 +97,6 @@ spl_vmem_free(const void *buf, size_t size)
 }
 EXPORT_SYMBOL(spl_vmem_free);
 
-/*
- * Public vmalloc() interface designed to be safe to be called during I/O.
- */
-void *
-spl_vmalloc(unsigned long size, gfp_t lflags, pgprot_t prot)
-{
-#if defined(PF_MEMALLOC_NOIO)
-	void *ptr;
-	unsigned noio_flag = 0;
-
-	if (spl_fstrans_check())
-		noio_flag = memalloc_noio_save();
-
-	ptr = __vmalloc(size, lflags, prot);
-
-	if (spl_fstrans_check())
-		memalloc_noio_restore(noio_flag);
-
-	return (ptr);
-#else
-	return (__vmalloc(size, lflags, prot));
-#endif
-}
-EXPORT_SYMBOL(spl_vmalloc);
-
 int
 spl_vmem_init(void)
 {
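
Taken together, the hunks above leave callers with a simple composite
pattern in place of the removed spl_vmalloc(); a sketch, where size and
lflags stand for the caller's own values:

    /* Mark the thread, allocate with plain __vmalloc(), then unmark. */
    fstrans_cookie_t cookie = spl_fstrans_mark();
    void *ptr = __vmalloc(size, lflags, PAGE_KERNEL);
    spl_fstrans_unmark(cookie);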