Mirror of https://git.proxmox.com/git/mirror_zfs.git
Use spl_fstrans_mark instead of memalloc_noio_save
On earlier kernel versions, memalloc_noio_save() only clears __GFP_IO and leaves __GFP_FS set during direct reclaim. This allows threads performing direct reclaim to re-enter ZFS and deadlock. Instead, stick to spl_fstrans_mark(): because we explicitly clear both __GFP_IO and __GFP_FS before allocating, it works on every kernel version. This affects kernel versions 3.9-3.17; see upstream kernel commit torvalds/linux@934f307 for reference.

Signed-off-by: Chunwei Chen <david.chen@osnexus.com>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Tim Chase <tim@chase2k.com>
Closes #515
Issue zfsonlinux/zfs#4111
This commit is contained in:
parent 200366f23f
commit b4ad50ac5f
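To illustrate the distinction the commit message describes, here is a minimal, hypothetical C sketch (not part of this commit; the helper name example_fs_safe_alloc is made up). On 3.9-3.17 kernels, memalloc_noio_save() only keeps reclaim away from __GFP_IO allocations while __GFP_FS remains set, so reclaim can still recurse into the filesystem. Explicitly masking both bits out of the flags handed to the allocator, which is what the spl_fstrans_mark()-protected SPL paths do before allocating, is safe on any kernel version.

#include <linux/gfp.h>
#include <linux/slab.h>

/*
 * Illustrative sketch only: clear both reclaim-recursion bits before
 * calling the kernel allocator, rather than relying on
 * memalloc_noio_save(), which on 3.9-3.17 kernels clears only __GFP_IO.
 */
static void *
example_fs_safe_alloc(size_t size, gfp_t lflags)
{
	/* Forbid both block I/O and filesystem recursion during reclaim. */
	return (kmalloc(size, lflags & ~(__GFP_IO | __GFP_FS)));
}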
@@ -36,7 +36,6 @@ extern vmem_t *zio_alloc_arena;
 extern vmem_t *zio_arena;
 
 extern size_t vmem_size(vmem_t *vmp, int typemask);
-extern void *spl_vmalloc(unsigned long size, gfp_t lflags, pgprot_t prot);
 
 /*
  * Memory allocation interfaces
@@ -201,7 +201,7 @@ kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
 		ASSERT(ISP2(size));
 		ptr = (void *)__get_free_pages(lflags, get_order(size));
 	} else {
-		ptr = spl_vmalloc(size, lflags | __GFP_HIGHMEM, PAGE_KERNEL);
+		ptr = __vmalloc(size, lflags | __GFP_HIGHMEM, PAGE_KERNEL);
 	}
 
 	/* Resulting allocated memory will be page aligned */
@@ -1156,15 +1156,10 @@ spl_cache_grow_work(void *data)
 	spl_kmem_cache_t *skc = ska->ska_cache;
 	spl_kmem_slab_t *sks;
 
-#if defined(PF_MEMALLOC_NOIO)
-	unsigned noio_flag = memalloc_noio_save();
-	sks = spl_slab_alloc(skc, ska->ska_flags);
-	memalloc_noio_restore(noio_flag);
-#else
 	fstrans_cookie_t cookie = spl_fstrans_mark();
 	sks = spl_slab_alloc(skc, ska->ska_flags);
 	spl_fstrans_unmark(cookie);
-#endif
+
 	spin_lock(&skc->skc_lock);
 	if (sks) {
 		skc->skc_slab_total++;
@@ -185,7 +185,7 @@ spl_kmem_alloc_impl(size_t size, int flags, int node)
 		 */
 		if ((size > spl_kmem_alloc_max) || use_vmem) {
 			if (flags & KM_VMEM) {
-				ptr = spl_vmalloc(size, lflags, PAGE_KERNEL);
+				ptr = __vmalloc(size, lflags, PAGE_KERNEL);
 			} else {
 				return (NULL);
 			}
@@ -198,7 +198,7 @@ spl_kmem_alloc_impl(size_t size, int flags, int node)
 
 		/*
 		 * For vmem_alloc() and vmem_zalloc() callers retry immediately
-		 * using spl_vmalloc() which is unlikely to fail.
+		 * using __vmalloc() which is unlikely to fail.
 		 */
 		if ((flags & KM_VMEM) && (use_vmem == 0)) {
 			use_vmem = 1;
@@ -849,9 +849,7 @@ taskq_thread(void *args)
 	tq = tqt->tqt_tq;
 	current->flags |= PF_NOFREEZE;
 
-#if defined(PF_MEMALLOC_NOIO)
-	(void) memalloc_noio_save();
-#endif
+	(void) spl_fstrans_mark();
 
 	sigfillset(&blocked);
 	sigprocmask(SIG_BLOCK, &blocked, NULL);
@@ -97,31 +97,6 @@ spl_vmem_free(const void *buf, size_t size)
 }
 EXPORT_SYMBOL(spl_vmem_free);
 
-/*
- * Public vmalloc() interface designed to be safe to be called during I/O.
- */
-void *
-spl_vmalloc(unsigned long size, gfp_t lflags, pgprot_t prot)
-{
-#if defined(PF_MEMALLOC_NOIO)
-	void *ptr;
-	unsigned noio_flag = 0;
-
-	if (spl_fstrans_check())
-		noio_flag = memalloc_noio_save();
-
-	ptr = __vmalloc(size, lflags, prot);
-
-	if (spl_fstrans_check())
-		memalloc_noio_restore(noio_flag);
-
-	return (ptr);
-#else
-	return (__vmalloc(size, lflags, prot));
-#endif
-}
-EXPORT_SYMBOL(spl_vmalloc);
-
 int
 spl_vmem_init(void)
 {
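For context, a hedged usage sketch of the pattern this commit standardizes on (illustrative only; example_mark_and_alloc is a made-up name, while spl_fstrans_mark(), spl_fstrans_unmark(), fstrans_cookie_t, and kmem_alloc() are the existing SPL interfaces visible in the hunks above): mark the task, allocate through the SPL wrappers, which per the commit message clear __GFP_IO and __GFP_FS before allocating, then unmark.

#include <sys/kmem.h>	/* SPL: kmem_alloc(), KM_SLEEP, spl_fstrans_mark() */

/*
 * Illustrative sketch of the mark/allocate/unmark pattern adopted in
 * spl_cache_grow_work() above; not code from this commit.
 */
static void *
example_mark_and_alloc(size_t size)
{
	fstrans_cookie_t cookie = spl_fstrans_mark();
	void *ptr = kmem_alloc(size, KM_SLEEP);

	spl_fstrans_unmark(cookie);
	return (ptr);
}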