/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, Joyent, Inc.
 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
 * Copyright (c) 2014 by Saso Kiselkov. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc. All rights reserved.
 */

#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/spa_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/refcount.h>
#include <sys/vdev.h>
#include <sys/vdev_trim.h>
#include <sys/vdev_impl.h>
#include <sys/dsl_pool.h>
#include <sys/zio_checksum.h>
#include <sys/multilist.h>
#include <sys/abd.h>
#include <sys/zil.h>
#include <sys/fm/fs/zfs.h>
#ifdef _KERNEL
#include <sys/shrinker.h>
#include <sys/vmsystm.h>
#include <sys/zpl.h>
#include <linux/page_compat.h>
#endif
#include <sys/callb.h>
#include <sys/kstat.h>
#include <sys/zthr.h>
#include <zfs_fletcher.h>
#include <sys/arc_impl.h>
#include <sys/trace_zfs.h>
#include <sys/aggsum.h>

int64_t last_free_memory;
free_memory_reason_t last_free_reason;

/*
 * Return a default max arc size based on the amount of physical memory.
 */
uint64_t
arc_default_max(uint64_t min, uint64_t allmem)
{
        /* Default to 1/2 of all memory. */
        return (MAX(allmem / 2, min));
}
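/*
 * For example, on a system with 16 GiB of physical memory this yields a
 * default maximum ARC size of 8 GiB, unless the caller supplies a larger
 * floor via the min argument.
 */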

#ifdef _KERNEL
/*
 * Return the maximum amount of memory that we could possibly use.  Reduced
 * to half of all memory in user space, which is primarily used for testing.
 */
uint64_t
arc_all_memory(void)
{
#ifdef CONFIG_HIGHMEM
        return (ptob(zfs_totalram_pages - zfs_totalhigh_pages));
#else
        return (ptob(zfs_totalram_pages));
#endif /* CONFIG_HIGHMEM */
}

/*
 * Return the amount of memory that is considered free.  In user space,
 * which is primarily used for testing, we pretend that free memory ranges
 * from 0-20% of all memory.
 */
uint64_t
arc_free_memory(void)
{
#ifdef CONFIG_HIGHMEM
        struct sysinfo si;
        si_meminfo(&si);
        return (ptob(si.freeram - si.freehigh));
#else
        return (ptob(nr_free_pages() +
            nr_inactive_file_pages() +
            nr_slab_reclaimable_pages()));
#endif /* CONFIG_HIGHMEM */
}
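/*
 * Note: in addition to truly free pages, the !CONFIG_HIGHMEM branch above
 * counts inactive file pages and reclaimable slab as "free", since the
 * kernel can reclaim those relatively cheaply under memory pressure.  The
 * result is an estimate of reclaimable headroom rather than a hard
 * guarantee.
 */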

/*
 * Additional reserve of pages for pp_reserve.
 */
int64_t arc_pages_pp_reserve = 64;

/*
 * Additional reserve of pages for swapfs.
 */
int64_t arc_swapfs_reserve = 64;

/*
 * Return the amount of memory that can be consumed before reclaim will be
 * needed.  Positive if there is sufficient free memory, negative indicates
 * the amount of memory that needs to be freed up.
 */
int64_t
arc_available_memory(void)
{
        int64_t lowest = INT64_MAX;
        free_memory_reason_t r = FMR_UNKNOWN;
        int64_t n;

        if (arc_need_free > 0) {
                lowest = -arc_need_free;
                r = FMR_NEEDFREE;
        }

        n = arc_free_memory() - arc_sys_free - arc_need_free;
        if (n < lowest) {
                lowest = n;
                r = FMR_LOTSFREE;
        }

        last_free_memory = lowest;
        last_free_reason = r;

        return (lowest);
}
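/*
 * Illustrative example of the accounting above: with arc_need_free == 0,
 * arc_sys_free == 256 MiB and arc_free_memory() reporting 192 MiB, n is
 * -64 MiB, so the function returns -64 MiB with FMR_LOTSFREE recorded,
 * i.e. roughly 64 MiB should be freed to get back above the free-memory
 * floor.
 */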

static uint64_t
arc_evictable_memory(void)
{
        int64_t asize = aggsum_value(&arc_size);
        uint64_t arc_clean =
            zfs_refcount_count(&arc_mru->arcs_esize[ARC_BUFC_DATA]) +
            zfs_refcount_count(&arc_mru->arcs_esize[ARC_BUFC_METADATA]) +
            zfs_refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_DATA]) +
            zfs_refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
        uint64_t arc_dirty = MAX((int64_t)asize - (int64_t)arc_clean, 0);

        /*
         * Scale reported evictable memory in proportion to page cache, cap
         * at specified min/max.
         */
        uint64_t min = (ptob(nr_file_pages()) / 100) * zfs_arc_pc_percent;
        min = MAX(arc_c_min, MIN(arc_c_max, min));

        if (arc_dirty >= min)
                return (arc_clean);

        return (MAX((int64_t)asize - (int64_t)min, 0));
}
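/*
 * Note: asize, arc_clean and the value returned above are all byte
 * counts; the shrinker callbacks below convert to pages with btop().
 */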

/*
 * The _count() function returns the number of free-able objects.
 * The _scan() function returns the number of objects that were freed.
 */
static unsigned long
arc_shrinker_count(struct shrinker *shrink, struct shrink_control *sc)
{
        return (btop((int64_t)arc_evictable_memory()));
}
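/*
 * The shrinker interface works in units of pages, hence the btop()/ptob()
 * conversions to and from the byte counts used elsewhere in the ARC.  The
 * kernel calls these callbacks from its slab-shrinking path during memory
 * reclaim; for example, writing 2 or 3 to /proc/sys/vm/drop_caches is one
 * way to exercise them manually on a test system.
 */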

static unsigned long
arc_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc)
{
        int64_t pages;

        /* The arc is considered warm once reclaim has occurred */
        if (unlikely(arc_warm == B_FALSE))
                arc_warm = B_TRUE;

        /* Return the potential number of reclaimable pages */
        pages = btop((int64_t)arc_evictable_memory());

        /* Not allowed to perform filesystem reclaim */
        if (!(sc->gfp_mask & __GFP_FS))
                return (SHRINK_STOP);

        /* Reclaim in progress */
        if (mutex_tryenter(&arc_adjust_lock) == 0) {
                ARCSTAT_INCR(arcstat_need_free, ptob(sc->nr_to_scan));
                return (0);
        }

        mutex_exit(&arc_adjust_lock);

        /*
         * Evict the requested number of pages by shrinking arc_c the
         * requested amount.
         */
        if (pages > 0) {
                arc_reduce_target_size(ptob(sc->nr_to_scan));

                /*
                 * Repeated calls to the arc shrinker can reduce arc_c
                 * drastically, potentially all the way to arc_c_min.  While
                 * arc_c is below arc_size, ZFS can't process read/write
                 * requests, because arc_get_data_impl() will block.  To
                 * ensure that arc_c doesn't shrink faster than the adjust
                 * thread can keep up, we wait for eviction here.
                 */
                mutex_enter(&arc_adjust_lock);
                if (arc_is_overflowing()) {
                        arc_adjust_needed = B_TRUE;
                        zthr_wakeup(arc_adjust_zthr);
                        (void) cv_wait(&arc_adjust_waiters_cv,
                            &arc_adjust_lock);
                }
                mutex_exit(&arc_adjust_lock);

                if (current_is_kswapd())
                        arc_kmem_reap_soon();
                pages = MAX((int64_t)pages -
                    (int64_t)btop(arc_evictable_memory()), 0);
                /*
                 * We've shrunk what we can, wake up threads.
                 */
                cv_broadcast(&arc_adjust_waiters_cv);
        } else
                pages = SHRINK_STOP;

        /*
         * When direct reclaim is observed it usually indicates a rapid
         * increase in memory pressure.  This occurs because the kswapd
         * threads were unable to asynchronously keep enough free memory
         * available.  In this case set arc_no_grow to briefly pause arc
         * growth to avoid compounding the memory pressure.
         */
        if (current_is_kswapd()) {
                ARCSTAT_BUMP(arcstat_memory_indirect_count);
        } else {
                arc_no_grow = B_TRUE;
                arc_kmem_reap_soon();
                ARCSTAT_BUMP(arcstat_memory_direct_count);
        }

        return (pages);
}
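/*
 * Returning SHRINK_STOP above tells the kernel's reclaim path to stop
 * asking this shrinker for more work during the current scan, either
 * because filesystem reclaim is not permitted (no __GFP_FS) or because
 * nothing was evictable.
 */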

SPL_SHRINKER_DECLARE(arc_shrinker,
    arc_shrinker_count, arc_shrinker_scan, DEFAULT_SEEKS);
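/*
 * SPL_SHRINKER_DECLARE() bundles the split count/scan callbacks above into
 * the arc_shrinker object registered in arc_lowmem_init().  On kernels that
 * only provide the older combined shrinker callback, the SPL wraps them, so
 * the ZFS code only implements the newer split API.
 */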

int
arc_memory_throttle(spa_t *spa, uint64_t reserve, uint64_t txg)
{
        uint64_t free_memory = arc_free_memory();

        if (free_memory > arc_all_memory() * arc_lotsfree_percent / 100)
                return (0);

        if (txg > spa->spa_lowmem_last_txg) {
                spa->spa_lowmem_last_txg = txg;
                spa->spa_lowmem_page_load = 0;
        }
        /*
         * If we are in pageout, we know that memory is already tight,
         * the arc is already going to be evicting, so we just want to
         * continue to let page writes occur as quickly as possible.
         */
        if (current_is_kswapd()) {
                if (spa->spa_lowmem_page_load >
                    MAX(arc_sys_free / 4, free_memory) / 4) {
                        DMU_TX_STAT_BUMP(dmu_tx_memory_reclaim);
                        return (SET_ERROR(ERESTART));
                }
                /* Note: reserve is inflated, so we deflate */
                atomic_add_64(&spa->spa_lowmem_page_load, reserve / 8);
                return (0);
        } else if (spa->spa_lowmem_page_load > 0 && arc_reclaim_needed()) {
                /* memory is low, delay before restarting */
                ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
                DMU_TX_STAT_BUMP(dmu_tx_memory_reclaim);
                return (SET_ERROR(EAGAIN));
        }
        spa->spa_lowmem_page_load = 0;
        return (0);
}
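/*
 * Summary of the throttle above: under kswapd, writes may proceed while
 * the accumulated spa_lowmem_page_load stays below a quarter of
 * MAX(arc_sys_free / 4, free_memory); beyond that, ERESTART pushes the tx
 * back.  Outside of kswapd, EAGAIN delays the tx while memory remains low
 * and some page load has accumulated.
 */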

void
arc_lowmem_init(void)
{
        uint64_t allmem = arc_all_memory();

        /*
         * Register a shrinker to support synchronous (direct) memory
         * reclaim from the arc.  This is done to prevent kswapd from
         * swapping out pages when it is preferable to shrink the arc.
         */
        spl_register_shrinker(&arc_shrinker);

        /* Set to 1/64 of all memory or a minimum of 512K */
        arc_sys_free = MAX(allmem / 64, (512 * 1024));
        arc_need_free = 0;
}

void
arc_lowmem_fini(void)
{
        spl_unregister_shrinker(&arc_shrinker);
}

int
param_set_arc_long(const char *buf, zfs_kernel_param_t *kp)
{
        int error;

        error = param_set_long(buf, kp);
        if (error < 0)
                return (SET_ERROR(error));

        arc_tuning_update(B_TRUE);

        return (0);
}
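/*
 * param_set_arc_long() above and param_set_arc_int() below are intended as
 * module-parameter setters for ARC tunables such as zfs_arc_max, so a
 * runtime write like
 * "echo 8589934592 > /sys/module/zfs/parameters/zfs_arc_max" funnels
 * through here and arc_tuning_update() applies the new value immediately.
 */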

int
param_set_arc_int(const char *buf, zfs_kernel_param_t *kp)
{
        int error;

        error = param_set_int(buf, kp);
        if (error < 0)
                return (SET_ERROR(error));

        arc_tuning_update(B_TRUE);

        return (0);
}
#else /* _KERNEL */
int64_t
arc_available_memory(void)
{
        int64_t lowest = INT64_MAX;
        free_memory_reason_t r = FMR_UNKNOWN;

        /* Every 100 calls, free a small amount */
        if (spa_get_random(100) == 0)
                lowest = -1024;

        last_free_memory = lowest;
        last_free_reason = r;

        return (lowest);
}

int
arc_memory_throttle(spa_t *spa, uint64_t reserve, uint64_t txg)
{
        return (0);
}

uint64_t
arc_all_memory(void)
{
        return (ptob(physmem) / 2);
}

uint64_t
arc_free_memory(void)
{
        return (spa_get_random(arc_all_memory() * 20 / 100));
}
#endif /* _KERNEL */

/*
 * Helper function for arc_prune_async(); it is responsible for safely
 * handling the execution of a registered arc_prune_func_t.
 */
static void
arc_prune_task(void *ptr)
{
        arc_prune_t *ap = (arc_prune_t *)ptr;
        arc_prune_func_t *func = ap->p_pfunc;

        if (func != NULL)
                func(ap->p_adjust, ap->p_private);

        zfs_refcount_remove(&ap->p_refcnt, func);
}

/*
 * Notify registered consumers they must drop holds on a portion of the ARC
 * buffers they reference.  This provides a mechanism to ensure the ARC can
 * honor the arc_meta_limit and reclaim otherwise pinned ARC buffers.  This
 * is analogous to dnlc_reduce_cache() but more generic.
 *
 * This operation is performed asynchronously so it may be safely called
 * in the context of the arc_reclaim_thread().  A reference is taken here
 * for each registered arc_prune_t and the arc_prune_task() is responsible
 * for releasing it once the registered arc_prune_func_t has completed.
 */
void
arc_prune_async(int64_t adjust)
{
        arc_prune_t *ap;

        mutex_enter(&arc_prune_mtx);
        for (ap = list_head(&arc_prune_list); ap != NULL;
            ap = list_next(&arc_prune_list, ap)) {

                if (zfs_refcount_count(&ap->p_refcnt) >= 2)
                        continue;

                zfs_refcount_add(&ap->p_refcnt, ap->p_pfunc);
                ap->p_adjust = adjust;
                if (taskq_dispatch(arc_prune_taskq, arc_prune_task,
                    ap, TQ_SLEEP) == TASKQID_INVALID) {
                        zfs_refcount_remove(&ap->p_refcnt, ap->p_pfunc);
                        continue;
                }
                ARCSTAT_BUMP(arcstat_prune);
        }
        mutex_exit(&arc_prune_mtx);
}