Mirror of https://git.proxmox.com/git/mirror_zfs.git
Clean up OS-specific ARC and kmem code
OS-specific code (e.g. under `module/os/linux`) does not need to share its code structure with any other operating systems. In particular, the ARC and kmem code need not be similar to the code in illumos, because we won't be syncing this OS-specific code between operating systems. For example, if/when illumos support is added to the common repo, we would add a file `module/os/illumos/zfs/arc_os.c` for the illumos versions of this code.

Therefore, we can simplify the code in the OS-specific ARC and kmem routines. These changes do not impact system behavior; they are purely code cleanup.

The changes are:

* Arenas are not used on Linux or FreeBSD (they are always `NULL`), so `heap_arena`, `zio_arena`, and `zio_alloc_arena` can be removed, along with the code that uses them.

* In `arc_available_memory()` (a condensed sketch follows the commit metadata below):
  * `desfree` is unused, remove it
  * rename `freemem` to avoid conflict with a pre-existing `#define`
  * remove checks related to arenas
  * use units of bytes, rather than converting from bytes to pages and then back to bytes

* `SPL_KMEM_CACHE_REAP` is unused, remove it.

* `skc_reap` is unused, remove it.

* The `count` argument to `spl_kmem_cache_reap_now()` is unused, remove it.

* `vmem_size()` and the associated type and macros are unused, remove them.

* In `arc_memory_throttle()`, use a less confusing variable name to store the result of `arc_free_memory()`.

Reviewed-by: George Wilson <gwilson@delphix.com>
Reviewed-by: Pavel Zakharov <pavel.zakharov@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Ryan Moeller <ryan@ixsystems.com>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
Closes #10499
This commit is contained in:
parent 94a2dca6a0
commit 3c42c9ed84
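The core of the `arc_available_memory()` simplification on Linux is the switch from page-based arithmetic (`btop()`/`PAGESIZE`) to plain byte arithmetic on the ARC counters. The following standalone sketch condenses that new check from the `module/os/linux` hunk further down; the stubbed values and the `main()` wrapper are illustrative assumptions, not part of the commit:

	/*
	 * Standalone sketch of the simplified free-memory check; values are
	 * stubbed for illustration (in the kernel they come from arcstats/VM).
	 */
	#include <stdint.h>
	#include <stdio.h>

	typedef enum { FMR_UNKNOWN, FMR_NEEDFREE, FMR_LOTSFREE } free_memory_reason_t;

	static int64_t arc_need_free = 0;		/* bytes the kernel asked us to free */
	static int64_t arc_sys_free = 256 << 20;	/* target system free bytes */

	static int64_t
	arc_free_memory(void)				/* stub: pretend 1 GiB is free */
	{
		return (1024LL << 20);
	}

	int
	main(void)
	{
		int64_t lowest = INT64_MAX;
		free_memory_reason_t r = FMR_UNKNOWN;
		int64_t n;

		/* Everything stays in bytes; no page conversions, no arena checks. */
		if (arc_need_free > 0) {
			lowest = -arc_need_free;
			r = FMR_NEEDFREE;
		}

		n = arc_free_memory() - arc_sys_free - arc_need_free;
		if (n < lowest) {
			lowest = n;
			r = FMR_LOTSFREE;
		}

		printf("lowest = %lld bytes, reason = %d\n", (long long)lowest, r);
		return (0);
	}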
@@ -90,13 +90,10 @@ void *calloc(size_t n, size_t s);
#define kmem_cache_reap_now kmem_cache_reap_soon
#define freemem vm_free_count()
#define minfree vm_cnt.v_free_min
#define heap_arena kernel_arena
#define zio_arena NULL
#define kmem_alloc(size, kmflags) zfs_kmem_alloc((size), (kmflags))
#define kmem_zalloc(size, kmflags) \
	zfs_kmem_alloc((size), (kmflags) | M_ZERO)
#define kmem_free(buf, size) zfs_kmem_free((buf), (size))
#define vmem_qcache_reap(ptr) ((void)0)

#endif /* _OPENSOLARIS_SYS_KMEM_H_ */
@@ -100,7 +100,6 @@ extern struct rw_semaphore spl_kmem_cache_sem;
#define SKC_MAGIC 0x2c2c2c2c

#define SPL_KMEM_CACHE_DELAY 15 /* Minimum slab release age */
#define SPL_KMEM_CACHE_REAP 0 /* Default reap everything */
#define SPL_KMEM_CACHE_OBJ_PER_SLAB 8 /* Target objects per slab */
#define SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN 1 /* Minimum objects per slab */
#define SPL_KMEM_CACHE_ALIGN 8 /* Default object alignment */
@@ -185,7 +184,6 @@ typedef struct spl_kmem_cache {
	uint32_t skc_slab_objs; /* Objects per slab */
	uint32_t skc_slab_size; /* Slab size */
	uint32_t skc_delay; /* Slab reclaim interval */
	uint32_t skc_reap; /* Slab reclaim count */
	atomic_t skc_ref; /* Ref count callers */
	taskqid_t skc_taskqid; /* Slab reclaim task */
	struct list_head skc_list; /* List of caches linkage */
@@ -219,7 +217,7 @@ extern void spl_kmem_cache_destroy(spl_kmem_cache_t *skc);
extern void *spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags);
extern void spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj);
extern void spl_kmem_cache_set_allocflags(spl_kmem_cache_t *skc, gfp_t flags);
extern void spl_kmem_cache_reap_now(spl_kmem_cache_t *skc, int count);
extern void spl_kmem_cache_reap_now(spl_kmem_cache_t *skc);
extern void spl_kmem_reap(void);
extern uint64_t spl_kmem_cache_inuse(kmem_cache_t *cache);
extern uint64_t spl_kmem_cache_entry_size(kmem_cache_t *cache);
@@ -230,8 +228,7 @@ extern uint64_t spl_kmem_cache_entry_size(kmem_cache_t *cache);
#define kmem_cache_destroy(skc) spl_kmem_cache_destroy(skc)
#define kmem_cache_alloc(skc, flags) spl_kmem_cache_alloc(skc, flags)
#define kmem_cache_free(skc, obj) spl_kmem_cache_free(skc, obj)
#define kmem_cache_reap_now(skc) \
	spl_kmem_cache_reap_now(skc, skc->skc_reap)
#define kmem_cache_reap_now(skc) spl_kmem_cache_reap_now(skc)
#define kmem_reap() spl_kmem_reap()

/*
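The two `kmem_cache_reap_now` definitions above are the old and new forms of the macro: the per-cache `skc_reap` count is gone, so a reap request is now just the cache pointer. A minimal illustrative caller, assuming a Linux SPL build environment (`sys/kmem_cache.h`); it is not taken from this commit:

	/* Illustrative only; assumes the Linux SPL headers are available. */
	#include <sys/kmem_cache.h>

	static void
	reap_example(kmem_cache_t *cache)
	{
		/* Single-argument form after this change; the count is gone. */
		kmem_cache_reap_now(cache);
	}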
@@ -31,12 +31,6 @@

typedef struct vmem { } vmem_t;

extern vmem_t *heap_arena;
extern vmem_t *zio_alloc_arena;
extern vmem_t *zio_arena;

extern size_t vmem_size(vmem_t *vmp, int typemask);

/*
 * Memory allocation interfaces
 */
@@ -97,7 +91,6 @@ extern size_t vmem_size(vmem_t *vmp, int typemask);
#define vmem_alloc(sz, fl) spl_vmem_alloc((sz), (fl), __func__, __LINE__)
#define vmem_zalloc(sz, fl) spl_vmem_zalloc((sz), (fl), __func__, __LINE__)
#define vmem_free(ptr, sz) spl_vmem_free((ptr), (sz))
#define vmem_qcache_reap(ptr) ((void)0)

extern void *spl_vmem_alloc(size_t sz, int fl, const char *func, int line);
extern void *spl_vmem_zalloc(size_t sz, int fl, const char *func, int line);
@@ -872,7 +872,6 @@ typedef enum free_memory_reason_t {
#define arc_sys_free ARCSTAT(arcstat_sys_free) /* target system free bytes */
#define arc_need_free ARCSTAT(arcstat_need_free) /* bytes to be freed */

extern int arc_zio_arena_free_shift;
extern taskq_t *arc_prune_taskq;
extern arc_stats_t arc_stats;
extern hrtime_t arc_growtime;
@@ -413,12 +413,9 @@ void procfs_list_add(procfs_list_t *procfs_list, void *p);
#define kmem_debugging() 0
#define kmem_cache_reap_now(_c) umem_cache_reap_now(_c);
#define kmem_cache_set_move(_c, _cb) /* nothing */
#define vmem_qcache_reap(_v) /* nothing */
#define POINTER_INVALIDATE(_pp) /* nothing */
#define POINTER_IS_VALID(_p) 0

extern vmem_t *zio_arena;

typedef umem_cache_t kmem_cache_t;

typedef enum kmem_cbrc {
@@ -52,7 +52,6 @@
uint64_t physmem;
char hw_serial[HW_HOSTID_LEN];
struct utsname hw_utsname;
vmem_t *zio_arena = NULL;

/* If set, all blocks read will be copied to the specified directory. */
char *vn_dumpdir = NULL;
@@ -48,13 +48,6 @@

extern struct vfsops zfs_vfsops;

/* vmem_size typemask */
#define VMEM_ALLOC 0x01
#define VMEM_FREE 0x02
#define VMEM_MAXFREE 0x10
typedef size_t vmem_size_t;
extern vmem_size_t vmem_size(vmem_t *vm, int typemask);

uint_t zfs_arc_free_target = 0;

int64_t last_free_memory;
@@ -135,25 +128,6 @@ arc_available_memory(void)
	}
#endif

	/*
	 * If zio data pages are being allocated out of a separate heap segment,
	 * then enforce that the size of available vmem for this arena remains
	 * above about 1/4th (1/(2^arc_zio_arena_free_shift)) free.
	 *
	 * Note that reducing the arc_zio_arena_free_shift keeps more virtual
	 * memory (in the zio_arena) free, which can avoid memory
	 * fragmentation issues.
	 */
	if (zio_arena != NULL) {
		n = (int64_t)vmem_size(zio_arena, VMEM_FREE) -
		    (vmem_size(zio_arena, VMEM_ALLOC) >>
		    arc_zio_arena_free_shift);
		if (n < lowest) {
			lowest = n;
			r = FMR_ZIO_ARENA;
		}
	}

	last_free_memory = lowest;
	last_free_reason = r;
	DTRACE_PROBE2(arc__available_memory, int64_t, lowest, int, r);
@@ -923,7 +923,6 @@ spl_kmem_cache_create(char *name, size_t size, size_t align,
	skc->skc_obj_size = size;
	skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN;
	skc->skc_delay = SPL_KMEM_CACHE_DELAY;
	skc->skc_reap = SPL_KMEM_CACHE_REAP;
	atomic_set(&skc->skc_ref, 0);

	INIT_LIST_HEAD(&skc->skc_list);
|
||||
down_read(&spl_kmem_cache_sem);
|
||||
list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
|
||||
uint64_t oldalloc = skc->skc_obj_alloc;
|
||||
spl_kmem_cache_reap_now(skc,
|
||||
MAX(sc->nr_to_scan>>fls64(skc->skc_slab_objs), 1));
|
||||
spl_kmem_cache_reap_now(skc);
|
||||
if (oldalloc > skc->skc_obj_alloc)
|
||||
alloc += oldalloc - skc->skc_obj_alloc;
|
||||
}
|
||||
@@ -1682,7 +1680,7 @@ SPL_SHRINKER_DECLARE(spl_kmem_cache_shrinker,
 * effort and we do not want to thrash creating and destroying slabs.
 */
void
spl_kmem_cache_reap_now(spl_kmem_cache_t *skc, int count)
spl_kmem_cache_reap_now(spl_kmem_cache_t *skc)
{
	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
@@ -28,51 +28,6 @@
#include <sys/shrinker.h>
#include <linux/module.h>

vmem_t *heap_arena = NULL;
EXPORT_SYMBOL(heap_arena);

vmem_t *zio_alloc_arena = NULL;
EXPORT_SYMBOL(zio_alloc_arena);

vmem_t *zio_arena = NULL;
EXPORT_SYMBOL(zio_arena);

#define VMEM_FLOOR_SIZE (4 * 1024 * 1024) /* 4MB floor */

/*
 * Return approximate virtual memory usage based on these assumptions:
 *
 * 1) The major SPL consumer of virtual memory is the kmem cache.
 * 2) Memory allocated with vmem_alloc() is short lived and can be ignored.
 * 3) Allow a 4MB floor as a generous pad given normal consumption.
 * 4) The spl_kmem_cache_sem only contends with cache create/destroy.
 */
size_t
vmem_size(vmem_t *vmp, int typemask)
{
	spl_kmem_cache_t *skc = NULL;
	size_t alloc = VMEM_FLOOR_SIZE;

	if ((typemask & VMEM_ALLOC) && (typemask & VMEM_FREE))
		return (VMALLOC_TOTAL);

	down_read(&spl_kmem_cache_sem);
	list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
		if (skc->skc_flags & KMC_VMEM)
			alloc += skc->skc_slab_size * skc->skc_slab_total;
	}
	up_read(&spl_kmem_cache_sem);

	if (typemask & VMEM_ALLOC)
		return (MIN(alloc, VMALLOC_TOTAL));
	else if (typemask & VMEM_FREE)
		return (MAX(VMALLOC_TOTAL - alloc, 0));
	else
		return (0);
}
EXPORT_SYMBOL(vmem_size);

/*
 * Public vmem_alloc(), vmem_zalloc() and vmem_free() interfaces.
 */
@@ -126,74 +126,18 @@ arc_available_memory(void)
	int64_t lowest = INT64_MAX;
	free_memory_reason_t r = FMR_UNKNOWN;
	int64_t n;
#ifdef freemem
#undef freemem
#endif
	pgcnt_t needfree = btop(arc_need_free);
	pgcnt_t lotsfree = btop(arc_sys_free);
	pgcnt_t desfree = 0;
	pgcnt_t freemem = btop(arc_free_memory());

	if (needfree > 0) {
		n = PAGESIZE * (-needfree);
		if (n < lowest) {
			lowest = n;
			r = FMR_NEEDFREE;
		}
	if (arc_need_free > 0) {
		lowest = -arc_need_free;
		r = FMR_NEEDFREE;
	}

	/*
	 * check that we're out of range of the pageout scanner. It starts to
	 * schedule paging if freemem is less than lotsfree and needfree.
	 * lotsfree is the high-water mark for pageout, and needfree is the
	 * number of needed free pages. We add extra pages here to make sure
	 * the scanner doesn't start up while we're freeing memory.
	 */
	n = PAGESIZE * (freemem - lotsfree - needfree - desfree);
	n = arc_free_memory() - arc_sys_free - arc_need_free;
	if (n < lowest) {
		lowest = n;
		r = FMR_LOTSFREE;
	}

#if defined(_ILP32)
	/*
	 * If we're on a 32-bit platform, it's possible that we'll exhaust the
	 * kernel heap space before we ever run out of available physical
	 * memory. Most checks of the size of the heap_area compare against
	 * tune.t_minarmem, which is the minimum available real memory that we
	 * can have in the system. However, this is generally fixed at 25 pages
	 * which is so low that it's useless. In this comparison, we seek to
	 * calculate the total heap-size, and reclaim if more than 3/4ths of the
	 * heap is allocated. (Or, in the calculation, if less than 1/4th is
	 * free)
	 */
	n = vmem_size(heap_arena, VMEM_FREE) -
	    (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2);
	if (n < lowest) {
		lowest = n;
		r = FMR_HEAP_ARENA;
	}
#endif

	/*
	 * If zio data pages are being allocated out of a separate heap segment,
	 * then enforce that the size of available vmem for this arena remains
	 * above about 1/4th (1/(2^arc_zio_arena_free_shift)) free.
	 *
	 * Note that reducing the arc_zio_arena_free_shift keeps more virtual
	 * memory (in the zio_arena) free, which can avoid memory
	 * fragmentation issues.
	 */
	if (zio_arena != NULL) {
		n = (int64_t)vmem_size(zio_arena, VMEM_FREE) -
		    (vmem_size(zio_arena, VMEM_ALLOC) >>
		    arc_zio_arena_free_shift);
		if (n < lowest) {
			lowest = n;
			r = FMR_ZIO_ARENA;
		}
	}

	last_free_memory = lowest;
	last_free_reason = r;
@@ -317,14 +261,9 @@ SPL_SHRINKER_DECLARE(arc_shrinker,
int
arc_memory_throttle(spa_t *spa, uint64_t reserve, uint64_t txg)
{
	uint64_t available_memory = arc_free_memory();
	uint64_t free_memory = arc_free_memory();

#if defined(_ILP32)
	available_memory =
	    MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
#endif

	if (available_memory > arc_all_memory() * arc_lotsfree_percent / 100)
	if (free_memory > arc_all_memory() * arc_lotsfree_percent / 100)
		return (0);

	if (txg > spa->spa_lowmem_last_txg) {
@@ -338,7 +277,7 @@ arc_memory_throttle(spa_t *spa, uint64_t reserve, uint64_t txg)
	 */
	if (current_is_kswapd()) {
		if (spa->spa_lowmem_page_load >
		    MAX(arc_sys_free / 4, available_memory) / 4) {
		    MAX(arc_sys_free / 4, free_memory) / 4) {
			DMU_TX_STAT_BUMP(dmu_tx_memory_reclaim);
			return (SET_ERROR(ERESTART));
		}
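For readability, a condensed fragment of how the renamed variable reads in `arc_memory_throttle()` after this change, assembled from the hunks above; nothing here goes beyond what the diff shows, and the added comments are explanatory only:

	uint64_t free_memory = arc_free_memory();

	/* No throttling while plenty of memory is free. */
	if (free_memory > arc_all_memory() * arc_lotsfree_percent / 100)
		return (0);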
@@ -390,11 +390,6 @@ static boolean_t arc_initialized;
 */
boolean_t arc_warm;

/*
 * log2 fraction of the zio arena to keep free.
 */
int arc_zio_arena_free_shift = 2;

/*
 * These tunables are for performance analysis.
 */
@@ -4687,14 +4682,6 @@ arc_kmem_reap_soon(void)
	kmem_cache_reap_now(hdr_l2only_cache);
	kmem_cache_reap_now(zfs_btree_leaf_cache);
	abd_cache_reap_now();

	if (zio_arena != NULL) {
		/*
		 * Ask the vmem arena to reclaim unused memory from its
		 * quantum caches.
		 */
		vmem_qcache_reap(zio_arena);
	}
}

/* ARGSUSED */