linux/kmem: remove HAVE_ATOMIC64_T and kmem_alloc_used wrappers

Seems like we haven't set it since the SPL was pulled into the main ZFS
tree. In removing the define, I've taken the 64-bit version (i.e. the one
that _hasn't_ been running since back then) because it looks like it's
closer to the intended width by the way it's used.

Since the macros are no longer needed as a selector, pull those too.

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Alexander Motin <mav@FreeBSD.org>
Signed-off-by: Rob Norris <robn@despairlabs.com>
Sponsored-by: https://despairlabs.com/sponsor/
Closes #17551
This commit is contained in:
Rob Norris 2025-07-18 15:18:33 +10:00 committed by Brian Behlendorf
parent 1c483cf3d0
commit 9292071565
3 changed files with 11 additions and 36 deletions

View File

@ -127,21 +127,8 @@ spl_fstrans_check(void)
return (current->flags & SPL_FSTRANS); return (current->flags & SPL_FSTRANS);
} }
#ifdef HAVE_ATOMIC64_T
#define kmem_alloc_used_add(size) atomic64_add(size, &kmem_alloc_used)
#define kmem_alloc_used_sub(size) atomic64_sub(size, &kmem_alloc_used)
#define kmem_alloc_used_read() atomic64_read(&kmem_alloc_used)
#define kmem_alloc_used_set(size) atomic64_set(&kmem_alloc_used, size)
extern atomic64_t kmem_alloc_used; extern atomic64_t kmem_alloc_used;
extern unsigned long long kmem_alloc_max; extern uint64_t kmem_alloc_max;
#else /* HAVE_ATOMIC64_T */
#define kmem_alloc_used_add(size) atomic_add(size, &kmem_alloc_used)
#define kmem_alloc_used_sub(size) atomic_sub(size, &kmem_alloc_used)
#define kmem_alloc_used_read() atomic_read(&kmem_alloc_used)
#define kmem_alloc_used_set(size) atomic_set(&kmem_alloc_used, size)
extern atomic_t kmem_alloc_used;
extern unsigned long long kmem_alloc_max;
#endif /* HAVE_ATOMIC64_T */
extern unsigned int spl_kmem_alloc_warn; extern unsigned int spl_kmem_alloc_warn;
extern unsigned int spl_kmem_alloc_max; extern unsigned int spl_kmem_alloc_max;

View File

@ -302,13 +302,8 @@ spl_kmem_free_impl(const void *buf, size_t size)
#ifdef DEBUG_KMEM #ifdef DEBUG_KMEM
/* Shim layer memory accounting */ /* Shim layer memory accounting */
#ifdef HAVE_ATOMIC64_T
atomic64_t kmem_alloc_used = ATOMIC64_INIT(0); atomic64_t kmem_alloc_used = ATOMIC64_INIT(0);
unsigned long long kmem_alloc_max = 0; uint64_t kmem_alloc_max = 0;
#else /* HAVE_ATOMIC64_T */
atomic_t kmem_alloc_used = ATOMIC_INIT(0);
unsigned long long kmem_alloc_max = 0;
#endif /* HAVE_ATOMIC64_T */
EXPORT_SYMBOL(kmem_alloc_used); EXPORT_SYMBOL(kmem_alloc_used);
EXPORT_SYMBOL(kmem_alloc_max); EXPORT_SYMBOL(kmem_alloc_max);
@ -320,9 +315,9 @@ spl_kmem_alloc_debug(size_t size, int flags, int node)
ptr = spl_kmem_alloc_impl(size, flags, node); ptr = spl_kmem_alloc_impl(size, flags, node);
if (ptr) { if (ptr) {
kmem_alloc_used_add(size); atomic64_add(size, &kmem_alloc_used);
if (unlikely(kmem_alloc_used_read() > kmem_alloc_max)) if (unlikely(atomic64_read(&kmem_alloc_used) > kmem_alloc_max))
kmem_alloc_max = kmem_alloc_used_read(); kmem_alloc_max = atomic64_read(&kmem_alloc_used);
} }
return (ptr); return (ptr);
@ -331,7 +326,7 @@ spl_kmem_alloc_debug(size_t size, int flags, int node)
inline void inline void
spl_kmem_free_debug(const void *ptr, size_t size) spl_kmem_free_debug(const void *ptr, size_t size)
{ {
kmem_alloc_used_sub(size); atomic64_sub(size, &kmem_alloc_used);
spl_kmem_free_impl(ptr, size); spl_kmem_free_impl(ptr, size);
} }
@ -595,7 +590,7 @@ spl_kmem_init(void)
{ {
#ifdef DEBUG_KMEM #ifdef DEBUG_KMEM
kmem_alloc_used_set(0); atomic64_set(&kmem_alloc_used, 0);
@ -617,9 +612,10 @@ spl_kmem_fini(void)
* at that address to aid in debugging. Performance is not * at that address to aid in debugging. Performance is not
* a serious concern here since it is module unload time. * a serious concern here since it is module unload time.
*/ */
if (kmem_alloc_used_read() != 0) if (atomic64_read(&kmem_alloc_used) != 0)
printk(KERN_WARNING "kmem leaked %ld/%llu bytes\n", printk(KERN_WARNING "kmem leaked %ld/%llu bytes\n",
(unsigned long)kmem_alloc_used_read(), kmem_alloc_max); (unsigned long)atomic64_read(&kmem_alloc_used),
kmem_alloc_max);
#ifdef DEBUG_KMEM_TRACKING #ifdef DEBUG_KMEM_TRACKING
spl_kmem_fini_tracking(&kmem_list, &kmem_lock); spl_kmem_fini_tracking(&kmem_list, &kmem_lock);

View File

@ -82,11 +82,7 @@ proc_domemused(CONST_CTL_TABLE *table, int write,
if (write) { if (write) {
*ppos += *lenp; *ppos += *lenp;
} else { } else {
#ifdef HAVE_ATOMIC64_T
val = atomic64_read((atomic64_t *)table->data); val = atomic64_read((atomic64_t *)table->data);
#else
val = atomic_read((atomic_t *)table->data);
#endif /* HAVE_ATOMIC64_T */
rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos); rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
} }
@ -315,18 +311,14 @@ static struct ctl_table spl_kmem_table[] = {
{ {
.procname = "kmem_used", .procname = "kmem_used",
.data = &kmem_alloc_used, .data = &kmem_alloc_used,
#ifdef HAVE_ATOMIC64_T
.maxlen = sizeof (atomic64_t), .maxlen = sizeof (atomic64_t),
#else
.maxlen = sizeof (atomic_t),
#endif /* HAVE_ATOMIC64_T */
.mode = 0444, .mode = 0444,
.proc_handler = &proc_domemused, .proc_handler = &proc_domemused,
}, },
{ {
.procname = "kmem_max", .procname = "kmem_max",
.data = &kmem_alloc_max, .data = &kmem_alloc_max,
.maxlen = sizeof (unsigned long), .maxlen = sizeof (uint64_t),
.extra1 = &table_min, .extra1 = &table_min,
.extra2 = &table_max, .extra2 = &table_max,
.mode = 0444, .mode = 0444,