From 07de86923bcb78352a4f4ffef02408f9a7dc794e Mon Sep 17 00:00:00 2001
From: Andrew Innes
Date: Thu, 27 Oct 2022 06:08:31 +0800
Subject: [PATCH] Aligned free for aligned alloc

The Windows port frees memory that was allocated with alignment
differently than it frees ordinary allocations, so change the frees
to match the kind of allocation.

Reviewed-by: Richard Yao
Reviewed-by: Brian Behlendorf
Signed-off-by: Andrew Innes
Co-Authored-By: Jorgen Lundman
Closes #14059
---
 lib/libspl/include/umem.h    | 20 +++++++++++++++++++-
 lib/libzutil/zutil_import.c  | 18 +++++++++---------
 module/os/linux/zfs/abd_os.c |  4 ++--
 3 files changed, 30 insertions(+), 12 deletions(-)

diff --git a/lib/libspl/include/umem.h b/lib/libspl/include/umem.h
index 82976f756..77c216721 100644
--- a/lib/libspl/include/umem.h
+++ b/lib/libspl/include/umem.h
@@ -137,6 +137,21 @@ umem_free(const void *ptr, size_t size __maybe_unused)
 	free((void *)ptr);
 }
 
+/*
+ * umem_free_aligned was added to support portability with
+ * non-POSIX platforms that require a different free to be
+ * used with aligned allocations.
+ */
+static inline void
+umem_free_aligned(void *ptr, size_t size __maybe_unused)
+{
+#ifndef _WIN32
+	free((void *)ptr);
+#else
+	_aligned_free(ptr);
+#endif
+}
+
 static inline void
 umem_nofail_callback(umem_nofail_callback_t *cb __maybe_unused)
 {}
@@ -196,7 +211,10 @@ umem_cache_free(umem_cache_t *cp, void *ptr)
 	if (cp->cache_destructor)
 		cp->cache_destructor(ptr, cp->cache_private);
 
-	umem_free(ptr, cp->cache_bufsize);
+	if (cp->cache_align != 0)
+		umem_free_aligned(ptr, cp->cache_bufsize);
+	else
+		umem_free(ptr, cp->cache_bufsize);
 }
 
 static inline void
diff --git a/lib/libzutil/zutil_import.c b/lib/libzutil/zutil_import.c
index fee176184..5d7b4a946 100644
--- a/lib/libzutil/zutil_import.c
+++ b/lib/libzutil/zutil_import.c
@@ -934,7 +934,6 @@ zpool_read_label_slow(int fd, nvlist_t **config, int *num_labels)
 	vdev_phys_t *label;
 	nvlist_t *expected_config = NULL;
 	uint64_t expected_guid = 0, size;
-	int error;
 
 	*config = NULL;
 
@@ -942,8 +941,9 @@ zpool_read_label_slow(int fd, nvlist_t **config, int *num_labels)
 		return (0);
 	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);
 
-	error = posix_memalign((void **)&label, PAGESIZE, sizeof (*label));
-	if (error)
+	label = (vdev_phys_t *)umem_alloc_aligned(sizeof (*label), PAGESIZE,
+	    UMEM_DEFAULT);
+	if (label == NULL)
 		return (-1);
 
 	for (l = 0; l < VDEV_LABELS; l++) {
@@ -992,7 +992,7 @@ zpool_read_label_slow(int fd, nvlist_t **config, int *num_labels)
 	if (num_labels != NULL)
 		*num_labels = count;
 
-	free(label);
+	umem_free_aligned(label, sizeof (*label));
 	*config = expected_config;
 
 	return (0);
@@ -1023,9 +1023,9 @@ zpool_read_label(int fd, nvlist_t **config, int *num_labels)
 		return (0);
 	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);
 
-	error = posix_memalign((void **)&labels, PAGESIZE,
-	    VDEV_LABELS * sizeof (*labels));
-	if (error)
+	labels = (vdev_phys_t *)umem_alloc_aligned(
+	    VDEV_LABELS * sizeof (*labels), PAGESIZE, UMEM_DEFAULT);
+	if (labels == NULL)
 		return (-1);
 
 	memset(aiocbs, 0, sizeof (aiocbs));
@@ -1078,7 +1078,7 @@ zpool_read_label(int fd, nvlist_t **config, int *num_labels)
 		error = zpool_read_label_slow(fd, config, num_labels);
 		saved_errno = errno;
 	}
-	free(labels);
+	umem_free_aligned(labels, VDEV_LABELS * sizeof (*labels));
 	errno = saved_errno;
 	return (error);
 }
@@ -1127,7 +1127,7 @@ zpool_read_label(int fd, nvlist_t **config, int *num_labels)
 	if (num_labels != NULL)
 		*num_labels = count;
 
-	free(labels);
+	umem_free_aligned(labels, VDEV_LABELS * sizeof (*labels));
 	*config = expected_config;
 
 	return (0);
diff --git a/module/os/linux/zfs/abd_os.c b/module/os/linux/zfs/abd_os.c
index e9b28becf..16530d826 100644
--- a/module/os/linux/zfs/abd_os.c
+++ b/module/os/linux/zfs/abd_os.c
@@ -598,7 +598,7 @@ abd_free_chunks(abd_t *abd)
 
 	abd_for_each_sg(abd, sg, n, i) {
 		struct page *p = nth_page(sg_page(sg), 0);
-		umem_free(p, PAGESIZE);
+		umem_free_aligned(p, PAGESIZE);
 	}
 	abd_free_sg_table(abd);
 }
@@ -704,7 +704,7 @@ abd_free_zero_scatter(void)
 	__free_page(abd_zero_page);
 #endif /* HAVE_ZERO_PAGE_GPL_ONLY */
 #else
-	umem_free(abd_zero_page, PAGESIZE);
+	umem_free_aligned(abd_zero_page, PAGESIZE);
 #endif /* _KERNEL */
 }
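
Editor's note: a minimal user-space sketch of the allocate/free pairing this patch
enforces, kept separate from the patch itself. The my_alloc_aligned()/my_free_aligned()
helpers below are hypothetical names for illustration, not the libspl wrappers; the
point is that memory from the Windows CRT's _aligned_malloc() must be released with
_aligned_free(), while POSIX posix_memalign() memory is released with plain free().

#include <stdlib.h>
#include <stdio.h>
#ifdef _WIN32
#include <malloc.h>	/* _aligned_malloc(), _aligned_free() */
#endif

/* Allocate 'size' bytes aligned to 'align' (a power of two). */
static void *
my_alloc_aligned(size_t size, size_t align)
{
#ifdef _WIN32
	/* CRT aligned allocator; its result must not be passed to free(). */
	return (_aligned_malloc(size, align));
#else
	void *ptr = NULL;

	/* posix_memalign() returns 0 on success, an errno value on failure. */
	if (posix_memalign(&ptr, align, size) != 0)
		return (NULL);
	return (ptr);
#endif
}

/* Release memory obtained from my_alloc_aligned() with the matching free. */
static void
my_free_aligned(void *ptr)
{
#ifdef _WIN32
	_aligned_free(ptr);	/* plain free() here would corrupt the CRT heap */
#else
	free(ptr);		/* POSIX aligned memory is freed normally */
#endif
}

int
main(void)
{
	void *buf = my_alloc_aligned(4096, 4096);

	if (buf == NULL)
		return (1);
	printf("aligned buffer at %p\n", buf);
	my_free_aligned(buf);
	return (0);
}

This mirrors the patch's design: umem_free_aligned() hides the platform difference in
one place, and umem_cache_free() picks it only when the cache was created with a
non-zero alignment, so unaligned allocations keep going through umem_free().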