From 6af8db61b1ea489ade2d5344f4ae5f09c3d9faad Mon Sep 17 00:00:00 2001
From: Rob Norris
Date: Sat, 5 Jul 2025 13:22:22 +1000
Subject: [PATCH] metaslab: don't pass whole zio to throttle reserve APIs

They only need a couple of fields, and passing the whole thing invites
fiddling around inside it, like modifying flags, which makes it much
harder to understand the zio state from inside zio.c. The flag update
is instead moved to zio.c, just after a successful throttle
reservation.

Rename ZIO_FLAG_IO_ALLOCATING to ZIO_FLAG_ALLOC_THROTTLED. The new name
better describes what the flag means, and makes it look less like
IO_IS_ALLOCATING, which means something different.

Sponsored-by: Klara, Inc.
Sponsored-by: Wasabi Technology, Inc.
Reviewed-by: Alexander Motin
Reviewed-by: Brian Behlendorf
Signed-off-by: Rob Norris
Closes #17508
---
 include/sys/metaslab.h      |  7 ++++---
 include/sys/zio.h           |  2 +-
 man/man8/zpool-events.8     |  4 ++--
 module/zcommon/zfs_valstr.c |  2 +-
 module/zfs/metaslab.c       | 31 +++++++++++++++----------------
 module/zfs/zio.c            | 37 +++++++++++++++++++++----------------
 6 files changed, 44 insertions(+), 39 deletions(-)

diff --git a/include/sys/metaslab.h b/include/sys/metaslab.h
index 4d57e52e8..36cbe06ba 100644
--- a/include/sys/metaslab.h
+++ b/include/sys/metaslab.h
@@ -110,9 +110,10 @@ void metaslab_class_balance(metaslab_class_t *mc, boolean_t onsync);
 void metaslab_class_histogram_verify(metaslab_class_t *);
 uint64_t metaslab_class_fragmentation(metaslab_class_t *);
 uint64_t metaslab_class_expandable_space(metaslab_class_t *);
-boolean_t metaslab_class_throttle_reserve(metaslab_class_t *, int, zio_t *,
-    boolean_t, boolean_t *);
-boolean_t metaslab_class_throttle_unreserve(metaslab_class_t *, int, zio_t *);
+boolean_t metaslab_class_throttle_reserve(metaslab_class_t *, int, int,
+    uint64_t, boolean_t, boolean_t *);
+boolean_t metaslab_class_throttle_unreserve(metaslab_class_t *, int, int,
+    uint64_t);
 void metaslab_class_evict_old(metaslab_class_t *, uint64_t);
 const char *metaslab_class_get_name(metaslab_class_t *);
 uint64_t metaslab_class_get_alloc(metaslab_class_t *);
diff --git a/include/sys/zio.h b/include/sys/zio.h
index 01f3babeb..e65ac2803 100644
--- a/include/sys/zio.h
+++ b/include/sys/zio.h
@@ -196,7 +196,7 @@ typedef uint64_t zio_flag_t;
 #define	ZIO_FLAG_DONT_RETRY	(1ULL << 10)
 #define	ZIO_FLAG_NODATA		(1ULL << 12)
 #define	ZIO_FLAG_INDUCE_DAMAGE	(1ULL << 13)
-#define	ZIO_FLAG_IO_ALLOCATING	(1ULL << 14)
+#define	ZIO_FLAG_ALLOC_THROTTLED	(1ULL << 14)
 
 #define	ZIO_FLAG_DDT_INHERIT	(ZIO_FLAG_IO_RETRY - 1)
 #define	ZIO_FLAG_GANG_INHERIT	(ZIO_FLAG_IO_RETRY - 1)
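To make the prototype change concrete, here is a compile-ready sketch of the
new calling convention. Everything in it (the stub throttle_reserve(), the
literal values) is a stand-in invented for illustration, not the kernel's
code; only the shape of the call mirrors the new API.

#include <stdbool.h>
#include <stdint.h>

#define ALLOC_THROTTLED_BIT (1ULL << 14)	/* toy flag, same bit */

/*
 * Old shape (for contrast): the throttle took the whole zio.
 *   boolean_t metaslab_class_throttle_reserve(metaslab_class_t *,
 *       int slots, zio_t *zio, boolean_t must, boolean_t *more);
 * New shape: plain values only, so the callee cannot reach back into
 * the zio (e.g. to flip io_flags behind the caller's back).
 */
static bool
throttle_reserve(int allocator, int copies, uint64_t io_size, bool must,
    bool *more)
{
	/* Stub body: this sketch only demonstrates the call shape. */
	(void) allocator; (void) copies; (void) io_size; (void) must;
	*more = true;
	return (true);
}

int
main(void)
{
	/* Stand-ins for zio->io_allocator, zp_copies, io_size. */
	uint64_t io_flags = 0;
	bool more;

	if (throttle_reserve(3, 2, 131072, false, &more)) {
		/* The caller now owns the flag update (moved to zio.c). */
		io_flags |= ALLOC_THROTTLED_BIT;
	}
	return (io_flags & ALLOC_THROTTLED_BIT) ? 0 : 1;
}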
diff --git a/man/man8/zpool-events.8 b/man/man8/zpool-events.8
index 7af1917da..2d32dce2b 100644
--- a/man/man8/zpool-events.8
+++ b/man/man8/zpool-events.8
@@ -28,7 +28,7 @@
 .\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
 .\" Copyright (c) 2024, 2025, Klara, Inc.
 .\"
-.Dd May 27, 2025
+.Dd July 3, 2025
 .Dt ZPOOL-EVENTS 8
 .Os
 .
@@ -465,7 +465,7 @@
 ZIO_FLAG_DONT_RETRY:0x00000400
 ZIO_FLAG_NODATA:0x00001000
 ZIO_FLAG_INDUCE_DAMAGE:0x00002000
-ZIO_FLAG_IO_ALLOCATING:0x00004000
+ZIO_FLAG_ALLOC_THROTTLED:0x00004000
 ZIO_FLAG_IO_RETRY:0x00008000
 ZIO_FLAG_PROBE:0x00010000
 ZIO_FLAG_TRYHARD:0x00020000
diff --git a/module/zcommon/zfs_valstr.c b/module/zcommon/zfs_valstr.c
index 08813b81c..0cb9f584a 100644
--- a/module/zcommon/zfs_valstr.c
+++ b/module/zcommon/zfs_valstr.c
@@ -203,7 +203,7 @@ _VALSTR_BITFIELD_IMPL(zio_flag,
 	{ '?', "??", "[UNUSED 11]" },
 	{ '.', "ND", "NODATA" },
 	{ '.', "ID", "INDUCE_DAMAGE" },
-	{ '.', "AL", "IO_ALLOCATING" },
+	{ '.', "AT", "ALLOC_THROTTLED" },
 	{ '.', "RE", "IO_RETRY" },
 	{ '.', "PR", "PROBE" },
 	{ '.', "TH", "TRYHARD" },
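For a quick sense of how the renamed bit reads in decoded output, the sketch
below maps a flag word to the two-letter codes from the zfs_valstr.c table
above (bits 11-17 only, as shown in the excerpt). The decoder itself is a
toy, not the kernel's _VALSTR_BITFIELD_IMPL machinery.

#include <stdint.h>
#include <stdio.h>

/* Two-letter codes for zio_flag bits 11-17, per the table above. */
static const char *const zio_flag_codes[] = {
	"??",	/* bit 11: unused */
	"ND",	/* bit 12: NODATA */
	"ID",	/* bit 13: INDUCE_DAMAGE */
	"AT",	/* bit 14: ALLOC_THROTTLED (formerly "AL" IO_ALLOCATING) */
	"RE",	/* bit 15: IO_RETRY */
	"PR",	/* bit 16: PROBE */
	"TH",	/* bit 17: TRYHARD */
};

int
main(void)
{
	uint64_t flags = 0x00004000;	/* ZIO_FLAG_ALLOC_THROTTLED */

	/* Print the short code for every bit set in the word. */
	for (int bit = 11; bit <= 17; bit++) {
		if (flags & (1ULL << bit))
			printf("%s ", zio_flag_codes[bit - 11]);
	}
	printf("\n");	/* prints: AT */
	return (0);
}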
diff --git a/module/zfs/metaslab.c b/module/zfs/metaslab.c
index 43b94eba2..23eca0425 100644
--- a/module/zfs/metaslab.c
+++ b/module/zfs/metaslab.c
@@ -5757,21 +5757,21 @@ metaslab_free_dva(spa_t *spa, const dva_t *dva, boolean_t checkpoint)
 }
 
 /*
- * Reserve some allocation slots. The reservation system must be called
- * before we call into the allocator. If there aren't any available slots
- * then the I/O will be throttled until an I/O completes and its slots are
- * freed up. The function returns true if it was successful in placing
- * the reservation.
+ * Reserve some space for a future allocation. The reservation system must be
+ * called before we call into the allocator. If there isn't enough space
+ * available, the calling I/O will be throttled until another I/O completes and
+ * its reservation is released. The function returns true if it was successful
+ * in placing the reservation.
  */
 boolean_t
-metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, zio_t *zio,
-    boolean_t must, boolean_t *more)
+metaslab_class_throttle_reserve(metaslab_class_t *mc, int allocator,
+    int copies, uint64_t io_size, boolean_t must, boolean_t *more)
 {
-	metaslab_class_allocator_t *mca = &mc->mc_allocator[zio->io_allocator];
+	metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
 
 	ASSERT(mc->mc_alloc_throttle_enabled);
-	if (mc->mc_alloc_io_size < zio->io_size) {
-		mc->mc_alloc_io_size = zio->io_size;
+	if (mc->mc_alloc_io_size < io_size) {
+		mc->mc_alloc_io_size = io_size;
 		metaslab_class_balance(mc, B_FALSE);
 	}
 	if (must || mca->mca_reserved <= mc->mc_alloc_max) {
@@ -5782,10 +5782,9 @@ metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, zio_t *zio,
 	 * worst that can happen is few more I/Os get to allocation
 	 * earlier, that is not a problem.
 	 */
-	int64_t delta = slots * zio->io_size;
+	int64_t delta = copies * io_size;
 	*more = (atomic_add_64_nv(&mca->mca_reserved, delta) <=
 	    mc->mc_alloc_max);
-	zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
 	return (B_TRUE);
 	}
 	*more = B_FALSE;
@@ -5793,13 +5792,13 @@
 }
 
 boolean_t
-metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots,
-    zio_t *zio)
+metaslab_class_throttle_unreserve(metaslab_class_t *mc, int allocator,
+    int copies, uint64_t io_size)
 {
-	metaslab_class_allocator_t *mca = &mc->mc_allocator[zio->io_allocator];
+	metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
 
 	ASSERT(mc->mc_alloc_throttle_enabled);
-	int64_t delta = slots * zio->io_size;
+	int64_t delta = copies * io_size;
 	return (atomic_add_64_nv(&mca->mca_reserved, -delta) <=
 	    mc->mc_alloc_max);
 }
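To see the reservation accounting in isolation, here is a small userspace
model of the reserve/unreserve pair above, with C11 atomics standing in for
atomic_add_64_nv(). The type names and the 1 MiB limit are made up for the
sketch; only the check-then-add scheme mirrors the patch.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for the per-allocator reservation tally. */
typedef struct {
	_Atomic uint64_t mca_reserved;	/* bytes currently reserved */
} mca_model_t;

static const uint64_t alloc_max = 1024 * 1024;	/* pretend limit */

/*
 * Mirrors the scheme above: check the limit first, then add without a
 * lock. Racing callers may overshoot slightly, which the real code
 * tolerates; *more hints whether the next caller is likely to fit.
 */
static bool
reserve(mca_model_t *mca, int copies, uint64_t io_size, bool must,
    bool *more)
{
	uint64_t delta = (uint64_t)copies * io_size;

	if (must || atomic_load(&mca->mca_reserved) <= alloc_max) {
		uint64_t nv = atomic_fetch_add(&mca->mca_reserved,
		    delta) + delta;
		*more = (nv <= alloc_max);
		return (true);
	}
	*more = false;
	return (false);
}

/* Returns true if the tally dropped back under the limit. */
static bool
unreserve(mca_model_t *mca, int copies, uint64_t io_size)
{
	uint64_t delta = (uint64_t)copies * io_size;

	return (atomic_fetch_sub(&mca->mca_reserved, delta) - delta <=
	    alloc_max);
}

int
main(void)
{
	mca_model_t mca = { 0 };
	bool more;

	if (reserve(&mca, 2, 768 * 1024, false, &more))	/* 1.5 MiB */
		printf("first reserve ok, more=%d\n", more);
	if (!reserve(&mca, 1, 4096, false, &more))
		printf("second reserve throttled\n");
	printf("back under limit: %d\n",
	    (int)unreserve(&mca, 2, 768 * 1024));
	return (0);
}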
diff --git a/module/zfs/zio.c b/module/zfs/zio.c
index c5f385aeb..64f3d31f5 100644
--- a/module/zfs/zio.c
+++ b/module/zfs/zio.c
@@ -1679,7 +1679,7 @@ zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
 	 * If this is a retried I/O then we ignore it since we will
 	 * have already processed the original allocating I/O.
 	 */
-	if (flags & ZIO_FLAG_IO_ALLOCATING &&
+	if (flags & ZIO_FLAG_ALLOC_THROTTLED &&
 	    (vd != vd->vdev_top || (flags & ZIO_FLAG_IO_RETRY))) {
 		ASSERT(pio->io_metaslab_class != NULL);
 		ASSERT(pio->io_metaslab_class->mc_alloc_throttle_enabled);
@@ -1689,7 +1689,7 @@ zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
 		ASSERT(!(pio->io_flags & ZIO_FLAG_IO_REWRITE) ||
 		    pio->io_child_type == ZIO_CHILD_GANG);
 
-		flags &= ~ZIO_FLAG_IO_ALLOCATING;
+		flags &= ~ZIO_FLAG_ALLOC_THROTTLED;
 	}
 
 	zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size, size,
@@ -3151,7 +3151,7 @@ zio_write_gang_block(zio_t *pio, metaslab_class_t *mc)
 	ASSERT(ZIO_HAS_ALLOCATOR(pio));
 
 	int flags = METASLAB_GANG_HEADER;
-	if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
+	if (pio->io_flags & ZIO_FLAG_ALLOC_THROTTLED) {
 		ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
 		ASSERT(has_data);
 
@@ -3186,10 +3186,11 @@ zio_write_gang_block(zio_t *pio, metaslab_class_t *mc)
 	    ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
 	zio_gang_inherit_allocator(pio, zio);
 
-	if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
+	if (pio->io_flags & ZIO_FLAG_ALLOC_THROTTLED) {
 		boolean_t more;
-		VERIFY(metaslab_class_throttle_reserve(mc, gbh_copies,
-		    zio, B_TRUE, &more));
+		VERIFY(metaslab_class_throttle_reserve(mc, zio->io_allocator,
+		    gbh_copies, zio->io_size, B_TRUE, &more));
+		zio->io_flags |= ZIO_FLAG_ALLOC_THROTTLED;
 	}
 
 	/*
@@ -4072,9 +4073,11 @@ zio_io_to_allocate(metaslab_class_allocator_t *mca, boolean_t *more)
 	 * reserve then we throttle.
 	 */
 	if (!metaslab_class_throttle_reserve(zio->io_metaslab_class,
-	    zio->io_prop.zp_copies, zio, B_FALSE, more)) {
+	    zio->io_allocator, zio->io_prop.zp_copies, zio->io_size,
+	    B_FALSE, more)) {
 		return (NULL);
 	}
+	zio->io_flags |= ZIO_FLAG_ALLOC_THROTTLED;
 
 	avl_remove(&mca->mca_tree, zio);
 	ASSERT3U(zio->io_stage, <, ZIO_STAGE_DVA_ALLOCATE);
@@ -4230,13 +4233,14 @@ again:
 	 * If we are holding old class reservation, drop it.
 	 * Dispatch the next ZIO(s) there if some are waiting.
 	 */
-	if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
+	if (zio->io_flags & ZIO_FLAG_ALLOC_THROTTLED) {
 		if (metaslab_class_throttle_unreserve(mc,
-		    zio->io_prop.zp_copies, zio)) {
+		    zio->io_allocator, zio->io_prop.zp_copies,
+		    zio->io_size)) {
 			zio_allocate_dispatch(zio->io_metaslab_class,
 			    zio->io_allocator);
 		}
-		zio->io_flags &= ~ZIO_FLAG_IO_ALLOCATING;
+		zio->io_flags &= ~ZIO_FLAG_ALLOC_THROTTLED;
 	}
 
 	if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
@@ -5196,7 +5200,7 @@ zio_ready(zio_t *zio)
 	if (zio->io_error != 0) {
 		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
 
-		if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
+		if (zio->io_flags & ZIO_FLAG_ALLOC_THROTTLED) {
 			ASSERT(IO_IS_ALLOCATING(zio));
 			ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
 			ASSERT(zio->io_metaslab_class != NULL);
@@ -5207,8 +5211,8 @@ zio_ready(zio_t *zio)
 			 * issue the next I/O to allocate.
 			 */
 			if (metaslab_class_throttle_unreserve(
-			    zio->io_metaslab_class, zio->io_prop.zp_copies,
-			    zio)) {
+			    zio->io_metaslab_class, zio->io_allocator,
+			    zio->io_prop.zp_copies, zio->io_size)) {
 				zio_allocate_dispatch(zio->io_metaslab_class,
 				    zio->io_allocator);
 			}
@@ -5267,7 +5271,7 @@ zio_dva_throttle_done(zio_t *zio)
 	ASSERT3P(vd, ==, vd->vdev_top);
 	ASSERT(zio_injection_enabled || !(zio->io_flags & ZIO_FLAG_IO_RETRY));
 	ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
-	ASSERT(zio->io_flags & ZIO_FLAG_IO_ALLOCATING);
+	ASSERT(zio->io_flags & ZIO_FLAG_ALLOC_THROTTLED);
 
 	/*
 	 * Parents of gang children can have two flavors -- ones that allocated
@@ -5291,7 +5295,8 @@ zio_dva_throttle_done(zio_t *zio)
 	metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id,
 	    pio->io_allocator, flags, pio->io_size, tag);
 
-	if (metaslab_class_throttle_unreserve(zio->io_metaslab_class, 1, pio)) {
+	if (metaslab_class_throttle_unreserve(zio->io_metaslab_class,
+	    pio->io_allocator, 1, pio->io_size)) {
 		zio_allocate_dispatch(zio->io_metaslab_class,
 		    pio->io_allocator);
 	}
@@ -5322,7 +5327,7 @@ zio_done(zio_t *zio)
 	 * write. We must do this since the allocation is performed
 	 * by the logical I/O but the actual write is done by child I/Os.
	 */
-	if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING &&
+	if (zio->io_flags & ZIO_FLAG_ALLOC_THROTTLED &&
 	    zio->io_child_type == ZIO_CHILD_VDEV)
 		zio_dva_throttle_done(zio);
 
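The zio.c changes all reduce to one pairing: set ZIO_FLAG_ALLOC_THROTTLED
only after a successful reservation, and on completion test the flag, release
the same (allocator, copies, io_size) triple, and dispatch a waiter if the
class dropped back under its limit. Below is a minimal sketch of that
lifecycle; the types and the plain counter are toys invented for
illustration, not the kernel's state.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ZIO_FLAG_ALLOC_THROTTLED (1ULL << 14)

/* Toy zio carrying just the fields the throttle pairing needs. */
typedef struct {
	uint64_t io_flags;
	int io_allocator;
	int io_copies;		/* zio->io_prop.zp_copies in the kernel */
	uint64_t io_size;
} zio_model_t;

static uint64_t reserved;			/* toy per-class tally */
static const uint64_t limit = 1024 * 1024;

static bool
reserve(int allocator, int copies, uint64_t size)
{
	(void) allocator;
	if (reserved > limit)
		return (false);		/* caller must wait */
	reserved += (uint64_t)copies * size;
	return (true);
}

/* Returns true once the class is back under its limit. */
static bool
unreserve(int allocator, int copies, uint64_t size)
{
	(void) allocator;
	reserved -= (uint64_t)copies * size;
	return (reserved <= limit);
}

int
main(void)
{
	zio_model_t zio = { 0, 0, 2, 128 * 1024 };

	/* Issue side (cf. zio_io_to_allocate): reserve, then mark. */
	if (reserve(zio.io_allocator, zio.io_copies, zio.io_size))
		zio.io_flags |= ZIO_FLAG_ALLOC_THROTTLED;

	/* Completion side: only marked zios give their space back. */
	if (zio.io_flags & ZIO_FLAG_ALLOC_THROTTLED) {
		if (unreserve(zio.io_allocator, zio.io_copies, zio.io_size))
			printf("under limit: dispatch next waiter\n");
		zio.io_flags &= ~ZIO_FLAG_ALLOC_THROTTLED;
	}
	return (0);
}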