zfsonlinux/debian/patches/0017-vdev_disk-reorganise-vdev_disk_io_start.patch
Thomas Lamprecht 68be554e71 backport 2.2.4 staging for better 6.8 support
Use the current ZFS 2.2.4 staging tree [0] with commit deb7a8423 ("Fix
corruption caused by mmap flushing problems") on top.

Additionally, include an open, but already acked, pull request [1]
that avoids a potential general protection fault caused by touching a
vbio after it has been handed off to the kernel.

[0]: https://github.com/openzfs/zfs/commits/zfs-2.2.4-staging/
[1]: https://github.com/openzfs/zfs/pull/16049

Both changes should mostly be limited to the kernel module code.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
2024-04-03 09:56:31 +02:00
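
The pull request [1] guards against a use-after-free style race: once an
I/O has been handed off to the kernel, its completion path may run (and
free the associated state) at any time, so anything still needed
afterwards has to be captured before the hand-off. The following
userspace-only sketch illustrates that ownership rule; all names are
hypothetical and this is not the ZFS or kernel code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for a vbio: ownership passes on submit. */
struct request {
    size_t nbytes;
    void (*complete)(struct request *);
};

/* The "kernel" side frees the request when the I/O completes. */
static void complete_and_free(struct request *rq)
{
    free(rq);
}

static void *completion_thread(void *arg)
{
    struct request *rq = arg;
    rq->complete(rq);   /* may run before submit() even returns */
    return NULL;
}

/* Hand the request off; after this call we no longer own it. */
static void submit(struct request *rq, pthread_t *tid)
{
    pthread_create(tid, NULL, completion_thread, rq);
}

int main(void)
{
    struct request *rq = malloc(sizeof (*rq));
    pthread_t tid;

    rq->nbytes = 4096;
    rq->complete = complete_and_free;

    /* Capture anything needed later *before* the hand-off ... */
    size_t nbytes = rq->nbytes;

    submit(rq, &tid);

    /* ... because dereferencing rq here would race with the free. */
    printf("submitted %zu bytes\n", nbytes);

    pthread_join(tid, NULL);
    return 0;
}

(Compile with -pthread; the only point is that rq is never read again
after submit().)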

From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Rob Norris <rob.norris@klarasystems.com>
Date: Tue, 9 Jan 2024 12:23:30 +1100
Subject: [PATCH] vdev_disk: reorganise vdev_disk_io_start

Light reshuffle to make it a bit more linear to read and get rid of a
bunch of args that aren't needed in all cases.

Reviewed-by: Alexander Motin <mav@FreeBSD.org>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Rob Norris <rob.norris@klarasystems.com>
Sponsored-by: Klara, Inc.
Sponsored-by: Wasabi Technology, Inc.
Closes #15533
Closes #15588
(cherry picked from commit 867178ae1db28e73051c8a7ce662f2f2f81cd8e6)
---
module/os/linux/zfs/vdev_disk.c | 51 ++++++++++++++++++++-------------
1 file changed, 31 insertions(+), 20 deletions(-)

diff --git a/module/os/linux/zfs/vdev_disk.c b/module/os/linux/zfs/vdev_disk.c
index 957619b87..51e7cef2f 100644
--- a/module/os/linux/zfs/vdev_disk.c
+++ b/module/os/linux/zfs/vdev_disk.c
@@ -720,9 +720,16 @@ vdev_classic_bio_max_segs(zio_t *zio, int bio_size, uint64_t abd_offset)
}

static int
-vdev_classic_physio(struct block_device *bdev, zio_t *zio,
- size_t io_size, uint64_t io_offset, int rw, int flags)
+vdev_classic_physio(zio_t *zio)
{
+ vdev_t *v = zio->io_vd;
+ vdev_disk_t *vd = v->vdev_tsd;
+ struct block_device *bdev = BDH_BDEV(vd->vd_bdh);
+ size_t io_size = zio->io_size;
+ uint64_t io_offset = zio->io_offset;
+ int rw = zio->io_type == ZIO_TYPE_READ ? READ : WRITE;
+ int flags = 0;
+
dio_request_t *dr;
uint64_t abd_offset;
uint64_t bio_offset;
@@ -944,7 +951,7 @@ vdev_disk_io_start(zio_t *zio)
{
vdev_t *v = zio->io_vd;
vdev_disk_t *vd = v->vdev_tsd;
- int rw, error;
+ int error;

/*
* If the vdev is closed, it's likely in the REMOVED or FAULTED state.
@@ -1007,13 +1014,6 @@ vdev_disk_io_start(zio_t *zio)
rw_exit(&vd->vd_lock);
zio_execute(zio);
return;
- case ZIO_TYPE_WRITE:
- rw = WRITE;
- break;
-
- case ZIO_TYPE_READ:
- rw = READ;
- break;

case ZIO_TYPE_TRIM:
zio->io_error = vdev_disk_io_trim(zio);
@@ -1026,23 +1026,34 @@ vdev_disk_io_start(zio_t *zio)
#endif
return;

- default:
+ case ZIO_TYPE_READ:
+ case ZIO_TYPE_WRITE:
+ zio->io_target_timestamp = zio_handle_io_delay(zio);
+ error = vdev_classic_physio(zio);
rw_exit(&vd->vd_lock);
- zio->io_error = SET_ERROR(ENOTSUP);
- zio_interrupt(zio);
+ if (error) {
+ zio->io_error = error;
+ zio_interrupt(zio);
+ }
return;
- }

- zio->io_target_timestamp = zio_handle_io_delay(zio);
- error = vdev_classic_physio(BDH_BDEV(vd->vd_bdh), zio,
- zio->io_size, zio->io_offset, rw, 0);
- rw_exit(&vd->vd_lock);
+ default:
+ /*
+ * Getting here means our parent vdev has made a very strange
+ * request of us, and shouldn't happen. Assert here to force a
+ * crash in dev builds, but in production return the IO
+ * unhandled. The pool will likely suspend anyway but that's
+ * nicer than crashing the kernel.
+ */
+ ASSERT3S(zio->io_type, ==, -1);

- if (error) {
- zio->io_error = error;
+ rw_exit(&vd->vd_lock);
+ zio->io_error = SET_ERROR(ENOTSUP);
zio_interrupt(zio);
return;
}
+
+ __builtin_unreachable();
}

static void
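
The reorganisation above boils down to two moves: vdev_classic_physio()
now takes only the zio and derives the block device, size, offset and
direction from it, and the READ/WRITE path is handled inside the switch
like the other zio types, with an ASSERT-backed default for anything
unexpected. As a rough illustration of the argument-collapsing half,
here is a self-contained sketch with hypothetical names (not the ZFS
code):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical I/O descriptor, loosely mirroring what a zio carries. */
struct io {
    int      is_read;
    uint64_t offset;
    size_t   size;
};

/*
 * Before: each per-I/O parameter is passed separately, even though the
 * caller pulled them all out of the descriptor it passes anyway.
 */
static int physio_before(struct io *io, size_t size, uint64_t offset, int rw)
{
    (void)io;   /* only some callers actually need it */
    printf("%s %zu bytes at %llu\n", rw ? "write" : "read",
        size, (unsigned long long)offset);
    return 0;
}

/* After: take only the descriptor and derive the details locally. */
static int physio_after(struct io *io)
{
    size_t size = io->size;
    uint64_t offset = io->offset;
    int rw = io->is_read ? 0 : 1;

    printf("%s %zu bytes at %llu\n", rw ? "write" : "read",
        size, (unsigned long long)offset);
    return 0;
}

int main(void)
{
    struct io io = { .is_read = 1, .offset = 8192, .size = 4096 };

    physio_before(&io, io.size, io.offset, !io.is_read);
    physio_after(&io);
    return 0;
}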