zfsonlinux/zfs-patches/0011-Trim-new-line-from-zfs_vdev_scheduler.patch
Stoiko Ivanov a010b40938 update/rebase to zfs-0.7.10 with patches from ZOL
Signed-off-by: Stoiko Ivanov <s.ivanov@proxmox.com>
2018-09-13 08:54:51 +02:00

From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Brian Behlendorf <behlendorf1@llnl.gov>
Date: Tue, 5 Sep 2017 13:41:32 -0700
Subject: [PATCH] Trim new line from zfs_vdev_scheduler

Add a helper function to trim the trailing newline. While we're
here, use this new hook to immediately apply the new scheduler.
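
The trimming itself reduces to truncating the value at the first '\n'.
A minimal, stand-alone sketch of the same logic (user-space C;
trim_newline is an illustrative name, not a function added by this
patch):

#include <stdio.h>
#include <string.h>

/*
 * Truncate a string at the first newline, mirroring the strchr()-based
 * trim performed in param_set_vdev_scheduler() below.
 */
static void
trim_newline(char *s)
{
	char *p;

	if ((p = strchr(s, '\n')) != NULL)
		*p = '\0';
}

int
main(void)
{
	/*
	 * "echo noop > /sys/module/zfs/parameters/zfs_vdev_scheduler"
	 * hands the kernel the string "noop\n"; without trimming, that
	 * raw value would be stored and passed to the elevator code.
	 */
	char val[] = "noop\n";

	trim_newline(val);
	printf("\"%s\"\n", val);	/* prints "noop" */
	return (0);
}
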
Reviewed-by: Giuseppe Di Natale <dinatale2@llnl.gov>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #3356
Closes #6573
Signed-off-by: Stoiko Ivanov <s.ivanov@proxmox.com>
---
module/zfs/vdev_disk.c | 71 +++++++++++++++++++++++++++++++++++++-------------
1 file changed, 53 insertions(+), 18 deletions(-)

diff --git a/module/zfs/vdev_disk.c b/module/zfs/vdev_disk.c
index 5ae50a31..d6212835 100644
--- a/module/zfs/vdev_disk.c
+++ b/module/zfs/vdev_disk.c
@@ -27,13 +27,14 @@
*/

#include <sys/zfs_context.h>
-#include <sys/spa.h>
+#include <sys/spa_impl.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <sys/sunldi.h>
+#include <linux/mod_compat.h>

char *zfs_vdev_scheduler = VDEV_SCHEDULER;
static void *zfs_vdev_holder = VDEV_HOLDER;
@@ -113,15 +114,23 @@ vdev_disk_error(zio_t *zio)
* physical device. This yields the largest possible requests for
* the device with the lowest total overhead.
*/
-static int
+static void
vdev_elevator_switch(vdev_t *v, char *elevator)
{
vdev_disk_t *vd = v->vdev_tsd;
- struct block_device *bdev = vd->vd_bdev;
- struct request_queue *q = bdev_get_queue(bdev);
- char *device = bdev->bd_disk->disk_name;
+ struct request_queue *q;
+ char *device;
int error;

+ for (int c = 0; c < v->vdev_children; c++)
+ vdev_elevator_switch(v->vdev_child[c], elevator);
+
+ if (!v->vdev_ops->vdev_op_leaf || vd->vd_bdev == NULL)
+ return;
+
+ q = bdev_get_queue(vd->vd_bdev);
+ device = vd->vd_bdev->bd_disk->disk_name;
+
/*
* Skip devices which are not whole disks (partitions).
* Device-mapper devices are excepted since they may be whole
@@ -131,15 +140,15 @@ vdev_elevator_switch(vdev_t *v, char *elevator)
* "Skip devices without schedulers" check below will fail.
*/
if (!v->vdev_wholedisk && strncmp(device, "dm-", 3) != 0)
- return (0);
+ return;

/* Skip devices without schedulers (loop, ram, dm, etc) */
if (!q->elevator || !blk_queue_stackable(q))
- return (0);
+ return;

/* Leave existing scheduler when set to "none" */
if ((strncmp(elevator, "none", 4) == 0) && (strlen(elevator) == 4))
- return (0);
+ return;

#ifdef HAVE_ELEVATOR_CHANGE
error = elevator_change(q, elevator);
@@ -156,20 +165,16 @@ vdev_elevator_switch(vdev_t *v, char *elevator)
" 2>/dev/null; " \
"echo %s"
- {
- char *argv[] = { "/bin/sh", "-c", NULL, NULL };
- char *envp[] = { NULL };
+ char *argv[] = { "/bin/sh", "-c", NULL, NULL };
+ char *envp[] = { NULL };

- argv[2] = kmem_asprintf(SET_SCHEDULER_CMD, device, elevator);
- error = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
- strfree(argv[2]);
- }
+ argv[2] = kmem_asprintf(SET_SCHEDULER_CMD, device, elevator);
+ error = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
+ strfree(argv[2]);
#endif /* HAVE_ELEVATOR_CHANGE */

if (error)
printk("ZFS: Unable to set \"%s\" scheduler for %s (%s): %d\n",
elevator, v->vdev_path, device, error);
-
- return (error);
}

/*
@@ -798,6 +803,35 @@ vdev_disk_rele(vdev_t *vd)
/* XXX: Implement me as a vnode rele for the device */
}

+static int
+param_set_vdev_scheduler(const char *val, zfs_kernel_param_t *kp)
+{
+ spa_t *spa = NULL;
+ char *p;
+
+ if (val == NULL)
+ return (SET_ERROR(-EINVAL));
+
+ if ((p = strchr(val, '\n')) != NULL)
+ *p = '\0';
+
+ mutex_enter(&spa_namespace_lock);
+ while ((spa = spa_next(spa)) != NULL) {
+ if (spa_state(spa) != POOL_STATE_ACTIVE ||
+ !spa_writeable(spa) || spa_suspended(spa))
+ continue;
+
+ spa_open_ref(spa, FTAG);
+ mutex_exit(&spa_namespace_lock);
+ vdev_elevator_switch(spa->spa_root_vdev, (char *)val);
+ mutex_enter(&spa_namespace_lock);
+ spa_close(spa, FTAG);
+ }
+ mutex_exit(&spa_namespace_lock);
+
+ return (param_set_charp(val, kp));
+}
+
vdev_ops_t vdev_disk_ops = {
vdev_disk_open,
vdev_disk_close,
@@ -812,5 +846,6 @@ vdev_ops_t vdev_disk_ops = {
B_TRUE /* leaf vdev */
};

-module_param(zfs_vdev_scheduler, charp, 0644);
+module_param_call(zfs_vdev_scheduler, param_set_vdev_scheduler,
+ param_get_charp, &zfs_vdev_scheduler, 0644);
MODULE_PARM_DESC(zfs_vdev_scheduler, "I/O scheduler");
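
With the module_param_call() hook in place, the parameter is applied
immediately: param_set_vdev_scheduler() strips the trailing newline that
echo(1) appends, then walks every active, writable, non-suspended pool
and pushes the scheduler down to each leaf vdev through the
now-recursive vdev_elevator_switch(). Before this patch the raw value,
newline included, was stored by the plain charp handler and only took
effect when a vdev was next opened.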