2025-01-04 03:04:27 +03:00
|
|
|
// SPDX-License-Identifier: CDDL-1.0
|
2019-09-25 19:20:30 +03:00
|
|
|
/*
|
|
|
|
|
* CDDL HEADER START
|
|
|
|
|
*
|
|
|
|
|
* The contents of this file are subject to the terms of the
|
|
|
|
|
* Common Development and Distribution License (the "License").
|
|
|
|
|
* You may not use this file except in compliance with the License.
|
|
|
|
|
*
|
|
|
|
|
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
|
2022-07-12 00:16:13 +03:00
|
|
|
* or https://opensource.org/licenses/CDDL-1.0.
|
2019-09-25 19:20:30 +03:00
|
|
|
* See the License for the specific language governing permissions
|
|
|
|
|
* and limitations under the License.
|
|
|
|
|
*
|
|
|
|
|
* When distributing Covered Code, include this CDDL HEADER in each
|
|
|
|
|
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
|
|
|
|
|
* If applicable, add the following below this CDDL HEADER, with the
|
|
|
|
|
* fields enclosed by brackets "[]" replaced with your own identifying
|
|
|
|
|
* information: Portions Copyright [yyyy] [name of copyright owner]
|
|
|
|
|
*
|
|
|
|
|
* CDDL HEADER END
|
|
|
|
|
*/
|
Improve ZVOL sync write performance by using a taskq
== Summary ==
Prior to this change, sync writes to a zvol are processed serially.
This commit makes zvols process concurrently outstanding sync writes in
parallel, similar to how reads and async writes are already handled.
The result is that the throughput of sync writes is tripled.
== Background ==
When a write comes in for a zvol (e.g. over iscsi), it is processed by
calling `zvol_request()` to initiate the operation. ZFS is expected to
later call `BIO_END_IO()` when the operation completes (possibly from a
different thread). There are a limited number of threads that are
available to call `zvol_request()` - one one per iscsi client (unless
using MC/S). Therefore, to ensure good performance, the latency of
`zvol_request()` is important, so that many i/o operations to the zvol
can be processed concurrently. In other words, if the client has
multiple outstanding requests to the zvol, the zvol should have multiple
outstanding requests to the storage hardware (i.e. issue multiple
concurrent `zio_t`'s).
For reads, and async writes (i.e. writes which can be acknowledged
before the data reaches stable storage), `zvol_request()` achieves low
latency by dispatching the bulk of the work (including waiting for i/o
to disk) to a taskq. The taskq callback (`zvol_read()` or
`zvol_write()`) blocks while waiting for the i/o to disk to complete.
The `zvol_taskq` has 32 threads (by default), so we can have up to 32
concurrent i/os to disk in service of requests to zvols.
However, for sync writes (i.e. writes which must be persisted to stable
storage before they can be acknowledged, by calling `zil_commit()`),
`zvol_request()` does not use `zvol_taskq`. Instead it blocks while
waiting for the ZIL write to disk to complete. This has the effect of
serializing sync writes to each zvol. In other words, each zvol will
only process one sync write at a time, waiting for it to be written to
the ZIL before accepting the next request.
The same issue applies to FLUSH operations, for which `zvol_request()`
calls `zil_commit()` directly.
== Description of change ==
This commit changes `zvol_request()` to use
`taskq_dispatch_ent(zvol_taskq)` for sync writes, and FLUSh operations.
Therefore we can have up to 32 threads (the taskq threads)
simultaneously calling `zil_commit()`, for a theoretical performance
improvement of up to 32x.
To avoid the locking issue described in the comment (which this commit
removes), we acquire the rangelock from the taskq callback (e.g.
`zvol_write()`) rather than from `zvol_request()`. This applies to all
writes (sync and async), reads, and discard operations. This means that
multiple simultaneously-outstanding i/o's which access the same block
can complete in any order. This was previously thought to be incorrect,
but a review of the block device interface requirements revealed that
this is fine - the order is inherently not defined. The shorter hold
time of the rangelock should also have a slight performance improvement.
For an additional slight performance improvement, we use
`taskq_dispatch_ent()` instead of `taskq_dispatch()`, which avoids a
`kmem_alloc()` and eliminates a failure mode. This applies to all
writes (sync and async), reads, and discard operations.
== Performance results ==
We used a zvol as an iscsi target (server) for a Windows initiator
(client), with a single connection (the default - i.e. not MC/S).
We used `diskspd` to generate a workload with 4 threads, doing 1MB
writes to random offsets in the zvol. Without this change we get
231MB/s, and with the change we get 728MB/s, which is 3.15x the original
performance.
We ran a real-world workload, restoring a MSSQL database, and saw
throughput 2.5x the original.
We saw more modest performance wins (typically 1.5x-2x) when using MC/S
with 4 connections, and with different number of client threads (1, 8,
32).
Reviewed-by: Tony Nguyen <tony.nguyen@delphix.com>
Reviewed-by: Pavel Zakharov <pavel.zakharov@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
Closes #10163
2020-03-31 20:50:44 +03:00
|
|
|
/*
|
|
|
|
|
* Copyright (c) 2012, 2020 by Delphix. All rights reserved.
|
2024-07-31 07:35:48 +03:00
|
|
|
* Copyright (c) 2024, Rob Norris <robn@despairlabs.com>
|
zvol: remove the OS-side minor before freeing the zvol
When destroying a zvol, it is not "unpublished" from the system (that
is, /dev/zd* node removed) until zvol_os_free(). Under Linux, at the
time del_gendisk() and put_disk() are called, the device node may still
be have an active hold, from a userspace program or something inside the
kernel (a partition probe). As it is currently, this can lead to calls
to zvol_open() or zvol_release() while the zvol_state_t is partially or
fully freed. zvol_open() has some protection against this by checking
that private_data is NULL, but zvol_release does not.
This implements a better ordering for all of this by adding a new
OS-side method, zvol_os_remove_minor(), which is responsible for fully
decoupling the "private" (OS-side) objects from the zvol_state_t. For
Linux, that means calling put_disk(), nulling private_data, and freeing
zv_zso.
This takes the place of zvol_os_clear_private(), which was a nod in that
direction but did not do enough, and did not do it early enough.
Equivalent changes are made on the FreeBSD side to follow the API
change.
Sponsored-by: Klara, Inc.
Sponsored-by: Railway Corporation
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Fedor Uporov <fuporov.vstack@gmail.com>
Signed-off-by: Rob Norris <rob.norris@klarasystems.com>
Closes #17625
2025-08-05 06:43:17 +03:00
|
|
|
* Copyright (c) 2024, 2025, Klara, Inc.
|
Improve ZVOL sync write performance by using a taskq
== Summary ==
Prior to this change, sync writes to a zvol are processed serially.
This commit makes zvols process concurrently outstanding sync writes in
parallel, similar to how reads and async writes are already handled.
The result is that the throughput of sync writes is tripled.
== Background ==
When a write comes in for a zvol (e.g. over iscsi), it is processed by
calling `zvol_request()` to initiate the operation. ZFS is expected to
later call `BIO_END_IO()` when the operation completes (possibly from a
different thread). There are a limited number of threads that are
available to call `zvol_request()` - one one per iscsi client (unless
using MC/S). Therefore, to ensure good performance, the latency of
`zvol_request()` is important, so that many i/o operations to the zvol
can be processed concurrently. In other words, if the client has
multiple outstanding requests to the zvol, the zvol should have multiple
outstanding requests to the storage hardware (i.e. issue multiple
concurrent `zio_t`'s).
For reads, and async writes (i.e. writes which can be acknowledged
before the data reaches stable storage), `zvol_request()` achieves low
latency by dispatching the bulk of the work (including waiting for i/o
to disk) to a taskq. The taskq callback (`zvol_read()` or
`zvol_write()`) blocks while waiting for the i/o to disk to complete.
The `zvol_taskq` has 32 threads (by default), so we can have up to 32
concurrent i/os to disk in service of requests to zvols.
However, for sync writes (i.e. writes which must be persisted to stable
storage before they can be acknowledged, by calling `zil_commit()`),
`zvol_request()` does not use `zvol_taskq`. Instead it blocks while
waiting for the ZIL write to disk to complete. This has the effect of
serializing sync writes to each zvol. In other words, each zvol will
only process one sync write at a time, waiting for it to be written to
the ZIL before accepting the next request.
The same issue applies to FLUSH operations, for which `zvol_request()`
calls `zil_commit()` directly.
== Description of change ==
This commit changes `zvol_request()` to use
`taskq_dispatch_ent(zvol_taskq)` for sync writes, and FLUSh operations.
Therefore we can have up to 32 threads (the taskq threads)
simultaneously calling `zil_commit()`, for a theoretical performance
improvement of up to 32x.
To avoid the locking issue described in the comment (which this commit
removes), we acquire the rangelock from the taskq callback (e.g.
`zvol_write()`) rather than from `zvol_request()`. This applies to all
writes (sync and async), reads, and discard operations. This means that
multiple simultaneously-outstanding i/o's which access the same block
can complete in any order. This was previously thought to be incorrect,
but a review of the block device interface requirements revealed that
this is fine - the order is inherently not defined. The shorter hold
time of the rangelock should also have a slight performance improvement.
For an additional slight performance improvement, we use
`taskq_dispatch_ent()` instead of `taskq_dispatch()`, which avoids a
`kmem_alloc()` and eliminates a failure mode. This applies to all
writes (sync and async), reads, and discard operations.
== Performance results ==
We used a zvol as an iscsi target (server) for a Windows initiator
(client), with a single connection (the default - i.e. not MC/S).
We used `diskspd` to generate a workload with 4 threads, doing 1MB
writes to random offsets in the zvol. Without this change we get
231MB/s, and with the change we get 728MB/s, which is 3.15x the original
performance.
We ran a real-world workload, restoring a MSSQL database, and saw
throughput 2.5x the original.
We saw more modest performance wins (typically 1.5x-2x) when using MC/S
with 4 connections, and with different number of client threads (1, 8,
32).
Reviewed-by: Tony Nguyen <tony.nguyen@delphix.com>
Reviewed-by: Pavel Zakharov <pavel.zakharov@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
Closes #10163
2020-03-31 20:50:44 +03:00
|
|
|
*/
|
2019-09-25 19:20:30 +03:00
|
|
|
|
|
|
|
|
#include <sys/dataset_kstats.h>
|
|
|
|
|
#include <sys/dbuf.h>
|
|
|
|
|
#include <sys/dmu_traverse.h>
|
|
|
|
|
#include <sys/dsl_dataset.h>
|
|
|
|
|
#include <sys/dsl_prop.h>
|
|
|
|
|
#include <sys/dsl_dir.h>
|
|
|
|
|
#include <sys/zap.h>
|
|
|
|
|
#include <sys/zfeature.h>
|
|
|
|
|
#include <sys/zil_impl.h>
|
|
|
|
|
#include <sys/dmu_tx.h>
|
|
|
|
|
#include <sys/zio.h>
|
|
|
|
|
#include <sys/zfs_rlock.h>
|
|
|
|
|
#include <sys/spa_impl.h>
|
|
|
|
|
#include <sys/zvol.h>
|
|
|
|
|
#include <sys/zvol_impl.h>
|
2024-04-04 04:21:25 +03:00
|
|
|
#include <cityhash.h>
|
2019-09-25 19:20:30 +03:00
|
|
|
|
|
|
|
|
#include <linux/blkdev_compat.h>
|
|
|
|
|
#include <linux/task_io_accounting_ops.h>
|
2024-06-28 19:52:03 +03:00
|
|
|
#include <linux/workqueue.h>
|
2022-06-09 17:10:38 +03:00
|
|
|
#include <linux/blk-mq.h>
|
|
|
|
|
|
|
|
|
|
static void zvol_request_impl(zvol_state_t *zv, struct bio *bio,
|
|
|
|
|
struct request *rq, boolean_t force_sync);
|
|
|
|
|
|
2022-01-15 02:37:55 +03:00
|
|
|
static unsigned int zvol_major = ZVOL_MAJOR;
|
|
|
|
|
static unsigned long zvol_max_discard_blocks = 16384;
|
2022-03-31 01:39:55 +03:00
|
|
|
|
|
|
|
|
#ifndef HAVE_BLKDEV_GET_ERESTARTSYS
|
2023-06-30 21:34:05 +03:00
|
|
|
static unsigned int zvol_open_timeout_ms = 1000;
|
2022-03-31 01:39:55 +03:00
|
|
|
#endif
|
2019-09-25 19:20:30 +03:00
|
|
|
|
2022-06-09 17:10:38 +03:00
|
|
|
static unsigned int zvol_blk_mq_threads = 0;
|
|
|
|
|
static unsigned int zvol_blk_mq_actual_threads;
|
|
|
|
|
static boolean_t zvol_use_blk_mq = B_FALSE;
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* The maximum number of volblocksize blocks to process per thread. Typically,
|
|
|
|
|
* write heavy workloads preform better with higher values here, and read
|
|
|
|
|
* heavy workloads preform better with lower values, but that's not a hard
|
|
|
|
|
* and fast rule. It's basically a knob to tune between "less overhead with
|
|
|
|
|
* less parallelism" and "more overhead, but more parallelism".
|
|
|
|
|
*
|
|
|
|
|
* '8' was chosen as a reasonable, balanced, default based off of sequential
|
|
|
|
|
* read and write tests to a zvol in an NVMe pool (with 16 CPUs).
|
|
|
|
|
*/
|
|
|
|
|
static unsigned int zvol_blk_mq_blocks_per_thread = 8;
|
|
|
|
|
|
|
|
|
|
#ifndef BLKDEV_DEFAULT_RQ
|
|
|
|
|
/* BLKDEV_MAX_RQ was renamed to BLKDEV_DEFAULT_RQ in the 5.16 kernel */
|
|
|
|
|
#define BLKDEV_DEFAULT_RQ BLKDEV_MAX_RQ
|
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Finalize our BIO or request.
|
|
|
|
|
*/
|
2024-08-24 14:37:30 +03:00
|
|
|
static inline void
|
|
|
|
|
zvol_end_io(struct bio *bio, struct request *rq, int error)
|
|
|
|
|
{
|
2025-08-09 03:04:01 +03:00
|
|
|
ASSERT3U(error, >=, 0);
|
2024-08-24 14:37:30 +03:00
|
|
|
if (bio) {
|
2025-08-09 03:04:01 +03:00
|
|
|
bio->bi_status = errno_to_bi_status(error);
|
2024-08-24 14:37:30 +03:00
|
|
|
bio_endio(bio);
|
|
|
|
|
} else {
|
|
|
|
|
blk_mq_end_request(rq, errno_to_bi_status(error));
|
|
|
|
|
}
|
|
|
|
|
}
|
2022-06-09 17:10:38 +03:00
|
|
|
|
|
|
|
|
static unsigned int zvol_blk_mq_queue_depth = BLKDEV_DEFAULT_RQ;
|
|
|
|
|
static unsigned int zvol_actual_blk_mq_queue_depth;
|
|
|
|
|
|
2019-09-25 19:20:30 +03:00
|
|
|
struct zvol_state_os {
|
|
|
|
|
struct gendisk *zvo_disk; /* generic disk */
|
|
|
|
|
struct request_queue *zvo_queue; /* request queue */
|
|
|
|
|
dev_t zvo_dev; /* device id */
|
2022-06-09 17:10:38 +03:00
|
|
|
|
|
|
|
|
struct blk_mq_tag_set tag_set;
|
|
|
|
|
|
|
|
|
|
/* Set from the global 'zvol_use_blk_mq' at zvol load */
|
|
|
|
|
boolean_t use_blk_mq;
|
2019-09-25 19:20:30 +03:00
|
|
|
};
|
|
|
|
|
|
|
|
|
|
static struct ida zvol_ida;
|
|
|
|
|
|
2022-06-09 17:10:38 +03:00
|
|
|
/*
|
|
|
|
|
* This is called when a new block multiqueue request comes in. A request
|
|
|
|
|
* contains one or more BIOs.
|
|
|
|
|
*/
|
|
|
|
|
static blk_status_t zvol_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
|
|
|
|
|
const struct blk_mq_queue_data *bd)
|
|
|
|
|
{
|
|
|
|
|
struct request *rq = bd->rq;
|
|
|
|
|
zvol_state_t *zv = rq->q->queuedata;
|
|
|
|
|
|
|
|
|
|
/* Tell the kernel that we are starting to process this request */
|
|
|
|
|
blk_mq_start_request(rq);
|
|
|
|
|
|
|
|
|
|
if (blk_rq_is_passthrough(rq)) {
|
|
|
|
|
/* Skip non filesystem request */
|
|
|
|
|
blk_mq_end_request(rq, BLK_STS_IOERR);
|
|
|
|
|
return (BLK_STS_IOERR);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
zvol_request_impl(zv, NULL, rq, 0);
|
|
|
|
|
|
|
|
|
|
/* Acknowledge to the kernel that we got this request */
|
|
|
|
|
return (BLK_STS_OK);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static struct blk_mq_ops zvol_blk_mq_queue_ops = {
|
|
|
|
|
.queue_rq = zvol_mq_queue_rq,
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
/* Initialize our blk-mq struct */
|
|
|
|
|
static int zvol_blk_mq_alloc_tag_set(zvol_state_t *zv)
|
|
|
|
|
{
|
|
|
|
|
struct zvol_state_os *zso = zv->zv_zso;
|
|
|
|
|
|
|
|
|
|
memset(&zso->tag_set, 0, sizeof (zso->tag_set));
|
|
|
|
|
|
|
|
|
|
/* Initialize tag set. */
|
|
|
|
|
zso->tag_set.ops = &zvol_blk_mq_queue_ops;
|
|
|
|
|
zso->tag_set.nr_hw_queues = zvol_blk_mq_actual_threads;
|
|
|
|
|
zso->tag_set.queue_depth = zvol_actual_blk_mq_queue_depth;
|
|
|
|
|
zso->tag_set.numa_node = NUMA_NO_NODE;
|
|
|
|
|
zso->tag_set.cmd_size = 0;
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* We need BLK_MQ_F_BLOCKING here since we do blocking calls in
|
|
|
|
|
* zvol_request_impl()
|
|
|
|
|
*/
|
2025-02-05 09:52:45 +03:00
|
|
|
zso->tag_set.flags = BLK_MQ_F_BLOCKING;
|
|
|
|
|
|
|
|
|
|
#ifdef BLK_MQ_F_SHOULD_MERGE
|
|
|
|
|
/*
|
|
|
|
|
* Linux 6.14 removed BLK_MQ_F_SHOULD_MERGE and made it implicit.
|
|
|
|
|
* For older kernels, we set it.
|
|
|
|
|
*/
|
|
|
|
|
zso->tag_set.flags |= BLK_MQ_F_SHOULD_MERGE;
|
|
|
|
|
#endif
|
|
|
|
|
|
2022-06-09 17:10:38 +03:00
|
|
|
zso->tag_set.driver_data = zv;
|
|
|
|
|
|
|
|
|
|
return (blk_mq_alloc_tag_set(&zso->tag_set));
|
|
|
|
|
}
|
|
|
|
|
|
2019-09-25 19:20:30 +03:00
|
|
|
/*
|
|
|
|
|
* Given a path, return TRUE if path is a ZVOL.
|
|
|
|
|
*/
|
2022-02-07 21:24:38 +03:00
|
|
|
boolean_t
|
|
|
|
|
zvol_os_is_zvol(const char *path)
|
2019-09-25 19:20:30 +03:00
|
|
|
{
|
2020-12-22 21:26:45 +03:00
|
|
|
dev_t dev = 0;
|
2019-09-25 19:20:30 +03:00
|
|
|
|
2020-12-22 21:26:45 +03:00
|
|
|
if (vdev_lookup_bdev(path, &dev) != 0)
|
2019-09-25 19:20:30 +03:00
|
|
|
return (B_FALSE);
|
|
|
|
|
|
2020-12-22 21:26:45 +03:00
|
|
|
if (MAJOR(dev) == zvol_major)
|
2019-09-25 19:20:30 +03:00
|
|
|
return (B_TRUE);
|
|
|
|
|
|
|
|
|
|
return (B_FALSE);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
2021-03-03 19:15:28 +03:00
|
|
|
zvol_write(zv_request_t *zvr)
|
2019-09-25 19:20:30 +03:00
|
|
|
{
|
|
|
|
|
struct bio *bio = zvr->bio;
|
2022-06-09 17:10:38 +03:00
|
|
|
struct request *rq = zvr->rq;
|
2020-12-18 19:48:26 +03:00
|
|
|
int error = 0;
|
2021-01-21 08:27:30 +03:00
|
|
|
zfs_uio_t uio;
|
2019-09-25 19:20:30 +03:00
|
|
|
zvol_state_t *zv = zvr->zv;
|
2022-06-09 17:10:38 +03:00
|
|
|
struct request_queue *q;
|
|
|
|
|
struct gendisk *disk;
|
|
|
|
|
unsigned long start_time = 0;
|
|
|
|
|
boolean_t acct = B_FALSE;
|
|
|
|
|
|
2020-10-21 20:23:08 +03:00
|
|
|
ASSERT3P(zv, !=, NULL);
|
|
|
|
|
ASSERT3U(zv->zv_open_count, >, 0);
|
|
|
|
|
ASSERT3P(zv->zv_zilog, !=, NULL);
|
2019-09-25 19:20:30 +03:00
|
|
|
|
2022-06-09 17:10:38 +03:00
|
|
|
q = zv->zv_zso->zvo_queue;
|
|
|
|
|
disk = zv->zv_zso->zvo_disk;
|
|
|
|
|
|
Improve ZVOL sync write performance by using a taskq
== Summary ==
Prior to this change, sync writes to a zvol are processed serially.
This commit makes zvols process concurrently outstanding sync writes in
parallel, similar to how reads and async writes are already handled.
The result is that the throughput of sync writes is tripled.
== Background ==
When a write comes in for a zvol (e.g. over iscsi), it is processed by
calling `zvol_request()` to initiate the operation. ZFS is expected to
later call `BIO_END_IO()` when the operation completes (possibly from a
different thread). There are a limited number of threads that are
available to call `zvol_request()` - one one per iscsi client (unless
using MC/S). Therefore, to ensure good performance, the latency of
`zvol_request()` is important, so that many i/o operations to the zvol
can be processed concurrently. In other words, if the client has
multiple outstanding requests to the zvol, the zvol should have multiple
outstanding requests to the storage hardware (i.e. issue multiple
concurrent `zio_t`'s).
For reads, and async writes (i.e. writes which can be acknowledged
before the data reaches stable storage), `zvol_request()` achieves low
latency by dispatching the bulk of the work (including waiting for i/o
to disk) to a taskq. The taskq callback (`zvol_read()` or
`zvol_write()`) blocks while waiting for the i/o to disk to complete.
The `zvol_taskq` has 32 threads (by default), so we can have up to 32
concurrent i/os to disk in service of requests to zvols.
However, for sync writes (i.e. writes which must be persisted to stable
storage before they can be acknowledged, by calling `zil_commit()`),
`zvol_request()` does not use `zvol_taskq`. Instead it blocks while
waiting for the ZIL write to disk to complete. This has the effect of
serializing sync writes to each zvol. In other words, each zvol will
only process one sync write at a time, waiting for it to be written to
the ZIL before accepting the next request.
The same issue applies to FLUSH operations, for which `zvol_request()`
calls `zil_commit()` directly.
== Description of change ==
This commit changes `zvol_request()` to use
`taskq_dispatch_ent(zvol_taskq)` for sync writes, and FLUSh operations.
Therefore we can have up to 32 threads (the taskq threads)
simultaneously calling `zil_commit()`, for a theoretical performance
improvement of up to 32x.
To avoid the locking issue described in the comment (which this commit
removes), we acquire the rangelock from the taskq callback (e.g.
`zvol_write()`) rather than from `zvol_request()`. This applies to all
writes (sync and async), reads, and discard operations. This means that
multiple simultaneously-outstanding i/o's which access the same block
can complete in any order. This was previously thought to be incorrect,
but a review of the block device interface requirements revealed that
this is fine - the order is inherently not defined. The shorter hold
time of the rangelock should also have a slight performance improvement.
For an additional slight performance improvement, we use
`taskq_dispatch_ent()` instead of `taskq_dispatch()`, which avoids a
`kmem_alloc()` and eliminates a failure mode. This applies to all
writes (sync and async), reads, and discard operations.
== Performance results ==
We used a zvol as an iscsi target (server) for a Windows initiator
(client), with a single connection (the default - i.e. not MC/S).
We used `diskspd` to generate a workload with 4 threads, doing 1MB
writes to random offsets in the zvol. Without this change we get
231MB/s, and with the change we get 728MB/s, which is 3.15x the original
performance.
We ran a real-world workload, restoring a MSSQL database, and saw
throughput 2.5x the original.
We saw more modest performance wins (typically 1.5x-2x) when using MC/S
with 4 connections, and with different number of client threads (1, 8,
32).
Reviewed-by: Tony Nguyen <tony.nguyen@delphix.com>
Reviewed-by: Pavel Zakharov <pavel.zakharov@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
Closes #10163
2020-03-31 20:50:44 +03:00
|
|
|
/* bio marked as FLUSH need to flush before write */
|
2025-02-24 07:14:23 +03:00
|
|
|
if (io_is_flush(bio, rq)) {
|
|
|
|
|
error = zil_commit(zv->zv_zilog, ZVOL_OBJ);
|
|
|
|
|
if (error != 0) {
|
|
|
|
|
rw_exit(&zv->zv_suspend_lock);
|
|
|
|
|
zvol_end_io(bio, rq, -error);
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
}
|
Improve ZVOL sync write performance by using a taskq
== Summary ==
Prior to this change, sync writes to a zvol are processed serially.
This commit makes zvols process concurrently outstanding sync writes in
parallel, similar to how reads and async writes are already handled.
The result is that the throughput of sync writes is tripled.
== Background ==
When a write comes in for a zvol (e.g. over iscsi), it is processed by
calling `zvol_request()` to initiate the operation. ZFS is expected to
later call `BIO_END_IO()` when the operation completes (possibly from a
different thread). There are a limited number of threads that are
available to call `zvol_request()` - one one per iscsi client (unless
using MC/S). Therefore, to ensure good performance, the latency of
`zvol_request()` is important, so that many i/o operations to the zvol
can be processed concurrently. In other words, if the client has
multiple outstanding requests to the zvol, the zvol should have multiple
outstanding requests to the storage hardware (i.e. issue multiple
concurrent `zio_t`'s).
For reads, and async writes (i.e. writes which can be acknowledged
before the data reaches stable storage), `zvol_request()` achieves low
latency by dispatching the bulk of the work (including waiting for i/o
to disk) to a taskq. The taskq callback (`zvol_read()` or
`zvol_write()`) blocks while waiting for the i/o to disk to complete.
The `zvol_taskq` has 32 threads (by default), so we can have up to 32
concurrent i/os to disk in service of requests to zvols.
However, for sync writes (i.e. writes which must be persisted to stable
storage before they can be acknowledged, by calling `zil_commit()`),
`zvol_request()` does not use `zvol_taskq`. Instead it blocks while
waiting for the ZIL write to disk to complete. This has the effect of
serializing sync writes to each zvol. In other words, each zvol will
only process one sync write at a time, waiting for it to be written to
the ZIL before accepting the next request.
The same issue applies to FLUSH operations, for which `zvol_request()`
calls `zil_commit()` directly.
== Description of change ==
This commit changes `zvol_request()` to use
`taskq_dispatch_ent(zvol_taskq)` for sync writes, and FLUSh operations.
Therefore we can have up to 32 threads (the taskq threads)
simultaneously calling `zil_commit()`, for a theoretical performance
improvement of up to 32x.
To avoid the locking issue described in the comment (which this commit
removes), we acquire the rangelock from the taskq callback (e.g.
`zvol_write()`) rather than from `zvol_request()`. This applies to all
writes (sync and async), reads, and discard operations. This means that
multiple simultaneously-outstanding i/o's which access the same block
can complete in any order. This was previously thought to be incorrect,
but a review of the block device interface requirements revealed that
this is fine - the order is inherently not defined. The shorter hold
time of the rangelock should also have a slight performance improvement.
For an additional slight performance improvement, we use
`taskq_dispatch_ent()` instead of `taskq_dispatch()`, which avoids a
`kmem_alloc()` and eliminates a failure mode. This applies to all
writes (sync and async), reads, and discard operations.
== Performance results ==
We used a zvol as an iscsi target (server) for a Windows initiator
(client), with a single connection (the default - i.e. not MC/S).
We used `diskspd` to generate a workload with 4 threads, doing 1MB
writes to random offsets in the zvol. Without this change we get
231MB/s, and with the change we get 728MB/s, which is 3.15x the original
performance.
We ran a real-world workload, restoring a MSSQL database, and saw
throughput 2.5x the original.
We saw more modest performance wins (typically 1.5x-2x) when using MC/S
with 4 connections, and with different number of client threads (1, 8,
32).
Reviewed-by: Tony Nguyen <tony.nguyen@delphix.com>
Reviewed-by: Pavel Zakharov <pavel.zakharov@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
Closes #10163
2020-03-31 20:50:44 +03:00
|
|
|
|
|
|
|
|
/* Some requests are just for flush and nothing else. */
|
2022-06-09 17:10:38 +03:00
|
|
|
if (io_size(bio, rq) == 0) {
|
Improve ZVOL sync write performance by using a taskq
== Summary ==
Prior to this change, sync writes to a zvol are processed serially.
This commit makes zvols process concurrently outstanding sync writes in
parallel, similar to how reads and async writes are already handled.
The result is that the throughput of sync writes is tripled.
== Background ==
When a write comes in for a zvol (e.g. over iscsi), it is processed by
calling `zvol_request()` to initiate the operation. ZFS is expected to
later call `BIO_END_IO()` when the operation completes (possibly from a
different thread). There are a limited number of threads that are
available to call `zvol_request()` - one one per iscsi client (unless
using MC/S). Therefore, to ensure good performance, the latency of
`zvol_request()` is important, so that many i/o operations to the zvol
can be processed concurrently. In other words, if the client has
multiple outstanding requests to the zvol, the zvol should have multiple
outstanding requests to the storage hardware (i.e. issue multiple
concurrent `zio_t`'s).
For reads, and async writes (i.e. writes which can be acknowledged
before the data reaches stable storage), `zvol_request()` achieves low
latency by dispatching the bulk of the work (including waiting for i/o
to disk) to a taskq. The taskq callback (`zvol_read()` or
`zvol_write()`) blocks while waiting for the i/o to disk to complete.
The `zvol_taskq` has 32 threads (by default), so we can have up to 32
concurrent i/os to disk in service of requests to zvols.
However, for sync writes (i.e. writes which must be persisted to stable
storage before they can be acknowledged, by calling `zil_commit()`),
`zvol_request()` does not use `zvol_taskq`. Instead it blocks while
waiting for the ZIL write to disk to complete. This has the effect of
serializing sync writes to each zvol. In other words, each zvol will
only process one sync write at a time, waiting for it to be written to
the ZIL before accepting the next request.
The same issue applies to FLUSH operations, for which `zvol_request()`
calls `zil_commit()` directly.
== Description of change ==
This commit changes `zvol_request()` to use
`taskq_dispatch_ent(zvol_taskq)` for sync writes, and FLUSh operations.
Therefore we can have up to 32 threads (the taskq threads)
simultaneously calling `zil_commit()`, for a theoretical performance
improvement of up to 32x.
To avoid the locking issue described in the comment (which this commit
removes), we acquire the rangelock from the taskq callback (e.g.
`zvol_write()`) rather than from `zvol_request()`. This applies to all
writes (sync and async), reads, and discard operations. This means that
multiple simultaneously-outstanding i/o's which access the same block
can complete in any order. This was previously thought to be incorrect,
but a review of the block device interface requirements revealed that
this is fine - the order is inherently not defined. The shorter hold
time of the rangelock should also have a slight performance improvement.
For an additional slight performance improvement, we use
`taskq_dispatch_ent()` instead of `taskq_dispatch()`, which avoids a
`kmem_alloc()` and eliminates a failure mode. This applies to all
writes (sync and async), reads, and discard operations.
== Performance results ==
We used a zvol as an iscsi target (server) for a Windows initiator
(client), with a single connection (the default - i.e. not MC/S).
We used `diskspd` to generate a workload with 4 threads, doing 1MB
writes to random offsets in the zvol. Without this change we get
231MB/s, and with the change we get 728MB/s, which is 3.15x the original
performance.
We ran a real-world workload, restoring a MSSQL database, and saw
throughput 2.5x the original.
We saw more modest performance wins (typically 1.5x-2x) when using MC/S
with 4 connections, and with different number of client threads (1, 8,
32).
Reviewed-by: Tony Nguyen <tony.nguyen@delphix.com>
Reviewed-by: Pavel Zakharov <pavel.zakharov@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
Closes #10163
2020-03-31 20:50:44 +03:00
|
|
|
rw_exit(&zv->zv_suspend_lock);
|
2024-08-24 14:37:30 +03:00
|
|
|
zvol_end_io(bio, rq, 0);
|
Improve ZVOL sync write performance by using a taskq
== Summary ==
Prior to this change, sync writes to a zvol are processed serially.
This commit makes zvols process concurrently outstanding sync writes in
parallel, similar to how reads and async writes are already handled.
The result is that the throughput of sync writes is tripled.
== Background ==
When a write comes in for a zvol (e.g. over iscsi), it is processed by
calling `zvol_request()` to initiate the operation. ZFS is expected to
later call `BIO_END_IO()` when the operation completes (possibly from a
different thread). There are a limited number of threads that are
available to call `zvol_request()` - one one per iscsi client (unless
using MC/S). Therefore, to ensure good performance, the latency of
`zvol_request()` is important, so that many i/o operations to the zvol
can be processed concurrently. In other words, if the client has
multiple outstanding requests to the zvol, the zvol should have multiple
outstanding requests to the storage hardware (i.e. issue multiple
concurrent `zio_t`'s).
For reads, and async writes (i.e. writes which can be acknowledged
before the data reaches stable storage), `zvol_request()` achieves low
latency by dispatching the bulk of the work (including waiting for i/o
to disk) to a taskq. The taskq callback (`zvol_read()` or
`zvol_write()`) blocks while waiting for the i/o to disk to complete.
The `zvol_taskq` has 32 threads (by default), so we can have up to 32
concurrent i/os to disk in service of requests to zvols.
However, for sync writes (i.e. writes which must be persisted to stable
storage before they can be acknowledged, by calling `zil_commit()`),
`zvol_request()` does not use `zvol_taskq`. Instead it blocks while
waiting for the ZIL write to disk to complete. This has the effect of
serializing sync writes to each zvol. In other words, each zvol will
only process one sync write at a time, waiting for it to be written to
the ZIL before accepting the next request.
The same issue applies to FLUSH operations, for which `zvol_request()`
calls `zil_commit()` directly.
== Description of change ==
This commit changes `zvol_request()` to use
`taskq_dispatch_ent(zvol_taskq)` for sync writes, and FLUSh operations.
Therefore we can have up to 32 threads (the taskq threads)
simultaneously calling `zil_commit()`, for a theoretical performance
improvement of up to 32x.
To avoid the locking issue described in the comment (which this commit
removes), we acquire the rangelock from the taskq callback (e.g.
`zvol_write()`) rather than from `zvol_request()`. This applies to all
writes (sync and async), reads, and discard operations. This means that
multiple simultaneously-outstanding i/o's which access the same block
can complete in any order. This was previously thought to be incorrect,
but a review of the block device interface requirements revealed that
this is fine - the order is inherently not defined. The shorter hold
time of the rangelock should also have a slight performance improvement.
For an additional slight performance improvement, we use
`taskq_dispatch_ent()` instead of `taskq_dispatch()`, which avoids a
`kmem_alloc()` and eliminates a failure mode. This applies to all
writes (sync and async), reads, and discard operations.
== Performance results ==
We used a zvol as an iscsi target (server) for a Windows initiator
(client), with a single connection (the default - i.e. not MC/S).
We used `diskspd` to generate a workload with 4 threads, doing 1MB
writes to random offsets in the zvol. Without this change we get
231MB/s, and with the change we get 728MB/s, which is 3.15x the original
performance.
We ran a real-world workload, restoring a MSSQL database, and saw
throughput 2.5x the original.
We saw more modest performance wins (typically 1.5x-2x) when using MC/S
with 4 connections, and with different number of client threads (1, 8,
32).
Reviewed-by: Tony Nguyen <tony.nguyen@delphix.com>
Reviewed-by: Pavel Zakharov <pavel.zakharov@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
Closes #10163
2020-03-31 20:50:44 +03:00
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
2022-06-09 17:10:38 +03:00
|
|
|
zfs_uio_bvec_init(&uio, bio, rq);
|
|
|
|
|
|
2019-09-25 19:20:30 +03:00
|
|
|
ssize_t start_resid = uio.uio_resid;
|
2020-12-22 23:17:13 +03:00
|
|
|
|
2022-06-09 17:10:38 +03:00
|
|
|
/*
|
|
|
|
|
* With use_blk_mq, accounting is done by blk_mq_start_request()
|
|
|
|
|
* and blk_mq_end_request(), so we can skip it here.
|
|
|
|
|
*/
|
|
|
|
|
if (bio) {
|
|
|
|
|
acct = blk_queue_io_stat(q);
|
|
|
|
|
if (acct) {
|
|
|
|
|
start_time = blk_generic_start_io_acct(q, disk, WRITE,
|
|
|
|
|
bio);
|
|
|
|
|
}
|
|
|
|
|
}
|
2019-09-25 19:20:30 +03:00
|
|
|
|
|
|
|
|
boolean_t sync =
|
2022-06-09 17:10:38 +03:00
|
|
|
io_is_fua(bio, rq) || zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;
|
2019-09-25 19:20:30 +03:00
|
|
|
|
Improve ZVOL sync write performance by using a taskq
== Summary ==
Prior to this change, sync writes to a zvol are processed serially.
This commit makes zvols process concurrently outstanding sync writes in
parallel, similar to how reads and async writes are already handled.
The result is that the throughput of sync writes is tripled.
== Background ==
When a write comes in for a zvol (e.g. over iscsi), it is processed by
calling `zvol_request()` to initiate the operation. ZFS is expected to
later call `BIO_END_IO()` when the operation completes (possibly from a
different thread). There are a limited number of threads that are
available to call `zvol_request()` - one one per iscsi client (unless
using MC/S). Therefore, to ensure good performance, the latency of
`zvol_request()` is important, so that many i/o operations to the zvol
can be processed concurrently. In other words, if the client has
multiple outstanding requests to the zvol, the zvol should have multiple
outstanding requests to the storage hardware (i.e. issue multiple
concurrent `zio_t`'s).
For reads, and async writes (i.e. writes which can be acknowledged
before the data reaches stable storage), `zvol_request()` achieves low
latency by dispatching the bulk of the work (including waiting for i/o
to disk) to a taskq. The taskq callback (`zvol_read()` or
`zvol_write()`) blocks while waiting for the i/o to disk to complete.
The `zvol_taskq` has 32 threads (by default), so we can have up to 32
concurrent i/os to disk in service of requests to zvols.
However, for sync writes (i.e. writes which must be persisted to stable
storage before they can be acknowledged, by calling `zil_commit()`),
`zvol_request()` does not use `zvol_taskq`. Instead it blocks while
waiting for the ZIL write to disk to complete. This has the effect of
serializing sync writes to each zvol. In other words, each zvol will
only process one sync write at a time, waiting for it to be written to
the ZIL before accepting the next request.
The same issue applies to FLUSH operations, for which `zvol_request()`
calls `zil_commit()` directly.
== Description of change ==
This commit changes `zvol_request()` to use
`taskq_dispatch_ent(zvol_taskq)` for sync writes, and FLUSh operations.
Therefore we can have up to 32 threads (the taskq threads)
simultaneously calling `zil_commit()`, for a theoretical performance
improvement of up to 32x.
To avoid the locking issue described in the comment (which this commit
removes), we acquire the rangelock from the taskq callback (e.g.
`zvol_write()`) rather than from `zvol_request()`. This applies to all
writes (sync and async), reads, and discard operations. This means that
multiple simultaneously-outstanding i/o's which access the same block
can complete in any order. This was previously thought to be incorrect,
but a review of the block device interface requirements revealed that
this is fine - the order is inherently not defined. The shorter hold
time of the rangelock should also have a slight performance improvement.
For an additional slight performance improvement, we use
`taskq_dispatch_ent()` instead of `taskq_dispatch()`, which avoids a
`kmem_alloc()` and eliminates a failure mode. This applies to all
writes (sync and async), reads, and discard operations.
== Performance results ==
We used a zvol as an iscsi target (server) for a Windows initiator
(client), with a single connection (the default - i.e. not MC/S).
We used `diskspd` to generate a workload with 4 threads, doing 1MB
writes to random offsets in the zvol. Without this change we get
231MB/s, and with the change we get 728MB/s, which is 3.15x the original
performance.
We ran a real-world workload, restoring a MSSQL database, and saw
throughput 2.5x the original.
We saw more modest performance wins (typically 1.5x-2x) when using MC/S
with 4 connections, and with different number of client threads (1, 8,
32).
Reviewed-by: Tony Nguyen <tony.nguyen@delphix.com>
Reviewed-by: Pavel Zakharov <pavel.zakharov@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
Closes #10163
2020-03-31 20:50:44 +03:00
|
|
|
zfs_locked_range_t *lr = zfs_rangelock_enter(&zv->zv_rangelock,
|
|
|
|
|
uio.uio_loffset, uio.uio_resid, RL_WRITER);
|
|
|
|
|
|
2019-09-25 19:20:30 +03:00
|
|
|
uint64_t volsize = zv->zv_volsize;
|
|
|
|
|
while (uio.uio_resid > 0 && uio.uio_loffset < volsize) {
|
|
|
|
|
uint64_t bytes = MIN(uio.uio_resid, DMU_MAX_ACCESS >> 1);
|
|
|
|
|
uint64_t off = uio.uio_loffset;
|
|
|
|
|
dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
|
|
|
|
|
|
|
|
|
|
if (bytes > volsize - off) /* don't write past the end */
|
|
|
|
|
bytes = volsize - off;
|
|
|
|
|
|
2020-04-11 07:14:01 +03:00
|
|
|
dmu_tx_hold_write_by_dnode(tx, zv->zv_dn, off, bytes);
|
2019-09-25 19:20:30 +03:00
|
|
|
|
|
|
|
|
/* This will only fail for ENOSPC */
|
2025-03-19 02:04:22 +03:00
|
|
|
error = dmu_tx_assign(tx, DMU_TX_WAIT);
|
2019-09-25 19:20:30 +03:00
|
|
|
if (error) {
|
|
|
|
|
dmu_tx_abort(tx);
|
|
|
|
|
break;
|
|
|
|
|
}
|
Wire O_DIRECT also to Uncached I/O (#17218)
Before Direct I/O was implemented, I've implemented lighter version
I called Uncached I/O. It uses normal DMU/ARC data path with some
optimizations, but evicts data from caches as soon as possible and
reasonable. Originally I wired it only to a primarycache property,
but now completing the integration all the way up to the VFS.
While Direct I/O has the lowest possible memory bandwidth usage,
it also has a significant number of limitations. It require I/Os
to be page aligned, does not allow speculative prefetch, etc. The
Uncached I/O does not have those limitations, but instead require
additional memory copy, though still one less than regular cached
I/O. As such it should fill the gap in between. Considering this
I've disabled annoying EINVAL errors on misaligned requests, adding
a tunable for those who wants to test their applications.
To pass the information between the layers I had to change a number
of APIs. But as side effect upper layers can now control not only
the caching, but also speculative prefetch. I haven't wired it to
VFS yet, since it require looking on some OS specifics. But while
there I've implemented speculative prefetch of indirect blocks for
Direct I/O, controllable via all the same mechanisms.
Signed-off-by: Alexander Motin <mav@FreeBSD.org>
Sponsored by: iXsystems, Inc.
Fixes #17027
Reviewed-by: Rob Norris <robn@despairlabs.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
2025-05-14 00:26:55 +03:00
|
|
|
error = dmu_write_uio_dnode(zv->zv_dn, &uio, bytes, tx,
|
|
|
|
|
DMU_READ_PREFETCH);
|
2019-09-25 19:20:30 +03:00
|
|
|
if (error == 0) {
|
|
|
|
|
zvol_log_write(zv, tx, off, bytes, sync);
|
|
|
|
|
}
|
|
|
|
|
dmu_tx_commit(tx);
|
|
|
|
|
|
|
|
|
|
if (error)
|
|
|
|
|
break;
|
|
|
|
|
}
|
Improve ZVOL sync write performance by using a taskq
== Summary ==
Prior to this change, sync writes to a zvol are processed serially.
This commit makes zvols process concurrently outstanding sync writes in
parallel, similar to how reads and async writes are already handled.
The result is that the throughput of sync writes is tripled.
== Background ==
When a write comes in for a zvol (e.g. over iscsi), it is processed by
calling `zvol_request()` to initiate the operation. ZFS is expected to
later call `BIO_END_IO()` when the operation completes (possibly from a
different thread). There are a limited number of threads that are
available to call `zvol_request()` - one one per iscsi client (unless
using MC/S). Therefore, to ensure good performance, the latency of
`zvol_request()` is important, so that many i/o operations to the zvol
can be processed concurrently. In other words, if the client has
multiple outstanding requests to the zvol, the zvol should have multiple
outstanding requests to the storage hardware (i.e. issue multiple
concurrent `zio_t`'s).
For reads, and async writes (i.e. writes which can be acknowledged
before the data reaches stable storage), `zvol_request()` achieves low
latency by dispatching the bulk of the work (including waiting for i/o
to disk) to a taskq. The taskq callback (`zvol_read()` or
`zvol_write()`) blocks while waiting for the i/o to disk to complete.
The `zvol_taskq` has 32 threads (by default), so we can have up to 32
concurrent i/os to disk in service of requests to zvols.
However, for sync writes (i.e. writes which must be persisted to stable
storage before they can be acknowledged, by calling `zil_commit()`),
`zvol_request()` does not use `zvol_taskq`. Instead it blocks while
waiting for the ZIL write to disk to complete. This has the effect of
serializing sync writes to each zvol. In other words, each zvol will
only process one sync write at a time, waiting for it to be written to
the ZIL before accepting the next request.
The same issue applies to FLUSH operations, for which `zvol_request()`
calls `zil_commit()` directly.
== Description of change ==
This commit changes `zvol_request()` to use
`taskq_dispatch_ent(zvol_taskq)` for sync writes, and FLUSh operations.
Therefore we can have up to 32 threads (the taskq threads)
simultaneously calling `zil_commit()`, for a theoretical performance
improvement of up to 32x.
To avoid the locking issue described in the comment (which this commit
removes), we acquire the rangelock from the taskq callback (e.g.
`zvol_write()`) rather than from `zvol_request()`. This applies to all
writes (sync and async), reads, and discard operations. This means that
multiple simultaneously-outstanding i/o's which access the same block
can complete in any order. This was previously thought to be incorrect,
but a review of the block device interface requirements revealed that
this is fine - the order is inherently not defined. The shorter hold
time of the rangelock should also have a slight performance improvement.
For an additional slight performance improvement, we use
`taskq_dispatch_ent()` instead of `taskq_dispatch()`, which avoids a
`kmem_alloc()` and eliminates a failure mode. This applies to all
writes (sync and async), reads, and discard operations.
== Performance results ==
We used a zvol as an iscsi target (server) for a Windows initiator
(client), with a single connection (the default - i.e. not MC/S).
We used `diskspd` to generate a workload with 4 threads, doing 1MB
writes to random offsets in the zvol. Without this change we get
231MB/s, and with the change we get 728MB/s, which is 3.15x the original
performance.
We ran a real-world workload, restoring a MSSQL database, and saw
throughput 2.5x the original.
We saw more modest performance wins (typically 1.5x-2x) when using MC/S
with 4 connections, and with different number of client threads (1, 8,
32).
Reviewed-by: Tony Nguyen <tony.nguyen@delphix.com>
Reviewed-by: Pavel Zakharov <pavel.zakharov@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
Closes #10163
2020-03-31 20:50:44 +03:00
|
|
|
zfs_rangelock_exit(lr);
|
2019-09-25 19:20:30 +03:00
|
|
|
|
|
|
|
|
int64_t nwritten = start_resid - uio.uio_resid;
|
2020-06-06 03:17:02 +03:00
|
|
|
dataset_kstats_update_write_kstats(&zv->zv_kstat, nwritten);
|
2019-09-25 19:20:30 +03:00
|
|
|
task_io_account_write(nwritten);
|
|
|
|
|
|
2025-02-24 07:14:23 +03:00
|
|
|
if (error == 0 && sync)
|
|
|
|
|
error = zil_commit(zv->zv_zilog, ZVOL_OBJ);
|
2019-09-25 19:20:30 +03:00
|
|
|
|
|
|
|
|
rw_exit(&zv->zv_suspend_lock);
|
2020-12-22 23:17:13 +03:00
|
|
|
|
2022-06-09 17:10:38 +03:00
|
|
|
if (bio && acct) {
|
2020-12-22 23:17:13 +03:00
|
|
|
blk_generic_end_io_acct(q, disk, WRITE, bio, start_time);
|
2022-06-09 17:10:38 +03:00
|
|
|
}
|
2020-12-22 23:17:13 +03:00
|
|
|
|
2025-08-09 03:04:01 +03:00
|
|
|
zvol_end_io(bio, rq, error);
|
2019-09-25 19:20:30 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
2021-03-03 19:15:28 +03:00
|
|
|
zvol_write_task(void *arg)
|
|
|
|
|
{
|
|
|
|
|
zv_request_task_t *task = arg;
|
|
|
|
|
zvol_write(&task->zvr);
|
|
|
|
|
zv_request_task_free(task);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
zvol_discard(zv_request_t *zvr)
|
2019-09-25 19:20:30 +03:00
|
|
|
{
|
|
|
|
|
struct bio *bio = zvr->bio;
|
2022-06-09 17:10:38 +03:00
|
|
|
struct request *rq = zvr->rq;
|
2019-09-25 19:20:30 +03:00
|
|
|
zvol_state_t *zv = zvr->zv;
|
2022-06-09 17:10:38 +03:00
|
|
|
uint64_t start = io_offset(bio, rq);
|
|
|
|
|
uint64_t size = io_size(bio, rq);
|
2019-09-25 19:20:30 +03:00
|
|
|
uint64_t end = start + size;
|
|
|
|
|
boolean_t sync;
|
|
|
|
|
int error = 0;
|
|
|
|
|
dmu_tx_t *tx;
|
2022-06-09 17:10:38 +03:00
|
|
|
struct request_queue *q = zv->zv_zso->zvo_queue;
|
|
|
|
|
struct gendisk *disk = zv->zv_zso->zvo_disk;
|
|
|
|
|
unsigned long start_time = 0;
|
2023-03-05 00:48:29 +03:00
|
|
|
boolean_t acct = B_FALSE;
|
2019-09-25 19:20:30 +03:00
|
|
|
|
2020-10-21 20:23:08 +03:00
|
|
|
ASSERT3P(zv, !=, NULL);
|
|
|
|
|
ASSERT3U(zv->zv_open_count, >, 0);
|
|
|
|
|
ASSERT3P(zv->zv_zilog, !=, NULL);
|
2019-09-25 19:20:30 +03:00
|
|
|
|
2022-06-09 17:10:38 +03:00
|
|
|
if (bio) {
|
|
|
|
|
acct = blk_queue_io_stat(q);
|
|
|
|
|
if (acct) {
|
|
|
|
|
start_time = blk_generic_start_io_acct(q, disk, WRITE,
|
|
|
|
|
bio);
|
|
|
|
|
}
|
|
|
|
|
}
|
2019-09-25 19:20:30 +03:00
|
|
|
|
2022-06-09 17:10:38 +03:00
|
|
|
sync = io_is_fua(bio, rq) || zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;
|
2019-09-25 19:20:30 +03:00
|
|
|
|
|
|
|
|
if (end > zv->zv_volsize) {
|
|
|
|
|
error = SET_ERROR(EIO);
|
|
|
|
|
goto unlock;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Align the request to volume block boundaries when a secure erase is
|
|
|
|
|
* not required. This will prevent dnode_free_range() from zeroing out
|
|
|
|
|
* the unaligned parts which is slow (read-modify-write) and useless
|
|
|
|
|
* since we are not freeing any space by doing so.
|
|
|
|
|
*/
|
2022-06-09 17:10:38 +03:00
|
|
|
if (!io_is_secure_erase(bio, rq)) {
|
2019-09-25 19:20:30 +03:00
|
|
|
start = P2ROUNDUP(start, zv->zv_volblocksize);
|
2024-05-10 18:47:21 +03:00
|
|
|
end = P2ALIGN_TYPED(end, zv->zv_volblocksize, uint64_t);
|
2019-09-25 19:20:30 +03:00
|
|
|
size = end - start;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (start >= end)
|
|
|
|
|
goto unlock;
|
|
|
|
|
|
Improve ZVOL sync write performance by using a taskq
== Summary ==
Prior to this change, sync writes to a zvol are processed serially.
This commit makes zvols process concurrently outstanding sync writes in
parallel, similar to how reads and async writes are already handled.
The result is that the throughput of sync writes is tripled.
== Background ==
When a write comes in for a zvol (e.g. over iscsi), it is processed by
calling `zvol_request()` to initiate the operation. ZFS is expected to
later call `BIO_END_IO()` when the operation completes (possibly from a
different thread). There are a limited number of threads that are
available to call `zvol_request()` - one one per iscsi client (unless
using MC/S). Therefore, to ensure good performance, the latency of
`zvol_request()` is important, so that many i/o operations to the zvol
can be processed concurrently. In other words, if the client has
multiple outstanding requests to the zvol, the zvol should have multiple
outstanding requests to the storage hardware (i.e. issue multiple
concurrent `zio_t`'s).
For reads, and async writes (i.e. writes which can be acknowledged
before the data reaches stable storage), `zvol_request()` achieves low
latency by dispatching the bulk of the work (including waiting for i/o
to disk) to a taskq. The taskq callback (`zvol_read()` or
`zvol_write()`) blocks while waiting for the i/o to disk to complete.
The `zvol_taskq` has 32 threads (by default), so we can have up to 32
concurrent i/os to disk in service of requests to zvols.
However, for sync writes (i.e. writes which must be persisted to stable
storage before they can be acknowledged, by calling `zil_commit()`),
`zvol_request()` does not use `zvol_taskq`. Instead it blocks while
waiting for the ZIL write to disk to complete. This has the effect of
serializing sync writes to each zvol. In other words, each zvol will
only process one sync write at a time, waiting for it to be written to
the ZIL before accepting the next request.
The same issue applies to FLUSH operations, for which `zvol_request()`
calls `zil_commit()` directly.
== Description of change ==
This commit changes `zvol_request()` to use
`taskq_dispatch_ent(zvol_taskq)` for sync writes, and FLUSh operations.
Therefore we can have up to 32 threads (the taskq threads)
simultaneously calling `zil_commit()`, for a theoretical performance
improvement of up to 32x.
To avoid the locking issue described in the comment (which this commit
removes), we acquire the rangelock from the taskq callback (e.g.
`zvol_write()`) rather than from `zvol_request()`. This applies to all
writes (sync and async), reads, and discard operations. This means that
multiple simultaneously-outstanding i/o's which access the same block
can complete in any order. This was previously thought to be incorrect,
but a review of the block device interface requirements revealed that
this is fine - the order is inherently not defined. The shorter hold
time of the rangelock should also have a slight performance improvement.
For an additional slight performance improvement, we use
`taskq_dispatch_ent()` instead of `taskq_dispatch()`, which avoids a
`kmem_alloc()` and eliminates a failure mode. This applies to all
writes (sync and async), reads, and discard operations.
== Performance results ==
We used a zvol as an iscsi target (server) for a Windows initiator
(client), with a single connection (the default - i.e. not MC/S).
We used `diskspd` to generate a workload with 4 threads, doing 1MB
writes to random offsets in the zvol. Without this change we get
231MB/s, and with the change we get 728MB/s, which is 3.15x the original
performance.
We ran a real-world workload, restoring a MSSQL database, and saw
throughput 2.5x the original.
We saw more modest performance wins (typically 1.5x-2x) when using MC/S
with 4 connections, and with different number of client threads (1, 8,
32).
Reviewed-by: Tony Nguyen <tony.nguyen@delphix.com>
Reviewed-by: Pavel Zakharov <pavel.zakharov@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
Closes #10163
2020-03-31 20:50:44 +03:00
|
|
|
zfs_locked_range_t *lr = zfs_rangelock_enter(&zv->zv_rangelock,
|
|
|
|
|
start, size, RL_WRITER);
|
|
|
|
|
|
2019-09-25 19:20:30 +03:00
|
|
|
tx = dmu_tx_create(zv->zv_objset);
|
|
|
|
|
dmu_tx_mark_netfree(tx);
|
2025-03-19 02:04:22 +03:00
|
|
|
error = dmu_tx_assign(tx, DMU_TX_WAIT);
|
2019-09-25 19:20:30 +03:00
|
|
|
if (error != 0) {
|
|
|
|
|
dmu_tx_abort(tx);
|
|
|
|
|
} else {
|
2023-10-31 00:51:56 +03:00
|
|
|
zvol_log_truncate(zv, tx, start, size);
|
2019-09-25 19:20:30 +03:00
|
|
|
dmu_tx_commit(tx);
|
|
|
|
|
error = dmu_free_long_range(zv->zv_objset,
|
|
|
|
|
ZVOL_OBJ, start, size);
|
|
|
|
|
}
|
Improve ZVOL sync write performance by using a taskq
== Summary ==
Prior to this change, sync writes to a zvol are processed serially.
This commit makes zvols process concurrently outstanding sync writes in
parallel, similar to how reads and async writes are already handled.
The result is that the throughput of sync writes is tripled.
== Background ==
When a write comes in for a zvol (e.g. over iscsi), it is processed by
calling `zvol_request()` to initiate the operation. ZFS is expected to
later call `BIO_END_IO()` when the operation completes (possibly from a
different thread). There are a limited number of threads that are
available to call `zvol_request()` - one one per iscsi client (unless
using MC/S). Therefore, to ensure good performance, the latency of
`zvol_request()` is important, so that many i/o operations to the zvol
can be processed concurrently. In other words, if the client has
multiple outstanding requests to the zvol, the zvol should have multiple
outstanding requests to the storage hardware (i.e. issue multiple
concurrent `zio_t`'s).
For reads, and async writes (i.e. writes which can be acknowledged
before the data reaches stable storage), `zvol_request()` achieves low
latency by dispatching the bulk of the work (including waiting for i/o
to disk) to a taskq. The taskq callback (`zvol_read()` or
`zvol_write()`) blocks while waiting for the i/o to disk to complete.
The `zvol_taskq` has 32 threads (by default), so we can have up to 32
concurrent i/os to disk in service of requests to zvols.
However, for sync writes (i.e. writes which must be persisted to stable
storage before they can be acknowledged, by calling `zil_commit()`),
`zvol_request()` does not use `zvol_taskq`. Instead it blocks while
waiting for the ZIL write to disk to complete. This has the effect of
serializing sync writes to each zvol. In other words, each zvol will
only process one sync write at a time, waiting for it to be written to
the ZIL before accepting the next request.
The same issue applies to FLUSH operations, for which `zvol_request()`
calls `zil_commit()` directly.
== Description of change ==
This commit changes `zvol_request()` to use
`taskq_dispatch_ent(zvol_taskq)` for sync writes, and FLUSh operations.
Therefore we can have up to 32 threads (the taskq threads)
simultaneously calling `zil_commit()`, for a theoretical performance
improvement of up to 32x.
To avoid the locking issue described in the comment (which this commit
removes), we acquire the rangelock from the taskq callback (e.g.
`zvol_write()`) rather than from `zvol_request()`. This applies to all
writes (sync and async), reads, and discard operations. This means that
multiple simultaneously-outstanding i/o's which access the same block
can complete in any order. This was previously thought to be incorrect,
but a review of the block device interface requirements revealed that
this is fine - the order is inherently not defined. The shorter hold
time of the rangelock should also have a slight performance improvement.
For an additional slight performance improvement, we use
`taskq_dispatch_ent()` instead of `taskq_dispatch()`, which avoids a
`kmem_alloc()` and eliminates a failure mode. This applies to all
writes (sync and async), reads, and discard operations.
== Performance results ==
We used a zvol as an iscsi target (server) for a Windows initiator
(client), with a single connection (the default - i.e. not MC/S).
We used `diskspd` to generate a workload with 4 threads, doing 1MB
writes to random offsets in the zvol. Without this change we get
231MB/s, and with the change we get 728MB/s, which is 3.15x the original
performance.
We ran a real-world workload, restoring a MSSQL database, and saw
throughput 2.5x the original.
We saw more modest performance wins (typically 1.5x-2x) when using MC/S
with 4 connections, and with different number of client threads (1, 8,
32).
Reviewed-by: Tony Nguyen <tony.nguyen@delphix.com>
Reviewed-by: Pavel Zakharov <pavel.zakharov@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
Closes #10163
2020-03-31 20:50:44 +03:00
|
|
|
zfs_rangelock_exit(lr);
|
2019-09-25 19:20:30 +03:00
|
|
|
|
|
|
|
|
if (error == 0 && sync)
|
2025-02-24 07:14:23 +03:00
|
|
|
error = zil_commit(zv->zv_zilog, ZVOL_OBJ);
|
2019-09-25 19:20:30 +03:00
|
|
|
|
Improve ZVOL sync write performance by using a taskq
== Summary ==
Prior to this change, sync writes to a zvol are processed serially.
This commit makes zvols process concurrently outstanding sync writes in
parallel, similar to how reads and async writes are already handled.
The result is that the throughput of sync writes is tripled.
== Background ==
When a write comes in for a zvol (e.g. over iscsi), it is processed by
calling `zvol_request()` to initiate the operation. ZFS is expected to
later call `BIO_END_IO()` when the operation completes (possibly from a
different thread). There are a limited number of threads that are
available to call `zvol_request()` - one one per iscsi client (unless
using MC/S). Therefore, to ensure good performance, the latency of
`zvol_request()` is important, so that many i/o operations to the zvol
can be processed concurrently. In other words, if the client has
multiple outstanding requests to the zvol, the zvol should have multiple
outstanding requests to the storage hardware (i.e. issue multiple
concurrent `zio_t`'s).
For reads, and async writes (i.e. writes which can be acknowledged
before the data reaches stable storage), `zvol_request()` achieves low
latency by dispatching the bulk of the work (including waiting for i/o
to disk) to a taskq. The taskq callback (`zvol_read()` or
`zvol_write()`) blocks while waiting for the i/o to disk to complete.
The `zvol_taskq` has 32 threads (by default), so we can have up to 32
concurrent i/os to disk in service of requests to zvols.
However, for sync writes (i.e. writes which must be persisted to stable
storage before they can be acknowledged, by calling `zil_commit()`),
`zvol_request()` does not use `zvol_taskq`. Instead it blocks while
waiting for the ZIL write to disk to complete. This has the effect of
serializing sync writes to each zvol. In other words, each zvol will
only process one sync write at a time, waiting for it to be written to
the ZIL before accepting the next request.
The same issue applies to FLUSH operations, for which `zvol_request()`
calls `zil_commit()` directly.
== Description of change ==
This commit changes `zvol_request()` to use
`taskq_dispatch_ent(zvol_taskq)` for sync writes, and FLUSh operations.
Therefore we can have up to 32 threads (the taskq threads)
simultaneously calling `zil_commit()`, for a theoretical performance
improvement of up to 32x.
To avoid the locking issue described in the comment (which this commit
removes), we acquire the rangelock from the taskq callback (e.g.
`zvol_write()`) rather than from `zvol_request()`. This applies to all
writes (sync and async), reads, and discard operations. This means that
multiple simultaneously-outstanding i/o's which access the same block
can complete in any order. This was previously thought to be incorrect,
but a review of the block device interface requirements revealed that
this is fine - the order is inherently not defined. The shorter hold
time of the rangelock should also have a slight performance improvement.
For an additional slight performance improvement, we use
`taskq_dispatch_ent()` instead of `taskq_dispatch()`, which avoids a
`kmem_alloc()` and eliminates a failure mode. This applies to all
writes (sync and async), reads, and discard operations.
== Performance results ==
We used a zvol as an iscsi target (server) for a Windows initiator
(client), with a single connection (the default - i.e. not MC/S).
We used `diskspd` to generate a workload with 4 threads, doing 1MB
writes to random offsets in the zvol. Without this change we get
231MB/s, and with the change we get 728MB/s, which is 3.15x the original
performance.
We ran a real-world workload, restoring a MSSQL database, and saw
throughput 2.5x the original.
We saw more modest performance wins (typically 1.5x-2x) when using MC/S
with 4 connections, and with different number of client threads (1, 8,
32).
Reviewed-by: Tony Nguyen <tony.nguyen@delphix.com>
Reviewed-by: Pavel Zakharov <pavel.zakharov@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
Closes #10163
2020-03-31 20:50:44 +03:00
|
|
|
unlock:
|
2019-09-25 19:20:30 +03:00
|
|
|
rw_exit(&zv->zv_suspend_lock);
|
2020-12-22 23:17:13 +03:00
|
|
|
|
2022-06-09 17:10:38 +03:00
|
|
|
if (bio && acct) {
|
|
|
|
|
blk_generic_end_io_acct(q, disk, WRITE, bio,
|
|
|
|
|
start_time);
|
|
|
|
|
}
|
2020-12-22 23:17:13 +03:00
|
|
|
|
2025-08-09 03:04:01 +03:00
|
|
|
zvol_end_io(bio, rq, error);
|
2019-09-25 19:20:30 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
2021-03-03 19:15:28 +03:00
|
|
|
zvol_discard_task(void *arg)
|
|
|
|
|
{
|
|
|
|
|
zv_request_task_t *task = arg;
|
|
|
|
|
zvol_discard(&task->zvr);
|
|
|
|
|
zv_request_task_free(task);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
zvol_read(zv_request_t *zvr)
|
2019-09-25 19:20:30 +03:00
|
|
|
{
|
|
|
|
|
struct bio *bio = zvr->bio;
|
2022-06-09 17:10:38 +03:00
|
|
|
struct request *rq = zvr->rq;
|
2020-12-18 19:48:26 +03:00
|
|
|
int error = 0;
|
2021-01-21 08:27:30 +03:00
|
|
|
zfs_uio_t uio;
|
2022-06-09 17:10:38 +03:00
|
|
|
boolean_t acct = B_FALSE;
|
2019-09-25 19:20:30 +03:00
|
|
|
zvol_state_t *zv = zvr->zv;
|
2022-06-09 17:10:38 +03:00
|
|
|
struct request_queue *q;
|
|
|
|
|
struct gendisk *disk;
|
|
|
|
|
unsigned long start_time = 0;
|
|
|
|
|
|
2020-10-21 20:23:08 +03:00
|
|
|
ASSERT3P(zv, !=, NULL);
|
|
|
|
|
ASSERT3U(zv->zv_open_count, >, 0);
|
2019-09-25 19:20:30 +03:00
|
|
|
|
2022-06-09 17:10:38 +03:00
|
|
|
zfs_uio_bvec_init(&uio, bio, rq);
|
|
|
|
|
|
|
|
|
|
q = zv->zv_zso->zvo_queue;
|
|
|
|
|
disk = zv->zv_zso->zvo_disk;
|
|
|
|
|
|
2019-09-25 19:20:30 +03:00
|
|
|
ssize_t start_resid = uio.uio_resid;
|
2020-12-22 23:17:13 +03:00
|
|
|
|
2022-06-09 17:10:38 +03:00
|
|
|
/*
|
|
|
|
|
* When blk-mq is being used, accounting is done by
|
|
|
|
|
* blk_mq_start_request() and blk_mq_end_request().
|
|
|
|
|
*/
|
|
|
|
|
if (bio) {
|
|
|
|
|
acct = blk_queue_io_stat(q);
|
|
|
|
|
if (acct)
|
|
|
|
|
start_time = blk_generic_start_io_acct(q, disk, READ,
|
|
|
|
|
bio);
|
|
|
|
|
}
|
2019-09-25 19:20:30 +03:00
|
|
|
|
Improve ZVOL sync write performance by using a taskq
== Summary ==
Prior to this change, sync writes to a zvol are processed serially.
This commit makes zvols process concurrently outstanding sync writes in
parallel, similar to how reads and async writes are already handled.
The result is that the throughput of sync writes is tripled.
== Background ==
When a write comes in for a zvol (e.g. over iscsi), it is processed by
calling `zvol_request()` to initiate the operation. ZFS is expected to
later call `BIO_END_IO()` when the operation completes (possibly from a
different thread). There are a limited number of threads that are
available to call `zvol_request()` - one one per iscsi client (unless
using MC/S). Therefore, to ensure good performance, the latency of
`zvol_request()` is important, so that many i/o operations to the zvol
can be processed concurrently. In other words, if the client has
multiple outstanding requests to the zvol, the zvol should have multiple
outstanding requests to the storage hardware (i.e. issue multiple
concurrent `zio_t`'s).
For reads, and async writes (i.e. writes which can be acknowledged
before the data reaches stable storage), `zvol_request()` achieves low
latency by dispatching the bulk of the work (including waiting for i/o
to disk) to a taskq. The taskq callback (`zvol_read()` or
`zvol_write()`) blocks while waiting for the i/o to disk to complete.
The `zvol_taskq` has 32 threads (by default), so we can have up to 32
concurrent i/os to disk in service of requests to zvols.
However, for sync writes (i.e. writes which must be persisted to stable
storage before they can be acknowledged, by calling `zil_commit()`),
`zvol_request()` does not use `zvol_taskq`. Instead it blocks while
waiting for the ZIL write to disk to complete. This has the effect of
serializing sync writes to each zvol. In other words, each zvol will
only process one sync write at a time, waiting for it to be written to
the ZIL before accepting the next request.
The same issue applies to FLUSH operations, for which `zvol_request()`
calls `zil_commit()` directly.
== Description of change ==
This commit changes `zvol_request()` to use
`taskq_dispatch_ent(zvol_taskq)` for sync writes, and FLUSh operations.
Therefore we can have up to 32 threads (the taskq threads)
simultaneously calling `zil_commit()`, for a theoretical performance
improvement of up to 32x.
To avoid the locking issue described in the comment (which this commit
removes), we acquire the rangelock from the taskq callback (e.g.
`zvol_write()`) rather than from `zvol_request()`. This applies to all
writes (sync and async), reads, and discard operations. This means that
multiple simultaneously-outstanding i/o's which access the same block
can complete in any order. This was previously thought to be incorrect,
but a review of the block device interface requirements revealed that
this is fine - the order is inherently not defined. The shorter hold
time of the rangelock should also have a slight performance improvement.
For an additional slight performance improvement, we use
`taskq_dispatch_ent()` instead of `taskq_dispatch()`, which avoids a
`kmem_alloc()` and eliminates a failure mode. This applies to all
writes (sync and async), reads, and discard operations.
== Performance results ==
We used a zvol as an iscsi target (server) for a Windows initiator
(client), with a single connection (the default - i.e. not MC/S).
We used `diskspd` to generate a workload with 4 threads, doing 1MB
writes to random offsets in the zvol. Without this change we get
231MB/s, and with the change we get 728MB/s, which is 3.15x the original
performance.
We ran a real-world workload, restoring a MSSQL database, and saw
throughput 2.5x the original.
We saw more modest performance wins (typically 1.5x-2x) when using MC/S
with 4 connections, and with different number of client threads (1, 8,
32).
Reviewed-by: Tony Nguyen <tony.nguyen@delphix.com>
Reviewed-by: Pavel Zakharov <pavel.zakharov@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
Closes #10163
2020-03-31 20:50:44 +03:00
|
|
|
zfs_locked_range_t *lr = zfs_rangelock_enter(&zv->zv_rangelock,
|
|
|
|
|
uio.uio_loffset, uio.uio_resid, RL_READER);
|
|
|
|
|
|
2019-09-25 19:20:30 +03:00
|
|
|
uint64_t volsize = zv->zv_volsize;
|
2022-06-09 17:10:38 +03:00
|
|
|
|
2019-09-25 19:20:30 +03:00
|
|
|
while (uio.uio_resid > 0 && uio.uio_loffset < volsize) {
|
|
|
|
|
uint64_t bytes = MIN(uio.uio_resid, DMU_MAX_ACCESS >> 1);
|
|
|
|
|
|
|
|
|
|
/* don't read past the end */
|
|
|
|
|
if (bytes > volsize - uio.uio_loffset)
|
|
|
|
|
bytes = volsize - uio.uio_loffset;
|
|
|
|
|
|
Wire O_DIRECT also to Uncached I/O (#17218)
Before Direct I/O was implemented, I've implemented lighter version
I called Uncached I/O. It uses normal DMU/ARC data path with some
optimizations, but evicts data from caches as soon as possible and
reasonable. Originally I wired it only to a primarycache property,
but now completing the integration all the way up to the VFS.
While Direct I/O has the lowest possible memory bandwidth usage,
it also has a significant number of limitations. It require I/Os
to be page aligned, does not allow speculative prefetch, etc. The
Uncached I/O does not have those limitations, but instead require
additional memory copy, though still one less than regular cached
I/O. As such it should fill the gap in between. Considering this
I've disabled annoying EINVAL errors on misaligned requests, adding
a tunable for those who wants to test their applications.
To pass the information between the layers I had to change a number
of APIs. But as side effect upper layers can now control not only
the caching, but also speculative prefetch. I haven't wired it to
VFS yet, since it require looking on some OS specifics. But while
there I've implemented speculative prefetch of indirect blocks for
Direct I/O, controllable via all the same mechanisms.
Signed-off-by: Alexander Motin <mav@FreeBSD.org>
Sponsored by: iXsystems, Inc.
Fixes #17027
Reviewed-by: Rob Norris <robn@despairlabs.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
2025-05-14 00:26:55 +03:00
|
|
|
error = dmu_read_uio_dnode(zv->zv_dn, &uio, bytes,
|
|
|
|
|
DMU_READ_PREFETCH);
|
2019-09-25 19:20:30 +03:00
|
|
|
if (error) {
|
|
|
|
|
/* convert checksum errors into IO errors */
|
|
|
|
|
if (error == ECKSUM)
|
|
|
|
|
error = SET_ERROR(EIO);
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
Improve ZVOL sync write performance by using a taskq
== Summary ==
Prior to this change, sync writes to a zvol are processed serially.
This commit makes zvols process concurrently outstanding sync writes in
parallel, similar to how reads and async writes are already handled.
The result is that the throughput of sync writes is tripled.
== Background ==
When a write comes in for a zvol (e.g. over iscsi), it is processed by
calling `zvol_request()` to initiate the operation. ZFS is expected to
later call `BIO_END_IO()` when the operation completes (possibly from a
different thread). There are a limited number of threads that are
available to call `zvol_request()` - one one per iscsi client (unless
using MC/S). Therefore, to ensure good performance, the latency of
`zvol_request()` is important, so that many i/o operations to the zvol
can be processed concurrently. In other words, if the client has
multiple outstanding requests to the zvol, the zvol should have multiple
outstanding requests to the storage hardware (i.e. issue multiple
concurrent `zio_t`'s).
For reads, and async writes (i.e. writes which can be acknowledged
before the data reaches stable storage), `zvol_request()` achieves low
latency by dispatching the bulk of the work (including waiting for i/o
to disk) to a taskq. The taskq callback (`zvol_read()` or
`zvol_write()`) blocks while waiting for the i/o to disk to complete.
The `zvol_taskq` has 32 threads (by default), so we can have up to 32
concurrent i/os to disk in service of requests to zvols.
However, for sync writes (i.e. writes which must be persisted to stable
storage before they can be acknowledged, by calling `zil_commit()`),
`zvol_request()` does not use `zvol_taskq`. Instead it blocks while
waiting for the ZIL write to disk to complete. This has the effect of
serializing sync writes to each zvol. In other words, each zvol will
only process one sync write at a time, waiting for it to be written to
the ZIL before accepting the next request.
The same issue applies to FLUSH operations, for which `zvol_request()`
calls `zil_commit()` directly.
== Description of change ==
This commit changes `zvol_request()` to use
`taskq_dispatch_ent(zvol_taskq)` for sync writes, and FLUSh operations.
Therefore we can have up to 32 threads (the taskq threads)
simultaneously calling `zil_commit()`, for a theoretical performance
improvement of up to 32x.
To avoid the locking issue described in the comment (which this commit
removes), we acquire the rangelock from the taskq callback (e.g.
`zvol_write()`) rather than from `zvol_request()`. This applies to all
writes (sync and async), reads, and discard operations. This means that
multiple simultaneously-outstanding i/o's which access the same block
can complete in any order. This was previously thought to be incorrect,
but a review of the block device interface requirements revealed that
this is fine - the order is inherently not defined. The shorter hold
time of the rangelock should also have a slight performance improvement.
For an additional slight performance improvement, we use
`taskq_dispatch_ent()` instead of `taskq_dispatch()`, which avoids a
`kmem_alloc()` and eliminates a failure mode. This applies to all
writes (sync and async), reads, and discard operations.
== Performance results ==
We used a zvol as an iscsi target (server) for a Windows initiator
(client), with a single connection (the default - i.e. not MC/S).
We used `diskspd` to generate a workload with 4 threads, doing 1MB
writes to random offsets in the zvol. Without this change we get
231MB/s, and with the change we get 728MB/s, which is 3.15x the original
performance.
We ran a real-world workload, restoring a MSSQL database, and saw
throughput 2.5x the original.
We saw more modest performance wins (typically 1.5x-2x) when using MC/S
with 4 connections, and with different number of client threads (1, 8,
32).
Reviewed-by: Tony Nguyen <tony.nguyen@delphix.com>
Reviewed-by: Pavel Zakharov <pavel.zakharov@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
Closes #10163
2020-03-31 20:50:44 +03:00
|
|
|
zfs_rangelock_exit(lr);
|
2019-09-25 19:20:30 +03:00
|
|
|
|
|
|
|
|
int64_t nread = start_resid - uio.uio_resid;
|
2020-06-06 03:17:02 +03:00
|
|
|
dataset_kstats_update_read_kstats(&zv->zv_kstat, nread);
|
2019-09-25 19:20:30 +03:00
|
|
|
task_io_account_read(nread);
|
|
|
|
|
|
|
|
|
|
rw_exit(&zv->zv_suspend_lock);
|
2020-12-22 23:17:13 +03:00
|
|
|
|
2022-06-09 17:10:38 +03:00
|
|
|
if (bio && acct) {
|
2020-12-22 23:17:13 +03:00
|
|
|
blk_generic_end_io_acct(q, disk, READ, bio, start_time);
|
2022-06-09 17:10:38 +03:00
|
|
|
}
|
2020-12-22 23:17:13 +03:00
|
|
|
|
2025-08-09 03:04:01 +03:00
|
|
|
zvol_end_io(bio, rq, error);
|
2021-03-03 19:15:28 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
zvol_read_task(void *arg)
|
|
|
|
|
{
|
|
|
|
|
zv_request_task_t *task = arg;
|
|
|
|
|
zvol_read(&task->zvr);
|
|
|
|
|
zv_request_task_free(task);
|
2019-09-25 19:20:30 +03:00
|
|
|
}
|
|
|
|
|
|
2022-06-09 17:10:38 +03:00
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Process a BIO or request
|
|
|
|
|
*
|
|
|
|
|
* Either 'bio' or 'rq' should be set depending on if we are processing a
|
|
|
|
|
* bio or a request (both should not be set).
|
|
|
|
|
*
|
|
|
|
|
* force_sync: Set to 0 to defer processing to a background taskq
|
|
|
|
|
* Set to 1 to process data synchronously
|
|
|
|
|
*/
|
2021-12-03 06:54:05 +03:00
|
|
|
static void
|
2022-06-09 17:10:38 +03:00
|
|
|
zvol_request_impl(zvol_state_t *zv, struct bio *bio, struct request *rq,
|
|
|
|
|
boolean_t force_sync)
|
2019-09-25 19:20:30 +03:00
|
|
|
{
|
|
|
|
|
fstrans_cookie_t cookie = spl_fstrans_mark();
|
2022-06-09 17:10:38 +03:00
|
|
|
uint64_t offset = io_offset(bio, rq);
|
|
|
|
|
uint64_t size = io_size(bio, rq);
|
|
|
|
|
int rw = io_data_dir(bio, rq);
|
2019-09-25 19:20:30 +03:00
|
|
|
|
zvol: ensure device minors are properly cleaned up
Currently, if a minor is in use when we try to remove it, we'll skip it
and never come back to it again. Since the zvol state is hung off the
minor in the kernel, this can get us into weird situations if something
tries to use it after the removal fails. It's even worse at pool export,
as there's now a vestigial zvol state with no pool under it. It's
weirder again if the pool is subsequently reimported, as the zvol code
(reasonably) assumes the zvol state has been properly setup, when it's
actually left over from the previous import of the pool.
This commit attempts to tackle that by setting a flag on the zvol if its
minor can't be removed, and then checking that flag when a request is
made and rejecting it, thus stopping new work coming in.
The flag also causes a condvar to be signaled when the last client
finishes. For the case where a single minor is being removed (eg
changing volmode), it will wait for this signal before proceeding.
Meanwhile, when removing all minors, a background task is created for
each minor that couldn't be removed on the spot, and those tasks then
wake and clean up.
Since any new tasks are queued on to the pool's spa_zvol_taskq,
spa_export_common() will continue to wait at export until all minors are
removed.
Reviewed-by: Tony Hutter <hutter2@llnl.gov>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Sponsored-by: Klara, Inc.
Sponsored-by: Wasabi Technology, Inc.
Signed-off-by: Rob Norris <rob.norris@klarasystems.com>
Closes #14872
Closes #16364
2024-07-18 06:24:05 +03:00
|
|
|
if (unlikely(zv->zv_flags & ZVOL_REMOVING)) {
|
2025-08-09 03:04:01 +03:00
|
|
|
zvol_end_io(bio, rq, SET_ERROR(ENXIO));
|
zvol: ensure device minors are properly cleaned up
Currently, if a minor is in use when we try to remove it, we'll skip it
and never come back to it again. Since the zvol state is hung off the
minor in the kernel, this can get us into weird situations if something
tries to use it after the removal fails. It's even worse at pool export,
as there's now a vestigial zvol state with no pool under it. It's
weirder again if the pool is subsequently reimported, as the zvol code
(reasonably) assumes the zvol state has been properly setup, when it's
actually left over from the previous import of the pool.
This commit attempts to tackle that by setting a flag on the zvol if its
minor can't be removed, and then checking that flag when a request is
made and rejecting it, thus stopping new work coming in.
The flag also causes a condvar to be signaled when the last client
finishes. For the case where a single minor is being removed (eg
changing volmode), it will wait for this signal before proceeding.
Meanwhile, when removing all minors, a background task is created for
each minor that couldn't be removed on the spot, and those tasks then
wake and clean up.
Since any new tasks are queued on to the pool's spa_zvol_taskq,
spa_export_common() will continue to wait at export until all minors are
removed.
Reviewed-by: Tony Hutter <hutter2@llnl.gov>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Sponsored-by: Klara, Inc.
Sponsored-by: Wasabi Technology, Inc.
Signed-off-by: Rob Norris <rob.norris@klarasystems.com>
Closes #14872
Closes #16364
2024-07-18 06:24:05 +03:00
|
|
|
goto out;
|
|
|
|
|
}
|
|
|
|
|
|
2023-10-25 00:53:27 +03:00
|
|
|
if (zvol_request_sync || zv->zv_threading == B_FALSE)
|
2022-06-09 17:10:38 +03:00
|
|
|
force_sync = 1;
|
|
|
|
|
|
|
|
|
|
zv_request_t zvr = {
|
|
|
|
|
.zv = zv,
|
|
|
|
|
.bio = bio,
|
|
|
|
|
.rq = rq,
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
if (io_has_data(bio, rq) && offset + size > zv->zv_volsize) {
|
|
|
|
|
printk(KERN_INFO "%s: bad access: offset=%llu, size=%lu\n",
|
2019-09-25 19:20:30 +03:00
|
|
|
zv->zv_zso->zvo_disk->disk_name,
|
|
|
|
|
(long long unsigned)offset,
|
|
|
|
|
(long unsigned)size);
|
|
|
|
|
|
2025-08-09 03:04:01 +03:00
|
|
|
zvol_end_io(bio, rq, SET_ERROR(EIO));
|
2019-09-25 19:20:30 +03:00
|
|
|
goto out;
|
|
|
|
|
}
|
|
|
|
|
|
2021-03-03 19:15:28 +03:00
|
|
|
zv_request_task_t *task;
|
2024-04-04 04:21:25 +03:00
|
|
|
zv_taskq_t *ztqs = &zvol_taskqs;
|
|
|
|
|
uint_t blk_mq_hw_queue = 0;
|
|
|
|
|
uint_t tq_idx;
|
|
|
|
|
uint_t taskq_hash;
|
|
|
|
|
if (rq)
|
2024-04-08 21:38:49 +03:00
|
|
|
#ifdef HAVE_BLK_MQ_RQ_HCTX
|
2024-04-04 04:21:25 +03:00
|
|
|
blk_mq_hw_queue = rq->mq_hctx->queue_num;
|
2024-04-08 21:38:49 +03:00
|
|
|
#else
|
linux/zvol_os: fix crash with blk-mq on Linux 4.19
03987f71e3 (#16069) added a workaround to get the blk-mq hardware
context for older kernels that don't cache it in the struct request.
However, this workaround appears to be incomplete.
In 4.19, the rq data context is optional. If its not initialised, then
the cached rq->cpu will be -1, and so using it to index into mq_map
causes a crash.
Given that the upstream 4.19 is now in extended LTS and rarely seen,
RHEL8 4.18+ has long carried "modern" blk-mq support, and the cached
hardware context has been available since 5.1, I'm not going to huge
lengths to get queue selection correct for the very few people that are
likely to feel it. To that end, we simply call raw_smp_processor_id() to
get a valid CPU id and use that instead.
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Alexander Motin <alexander.motin@TrueNAS.com>
Reviewed-by: Paul Dagnelie <paul.dagnelie@klarasystems.com>
Signed-off-by: Rob Norris <rob.norris@klarasystems.com>
Sponsored-by: Klara, Inc.
Sponsored-by: Wasabi Technology, Inc.
Closes #17597
2025-08-08 19:39:14 +03:00
|
|
|
blk_mq_hw_queue = rq->q->queue_hw_ctx[
|
|
|
|
|
rq->q->mq_map[raw_smp_processor_id()]]->queue_num;
|
2024-04-04 04:21:25 +03:00
|
|
|
#endif
|
2024-09-07 17:07:14 +03:00
|
|
|
taskq_hash = cityhash3((uintptr_t)zv, offset >> ZVOL_TASKQ_OFFSET_SHIFT,
|
|
|
|
|
blk_mq_hw_queue);
|
2024-04-04 04:21:25 +03:00
|
|
|
tq_idx = taskq_hash % ztqs->tqs_cnt;
|
2021-03-03 19:15:28 +03:00
|
|
|
|
2019-09-25 19:20:30 +03:00
|
|
|
if (rw == WRITE) {
|
|
|
|
|
if (unlikely(zv->zv_flags & ZVOL_RDONLY)) {
|
2025-08-09 03:04:01 +03:00
|
|
|
zvol_end_io(bio, rq, SET_ERROR(EROFS));
|
2019-09-25 19:20:30 +03:00
|
|
|
goto out;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
Improve ZVOL sync write performance by using a taskq
== Summary ==
Prior to this change, sync writes to a zvol are processed serially.
This commit makes zvols process concurrently outstanding sync writes in
parallel, similar to how reads and async writes are already handled.
The result is that the throughput of sync writes is tripled.
== Background ==
When a write comes in for a zvol (e.g. over iscsi), it is processed by
calling `zvol_request()` to initiate the operation. ZFS is expected to
later call `BIO_END_IO()` when the operation completes (possibly from a
different thread). There are a limited number of threads that are
available to call `zvol_request()` - one one per iscsi client (unless
using MC/S). Therefore, to ensure good performance, the latency of
`zvol_request()` is important, so that many i/o operations to the zvol
can be processed concurrently. In other words, if the client has
multiple outstanding requests to the zvol, the zvol should have multiple
outstanding requests to the storage hardware (i.e. issue multiple
concurrent `zio_t`'s).
For reads, and async writes (i.e. writes which can be acknowledged
before the data reaches stable storage), `zvol_request()` achieves low
latency by dispatching the bulk of the work (including waiting for i/o
to disk) to a taskq. The taskq callback (`zvol_read()` or
`zvol_write()`) blocks while waiting for the i/o to disk to complete.
The `zvol_taskq` has 32 threads (by default), so we can have up to 32
concurrent i/os to disk in service of requests to zvols.
However, for sync writes (i.e. writes which must be persisted to stable
storage before they can be acknowledged, by calling `zil_commit()`),
`zvol_request()` does not use `zvol_taskq`. Instead it blocks while
waiting for the ZIL write to disk to complete. This has the effect of
serializing sync writes to each zvol. In other words, each zvol will
only process one sync write at a time, waiting for it to be written to
the ZIL before accepting the next request.
The same issue applies to FLUSH operations, for which `zvol_request()`
calls `zil_commit()` directly.
== Description of change ==
This commit changes `zvol_request()` to use
`taskq_dispatch_ent(zvol_taskq)` for sync writes, and FLUSh operations.
Therefore we can have up to 32 threads (the taskq threads)
simultaneously calling `zil_commit()`, for a theoretical performance
improvement of up to 32x.
To avoid the locking issue described in the comment (which this commit
removes), we acquire the rangelock from the taskq callback (e.g.
`zvol_write()`) rather than from `zvol_request()`. This applies to all
writes (sync and async), reads, and discard operations. This means that
multiple simultaneously-outstanding i/o's which access the same block
can complete in any order. This was previously thought to be incorrect,
but a review of the block device interface requirements revealed that
this is fine - the order is inherently not defined. The shorter hold
time of the rangelock should also have a slight performance improvement.
For an additional slight performance improvement, we use
`taskq_dispatch_ent()` instead of `taskq_dispatch()`, which avoids a
`kmem_alloc()` and eliminates a failure mode. This applies to all
writes (sync and async), reads, and discard operations.
== Performance results ==
We used a zvol as an iscsi target (server) for a Windows initiator
(client), with a single connection (the default - i.e. not MC/S).
We used `diskspd` to generate a workload with 4 threads, doing 1MB
writes to random offsets in the zvol. Without this change we get
231MB/s, and with the change we get 728MB/s, which is 3.15x the original
performance.
We ran a real-world workload, restoring a MSSQL database, and saw
throughput 2.5x the original.
We saw more modest performance wins (typically 1.5x-2x) when using MC/S
with 4 connections, and with different number of client threads (1, 8,
32).
Reviewed-by: Tony Nguyen <tony.nguyen@delphix.com>
Reviewed-by: Pavel Zakharov <pavel.zakharov@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
Closes #10163
2020-03-31 20:50:44 +03:00
|
|
|
* Prevents the zvol from being suspended, or the ZIL being
|
|
|
|
|
* concurrently opened. Will be released after the i/o
|
|
|
|
|
* completes.
|
2019-09-25 19:20:30 +03:00
|
|
|
*/
|
|
|
|
|
rw_enter(&zv->zv_suspend_lock, RW_READER);
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Open a ZIL if this is the first time we have written to this
|
|
|
|
|
* zvol. We protect zv->zv_zilog with zv_suspend_lock rather
|
|
|
|
|
* than zv_state_lock so that we don't need to acquire an
|
|
|
|
|
* additional lock in this path.
|
|
|
|
|
*/
|
|
|
|
|
if (zv->zv_zilog == NULL) {
|
|
|
|
|
rw_exit(&zv->zv_suspend_lock);
|
|
|
|
|
rw_enter(&zv->zv_suspend_lock, RW_WRITER);
|
|
|
|
|
if (zv->zv_zilog == NULL) {
|
|
|
|
|
zv->zv_zilog = zil_open(zv->zv_objset,
|
2022-07-21 03:14:06 +03:00
|
|
|
zvol_get_data, &zv->zv_kstat.dk_zil_sums);
|
2019-09-25 19:20:30 +03:00
|
|
|
zv->zv_flags |= ZVOL_WRITTEN_TO;
|
zvol: call zil_replaying() during replay
zil_replaying(zil, tx) has the side-effect of informing the ZIL that an
entry has been replayed in the (still open) tx. The ZIL uses that
information to record the replay progress in the ZIL header when that
tx's txg syncs.
ZPL log entries are not idempotent and logically dependent and thus
calling zil_replaying() is necessary for correctness.
For ZVOLs the question of correctness is more nuanced: ZVOL logs only
TX_WRITE and TX_TRUNCATE, both of which are idempotent. Logical
dependencies between two records exist only if the write or discard
request had sync semantics or if the ranges affected by the records
overlap.
Thus, at a first glance, it would be correct to restart replay from
the beginning if we crash before replay completes. But this does not
address the following scenario:
Assume one log record per LWB.
The chain on disk is
HDR -> 1:W(1, "A") -> 2:W(1, "B") -> 3:W(2, "X") -> 4:W(3, "Z")
where N:W(O, C) represents log entry number N which is a TX_WRITE of C
to offset A.
We replay 1, 2 and 3 in one txg, sync that txg, then crash.
Bit flips corrupt 2, 3, and 4.
We come up again and restart replay from the beginning because
we did not call zil_replaying() during replay.
We replay 1 again, then interpret 2's invalid checksum as the end
of the ZIL chain and call replay done.
The replayed zvol content is "AX".
If we had called zil_replaying() the HDR would have pointed to 3
and our resumed replay would not have replayed anything because
3 was corrupted, resulting in zvol content "BX".
If 3 logically depends on 2 then the replay corrupted the ZVOL_OBJ's
contents.
This patch adds the zil_replaying() calls to the replay functions.
Since the callbacks in the replay function need the zilog_t* pointer
so that they can call zil_replaying() we open the ZIL while
replaying in zvol_create_minor(). We also verify that replay has
been done when on-demand-opening the ZIL on the first modifying
bio.
Reviewed-by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Christian Schwarz <me@cschwarz.com>
Closes #11667
2021-03-07 20:49:58 +03:00
|
|
|
/* replay / destroy done in zvol_create_minor */
|
|
|
|
|
VERIFY0((zv->zv_zilog->zl_header->zh_flags &
|
|
|
|
|
ZIL_REPLAY_NEEDED));
|
2019-09-25 19:20:30 +03:00
|
|
|
}
|
|
|
|
|
rw_downgrade(&zv->zv_suspend_lock);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
Improve ZVOL sync write performance by using a taskq
== Summary ==
Prior to this change, sync writes to a zvol are processed serially.
This commit makes zvols process concurrently outstanding sync writes in
parallel, similar to how reads and async writes are already handled.
The result is that the throughput of sync writes is tripled.
== Background ==
When a write comes in for a zvol (e.g. over iscsi), it is processed by
calling `zvol_request()` to initiate the operation. ZFS is expected to
later call `BIO_END_IO()` when the operation completes (possibly from a
different thread). There are a limited number of threads that are
available to call `zvol_request()` - one one per iscsi client (unless
using MC/S). Therefore, to ensure good performance, the latency of
`zvol_request()` is important, so that many i/o operations to the zvol
can be processed concurrently. In other words, if the client has
multiple outstanding requests to the zvol, the zvol should have multiple
outstanding requests to the storage hardware (i.e. issue multiple
concurrent `zio_t`'s).
For reads, and async writes (i.e. writes which can be acknowledged
before the data reaches stable storage), `zvol_request()` achieves low
latency by dispatching the bulk of the work (including waiting for i/o
to disk) to a taskq. The taskq callback (`zvol_read()` or
`zvol_write()`) blocks while waiting for the i/o to disk to complete.
The `zvol_taskq` has 32 threads (by default), so we can have up to 32
concurrent i/os to disk in service of requests to zvols.
However, for sync writes (i.e. writes which must be persisted to stable
storage before they can be acknowledged, by calling `zil_commit()`),
`zvol_request()` does not use `zvol_taskq`. Instead it blocks while
waiting for the ZIL write to disk to complete. This has the effect of
serializing sync writes to each zvol. In other words, each zvol will
only process one sync write at a time, waiting for it to be written to
the ZIL before accepting the next request.
The same issue applies to FLUSH operations, for which `zvol_request()`
calls `zil_commit()` directly.
== Description of change ==
This commit changes `zvol_request()` to use
`taskq_dispatch_ent(zvol_taskq)` for sync writes, and FLUSh operations.
Therefore we can have up to 32 threads (the taskq threads)
simultaneously calling `zil_commit()`, for a theoretical performance
improvement of up to 32x.
To avoid the locking issue described in the comment (which this commit
removes), we acquire the rangelock from the taskq callback (e.g.
`zvol_write()`) rather than from `zvol_request()`. This applies to all
writes (sync and async), reads, and discard operations. This means that
multiple simultaneously-outstanding i/o's which access the same block
can complete in any order. This was previously thought to be incorrect,
but a review of the block device interface requirements revealed that
this is fine - the order is inherently not defined. The shorter hold
time of the rangelock should also have a slight performance improvement.
For an additional slight performance improvement, we use
`taskq_dispatch_ent()` instead of `taskq_dispatch()`, which avoids a
`kmem_alloc()` and eliminates a failure mode. This applies to all
writes (sync and async), reads, and discard operations.
== Performance results ==
We used a zvol as an iscsi target (server) for a Windows initiator
(client), with a single connection (the default - i.e. not MC/S).
We used `diskspd` to generate a workload with 4 threads, doing 1MB
writes to random offsets in the zvol. Without this change we get
231MB/s, and with the change we get 728MB/s, which is 3.15x the original
performance.
We ran a real-world workload, restoring a MSSQL database, and saw
throughput 2.5x the original.
We saw more modest performance wins (typically 1.5x-2x) when using MC/S
with 4 connections, and with different number of client threads (1, 8,
32).
Reviewed-by: Tony Nguyen <tony.nguyen@delphix.com>
Reviewed-by: Pavel Zakharov <pavel.zakharov@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
Closes #10163
2020-03-31 20:50:44 +03:00
|
|
|
* We don't want this thread to be blocked waiting for i/o to
|
|
|
|
|
* complete, so we instead wait from a taskq callback. The
|
|
|
|
|
* i/o may be a ZIL write (via zil_commit()), or a read of an
|
|
|
|
|
* indirect block, or a read of a data block (if this is a
|
|
|
|
|
* partial-block write). We will indicate that the i/o is
|
2022-06-09 17:10:38 +03:00
|
|
|
* complete by calling END_IO() from the taskq callback.
|
Improve ZVOL sync write performance by using a taskq
== Summary ==
Prior to this change, sync writes to a zvol are processed serially.
This commit makes zvols process concurrently outstanding sync writes in
parallel, similar to how reads and async writes are already handled.
The result is that the throughput of sync writes is tripled.
== Background ==
When a write comes in for a zvol (e.g. over iscsi), it is processed by
calling `zvol_request()` to initiate the operation. ZFS is expected to
later call `BIO_END_IO()` when the operation completes (possibly from a
different thread). There are a limited number of threads that are
available to call `zvol_request()` - one one per iscsi client (unless
using MC/S). Therefore, to ensure good performance, the latency of
`zvol_request()` is important, so that many i/o operations to the zvol
can be processed concurrently. In other words, if the client has
multiple outstanding requests to the zvol, the zvol should have multiple
outstanding requests to the storage hardware (i.e. issue multiple
concurrent `zio_t`'s).
For reads, and async writes (i.e. writes which can be acknowledged
before the data reaches stable storage), `zvol_request()` achieves low
latency by dispatching the bulk of the work (including waiting for i/o
to disk) to a taskq. The taskq callback (`zvol_read()` or
`zvol_write()`) blocks while waiting for the i/o to disk to complete.
The `zvol_taskq` has 32 threads (by default), so we can have up to 32
concurrent i/os to disk in service of requests to zvols.
However, for sync writes (i.e. writes which must be persisted to stable
storage before they can be acknowledged, by calling `zil_commit()`),
`zvol_request()` does not use `zvol_taskq`. Instead it blocks while
waiting for the ZIL write to disk to complete. This has the effect of
serializing sync writes to each zvol. In other words, each zvol will
only process one sync write at a time, waiting for it to be written to
the ZIL before accepting the next request.
The same issue applies to FLUSH operations, for which `zvol_request()`
calls `zil_commit()` directly.
== Description of change ==
This commit changes `zvol_request()` to use
`taskq_dispatch_ent(zvol_taskq)` for sync writes, and FLUSh operations.
Therefore we can have up to 32 threads (the taskq threads)
simultaneously calling `zil_commit()`, for a theoretical performance
improvement of up to 32x.
To avoid the locking issue described in the comment (which this commit
removes), we acquire the rangelock from the taskq callback (e.g.
`zvol_write()`) rather than from `zvol_request()`. This applies to all
writes (sync and async), reads, and discard operations. This means that
multiple simultaneously-outstanding i/o's which access the same block
can complete in any order. This was previously thought to be incorrect,
but a review of the block device interface requirements revealed that
this is fine - the order is inherently not defined. The shorter hold
time of the rangelock should also have a slight performance improvement.
For an additional slight performance improvement, we use
`taskq_dispatch_ent()` instead of `taskq_dispatch()`, which avoids a
`kmem_alloc()` and eliminates a failure mode. This applies to all
writes (sync and async), reads, and discard operations.
== Performance results ==
We used a zvol as an iscsi target (server) for a Windows initiator
(client), with a single connection (the default - i.e. not MC/S).
We used `diskspd` to generate a workload with 4 threads, doing 1MB
writes to random offsets in the zvol. Without this change we get
231MB/s, and with the change we get 728MB/s, which is 3.15x the original
performance.
We ran a real-world workload, restoring a MSSQL database, and saw
throughput 2.5x the original.
We saw more modest performance wins (typically 1.5x-2x) when using MC/S
with 4 connections, and with different number of client threads (1, 8,
32).
Reviewed-by: Tony Nguyen <tony.nguyen@delphix.com>
Reviewed-by: Pavel Zakharov <pavel.zakharov@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
Closes #10163
2020-03-31 20:50:44 +03:00
|
|
|
*
|
|
|
|
|
* This design allows the calling thread to continue and
|
|
|
|
|
* initiate more concurrent operations by calling
|
|
|
|
|
* zvol_request() again. There are typically only a small
|
|
|
|
|
* number of threads available to call zvol_request() (e.g.
|
|
|
|
|
* one per iSCSI target), so keeping the latency of
|
|
|
|
|
* zvol_request() low is important for performance.
|
|
|
|
|
*
|
|
|
|
|
* The zvol_request_sync module parameter allows this
|
|
|
|
|
* behavior to be altered, for performance evaluation
|
|
|
|
|
* purposes. If the callback blocks, setting
|
|
|
|
|
* zvol_request_sync=1 will result in much worse performance.
|
|
|
|
|
*
|
|
|
|
|
* We can have up to zvol_threads concurrent i/o's being
|
|
|
|
|
* processed for all zvols on the system. This is typically
|
|
|
|
|
* a vast improvement over the zvol_request_sync=1 behavior
|
|
|
|
|
* of one i/o at a time per zvol. However, an even better
|
|
|
|
|
* design would be for zvol_request() to initiate the zio
|
|
|
|
|
* directly, and then be notified by the zio_done callback,
|
2022-06-09 17:10:38 +03:00
|
|
|
* which would call END_IO(). Unfortunately, the DMU/ZIL
|
Improve ZVOL sync write performance by using a taskq
== Summary ==
Prior to this change, sync writes to a zvol are processed serially.
This commit makes zvols process concurrently outstanding sync writes in
parallel, similar to how reads and async writes are already handled.
The result is that the throughput of sync writes is tripled.
== Background ==
When a write comes in for a zvol (e.g. over iscsi), it is processed by
calling `zvol_request()` to initiate the operation. ZFS is expected to
later call `BIO_END_IO()` when the operation completes (possibly from a
different thread). There are a limited number of threads that are
available to call `zvol_request()` - one one per iscsi client (unless
using MC/S). Therefore, to ensure good performance, the latency of
`zvol_request()` is important, so that many i/o operations to the zvol
can be processed concurrently. In other words, if the client has
multiple outstanding requests to the zvol, the zvol should have multiple
outstanding requests to the storage hardware (i.e. issue multiple
concurrent `zio_t`'s).
For reads, and async writes (i.e. writes which can be acknowledged
before the data reaches stable storage), `zvol_request()` achieves low
latency by dispatching the bulk of the work (including waiting for i/o
to disk) to a taskq. The taskq callback (`zvol_read()` or
`zvol_write()`) blocks while waiting for the i/o to disk to complete.
The `zvol_taskq` has 32 threads (by default), so we can have up to 32
concurrent i/os to disk in service of requests to zvols.
However, for sync writes (i.e. writes which must be persisted to stable
storage before they can be acknowledged, by calling `zil_commit()`),
`zvol_request()` does not use `zvol_taskq`. Instead it blocks while
waiting for the ZIL write to disk to complete. This has the effect of
serializing sync writes to each zvol. In other words, each zvol will
only process one sync write at a time, waiting for it to be written to
the ZIL before accepting the next request.
The same issue applies to FLUSH operations, for which `zvol_request()`
calls `zil_commit()` directly.
== Description of change ==
This commit changes `zvol_request()` to use
`taskq_dispatch_ent(zvol_taskq)` for sync writes, and FLUSh operations.
Therefore we can have up to 32 threads (the taskq threads)
simultaneously calling `zil_commit()`, for a theoretical performance
improvement of up to 32x.
To avoid the locking issue described in the comment (which this commit
removes), we acquire the rangelock from the taskq callback (e.g.
`zvol_write()`) rather than from `zvol_request()`. This applies to all
writes (sync and async), reads, and discard operations. This means that
multiple simultaneously-outstanding i/o's which access the same block
can complete in any order. This was previously thought to be incorrect,
but a review of the block device interface requirements revealed that
this is fine - the order is inherently not defined. The shorter hold
time of the rangelock should also have a slight performance improvement.
For an additional slight performance improvement, we use
`taskq_dispatch_ent()` instead of `taskq_dispatch()`, which avoids a
`kmem_alloc()` and eliminates a failure mode. This applies to all
writes (sync and async), reads, and discard operations.
== Performance results ==
We used a zvol as an iscsi target (server) for a Windows initiator
(client), with a single connection (the default - i.e. not MC/S).
We used `diskspd` to generate a workload with 4 threads, doing 1MB
writes to random offsets in the zvol. Without this change we get
231MB/s, and with the change we get 728MB/s, which is 3.15x the original
performance.
We ran a real-world workload, restoring a MSSQL database, and saw
throughput 2.5x the original.
We saw more modest performance wins (typically 1.5x-2x) when using MC/S
with 4 connections, and with different number of client threads (1, 8,
32).
Reviewed-by: Tony Nguyen <tony.nguyen@delphix.com>
Reviewed-by: Pavel Zakharov <pavel.zakharov@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
Closes #10163
2020-03-31 20:50:44 +03:00
|
|
|
* interfaces lack this functionality (they block waiting for
|
|
|
|
|
* the i/o to complete).
|
2019-09-25 19:20:30 +03:00
|
|
|
*/
|
2022-06-09 17:10:38 +03:00
|
|
|
if (io_is_discard(bio, rq) || io_is_secure_erase(bio, rq)) {
|
|
|
|
|
if (force_sync) {
|
2021-03-03 19:15:28 +03:00
|
|
|
zvol_discard(&zvr);
|
Improve ZVOL sync write performance by using a taskq
== Summary ==
Prior to this change, sync writes to a zvol are processed serially.
This commit makes zvols process concurrently outstanding sync writes in
parallel, similar to how reads and async writes are already handled.
The result is that the throughput of sync writes is tripled.
== Background ==
When a write comes in for a zvol (e.g. over iscsi), it is processed by
calling `zvol_request()` to initiate the operation. ZFS is expected to
later call `BIO_END_IO()` when the operation completes (possibly from a
different thread). There are a limited number of threads that are
available to call `zvol_request()` - one one per iscsi client (unless
using MC/S). Therefore, to ensure good performance, the latency of
`zvol_request()` is important, so that many i/o operations to the zvol
can be processed concurrently. In other words, if the client has
multiple outstanding requests to the zvol, the zvol should have multiple
outstanding requests to the storage hardware (i.e. issue multiple
concurrent `zio_t`'s).
For reads, and async writes (i.e. writes which can be acknowledged
before the data reaches stable storage), `zvol_request()` achieves low
latency by dispatching the bulk of the work (including waiting for i/o
to disk) to a taskq. The taskq callback (`zvol_read()` or
`zvol_write()`) blocks while waiting for the i/o to disk to complete.
The `zvol_taskq` has 32 threads (by default), so we can have up to 32
concurrent i/os to disk in service of requests to zvols.
However, for sync writes (i.e. writes which must be persisted to stable
storage before they can be acknowledged, by calling `zil_commit()`),
`zvol_request()` does not use `zvol_taskq`. Instead it blocks while
waiting for the ZIL write to disk to complete. This has the effect of
serializing sync writes to each zvol. In other words, each zvol will
only process one sync write at a time, waiting for it to be written to
the ZIL before accepting the next request.
The same issue applies to FLUSH operations, for which `zvol_request()`
calls `zil_commit()` directly.
== Description of change ==
This commit changes `zvol_request()` to use
`taskq_dispatch_ent(zvol_taskq)` for sync writes, and FLUSh operations.
Therefore we can have up to 32 threads (the taskq threads)
simultaneously calling `zil_commit()`, for a theoretical performance
improvement of up to 32x.
To avoid the locking issue described in the comment (which this commit
removes), we acquire the rangelock from the taskq callback (e.g.
`zvol_write()`) rather than from `zvol_request()`. This applies to all
writes (sync and async), reads, and discard operations. This means that
multiple simultaneously-outstanding i/o's which access the same block
can complete in any order. This was previously thought to be incorrect,
but a review of the block device interface requirements revealed that
this is fine - the order is inherently not defined. The shorter hold
time of the rangelock should also have a slight performance improvement.
For an additional slight performance improvement, we use
`taskq_dispatch_ent()` instead of `taskq_dispatch()`, which avoids a
`kmem_alloc()` and eliminates a failure mode. This applies to all
writes (sync and async), reads, and discard operations.
== Performance results ==
We used a zvol as an iscsi target (server) for a Windows initiator
(client), with a single connection (the default - i.e. not MC/S).
We used `diskspd` to generate a workload with 4 threads, doing 1MB
writes to random offsets in the zvol. Without this change we get
231MB/s, and with the change we get 728MB/s, which is 3.15x the original
performance.
We ran a real-world workload, restoring a MSSQL database, and saw
throughput 2.5x the original.
We saw more modest performance wins (typically 1.5x-2x) when using MC/S
with 4 connections, and with different number of client threads (1, 8,
32).
Reviewed-by: Tony Nguyen <tony.nguyen@delphix.com>
Reviewed-by: Pavel Zakharov <pavel.zakharov@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
Closes #10163
2020-03-31 20:50:44 +03:00
|
|
|
} else {
|
2021-03-03 19:15:28 +03:00
|
|
|
task = zv_request_task_create(zvr);
|
2024-04-04 04:21:25 +03:00
|
|
|
taskq_dispatch_ent(ztqs->tqs_taskq[tq_idx],
|
2021-03-03 19:15:28 +03:00
|
|
|
zvol_discard_task, task, 0, &task->ent);
|
Improve ZVOL sync write performance by using a taskq
== Summary ==
Prior to this change, sync writes to a zvol are processed serially.
This commit makes zvols process concurrently outstanding sync writes in
parallel, similar to how reads and async writes are already handled.
The result is that the throughput of sync writes is tripled.
== Background ==
When a write comes in for a zvol (e.g. over iscsi), it is processed by
calling `zvol_request()` to initiate the operation. ZFS is expected to
later call `BIO_END_IO()` when the operation completes (possibly from a
different thread). There are a limited number of threads that are
available to call `zvol_request()` - one one per iscsi client (unless
using MC/S). Therefore, to ensure good performance, the latency of
`zvol_request()` is important, so that many i/o operations to the zvol
can be processed concurrently. In other words, if the client has
multiple outstanding requests to the zvol, the zvol should have multiple
outstanding requests to the storage hardware (i.e. issue multiple
concurrent `zio_t`'s).
For reads, and async writes (i.e. writes which can be acknowledged
before the data reaches stable storage), `zvol_request()` achieves low
latency by dispatching the bulk of the work (including waiting for i/o
to disk) to a taskq. The taskq callback (`zvol_read()` or
`zvol_write()`) blocks while waiting for the i/o to disk to complete.
The `zvol_taskq` has 32 threads (by default), so we can have up to 32
concurrent i/os to disk in service of requests to zvols.
However, for sync writes (i.e. writes which must be persisted to stable
storage before they can be acknowledged, by calling `zil_commit()`),
`zvol_request()` does not use `zvol_taskq`. Instead it blocks while
waiting for the ZIL write to disk to complete. This has the effect of
serializing sync writes to each zvol. In other words, each zvol will
only process one sync write at a time, waiting for it to be written to
the ZIL before accepting the next request.
The same issue applies to FLUSH operations, for which `zvol_request()`
calls `zil_commit()` directly.
== Description of change ==
This commit changes `zvol_request()` to use
`taskq_dispatch_ent(zvol_taskq)` for sync writes, and FLUSh operations.
Therefore we can have up to 32 threads (the taskq threads)
simultaneously calling `zil_commit()`, for a theoretical performance
improvement of up to 32x.
To avoid the locking issue described in the comment (which this commit
removes), we acquire the rangelock from the taskq callback (e.g.
`zvol_write()`) rather than from `zvol_request()`. This applies to all
writes (sync and async), reads, and discard operations. This means that
multiple simultaneously-outstanding i/o's which access the same block
can complete in any order. This was previously thought to be incorrect,
but a review of the block device interface requirements revealed that
this is fine - the order is inherently not defined. The shorter hold
time of the rangelock should also have a slight performance improvement.
For an additional slight performance improvement, we use
`taskq_dispatch_ent()` instead of `taskq_dispatch()`, which avoids a
`kmem_alloc()` and eliminates a failure mode. This applies to all
writes (sync and async), reads, and discard operations.
== Performance results ==
We used a zvol as an iscsi target (server) for a Windows initiator
(client), with a single connection (the default - i.e. not MC/S).
We used `diskspd` to generate a workload with 4 threads, doing 1MB
writes to random offsets in the zvol. Without this change we get
231MB/s, and with the change we get 728MB/s, which is 3.15x the original
performance.
We ran a real-world workload, restoring a MSSQL database, and saw
throughput 2.5x the original.
We saw more modest performance wins (typically 1.5x-2x) when using MC/S
with 4 connections, and with different number of client threads (1, 8,
32).
Reviewed-by: Tony Nguyen <tony.nguyen@delphix.com>
Reviewed-by: Pavel Zakharov <pavel.zakharov@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
Closes #10163
2020-03-31 20:50:44 +03:00
|
|
|
}
|
2019-09-25 19:20:30 +03:00
|
|
|
} else {
|
2022-06-09 17:10:38 +03:00
|
|
|
if (force_sync) {
|
2021-03-03 19:15:28 +03:00
|
|
|
zvol_write(&zvr);
|
Improve ZVOL sync write performance by using a taskq
== Summary ==
Prior to this change, sync writes to a zvol are processed serially.
This commit makes zvols process concurrently outstanding sync writes in
parallel, similar to how reads and async writes are already handled.
The result is that the throughput of sync writes is tripled.
== Background ==
When a write comes in for a zvol (e.g. over iscsi), it is processed by
calling `zvol_request()` to initiate the operation. ZFS is expected to
later call `BIO_END_IO()` when the operation completes (possibly from a
different thread). There are a limited number of threads that are
available to call `zvol_request()` - one one per iscsi client (unless
using MC/S). Therefore, to ensure good performance, the latency of
`zvol_request()` is important, so that many i/o operations to the zvol
can be processed concurrently. In other words, if the client has
multiple outstanding requests to the zvol, the zvol should have multiple
outstanding requests to the storage hardware (i.e. issue multiple
concurrent `zio_t`'s).
For reads, and async writes (i.e. writes which can be acknowledged
before the data reaches stable storage), `zvol_request()` achieves low
latency by dispatching the bulk of the work (including waiting for i/o
to disk) to a taskq. The taskq callback (`zvol_read()` or
`zvol_write()`) blocks while waiting for the i/o to disk to complete.
The `zvol_taskq` has 32 threads (by default), so we can have up to 32
concurrent i/os to disk in service of requests to zvols.
However, for sync writes (i.e. writes which must be persisted to stable
storage before they can be acknowledged, by calling `zil_commit()`),
`zvol_request()` does not use `zvol_taskq`. Instead it blocks while
waiting for the ZIL write to disk to complete. This has the effect of
serializing sync writes to each zvol. In other words, each zvol will
only process one sync write at a time, waiting for it to be written to
the ZIL before accepting the next request.
The same issue applies to FLUSH operations, for which `zvol_request()`
calls `zil_commit()` directly.
== Description of change ==
This commit changes `zvol_request()` to use
`taskq_dispatch_ent(zvol_taskq)` for sync writes, and FLUSh operations.
Therefore we can have up to 32 threads (the taskq threads)
simultaneously calling `zil_commit()`, for a theoretical performance
improvement of up to 32x.
To avoid the locking issue described in the comment (which this commit
removes), we acquire the rangelock from the taskq callback (e.g.
`zvol_write()`) rather than from `zvol_request()`. This applies to all
writes (sync and async), reads, and discard operations. This means that
multiple simultaneously-outstanding i/o's which access the same block
can complete in any order. This was previously thought to be incorrect,
but a review of the block device interface requirements revealed that
this is fine - the order is inherently not defined. The shorter hold
time of the rangelock should also have a slight performance improvement.
For an additional slight performance improvement, we use
`taskq_dispatch_ent()` instead of `taskq_dispatch()`, which avoids a
`kmem_alloc()` and eliminates a failure mode. This applies to all
writes (sync and async), reads, and discard operations.
== Performance results ==
We used a zvol as an iscsi target (server) for a Windows initiator
(client), with a single connection (the default - i.e. not MC/S).
We used `diskspd` to generate a workload with 4 threads, doing 1MB
writes to random offsets in the zvol. Without this change we get
231MB/s, and with the change we get 728MB/s, which is 3.15x the original
performance.
We ran a real-world workload, restoring a MSSQL database, and saw
throughput 2.5x the original.
We saw more modest performance wins (typically 1.5x-2x) when using MC/S
with 4 connections, and with different number of client threads (1, 8,
32).
Reviewed-by: Tony Nguyen <tony.nguyen@delphix.com>
Reviewed-by: Pavel Zakharov <pavel.zakharov@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
Closes #10163
2020-03-31 20:50:44 +03:00
|
|
|
} else {
|
2021-03-03 19:15:28 +03:00
|
|
|
task = zv_request_task_create(zvr);
|
2024-04-04 04:21:25 +03:00
|
|
|
taskq_dispatch_ent(ztqs->tqs_taskq[tq_idx],
|
2021-03-03 19:15:28 +03:00
|
|
|
zvol_write_task, task, 0, &task->ent);
|
Improve ZVOL sync write performance by using a taskq
== Summary ==
Prior to this change, sync writes to a zvol are processed serially.
This commit makes zvols process concurrently outstanding sync writes in
parallel, similar to how reads and async writes are already handled.
The result is that the throughput of sync writes is tripled.
== Background ==
When a write comes in for a zvol (e.g. over iscsi), it is processed by
calling `zvol_request()` to initiate the operation. ZFS is expected to
later call `BIO_END_IO()` when the operation completes (possibly from a
different thread). There are a limited number of threads that are
available to call `zvol_request()` - one one per iscsi client (unless
using MC/S). Therefore, to ensure good performance, the latency of
`zvol_request()` is important, so that many i/o operations to the zvol
can be processed concurrently. In other words, if the client has
multiple outstanding requests to the zvol, the zvol should have multiple
outstanding requests to the storage hardware (i.e. issue multiple
concurrent `zio_t`'s).
For reads, and async writes (i.e. writes which can be acknowledged
before the data reaches stable storage), `zvol_request()` achieves low
latency by dispatching the bulk of the work (including waiting for i/o
to disk) to a taskq. The taskq callback (`zvol_read()` or
`zvol_write()`) blocks while waiting for the i/o to disk to complete.
The `zvol_taskq` has 32 threads (by default), so we can have up to 32
concurrent i/os to disk in service of requests to zvols.
However, for sync writes (i.e. writes which must be persisted to stable
storage before they can be acknowledged, by calling `zil_commit()`),
`zvol_request()` does not use `zvol_taskq`. Instead it blocks while
waiting for the ZIL write to disk to complete. This has the effect of
serializing sync writes to each zvol. In other words, each zvol will
only process one sync write at a time, waiting for it to be written to
the ZIL before accepting the next request.
The same issue applies to FLUSH operations, for which `zvol_request()`
calls `zil_commit()` directly.
== Description of change ==
This commit changes `zvol_request()` to use
`taskq_dispatch_ent(zvol_taskq)` for sync writes, and FLUSh operations.
Therefore we can have up to 32 threads (the taskq threads)
simultaneously calling `zil_commit()`, for a theoretical performance
improvement of up to 32x.
To avoid the locking issue described in the comment (which this commit
removes), we acquire the rangelock from the taskq callback (e.g.
`zvol_write()`) rather than from `zvol_request()`. This applies to all
writes (sync and async), reads, and discard operations. This means that
multiple simultaneously-outstanding i/o's which access the same block
can complete in any order. This was previously thought to be incorrect,
but a review of the block device interface requirements revealed that
this is fine - the order is inherently not defined. The shorter hold
time of the rangelock should also have a slight performance improvement.
For an additional slight performance improvement, we use
`taskq_dispatch_ent()` instead of `taskq_dispatch()`, which avoids a
`kmem_alloc()` and eliminates a failure mode. This applies to all
writes (sync and async), reads, and discard operations.
== Performance results ==
We used a zvol as an iscsi target (server) for a Windows initiator
(client), with a single connection (the default - i.e. not MC/S).
We used `diskspd` to generate a workload with 4 threads, doing 1MB
writes to random offsets in the zvol. Without this change we get
231MB/s, and with the change we get 728MB/s, which is 3.15x the original
performance.
We ran a real-world workload, restoring a MSSQL database, and saw
throughput 2.5x the original.
We saw more modest performance wins (typically 1.5x-2x) when using MC/S
with 4 connections, and with different number of client threads (1, 8,
32).
Reviewed-by: Tony Nguyen <tony.nguyen@delphix.com>
Reviewed-by: Pavel Zakharov <pavel.zakharov@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
Closes #10163
2020-03-31 20:50:44 +03:00
|
|
|
}
|
2019-09-25 19:20:30 +03:00
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
/*
|
|
|
|
|
* The SCST driver, and possibly others, may issue READ I/Os
|
|
|
|
|
* with a length of zero bytes. These empty I/Os contain no
|
|
|
|
|
* data and require no additional handling.
|
|
|
|
|
*/
|
|
|
|
|
if (size == 0) {
|
2024-08-24 14:37:30 +03:00
|
|
|
zvol_end_io(bio, rq, 0);
|
2019-09-25 19:20:30 +03:00
|
|
|
goto out;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
rw_enter(&zv->zv_suspend_lock, RW_READER);
|
|
|
|
|
|
Improve ZVOL sync write performance by using a taskq
== Summary ==
Prior to this change, sync writes to a zvol are processed serially.
This commit makes zvols process concurrently outstanding sync writes in
parallel, similar to how reads and async writes are already handled.
The result is that the throughput of sync writes is tripled.
== Background ==
When a write comes in for a zvol (e.g. over iscsi), it is processed by
calling `zvol_request()` to initiate the operation. ZFS is expected to
later call `BIO_END_IO()` when the operation completes (possibly from a
different thread). There are a limited number of threads that are
available to call `zvol_request()` - one one per iscsi client (unless
using MC/S). Therefore, to ensure good performance, the latency of
`zvol_request()` is important, so that many i/o operations to the zvol
can be processed concurrently. In other words, if the client has
multiple outstanding requests to the zvol, the zvol should have multiple
outstanding requests to the storage hardware (i.e. issue multiple
concurrent `zio_t`'s).
For reads, and async writes (i.e. writes which can be acknowledged
before the data reaches stable storage), `zvol_request()` achieves low
latency by dispatching the bulk of the work (including waiting for i/o
to disk) to a taskq. The taskq callback (`zvol_read()` or
`zvol_write()`) blocks while waiting for the i/o to disk to complete.
The `zvol_taskq` has 32 threads (by default), so we can have up to 32
concurrent i/os to disk in service of requests to zvols.
However, for sync writes (i.e. writes which must be persisted to stable
storage before they can be acknowledged, by calling `zil_commit()`),
`zvol_request()` does not use `zvol_taskq`. Instead it blocks while
waiting for the ZIL write to disk to complete. This has the effect of
serializing sync writes to each zvol. In other words, each zvol will
only process one sync write at a time, waiting for it to be written to
the ZIL before accepting the next request.
The same issue applies to FLUSH operations, for which `zvol_request()`
calls `zil_commit()` directly.
== Description of change ==
This commit changes `zvol_request()` to use
`taskq_dispatch_ent(zvol_taskq)` for sync writes, and FLUSh operations.
Therefore we can have up to 32 threads (the taskq threads)
simultaneously calling `zil_commit()`, for a theoretical performance
improvement of up to 32x.
To avoid the locking issue described in the comment (which this commit
removes), we acquire the rangelock from the taskq callback (e.g.
`zvol_write()`) rather than from `zvol_request()`. This applies to all
writes (sync and async), reads, and discard operations. This means that
multiple simultaneously-outstanding i/o's which access the same block
can complete in any order. This was previously thought to be incorrect,
but a review of the block device interface requirements revealed that
this is fine - the order is inherently not defined. The shorter hold
time of the rangelock should also have a slight performance improvement.
For an additional slight performance improvement, we use
`taskq_dispatch_ent()` instead of `taskq_dispatch()`, which avoids a
`kmem_alloc()` and eliminates a failure mode. This applies to all
writes (sync and async), reads, and discard operations.
== Performance results ==
We used a zvol as an iscsi target (server) for a Windows initiator
(client), with a single connection (the default - i.e. not MC/S).
We used `diskspd` to generate a workload with 4 threads, doing 1MB
writes to random offsets in the zvol. Without this change we get
231MB/s, and with the change we get 728MB/s, which is 3.15x the original
performance.
We ran a real-world workload, restoring a MSSQL database, and saw
throughput 2.5x the original.
We saw more modest performance wins (typically 1.5x-2x) when using MC/S
with 4 connections, and with different number of client threads (1, 8,
32).
Reviewed-by: Tony Nguyen <tony.nguyen@delphix.com>
Reviewed-by: Pavel Zakharov <pavel.zakharov@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
Closes #10163
2020-03-31 20:50:44 +03:00
|
|
|
/* See comment in WRITE case above. */
|
2022-06-09 17:10:38 +03:00
|
|
|
if (force_sync) {
|
2021-03-03 19:15:28 +03:00
|
|
|
zvol_read(&zvr);
|
Improve ZVOL sync write performance by using a taskq
== Summary ==
Prior to this change, sync writes to a zvol are processed serially.
This commit makes zvols process concurrently outstanding sync writes in
parallel, similar to how reads and async writes are already handled.
The result is that the throughput of sync writes is tripled.
== Background ==
When a write comes in for a zvol (e.g. over iscsi), it is processed by
calling `zvol_request()` to initiate the operation. ZFS is expected to
later call `BIO_END_IO()` when the operation completes (possibly from a
different thread). There are a limited number of threads that are
available to call `zvol_request()` - one one per iscsi client (unless
using MC/S). Therefore, to ensure good performance, the latency of
`zvol_request()` is important, so that many i/o operations to the zvol
can be processed concurrently. In other words, if the client has
multiple outstanding requests to the zvol, the zvol should have multiple
outstanding requests to the storage hardware (i.e. issue multiple
concurrent `zio_t`'s).
For reads, and async writes (i.e. writes which can be acknowledged
before the data reaches stable storage), `zvol_request()` achieves low
latency by dispatching the bulk of the work (including waiting for i/o
to disk) to a taskq. The taskq callback (`zvol_read()` or
`zvol_write()`) blocks while waiting for the i/o to disk to complete.
The `zvol_taskq` has 32 threads (by default), so we can have up to 32
concurrent i/os to disk in service of requests to zvols.
However, for sync writes (i.e. writes which must be persisted to stable
storage before they can be acknowledged, by calling `zil_commit()`),
`zvol_request()` does not use `zvol_taskq`. Instead it blocks while
waiting for the ZIL write to disk to complete. This has the effect of
serializing sync writes to each zvol. In other words, each zvol will
only process one sync write at a time, waiting for it to be written to
the ZIL before accepting the next request.
The same issue applies to FLUSH operations, for which `zvol_request()`
calls `zil_commit()` directly.
== Description of change ==
This commit changes `zvol_request()` to use
`taskq_dispatch_ent(zvol_taskq)` for sync writes, and FLUSh operations.
Therefore we can have up to 32 threads (the taskq threads)
simultaneously calling `zil_commit()`, for a theoretical performance
improvement of up to 32x.
To avoid the locking issue described in the comment (which this commit
removes), we acquire the rangelock from the taskq callback (e.g.
`zvol_write()`) rather than from `zvol_request()`. This applies to all
writes (sync and async), reads, and discard operations. This means that
multiple simultaneously-outstanding i/o's which access the same block
can complete in any order. This was previously thought to be incorrect,
but a review of the block device interface requirements revealed that
this is fine - the order is inherently not defined. The shorter hold
time of the rangelock should also have a slight performance improvement.
For an additional slight performance improvement, we use
`taskq_dispatch_ent()` instead of `taskq_dispatch()`, which avoids a
`kmem_alloc()` and eliminates a failure mode. This applies to all
writes (sync and async), reads, and discard operations.
== Performance results ==
We used a zvol as an iscsi target (server) for a Windows initiator
(client), with a single connection (the default - i.e. not MC/S).
We used `diskspd` to generate a workload with 4 threads, doing 1MB
writes to random offsets in the zvol. Without this change we get
231MB/s, and with the change we get 728MB/s, which is 3.15x the original
performance.
We ran a real-world workload, restoring a MSSQL database, and saw
throughput 2.5x the original.
We saw more modest performance wins (typically 1.5x-2x) when using MC/S
with 4 connections, and with different number of client threads (1, 8,
32).
Reviewed-by: Tony Nguyen <tony.nguyen@delphix.com>
Reviewed-by: Pavel Zakharov <pavel.zakharov@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
Closes #10163
2020-03-31 20:50:44 +03:00
|
|
|
} else {
|
2021-03-03 19:15:28 +03:00
|
|
|
task = zv_request_task_create(zvr);
|
2024-04-04 04:21:25 +03:00
|
|
|
taskq_dispatch_ent(ztqs->tqs_taskq[tq_idx],
|
2021-03-03 19:15:28 +03:00
|
|
|
zvol_read_task, task, 0, &task->ent);
|
Improve ZVOL sync write performance by using a taskq
== Summary ==
Prior to this change, sync writes to a zvol are processed serially.
This commit makes zvols process concurrently outstanding sync writes in
parallel, similar to how reads and async writes are already handled.
The result is that the throughput of sync writes is tripled.
== Background ==
When a write comes in for a zvol (e.g. over iscsi), it is processed by
calling `zvol_request()` to initiate the operation. ZFS is expected to
later call `BIO_END_IO()` when the operation completes (possibly from a
different thread). There are a limited number of threads that are
available to call `zvol_request()` - one one per iscsi client (unless
using MC/S). Therefore, to ensure good performance, the latency of
`zvol_request()` is important, so that many i/o operations to the zvol
can be processed concurrently. In other words, if the client has
multiple outstanding requests to the zvol, the zvol should have multiple
outstanding requests to the storage hardware (i.e. issue multiple
concurrent `zio_t`'s).
For reads, and async writes (i.e. writes which can be acknowledged
before the data reaches stable storage), `zvol_request()` achieves low
latency by dispatching the bulk of the work (including waiting for i/o
to disk) to a taskq. The taskq callback (`zvol_read()` or
`zvol_write()`) blocks while waiting for the i/o to disk to complete.
The `zvol_taskq` has 32 threads (by default), so we can have up to 32
concurrent i/os to disk in service of requests to zvols.
However, for sync writes (i.e. writes which must be persisted to stable
storage before they can be acknowledged, by calling `zil_commit()`),
`zvol_request()` does not use `zvol_taskq`. Instead it blocks while
waiting for the ZIL write to disk to complete. This has the effect of
serializing sync writes to each zvol. In other words, each zvol will
only process one sync write at a time, waiting for it to be written to
the ZIL before accepting the next request.
The same issue applies to FLUSH operations, for which `zvol_request()`
calls `zil_commit()` directly.
== Description of change ==
This commit changes `zvol_request()` to use
`taskq_dispatch_ent(zvol_taskq)` for sync writes, and FLUSh operations.
Therefore we can have up to 32 threads (the taskq threads)
simultaneously calling `zil_commit()`, for a theoretical performance
improvement of up to 32x.
To avoid the locking issue described in the comment (which this commit
removes), we acquire the rangelock from the taskq callback (e.g.
`zvol_write()`) rather than from `zvol_request()`. This applies to all
writes (sync and async), reads, and discard operations. This means that
multiple simultaneously-outstanding i/o's which access the same block
can complete in any order. This was previously thought to be incorrect,
but a review of the block device interface requirements revealed that
this is fine - the order is inherently not defined. The shorter hold
time of the rangelock should also have a slight performance improvement.
For an additional slight performance improvement, we use
`taskq_dispatch_ent()` instead of `taskq_dispatch()`, which avoids a
`kmem_alloc()` and eliminates a failure mode. This applies to all
writes (sync and async), reads, and discard operations.
== Performance results ==
We used a zvol as an iscsi target (server) for a Windows initiator
(client), with a single connection (the default - i.e. not MC/S).
We used `diskspd` to generate a workload with 4 threads, doing 1MB
writes to random offsets in the zvol. Without this change we get
231MB/s, and with the change we get 728MB/s, which is 3.15x the original
performance.
We ran a real-world workload, restoring a MSSQL database, and saw
throughput 2.5x the original.
We saw more modest performance wins (typically 1.5x-2x) when using MC/S
with 4 connections, and with different number of client threads (1, 8,
32).
Reviewed-by: Tony Nguyen <tony.nguyen@delphix.com>
Reviewed-by: Pavel Zakharov <pavel.zakharov@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
Closes #10163
2020-03-31 20:50:44 +03:00
|
|
|
}
|
2019-09-25 19:20:30 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
out:
|
|
|
|
|
spl_fstrans_unmark(cookie);
|
2022-06-09 17:10:38 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
|
|
|
|
|
#ifdef HAVE_BDEV_SUBMIT_BIO_RETURNS_VOID
|
|
|
|
|
static void
|
|
|
|
|
zvol_submit_bio(struct bio *bio)
|
|
|
|
|
#else
|
|
|
|
|
static blk_qc_t
|
|
|
|
|
zvol_submit_bio(struct bio *bio)
|
|
|
|
|
#endif
|
|
|
|
|
#else
|
|
|
|
|
static MAKE_REQUEST_FN_RET
|
|
|
|
|
zvol_request(struct request_queue *q, struct bio *bio)
|
|
|
|
|
#endif
|
|
|
|
|
{
|
|
|
|
|
#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
|
|
|
|
|
#if defined(HAVE_BIO_BDEV_DISK)
|
|
|
|
|
struct request_queue *q = bio->bi_bdev->bd_disk->queue;
|
|
|
|
|
#else
|
|
|
|
|
struct request_queue *q = bio->bi_disk->queue;
|
|
|
|
|
#endif
|
|
|
|
|
#endif
|
|
|
|
|
zvol_state_t *zv = q->queuedata;
|
|
|
|
|
|
|
|
|
|
zvol_request_impl(zv, bio, NULL, 0);
|
|
|
|
|
#if defined(HAVE_MAKE_REQUEST_FN_RET_QC) || \
|
|
|
|
|
defined(HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS) && \
|
2021-12-03 06:54:05 +03:00
|
|
|
!defined(HAVE_BDEV_SUBMIT_BIO_RETURNS_VOID)
|
2019-09-25 19:20:30 +03:00
|
|
|
return (BLK_QC_T_NONE);
|
|
|
|
|
#endif
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
2023-08-01 18:37:20 +03:00
|
|
|
#ifdef HAVE_BLK_MODE_T
|
|
|
|
|
zvol_open(struct gendisk *disk, blk_mode_t flag)
|
|
|
|
|
#else
|
2019-09-25 19:20:30 +03:00
|
|
|
zvol_open(struct block_device *bdev, fmode_t flag)
|
2023-08-01 18:37:20 +03:00
|
|
|
#endif
|
2019-09-25 19:20:30 +03:00
|
|
|
{
|
|
|
|
|
zvol_state_t *zv;
|
|
|
|
|
int error = 0;
|
2021-12-17 20:52:13 +03:00
|
|
|
boolean_t drop_suspend = B_FALSE;
|
2021-12-02 03:07:12 +03:00
|
|
|
#ifndef HAVE_BLKDEV_GET_ERESTARTSYS
|
|
|
|
|
hrtime_t timeout = MSEC2NSEC(zvol_open_timeout_ms);
|
|
|
|
|
hrtime_t start = gethrtime();
|
2019-09-25 19:20:30 +03:00
|
|
|
|
2021-12-02 03:07:12 +03:00
|
|
|
retry:
|
|
|
|
|
#endif
|
zvol: stop using zvol_state_lock to protect OS-side private data
zvol_state_lock is intended to protect access to the global name->zvol
lists (zvol_find_by_name()), but has also been used to control access to
OS-side private data, accessed through whatever kernel object is used to
represent the volume (gendisk, geom, etc).
This appears to have been necessary to some degree because the OS-side
object is what's used to get a handle on zvol_state_t, so zv_state_lock
and zv_suspend_lock can't be used to manage access, but also, with the
private object and the zvol_state_t being shutdown and destroyed at the
same time in zvol_os_free(), we must ensure that the private object
pointer only ever corresponds to a real zvol_state_t, not one in partial
destruction. Taking the global lock seems like a convenient way to
ensure this.
The problem with this is that zvol_state_lock does not actually protect
access to the zvol_state_t internals, so we need to take zv_state_lock
and/or zv_suspend_lock. If those are contended, this can then cause
OS-side operations (eg zvol_open()) to sleep to wait for them while hold
zvol_state_lock. This then blocks out all other OS-side operations which
want to get the private data, and any ZFS-side control operations that
would take the write half of the lock. It's even worse if ZFS-side
operations induce OS-side calls back into the zvol (eg creating a zvol
triggers a partition probe inside the kernel, and also a userspace
access from udev to set up device links). And it gets even works again
if anything decides to defer those ops to a task and wait on them, which
zvol_remove_minors_impl() will do under high load.
However, since the previous commit, we have a guarantee that the private
data pointer will always be NULL'd out in zvol_os_remove_minor()
_before_ the zvol_state_t is made invalid, but it won't happen until all
users are ejected. So, if we make access to the private object pointer
atomic, we remove the need to take a global lockout to access it, and so
we can remove all acquisitions of zvol_state_lock from the OS side.
While here, I've rewritten much of the locking theory comment at the top
of zvol.c. It wasn't wrong, but it hadn't been followed exactly, so I've
tried to describe the purpose of each lock in a little more detail, and
in particular describe where it should and shouldn't be used.
Sponsored-by: Klara, Inc.
Sponsored-by: Railway Corporation
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Fedor Uporov <fuporov.vstack@gmail.com>
Signed-off-by: Rob Norris <rob.norris@klarasystems.com>
Closes #17625
2025-08-05 07:19:24 +03:00
|
|
|
|
2023-08-01 18:37:20 +03:00
|
|
|
#ifdef HAVE_BLK_MODE_T
|
zvol: stop using zvol_state_lock to protect OS-side private data
zvol_state_lock is intended to protect access to the global name->zvol
lists (zvol_find_by_name()), but has also been used to control access to
OS-side private data, accessed through whatever kernel object is used to
represent the volume (gendisk, geom, etc).
This appears to have been necessary to some degree because the OS-side
object is what's used to get a handle on zvol_state_t, so zv_state_lock
and zv_suspend_lock can't be used to manage access, but also, with the
private object and the zvol_state_t being shutdown and destroyed at the
same time in zvol_os_free(), we must ensure that the private object
pointer only ever corresponds to a real zvol_state_t, not one in partial
destruction. Taking the global lock seems like a convenient way to
ensure this.
The problem with this is that zvol_state_lock does not actually protect
access to the zvol_state_t internals, so we need to take zv_state_lock
and/or zv_suspend_lock. If those are contended, this can then cause
OS-side operations (eg zvol_open()) to sleep to wait for them while hold
zvol_state_lock. This then blocks out all other OS-side operations which
want to get the private data, and any ZFS-side control operations that
would take the write half of the lock. It's even worse if ZFS-side
operations induce OS-side calls back into the zvol (eg creating a zvol
triggers a partition probe inside the kernel, and also a userspace
access from udev to set up device links). And it gets even works again
if anything decides to defer those ops to a task and wait on them, which
zvol_remove_minors_impl() will do under high load.
However, since the previous commit, we have a guarantee that the private
data pointer will always be NULL'd out in zvol_os_remove_minor()
_before_ the zvol_state_t is made invalid, but it won't happen until all
users are ejected. So, if we make access to the private object pointer
atomic, we remove the need to take a global lockout to access it, and so
we can remove all acquisitions of zvol_state_lock from the OS side.
While here, I've rewritten much of the locking theory comment at the top
of zvol.c. It wasn't wrong, but it hadn't been followed exactly, so I've
tried to describe the purpose of each lock in a little more detail, and
in particular describe where it should and shouldn't be used.
Sponsored-by: Klara, Inc.
Sponsored-by: Railway Corporation
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Fedor Uporov <fuporov.vstack@gmail.com>
Signed-off-by: Rob Norris <rob.norris@klarasystems.com>
Closes #17625
2025-08-05 07:19:24 +03:00
|
|
|
zv = atomic_load_ptr(&disk->private_data);
|
2023-08-01 18:37:20 +03:00
|
|
|
#else
|
zvol: stop using zvol_state_lock to protect OS-side private data
zvol_state_lock is intended to protect access to the global name->zvol
lists (zvol_find_by_name()), but has also been used to control access to
OS-side private data, accessed through whatever kernel object is used to
represent the volume (gendisk, geom, etc).
This appears to have been necessary to some degree because the OS-side
object is what's used to get a handle on zvol_state_t, so zv_state_lock
and zv_suspend_lock can't be used to manage access, but also, with the
private object and the zvol_state_t being shutdown and destroyed at the
same time in zvol_os_free(), we must ensure that the private object
pointer only ever corresponds to a real zvol_state_t, not one in partial
destruction. Taking the global lock seems like a convenient way to
ensure this.
The problem with this is that zvol_state_lock does not actually protect
access to the zvol_state_t internals, so we need to take zv_state_lock
and/or zv_suspend_lock. If those are contended, this can then cause
OS-side operations (eg zvol_open()) to sleep to wait for them while hold
zvol_state_lock. This then blocks out all other OS-side operations which
want to get the private data, and any ZFS-side control operations that
would take the write half of the lock. It's even worse if ZFS-side
operations induce OS-side calls back into the zvol (eg creating a zvol
triggers a partition probe inside the kernel, and also a userspace
access from udev to set up device links). And it gets even works again
if anything decides to defer those ops to a task and wait on them, which
zvol_remove_minors_impl() will do under high load.
However, since the previous commit, we have a guarantee that the private
data pointer will always be NULL'd out in zvol_os_remove_minor()
_before_ the zvol_state_t is made invalid, but it won't happen until all
users are ejected. So, if we make access to the private object pointer
atomic, we remove the need to take a global lockout to access it, and so
we can remove all acquisitions of zvol_state_lock from the OS side.
While here, I've rewritten much of the locking theory comment at the top
of zvol.c. It wasn't wrong, but it hadn't been followed exactly, so I've
tried to describe the purpose of each lock in a little more detail, and
in particular describe where it should and shouldn't be used.
Sponsored-by: Klara, Inc.
Sponsored-by: Railway Corporation
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Fedor Uporov <fuporov.vstack@gmail.com>
Signed-off-by: Rob Norris <rob.norris@klarasystems.com>
Closes #17625
2025-08-05 07:19:24 +03:00
|
|
|
zv = atomic_load_ptr(&bdev->bd_disk->private_data);
|
2023-08-01 18:37:20 +03:00
|
|
|
#endif
|
2019-09-25 19:20:30 +03:00
|
|
|
if (zv == NULL) {
|
2024-07-18 06:13:44 +03:00
|
|
|
return (-SET_ERROR(ENXIO));
|
2019-09-25 19:20:30 +03:00
|
|
|
}
|
|
|
|
|
|
2021-12-17 20:52:13 +03:00
|
|
|
mutex_enter(&zv->zv_state_lock);
|
zvol: ensure device minors are properly cleaned up
Currently, if a minor is in use when we try to remove it, we'll skip it
and never come back to it again. Since the zvol state is hung off the
minor in the kernel, this can get us into weird situations if something
tries to use it after the removal fails. It's even worse at pool export,
as there's now a vestigial zvol state with no pool under it. It's
weirder again if the pool is subsequently reimported, as the zvol code
(reasonably) assumes the zvol state has been properly setup, when it's
actually left over from the previous import of the pool.
This commit attempts to tackle that by setting a flag on the zvol if its
minor can't be removed, and then checking that flag when a request is
made and rejecting it, thus stopping new work coming in.
The flag also causes a condvar to be signaled when the last client
finishes. For the case where a single minor is being removed (eg
changing volmode), it will wait for this signal before proceeding.
Meanwhile, when removing all minors, a background task is created for
each minor that couldn't be removed on the spot, and those tasks then
wake and clean up.
Since any new tasks are queued on to the pool's spa_zvol_taskq,
spa_export_common() will continue to wait at export until all minors are
removed.
Reviewed-by: Tony Hutter <hutter2@llnl.gov>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Sponsored-by: Klara, Inc.
Sponsored-by: Wasabi Technology, Inc.
Signed-off-by: Rob Norris <rob.norris@klarasystems.com>
Closes #14872
Closes #16364
2024-07-18 06:24:05 +03:00
|
|
|
if (unlikely(zv->zv_flags & ZVOL_REMOVING)) {
|
|
|
|
|
mutex_exit(&zv->zv_state_lock);
|
|
|
|
|
return (-SET_ERROR(ENXIO));
|
|
|
|
|
}
|
|
|
|
|
|
2021-12-17 20:52:13 +03:00
|
|
|
/*
|
|
|
|
|
* Make sure zvol is not suspended during first open
|
|
|
|
|
* (hold zv_suspend_lock) and respect proper lock acquisition
|
|
|
|
|
* ordering - zv_suspend_lock before zv_state_lock
|
|
|
|
|
*/
|
|
|
|
|
if (zv->zv_open_count == 0) {
|
|
|
|
|
if (!rw_tryenter(&zv->zv_suspend_lock, RW_READER)) {
|
|
|
|
|
mutex_exit(&zv->zv_state_lock);
|
zvol: stop using zvol_state_lock to protect OS-side private data
zvol_state_lock is intended to protect access to the global name->zvol
lists (zvol_find_by_name()), but has also been used to control access to
OS-side private data, accessed through whatever kernel object is used to
represent the volume (gendisk, geom, etc).
This appears to have been necessary to some degree because the OS-side
object is what's used to get a handle on zvol_state_t, so zv_state_lock
and zv_suspend_lock can't be used to manage access, but also, with the
private object and the zvol_state_t being shutdown and destroyed at the
same time in zvol_os_free(), we must ensure that the private object
pointer only ever corresponds to a real zvol_state_t, not one in partial
destruction. Taking the global lock seems like a convenient way to
ensure this.
The problem with this is that zvol_state_lock does not actually protect
access to the zvol_state_t internals, so we need to take zv_state_lock
and/or zv_suspend_lock. If those are contended, this can then cause
OS-side operations (eg zvol_open()) to sleep to wait for them while hold
zvol_state_lock. This then blocks out all other OS-side operations which
want to get the private data, and any ZFS-side control operations that
would take the write half of the lock. It's even worse if ZFS-side
operations induce OS-side calls back into the zvol (eg creating a zvol
triggers a partition probe inside the kernel, and also a userspace
access from udev to set up device links). And it gets even works again
if anything decides to defer those ops to a task and wait on them, which
zvol_remove_minors_impl() will do under high load.
However, since the previous commit, we have a guarantee that the private
data pointer will always be NULL'd out in zvol_os_remove_minor()
_before_ the zvol_state_t is made invalid, but it won't happen until all
users are ejected. So, if we make access to the private object pointer
atomic, we remove the need to take a global lockout to access it, and so
we can remove all acquisitions of zvol_state_lock from the OS side.
While here, I've rewritten much of the locking theory comment at the top
of zvol.c. It wasn't wrong, but it hadn't been followed exactly, so I've
tried to describe the purpose of each lock in a little more detail, and
in particular describe where it should and shouldn't be used.
Sponsored-by: Klara, Inc.
Sponsored-by: Railway Corporation
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Fedor Uporov <fuporov.vstack@gmail.com>
Signed-off-by: Rob Norris <rob.norris@klarasystems.com>
Closes #17625
2025-08-05 07:19:24 +03:00
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Removal may happen while the locks are down, so
|
|
|
|
|
* we can't trust zv any longer; we have to start over.
|
|
|
|
|
*/
|
|
|
|
|
#ifdef HAVE_BLK_MODE_T
|
|
|
|
|
zv = atomic_load_ptr(&disk->private_data);
|
|
|
|
|
#else
|
|
|
|
|
zv = atomic_load_ptr(&bdev->bd_disk->private_data);
|
|
|
|
|
#endif
|
|
|
|
|
if (zv == NULL)
|
|
|
|
|
return (-SET_ERROR(ENXIO));
|
|
|
|
|
|
2021-12-17 20:52:13 +03:00
|
|
|
rw_enter(&zv->zv_suspend_lock, RW_READER);
|
|
|
|
|
mutex_enter(&zv->zv_state_lock);
|
zvol: stop using zvol_state_lock to protect OS-side private data
zvol_state_lock is intended to protect access to the global name->zvol
lists (zvol_find_by_name()), but has also been used to control access to
OS-side private data, accessed through whatever kernel object is used to
represent the volume (gendisk, geom, etc).
This appears to have been necessary to some degree because the OS-side
object is what's used to get a handle on zvol_state_t, so zv_state_lock
and zv_suspend_lock can't be used to manage access, but also, with the
private object and the zvol_state_t being shutdown and destroyed at the
same time in zvol_os_free(), we must ensure that the private object
pointer only ever corresponds to a real zvol_state_t, not one in partial
destruction. Taking the global lock seems like a convenient way to
ensure this.
The problem with this is that zvol_state_lock does not actually protect
access to the zvol_state_t internals, so we need to take zv_state_lock
and/or zv_suspend_lock. If those are contended, this can then cause
OS-side operations (eg zvol_open()) to sleep to wait for them while hold
zvol_state_lock. This then blocks out all other OS-side operations which
want to get the private data, and any ZFS-side control operations that
would take the write half of the lock. It's even worse if ZFS-side
operations induce OS-side calls back into the zvol (eg creating a zvol
triggers a partition probe inside the kernel, and also a userspace
access from udev to set up device links). And it gets even works again
if anything decides to defer those ops to a task and wait on them, which
zvol_remove_minors_impl() will do under high load.
However, since the previous commit, we have a guarantee that the private
data pointer will always be NULL'd out in zvol_os_remove_minor()
_before_ the zvol_state_t is made invalid, but it won't happen until all
users are ejected. So, if we make access to the private object pointer
atomic, we remove the need to take a global lockout to access it, and so
we can remove all acquisitions of zvol_state_lock from the OS side.
While here, I've rewritten much of the locking theory comment at the top
of zvol.c. It wasn't wrong, but it hadn't been followed exactly, so I've
tried to describe the purpose of each lock in a little more detail, and
in particular describe where it should and shouldn't be used.
Sponsored-by: Klara, Inc.
Sponsored-by: Railway Corporation
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Fedor Uporov <fuporov.vstack@gmail.com>
Signed-off-by: Rob Norris <rob.norris@klarasystems.com>
Closes #17625
2025-08-05 07:19:24 +03:00
|
|
|
|
|
|
|
|
if (unlikely(zv->zv_flags & ZVOL_REMOVING)) {
|
|
|
|
|
mutex_exit(&zv->zv_state_lock);
|
|
|
|
|
rw_exit(&zv->zv_suspend_lock);
|
|
|
|
|
return (-SET_ERROR(ENXIO));
|
|
|
|
|
}
|
|
|
|
|
|
2021-12-17 20:52:13 +03:00
|
|
|
/* check to see if zv_suspend_lock is needed */
|
|
|
|
|
if (zv->zv_open_count != 0) {
|
|
|
|
|
rw_exit(&zv->zv_suspend_lock);
|
|
|
|
|
} else {
|
|
|
|
|
drop_suspend = B_TRUE;
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
drop_suspend = B_TRUE;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
|
|
|
|
|
|
|
|
|
|
if (zv->zv_open_count == 0) {
|
|
|
|
|
boolean_t drop_namespace = B_FALSE;
|
|
|
|
|
|
|
|
|
|
ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
|
|
|
|
|
|
2021-12-02 03:07:12 +03:00
|
|
|
/*
|
|
|
|
|
* In all other call paths the spa_namespace_lock is taken
|
|
|
|
|
* before the bdev->bd_mutex lock. However, on open(2)
|
|
|
|
|
* the __blkdev_get() function calls fops->open() with the
|
|
|
|
|
* bdev->bd_mutex lock held. This can result in a deadlock
|
|
|
|
|
* when zvols from one pool are used as vdevs in another.
|
|
|
|
|
*
|
|
|
|
|
* To prevent a lock inversion deadlock we preemptively
|
|
|
|
|
* take the spa_namespace_lock. Normally the lock will not
|
|
|
|
|
* be contended and this is safe because spa_open_common()
|
|
|
|
|
* handles the case where the caller already holds the
|
|
|
|
|
* spa_namespace_lock.
|
|
|
|
|
*
|
|
|
|
|
* When the lock cannot be aquired after multiple retries
|
|
|
|
|
* this must be the vdev on zvol deadlock case and we have
|
|
|
|
|
* no choice but to return an error. For 5.12 and older
|
|
|
|
|
* kernels returning -ERESTARTSYS will result in the
|
|
|
|
|
* bdev->bd_mutex being dropped, then reacquired, and
|
|
|
|
|
* fops->open() being called again. This process can be
|
|
|
|
|
* repeated safely until both locks are acquired. For 5.13
|
|
|
|
|
* and newer the -ERESTARTSYS retry logic was removed from
|
|
|
|
|
* the kernel so the only option is to return the error for
|
|
|
|
|
* the caller to handle it.
|
|
|
|
|
*/
|
2021-12-17 20:52:13 +03:00
|
|
|
if (!mutex_owned(&spa_namespace_lock)) {
|
|
|
|
|
if (!mutex_tryenter(&spa_namespace_lock)) {
|
|
|
|
|
mutex_exit(&zv->zv_state_lock);
|
|
|
|
|
rw_exit(&zv->zv_suspend_lock);
|
2024-07-11 00:27:44 +03:00
|
|
|
drop_suspend = B_FALSE;
|
2021-12-02 03:07:12 +03:00
|
|
|
|
|
|
|
|
#ifdef HAVE_BLKDEV_GET_ERESTARTSYS
|
2021-12-17 20:52:13 +03:00
|
|
|
schedule();
|
2024-07-18 06:13:44 +03:00
|
|
|
return (-SET_ERROR(ERESTARTSYS));
|
2021-12-17 20:52:13 +03:00
|
|
|
#else
|
|
|
|
|
if ((gethrtime() - start) > timeout)
|
2024-07-18 06:13:44 +03:00
|
|
|
return (-SET_ERROR(ERESTARTSYS));
|
2021-12-02 03:07:12 +03:00
|
|
|
|
2024-05-09 17:30:28 +03:00
|
|
|
schedule_timeout_interruptible(
|
|
|
|
|
MSEC_TO_TICK(10));
|
2021-12-17 20:52:13 +03:00
|
|
|
goto retry;
|
2021-12-02 03:07:12 +03:00
|
|
|
#endif
|
2021-12-17 20:52:13 +03:00
|
|
|
} else {
|
|
|
|
|
drop_namespace = B_TRUE;
|
2019-09-25 19:20:30 +03:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2023-08-01 18:37:20 +03:00
|
|
|
error = -zvol_first_open(zv, !(blk_mode_is_open_write(flag)));
|
2019-09-25 19:20:30 +03:00
|
|
|
|
2021-12-17 20:52:13 +03:00
|
|
|
if (drop_namespace)
|
|
|
|
|
mutex_exit(&spa_namespace_lock);
|
2019-09-25 19:20:30 +03:00
|
|
|
}
|
|
|
|
|
|
2021-12-17 20:52:13 +03:00
|
|
|
if (error == 0) {
|
2023-08-01 18:37:20 +03:00
|
|
|
if ((blk_mode_is_open_write(flag)) &&
|
|
|
|
|
(zv->zv_flags & ZVOL_RDONLY)) {
|
2021-12-17 20:52:13 +03:00
|
|
|
if (zv->zv_open_count == 0)
|
|
|
|
|
zvol_last_close(zv);
|
2019-09-25 19:20:30 +03:00
|
|
|
|
2024-07-18 06:13:44 +03:00
|
|
|
error = -SET_ERROR(EROFS);
|
2021-12-17 20:52:13 +03:00
|
|
|
} else {
|
|
|
|
|
zv->zv_open_count++;
|
|
|
|
|
}
|
|
|
|
|
}
|
2019-09-25 19:20:30 +03:00
|
|
|
|
|
|
|
|
mutex_exit(&zv->zv_state_lock);
|
|
|
|
|
if (drop_suspend)
|
|
|
|
|
rw_exit(&zv->zv_suspend_lock);
|
2021-12-02 03:07:12 +03:00
|
|
|
|
2021-12-17 20:52:13 +03:00
|
|
|
if (error == 0)
|
2023-08-01 18:37:20 +03:00
|
|
|
#ifdef HAVE_BLK_MODE_T
|
|
|
|
|
disk_check_media_change(disk);
|
|
|
|
|
#else
|
2021-12-17 20:52:13 +03:00
|
|
|
zfs_check_media_change(bdev);
|
2023-08-01 18:37:20 +03:00
|
|
|
#endif
|
2021-12-17 20:52:13 +03:00
|
|
|
|
|
|
|
|
return (error);
|
2019-09-25 19:20:30 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
2023-08-01 18:37:20 +03:00
|
|
|
#ifdef HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_1ARG
|
|
|
|
|
zvol_release(struct gendisk *disk)
|
|
|
|
|
#else
|
|
|
|
|
zvol_release(struct gendisk *disk, fmode_t unused)
|
|
|
|
|
#endif
|
2019-09-25 19:20:30 +03:00
|
|
|
{
|
2023-08-01 18:37:20 +03:00
|
|
|
#if !defined(HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_1ARG)
|
|
|
|
|
(void) unused;
|
|
|
|
|
#endif
|
2019-09-25 19:20:30 +03:00
|
|
|
boolean_t drop_suspend = B_TRUE;
|
|
|
|
|
|
zvol: stop using zvol_state_lock to protect OS-side private data
zvol_state_lock is intended to protect access to the global name->zvol
lists (zvol_find_by_name()), but has also been used to control access to
OS-side private data, accessed through whatever kernel object is used to
represent the volume (gendisk, geom, etc).
This appears to have been necessary to some degree because the OS-side
object is what's used to get a handle on zvol_state_t, so zv_state_lock
and zv_suspend_lock can't be used to manage access, but also, with the
private object and the zvol_state_t being shutdown and destroyed at the
same time in zvol_os_free(), we must ensure that the private object
pointer only ever corresponds to a real zvol_state_t, not one in partial
destruction. Taking the global lock seems like a convenient way to
ensure this.
The problem with this is that zvol_state_lock does not actually protect
access to the zvol_state_t internals, so we need to take zv_state_lock
and/or zv_suspend_lock. If those are contended, this can then cause
OS-side operations (eg zvol_open()) to sleep to wait for them while hold
zvol_state_lock. This then blocks out all other OS-side operations which
want to get the private data, and any ZFS-side control operations that
would take the write half of the lock. It's even worse if ZFS-side
operations induce OS-side calls back into the zvol (eg creating a zvol
triggers a partition probe inside the kernel, and also a userspace
access from udev to set up device links). And it gets even works again
if anything decides to defer those ops to a task and wait on them, which
zvol_remove_minors_impl() will do under high load.
However, since the previous commit, we have a guarantee that the private
data pointer will always be NULL'd out in zvol_os_remove_minor()
_before_ the zvol_state_t is made invalid, but it won't happen until all
users are ejected. So, if we make access to the private object pointer
atomic, we remove the need to take a global lockout to access it, and so
we can remove all acquisitions of zvol_state_lock from the OS side.
While here, I've rewritten much of the locking theory comment at the top
of zvol.c. It wasn't wrong, but it hadn't been followed exactly, so I've
tried to describe the purpose of each lock in a little more detail, and
in particular describe where it should and shouldn't be used.
Sponsored-by: Klara, Inc.
Sponsored-by: Railway Corporation
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Fedor Uporov <fuporov.vstack@gmail.com>
Signed-off-by: Rob Norris <rob.norris@klarasystems.com>
Closes #17625
2025-08-05 07:19:24 +03:00
|
|
|
zvol_state_t *zv = atomic_load_ptr(&disk->private_data);
|
|
|
|
|
if (zv == NULL)
|
|
|
|
|
return;
|
2019-09-25 19:20:30 +03:00
|
|
|
|
|
|
|
|
mutex_enter(&zv->zv_state_lock);
|
2020-10-21 20:23:08 +03:00
|
|
|
ASSERT3U(zv->zv_open_count, >, 0);
|
2019-09-25 19:20:30 +03:00
|
|
|
/*
|
|
|
|
|
* make sure zvol is not suspended during last close
|
|
|
|
|
* (hold zv_suspend_lock) and respect proper lock acquisition
|
|
|
|
|
* ordering - zv_suspend_lock before zv_state_lock
|
|
|
|
|
*/
|
|
|
|
|
if (zv->zv_open_count == 1) {
|
|
|
|
|
if (!rw_tryenter(&zv->zv_suspend_lock, RW_READER)) {
|
|
|
|
|
mutex_exit(&zv->zv_state_lock);
|
|
|
|
|
rw_enter(&zv->zv_suspend_lock, RW_READER);
|
|
|
|
|
mutex_enter(&zv->zv_state_lock);
|
zvol: stop using zvol_state_lock to protect OS-side private data
zvol_state_lock is intended to protect access to the global name->zvol
lists (zvol_find_by_name()), but has also been used to control access to
OS-side private data, accessed through whatever kernel object is used to
represent the volume (gendisk, geom, etc).
This appears to have been necessary to some degree because the OS-side
object is what's used to get a handle on zvol_state_t, so zv_state_lock
and zv_suspend_lock can't be used to manage access, but also, with the
private object and the zvol_state_t being shutdown and destroyed at the
same time in zvol_os_free(), we must ensure that the private object
pointer only ever corresponds to a real zvol_state_t, not one in partial
destruction. Taking the global lock seems like a convenient way to
ensure this.
The problem with this is that zvol_state_lock does not actually protect
access to the zvol_state_t internals, so we need to take zv_state_lock
and/or zv_suspend_lock. If those are contended, this can then cause
OS-side operations (eg zvol_open()) to sleep to wait for them while hold
zvol_state_lock. This then blocks out all other OS-side operations which
want to get the private data, and any ZFS-side control operations that
would take the write half of the lock. It's even worse if ZFS-side
operations induce OS-side calls back into the zvol (eg creating a zvol
triggers a partition probe inside the kernel, and also a userspace
access from udev to set up device links). And it gets even works again
if anything decides to defer those ops to a task and wait on them, which
zvol_remove_minors_impl() will do under high load.
However, since the previous commit, we have a guarantee that the private
data pointer will always be NULL'd out in zvol_os_remove_minor()
_before_ the zvol_state_t is made invalid, but it won't happen until all
users are ejected. So, if we make access to the private object pointer
atomic, we remove the need to take a global lockout to access it, and so
we can remove all acquisitions of zvol_state_lock from the OS side.
While here, I've rewritten much of the locking theory comment at the top
of zvol.c. It wasn't wrong, but it hadn't been followed exactly, so I've
tried to describe the purpose of each lock in a little more detail, and
in particular describe where it should and shouldn't be used.
Sponsored-by: Klara, Inc.
Sponsored-by: Railway Corporation
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Fedor Uporov <fuporov.vstack@gmail.com>
Signed-off-by: Rob Norris <rob.norris@klarasystems.com>
Closes #17625
2025-08-05 07:19:24 +03:00
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Unlike in zvol_open(), we don't check if removal
|
|
|
|
|
* started here, because we might be one of the openers
|
|
|
|
|
* that needs to be thrown out! If we're the last, we
|
|
|
|
|
* need to call zvol_last_close() below to finish
|
|
|
|
|
* cleanup. So, no special treatment for us.
|
|
|
|
|
*/
|
|
|
|
|
|
2019-09-25 19:20:30 +03:00
|
|
|
/* check to see if zv_suspend_lock is needed */
|
|
|
|
|
if (zv->zv_open_count != 1) {
|
|
|
|
|
rw_exit(&zv->zv_suspend_lock);
|
|
|
|
|
drop_suspend = B_FALSE;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
drop_suspend = B_FALSE;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
|
|
|
|
|
|
|
|
|
|
zv->zv_open_count--;
|
2020-10-21 20:23:08 +03:00
|
|
|
if (zv->zv_open_count == 0) {
|
|
|
|
|
ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
|
2019-09-25 19:20:30 +03:00
|
|
|
zvol_last_close(zv);
|
2020-10-21 20:23:08 +03:00
|
|
|
}
|
2019-09-25 19:20:30 +03:00
|
|
|
|
|
|
|
|
mutex_exit(&zv->zv_state_lock);
|
|
|
|
|
|
|
|
|
|
if (drop_suspend)
|
|
|
|
|
rw_exit(&zv->zv_suspend_lock);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
zvol_ioctl(struct block_device *bdev, fmode_t mode,
|
|
|
|
|
unsigned int cmd, unsigned long arg)
|
|
|
|
|
{
|
|
|
|
|
int error = 0;
|
|
|
|
|
|
zvol: stop using zvol_state_lock to protect OS-side private data
zvol_state_lock is intended to protect access to the global name->zvol
lists (zvol_find_by_name()), but has also been used to control access to
OS-side private data, accessed through whatever kernel object is used to
represent the volume (gendisk, geom, etc).
This appears to have been necessary to some degree because the OS-side
object is what's used to get a handle on zvol_state_t, so zv_state_lock
and zv_suspend_lock can't be used to manage access, but also, with the
private object and the zvol_state_t being shutdown and destroyed at the
same time in zvol_os_free(), we must ensure that the private object
pointer only ever corresponds to a real zvol_state_t, not one in partial
destruction. Taking the global lock seems like a convenient way to
ensure this.
The problem with this is that zvol_state_lock does not actually protect
access to the zvol_state_t internals, so we need to take zv_state_lock
and/or zv_suspend_lock. If those are contended, this can then cause
OS-side operations (eg zvol_open()) to sleep to wait for them while hold
zvol_state_lock. This then blocks out all other OS-side operations which
want to get the private data, and any ZFS-side control operations that
would take the write half of the lock. It's even worse if ZFS-side
operations induce OS-side calls back into the zvol (eg creating a zvol
triggers a partition probe inside the kernel, and also a userspace
access from udev to set up device links). And it gets even works again
if anything decides to defer those ops to a task and wait on them, which
zvol_remove_minors_impl() will do under high load.
However, since the previous commit, we have a guarantee that the private
data pointer will always be NULL'd out in zvol_os_remove_minor()
_before_ the zvol_state_t is made invalid, but it won't happen until all
users are ejected. So, if we make access to the private object pointer
atomic, we remove the need to take a global lockout to access it, and so
we can remove all acquisitions of zvol_state_lock from the OS side.
While here, I've rewritten much of the locking theory comment at the top
of zvol.c. It wasn't wrong, but it hadn't been followed exactly, so I've
tried to describe the purpose of each lock in a little more detail, and
in particular describe where it should and shouldn't be used.
Sponsored-by: Klara, Inc.
Sponsored-by: Railway Corporation
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Fedor Uporov <fuporov.vstack@gmail.com>
Signed-off-by: Rob Norris <rob.norris@klarasystems.com>
Closes #17625
2025-08-05 07:19:24 +03:00
|
|
|
zvol_state_t *zv = atomic_load_ptr(&bdev->bd_disk->private_data);
|
|
|
|
|
ASSERT3P(zv, !=, NULL);
|
2019-09-25 19:20:30 +03:00
|
|
|
ASSERT3U(zv->zv_open_count, >, 0);
|
|
|
|
|
|
|
|
|
|
switch (cmd) {
|
|
|
|
|
case BLKFLSBUF:
|
2023-09-15 08:07:03 +03:00
|
|
|
#ifdef HAVE_FSYNC_BDEV
|
2019-09-25 19:20:30 +03:00
|
|
|
fsync_bdev(bdev);
|
2023-09-15 08:07:03 +03:00
|
|
|
#elif defined(HAVE_SYNC_BLOCKDEV)
|
|
|
|
|
sync_blockdev(bdev);
|
|
|
|
|
#else
|
|
|
|
|
#error "Neither fsync_bdev() nor sync_blockdev() found"
|
|
|
|
|
#endif
|
2019-09-25 19:20:30 +03:00
|
|
|
invalidate_bdev(bdev);
|
|
|
|
|
rw_enter(&zv->zv_suspend_lock, RW_READER);
|
|
|
|
|
|
|
|
|
|
if (!(zv->zv_flags & ZVOL_RDONLY))
|
|
|
|
|
txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
|
|
|
|
|
|
|
|
|
|
rw_exit(&zv->zv_suspend_lock);
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case BLKZNAME:
|
|
|
|
|
mutex_enter(&zv->zv_state_lock);
|
2025-08-09 03:04:01 +03:00
|
|
|
error = -copy_to_user((void *)arg, zv->zv_name, MAXNAMELEN);
|
2019-09-25 19:20:30 +03:00
|
|
|
mutex_exit(&zv->zv_state_lock);
|
2025-08-09 03:04:01 +03:00
|
|
|
if (error)
|
|
|
|
|
error = SET_ERROR(error);
|
2019-09-25 19:20:30 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
default:
|
2025-08-09 03:04:01 +03:00
|
|
|
error = SET_ERROR(ENOTTY);
|
2019-09-25 19:20:30 +03:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2025-08-09 03:04:01 +03:00
|
|
|
return (-error);
|
2019-09-25 19:20:30 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#ifdef CONFIG_COMPAT
|
|
|
|
|
static int
|
|
|
|
|
zvol_compat_ioctl(struct block_device *bdev, fmode_t mode,
|
|
|
|
|
unsigned cmd, unsigned long arg)
|
|
|
|
|
{
|
|
|
|
|
return (zvol_ioctl(bdev, mode, cmd, arg));
|
|
|
|
|
}
|
|
|
|
|
#else
|
|
|
|
|
#define zvol_compat_ioctl NULL
|
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
|
static unsigned int
|
|
|
|
|
zvol_check_events(struct gendisk *disk, unsigned int clearing)
|
|
|
|
|
{
|
|
|
|
|
unsigned int mask = 0;
|
|
|
|
|
|
zvol: stop using zvol_state_lock to protect OS-side private data
zvol_state_lock is intended to protect access to the global name->zvol
lists (zvol_find_by_name()), but has also been used to control access to
OS-side private data, accessed through whatever kernel object is used to
represent the volume (gendisk, geom, etc).
This appears to have been necessary to some degree because the OS-side
object is what's used to get a handle on zvol_state_t, so zv_state_lock
and zv_suspend_lock can't be used to manage access, but also, with the
private object and the zvol_state_t being shutdown and destroyed at the
same time in zvol_os_free(), we must ensure that the private object
pointer only ever corresponds to a real zvol_state_t, not one in partial
destruction. Taking the global lock seems like a convenient way to
ensure this.
The problem with this is that zvol_state_lock does not actually protect
access to the zvol_state_t internals, so we need to take zv_state_lock
and/or zv_suspend_lock. If those are contended, this can then cause
OS-side operations (eg zvol_open()) to sleep to wait for them while hold
zvol_state_lock. This then blocks out all other OS-side operations which
want to get the private data, and any ZFS-side control operations that
would take the write half of the lock. It's even worse if ZFS-side
operations induce OS-side calls back into the zvol (eg creating a zvol
triggers a partition probe inside the kernel, and also a userspace
access from udev to set up device links). And it gets even works again
if anything decides to defer those ops to a task and wait on them, which
zvol_remove_minors_impl() will do under high load.
However, since the previous commit, we have a guarantee that the private
data pointer will always be NULL'd out in zvol_os_remove_minor()
_before_ the zvol_state_t is made invalid, but it won't happen until all
users are ejected. So, if we make access to the private object pointer
atomic, we remove the need to take a global lockout to access it, and so
we can remove all acquisitions of zvol_state_lock from the OS side.
While here, I've rewritten much of the locking theory comment at the top
of zvol.c. It wasn't wrong, but it hadn't been followed exactly, so I've
tried to describe the purpose of each lock in a little more detail, and
in particular describe where it should and shouldn't be used.
Sponsored-by: Klara, Inc.
Sponsored-by: Railway Corporation
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Fedor Uporov <fuporov.vstack@gmail.com>
Signed-off-by: Rob Norris <rob.norris@klarasystems.com>
Closes #17625
2025-08-05 07:19:24 +03:00
|
|
|
zvol_state_t *zv = atomic_load_ptr(&disk->private_data);
|
2019-09-25 19:20:30 +03:00
|
|
|
|
|
|
|
|
if (zv != NULL) {
|
|
|
|
|
mutex_enter(&zv->zv_state_lock);
|
|
|
|
|
mask = zv->zv_changed ? DISK_EVENT_MEDIA_CHANGE : 0;
|
|
|
|
|
zv->zv_changed = 0;
|
|
|
|
|
mutex_exit(&zv->zv_state_lock);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return (mask);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
zvol_revalidate_disk(struct gendisk *disk)
|
|
|
|
|
{
|
zvol: stop using zvol_state_lock to protect OS-side private data
zvol_state_lock is intended to protect access to the global name->zvol
lists (zvol_find_by_name()), but has also been used to control access to
OS-side private data, accessed through whatever kernel object is used to
represent the volume (gendisk, geom, etc).
This appears to have been necessary to some degree because the OS-side
object is what's used to get a handle on zvol_state_t, so zv_state_lock
and zv_suspend_lock can't be used to manage access, but also, with the
private object and the zvol_state_t being shutdown and destroyed at the
same time in zvol_os_free(), we must ensure that the private object
pointer only ever corresponds to a real zvol_state_t, not one in partial
destruction. Taking the global lock seems like a convenient way to
ensure this.
The problem with this is that zvol_state_lock does not actually protect
access to the zvol_state_t internals, so we need to take zv_state_lock
and/or zv_suspend_lock. If those are contended, this can then cause
OS-side operations (eg zvol_open()) to sleep to wait for them while hold
zvol_state_lock. This then blocks out all other OS-side operations which
want to get the private data, and any ZFS-side control operations that
would take the write half of the lock. It's even worse if ZFS-side
operations induce OS-side calls back into the zvol (eg creating a zvol
triggers a partition probe inside the kernel, and also a userspace
access from udev to set up device links). And it gets even works again
if anything decides to defer those ops to a task and wait on them, which
zvol_remove_minors_impl() will do under high load.
However, since the previous commit, we have a guarantee that the private
data pointer will always be NULL'd out in zvol_os_remove_minor()
_before_ the zvol_state_t is made invalid, but it won't happen until all
users are ejected. So, if we make access to the private object pointer
atomic, we remove the need to take a global lockout to access it, and so
we can remove all acquisitions of zvol_state_lock from the OS side.
While here, I've rewritten much of the locking theory comment at the top
of zvol.c. It wasn't wrong, but it hadn't been followed exactly, so I've
tried to describe the purpose of each lock in a little more detail, and
in particular describe where it should and shouldn't be used.
Sponsored-by: Klara, Inc.
Sponsored-by: Railway Corporation
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Fedor Uporov <fuporov.vstack@gmail.com>
Signed-off-by: Rob Norris <rob.norris@klarasystems.com>
Closes #17625
2025-08-05 07:19:24 +03:00
|
|
|
zvol_state_t *zv = atomic_load_ptr(&disk->private_data);
|
2019-09-25 19:20:30 +03:00
|
|
|
|
|
|
|
|
if (zv != NULL) {
|
|
|
|
|
mutex_enter(&zv->zv_state_lock);
|
|
|
|
|
set_capacity(zv->zv_zso->zvo_disk,
|
|
|
|
|
zv->zv_volsize >> SECTOR_BITS);
|
|
|
|
|
mutex_exit(&zv->zv_state_lock);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
|
}
|
|
|
|
|
|
2022-02-07 21:24:38 +03:00
|
|
|
int
|
|
|
|
|
zvol_os_update_volsize(zvol_state_t *zv, uint64_t volsize)
|
2019-09-25 19:20:30 +03:00
|
|
|
{
|
2020-12-18 20:36:19 +03:00
|
|
|
struct gendisk *disk = zv->zv_zso->zvo_disk;
|
2019-09-25 19:20:30 +03:00
|
|
|
|
2020-12-23 00:53:25 +03:00
|
|
|
#if defined(HAVE_REVALIDATE_DISK_SIZE)
|
2020-12-18 20:36:19 +03:00
|
|
|
revalidate_disk_size(disk, zvol_revalidate_disk(disk) == 0);
|
2020-12-23 00:53:25 +03:00
|
|
|
#elif defined(HAVE_REVALIDATE_DISK)
|
2020-12-18 20:36:19 +03:00
|
|
|
revalidate_disk(disk);
|
2020-12-23 00:53:25 +03:00
|
|
|
#else
|
|
|
|
|
zvol_revalidate_disk(disk);
|
2020-10-18 20:06:18 +03:00
|
|
|
#endif
|
2019-09-25 19:20:30 +03:00
|
|
|
return (0);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Provide a simple virtual geometry for legacy compatibility. For devices
|
|
|
|
|
* smaller than 1 MiB a small head and sector count is used to allow very
|
|
|
|
|
* tiny devices. For devices over 1 Mib a standard head and sector count
|
|
|
|
|
* is used to keep the cylinders count reasonable.
|
|
|
|
|
*/
|
|
|
|
|
static int
|
|
|
|
|
zvol_getgeo(struct block_device *bdev, struct hd_geometry *geo)
|
|
|
|
|
{
|
|
|
|
|
sector_t sectors;
|
|
|
|
|
|
zvol: stop using zvol_state_lock to protect OS-side private data
zvol_state_lock is intended to protect access to the global name->zvol
lists (zvol_find_by_name()), but has also been used to control access to
OS-side private data, accessed through whatever kernel object is used to
represent the volume (gendisk, geom, etc).
This appears to have been necessary to some degree because the OS-side
object is what's used to get a handle on zvol_state_t, so zv_state_lock
and zv_suspend_lock can't be used to manage access, but also, with the
private object and the zvol_state_t being shutdown and destroyed at the
same time in zvol_os_free(), we must ensure that the private object
pointer only ever corresponds to a real zvol_state_t, not one in partial
destruction. Taking the global lock seems like a convenient way to
ensure this.
The problem with this is that zvol_state_lock does not actually protect
access to the zvol_state_t internals, so we need to take zv_state_lock
and/or zv_suspend_lock. If those are contended, this can then cause
OS-side operations (eg zvol_open()) to sleep to wait for them while hold
zvol_state_lock. This then blocks out all other OS-side operations which
want to get the private data, and any ZFS-side control operations that
would take the write half of the lock. It's even worse if ZFS-side
operations induce OS-side calls back into the zvol (eg creating a zvol
triggers a partition probe inside the kernel, and also a userspace
access from udev to set up device links). And it gets even works again
if anything decides to defer those ops to a task and wait on them, which
zvol_remove_minors_impl() will do under high load.
However, since the previous commit, we have a guarantee that the private
data pointer will always be NULL'd out in zvol_os_remove_minor()
_before_ the zvol_state_t is made invalid, but it won't happen until all
users are ejected. So, if we make access to the private object pointer
atomic, we remove the need to take a global lockout to access it, and so
we can remove all acquisitions of zvol_state_lock from the OS side.
While here, I've rewritten much of the locking theory comment at the top
of zvol.c. It wasn't wrong, but it hadn't been followed exactly, so I've
tried to describe the purpose of each lock in a little more detail, and
in particular describe where it should and shouldn't be used.
Sponsored-by: Klara, Inc.
Sponsored-by: Railway Corporation
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Fedor Uporov <fuporov.vstack@gmail.com>
Signed-off-by: Rob Norris <rob.norris@klarasystems.com>
Closes #17625
2025-08-05 07:19:24 +03:00
|
|
|
zvol_state_t *zv = atomic_load_ptr(&bdev->bd_disk->private_data);
|
|
|
|
|
ASSERT3P(zv, !=, NULL);
|
2019-09-25 19:20:30 +03:00
|
|
|
ASSERT3U(zv->zv_open_count, >, 0);
|
|
|
|
|
|
|
|
|
|
sectors = get_capacity(zv->zv_zso->zvo_disk);
|
|
|
|
|
|
|
|
|
|
if (sectors > 2048) {
|
|
|
|
|
geo->heads = 16;
|
|
|
|
|
geo->sectors = 63;
|
|
|
|
|
} else {
|
|
|
|
|
geo->heads = 2;
|
|
|
|
|
geo->sectors = 4;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
geo->start = 0;
|
|
|
|
|
geo->cylinders = sectors / (geo->heads * geo->sectors);
|
|
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
|
}
|
|
|
|
|
|
2022-06-09 17:10:38 +03:00
|
|
|
/*
|
|
|
|
|
* Why have two separate block_device_operations structs?
|
|
|
|
|
*
|
|
|
|
|
* Normally we'd just have one, and assign 'submit_bio' as needed. However,
|
|
|
|
|
* it's possible the user's kernel is built with CONSTIFY_PLUGIN, meaning we
|
|
|
|
|
* can't just change submit_bio dynamically at runtime. So just create two
|
|
|
|
|
* separate structs to get around this.
|
|
|
|
|
*/
|
|
|
|
|
static const struct block_device_operations zvol_ops_blk_mq = {
|
|
|
|
|
.open = zvol_open,
|
|
|
|
|
.release = zvol_release,
|
|
|
|
|
.ioctl = zvol_ioctl,
|
|
|
|
|
.compat_ioctl = zvol_compat_ioctl,
|
|
|
|
|
.check_events = zvol_check_events,
|
|
|
|
|
#ifdef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK
|
|
|
|
|
.revalidate_disk = zvol_revalidate_disk,
|
|
|
|
|
#endif
|
|
|
|
|
.getgeo = zvol_getgeo,
|
|
|
|
|
.owner = THIS_MODULE,
|
|
|
|
|
};
|
|
|
|
|
|
2022-01-15 02:37:55 +03:00
|
|
|
static const struct block_device_operations zvol_ops = {
|
2019-09-25 19:20:30 +03:00
|
|
|
.open = zvol_open,
|
|
|
|
|
.release = zvol_release,
|
|
|
|
|
.ioctl = zvol_ioctl,
|
|
|
|
|
.compat_ioctl = zvol_compat_ioctl,
|
|
|
|
|
.check_events = zvol_check_events,
|
2021-05-12 05:53:02 +03:00
|
|
|
#ifdef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK
|
2019-09-25 19:20:30 +03:00
|
|
|
.revalidate_disk = zvol_revalidate_disk,
|
2021-05-12 05:53:02 +03:00
|
|
|
#endif
|
2019-09-25 19:20:30 +03:00
|
|
|
.getgeo = zvol_getgeo,
|
|
|
|
|
.owner = THIS_MODULE,
|
2020-08-09 19:12:25 +03:00
|
|
|
#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
|
2021-07-24 01:28:03 +03:00
|
|
|
.submit_bio = zvol_submit_bio,
|
2020-08-09 19:12:25 +03:00
|
|
|
#endif
|
2019-09-25 19:20:30 +03:00
|
|
|
};
|
|
|
|
|
|
2024-07-31 07:35:48 +03:00
|
|
|
/*
|
|
|
|
|
* Since 6.9, Linux has been removing queue limit setters in favour of an
|
|
|
|
|
* initial queue_limits struct applied when the device is open. Since 6.11,
|
|
|
|
|
* queue_limits is being extended to allow more things to be applied when the
|
|
|
|
|
* device is open. Setters are also being removed for this.
|
|
|
|
|
*
|
|
|
|
|
* For OpenZFS, this means that depending on kernel version, some options may
|
|
|
|
|
* be set up before the device is open, and some applied to an open device
|
|
|
|
|
* (queue) after the fact.
|
|
|
|
|
*
|
|
|
|
|
* We manage this complexity by having our own limits struct,
|
|
|
|
|
* zvol_queue_limits_t, in which we carry any queue config that we're
|
|
|
|
|
* interested in setting. This structure is the same on all kernels.
|
|
|
|
|
*
|
|
|
|
|
* These limits are then applied to the queue at device open time by the most
|
|
|
|
|
* appropriate method for the kernel.
|
|
|
|
|
*
|
|
|
|
|
* zvol_queue_limits_convert() is used on 6.9+ (where the two-arg form of
|
|
|
|
|
* blk_alloc_disk() exists). This converts our limits struct to a proper Linux
|
|
|
|
|
* struct queue_limits, and passes it in. Any fields added in later kernels are
|
|
|
|
|
* (obviously) not set up here.
|
|
|
|
|
*
|
|
|
|
|
* zvol_queue_limits_apply() is called on all kernel versions after the queue
|
|
|
|
|
* is created, and applies any remaining config. Before 6.9 that will be
|
|
|
|
|
* everything, via setter methods. After 6.9 that will be whatever couldn't be
|
|
|
|
|
* put into struct queue_limits. (This implies that zvol_queue_limits_apply()
|
|
|
|
|
* will always be a no-op on the latest kernel we support).
|
|
|
|
|
*/
|
2024-05-28 04:32:07 +03:00
|
|
|
typedef struct zvol_queue_limits {
|
|
|
|
|
unsigned int zql_max_hw_sectors;
|
|
|
|
|
unsigned short zql_max_segments;
|
|
|
|
|
unsigned int zql_max_segment_size;
|
|
|
|
|
unsigned int zql_io_opt;
|
2024-07-31 10:22:20 +03:00
|
|
|
unsigned int zql_physical_block_size;
|
|
|
|
|
unsigned int zql_max_discard_sectors;
|
|
|
|
|
unsigned int zql_discard_granularity;
|
2024-05-28 04:32:07 +03:00
|
|
|
} zvol_queue_limits_t;
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
zvol_queue_limits_init(zvol_queue_limits_t *limits, zvol_state_t *zv,
|
|
|
|
|
boolean_t use_blk_mq)
|
|
|
|
|
{
|
|
|
|
|
limits->zql_max_hw_sectors = (DMU_MAX_ACCESS / 4) >> 9;
|
|
|
|
|
|
|
|
|
|
if (use_blk_mq) {
|
|
|
|
|
/*
|
|
|
|
|
* IO requests can be really big (1MB). When an IO request
|
|
|
|
|
* comes in, it is passed off to zvol_read() or zvol_write()
|
|
|
|
|
* in a new thread, where it is chunked up into 'volblocksize'
|
|
|
|
|
* sized pieces and processed. So for example, if the request
|
|
|
|
|
* is a 1MB write and your volblocksize is 128k, one zvol_write
|
|
|
|
|
* thread will take that request and sequentially do ten 128k
|
|
|
|
|
* IOs. This is due to the fact that the thread needs to lock
|
|
|
|
|
* each volblocksize sized block. So you might be wondering:
|
|
|
|
|
* "instead of passing the whole 1MB request to one thread,
|
|
|
|
|
* why not pass ten individual 128k chunks to ten threads and
|
|
|
|
|
* process the whole write in parallel?" The short answer is
|
|
|
|
|
* that there's a sweet spot number of chunks that balances
|
|
|
|
|
* the greater parallelism with the added overhead of more
|
|
|
|
|
* threads. The sweet spot can be different depending on if you
|
|
|
|
|
* have a read or write heavy workload. Writes typically want
|
|
|
|
|
* high chunk counts while reads typically want lower ones. On
|
|
|
|
|
* a test pool with 6 NVMe drives in a 3x 2-disk mirror
|
|
|
|
|
* configuration, with volblocksize=8k, the sweet spot for good
|
|
|
|
|
* sequential reads and writes was at 8 chunks.
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Below we tell the kernel how big we want our requests
|
|
|
|
|
* to be. You would think that blk_queue_io_opt() would be
|
|
|
|
|
* used to do this since it is used to "set optimal request
|
|
|
|
|
* size for the queue", but that doesn't seem to do
|
|
|
|
|
* anything - the kernel still gives you huge requests
|
|
|
|
|
* with tons of little PAGE_SIZE segments contained within it.
|
|
|
|
|
*
|
|
|
|
|
* Knowing that the kernel will just give you PAGE_SIZE segments
|
|
|
|
|
* no matter what, you can say "ok, I want PAGE_SIZE byte
|
|
|
|
|
* segments, and I want 'N' of them per request", where N is
|
|
|
|
|
* the correct number of segments for the volblocksize and
|
|
|
|
|
* number of chunks you want.
|
|
|
|
|
*/
|
|
|
|
|
if (zvol_blk_mq_blocks_per_thread != 0) {
|
|
|
|
|
unsigned int chunks;
|
|
|
|
|
chunks = MIN(zvol_blk_mq_blocks_per_thread, UINT16_MAX);
|
|
|
|
|
|
|
|
|
|
limits->zql_max_segment_size = PAGE_SIZE;
|
|
|
|
|
limits->zql_max_segments =
|
|
|
|
|
(zv->zv_volblocksize * chunks) / PAGE_SIZE;
|
|
|
|
|
} else {
|
|
|
|
|
/*
|
|
|
|
|
* Special case: zvol_blk_mq_blocks_per_thread = 0
|
|
|
|
|
* Max everything out.
|
|
|
|
|
*/
|
|
|
|
|
limits->zql_max_segments = UINT16_MAX;
|
|
|
|
|
limits->zql_max_segment_size = UINT_MAX;
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
limits->zql_max_segments = UINT16_MAX;
|
|
|
|
|
limits->zql_max_segment_size = UINT_MAX;
|
|
|
|
|
}
|
|
|
|
|
|
2024-11-15 01:14:33 +03:00
|
|
|
limits->zql_io_opt = DMU_MAX_ACCESS / 2;
|
2024-07-31 10:22:20 +03:00
|
|
|
|
|
|
|
|
limits->zql_physical_block_size = zv->zv_volblocksize;
|
|
|
|
|
limits->zql_max_discard_sectors =
|
|
|
|
|
(zvol_max_discard_blocks * zv->zv_volblocksize) >> 9;
|
|
|
|
|
limits->zql_discard_granularity = zv->zv_volblocksize;
|
2024-05-28 04:32:07 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#ifdef HAVE_BLK_ALLOC_DISK_2ARG
|
|
|
|
|
static void
|
|
|
|
|
zvol_queue_limits_convert(zvol_queue_limits_t *limits,
|
|
|
|
|
struct queue_limits *qlimits)
|
|
|
|
|
{
|
|
|
|
|
memset(qlimits, 0, sizeof (struct queue_limits));
|
|
|
|
|
qlimits->max_hw_sectors = limits->zql_max_hw_sectors;
|
|
|
|
|
qlimits->max_segments = limits->zql_max_segments;
|
|
|
|
|
qlimits->max_segment_size = limits->zql_max_segment_size;
|
|
|
|
|
qlimits->io_opt = limits->zql_io_opt;
|
2024-07-31 10:22:20 +03:00
|
|
|
qlimits->physical_block_size = limits->zql_physical_block_size;
|
|
|
|
|
qlimits->max_discard_sectors = limits->zql_max_discard_sectors;
|
2024-08-19 23:30:57 +03:00
|
|
|
qlimits->max_hw_discard_sectors = limits->zql_max_discard_sectors;
|
2024-07-31 10:22:20 +03:00
|
|
|
qlimits->discard_granularity = limits->zql_discard_granularity;
|
2024-07-30 14:40:35 +03:00
|
|
|
#ifdef HAVE_BLKDEV_QUEUE_LIMITS_FEATURES
|
2024-07-31 07:48:58 +03:00
|
|
|
qlimits->features =
|
|
|
|
|
BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | BLK_FEAT_IO_STAT;
|
2024-07-30 14:40:35 +03:00
|
|
|
#endif
|
2024-05-28 04:32:07 +03:00
|
|
|
}
|
2024-07-31 07:35:48 +03:00
|
|
|
#endif
|
|
|
|
|
|
2024-05-28 04:32:07 +03:00
|
|
|
static void
|
|
|
|
|
zvol_queue_limits_apply(zvol_queue_limits_t *limits,
|
|
|
|
|
struct request_queue *queue)
|
|
|
|
|
{
|
2024-07-31 07:35:48 +03:00
|
|
|
#ifndef HAVE_BLK_ALLOC_DISK_2ARG
|
2024-05-28 04:32:07 +03:00
|
|
|
blk_queue_max_hw_sectors(queue, limits->zql_max_hw_sectors);
|
|
|
|
|
blk_queue_max_segments(queue, limits->zql_max_segments);
|
|
|
|
|
blk_queue_max_segment_size(queue, limits->zql_max_segment_size);
|
|
|
|
|
blk_queue_io_opt(queue, limits->zql_io_opt);
|
2024-07-31 10:22:20 +03:00
|
|
|
blk_queue_physical_block_size(queue, limits->zql_physical_block_size);
|
|
|
|
|
blk_queue_max_discard_sectors(queue, limits->zql_max_discard_sectors);
|
|
|
|
|
blk_queue_discard_granularity(queue, limits->zql_discard_granularity);
|
|
|
|
|
#endif
|
2024-07-30 14:40:35 +03:00
|
|
|
#ifndef HAVE_BLKDEV_QUEUE_LIMITS_FEATURES
|
|
|
|
|
blk_queue_set_write_cache(queue, B_TRUE);
|
2024-07-31 07:48:58 +03:00
|
|
|
blk_queue_flag_set(QUEUE_FLAG_IO_STAT, queue);
|
2024-07-30 14:40:35 +03:00
|
|
|
#endif
|
2024-05-28 04:32:07 +03:00
|
|
|
}
|
|
|
|
|
|
2022-06-09 17:10:38 +03:00
|
|
|
static int
|
2024-05-28 04:32:07 +03:00
|
|
|
zvol_alloc_non_blk_mq(struct zvol_state_os *zso, zvol_queue_limits_t *limits)
|
2022-06-09 17:10:38 +03:00
|
|
|
{
|
|
|
|
|
#if defined(HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS)
|
|
|
|
|
#if defined(HAVE_BLK_ALLOC_DISK)
|
|
|
|
|
zso->zvo_disk = blk_alloc_disk(NUMA_NO_NODE);
|
|
|
|
|
if (zso->zvo_disk == NULL)
|
|
|
|
|
return (1);
|
|
|
|
|
|
2024-03-27 03:24:57 +03:00
|
|
|
zso->zvo_disk->minors = ZVOL_MINORS;
|
|
|
|
|
zso->zvo_queue = zso->zvo_disk->queue;
|
|
|
|
|
#elif defined(HAVE_BLK_ALLOC_DISK_2ARG)
|
2024-05-28 04:32:07 +03:00
|
|
|
struct queue_limits qlimits;
|
|
|
|
|
zvol_queue_limits_convert(limits, &qlimits);
|
|
|
|
|
struct gendisk *disk = blk_alloc_disk(&qlimits, NUMA_NO_NODE);
|
2024-03-27 03:24:57 +03:00
|
|
|
if (IS_ERR(disk)) {
|
|
|
|
|
zso->zvo_disk = NULL;
|
|
|
|
|
return (1);
|
|
|
|
|
}
|
|
|
|
|
|
2024-08-16 00:05:58 +03:00
|
|
|
zso->zvo_disk = disk;
|
|
|
|
|
zso->zvo_disk->minors = ZVOL_MINORS;
|
|
|
|
|
zso->zvo_queue = zso->zvo_disk->queue;
|
|
|
|
|
|
2022-06-09 17:10:38 +03:00
|
|
|
#else
|
|
|
|
|
zso->zvo_queue = blk_alloc_queue(NUMA_NO_NODE);
|
|
|
|
|
if (zso->zvo_queue == NULL)
|
|
|
|
|
return (1);
|
|
|
|
|
|
|
|
|
|
zso->zvo_disk = alloc_disk(ZVOL_MINORS);
|
|
|
|
|
if (zso->zvo_disk == NULL) {
|
|
|
|
|
blk_cleanup_queue(zso->zvo_queue);
|
|
|
|
|
return (1);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
zso->zvo_disk->queue = zso->zvo_queue;
|
|
|
|
|
#endif /* HAVE_BLK_ALLOC_DISK */
|
|
|
|
|
#else
|
|
|
|
|
zso->zvo_queue = blk_generic_alloc_queue(zvol_request, NUMA_NO_NODE);
|
|
|
|
|
if (zso->zvo_queue == NULL)
|
|
|
|
|
return (1);
|
|
|
|
|
|
|
|
|
|
zso->zvo_disk = alloc_disk(ZVOL_MINORS);
|
|
|
|
|
if (zso->zvo_disk == NULL) {
|
|
|
|
|
blk_cleanup_queue(zso->zvo_queue);
|
|
|
|
|
return (1);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
zso->zvo_disk->queue = zso->zvo_queue;
|
|
|
|
|
#endif /* HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS */
|
2024-07-31 07:35:48 +03:00
|
|
|
|
|
|
|
|
zvol_queue_limits_apply(limits, zso->zvo_queue);
|
|
|
|
|
|
2022-06-09 17:10:38 +03:00
|
|
|
return (0);
|
|
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
2024-05-28 04:32:07 +03:00
|
|
|
zvol_alloc_blk_mq(zvol_state_t *zv, zvol_queue_limits_t *limits)
|
2022-06-09 17:10:38 +03:00
|
|
|
{
|
|
|
|
|
struct zvol_state_os *zso = zv->zv_zso;
|
|
|
|
|
|
|
|
|
|
/* Allocate our blk-mq tag_set */
|
|
|
|
|
if (zvol_blk_mq_alloc_tag_set(zv) != 0)
|
|
|
|
|
return (1);
|
|
|
|
|
|
|
|
|
|
#if defined(HAVE_BLK_ALLOC_DISK)
|
|
|
|
|
zso->zvo_disk = blk_mq_alloc_disk(&zso->tag_set, zv);
|
|
|
|
|
if (zso->zvo_disk == NULL) {
|
|
|
|
|
blk_mq_free_tag_set(&zso->tag_set);
|
|
|
|
|
return (1);
|
|
|
|
|
}
|
|
|
|
|
zso->zvo_queue = zso->zvo_disk->queue;
|
|
|
|
|
zso->zvo_disk->minors = ZVOL_MINORS;
|
2024-03-27 03:24:57 +03:00
|
|
|
#elif defined(HAVE_BLK_ALLOC_DISK_2ARG)
|
2024-05-28 04:32:07 +03:00
|
|
|
struct queue_limits qlimits;
|
|
|
|
|
zvol_queue_limits_convert(limits, &qlimits);
|
|
|
|
|
struct gendisk *disk = blk_mq_alloc_disk(&zso->tag_set, &qlimits, zv);
|
2024-03-27 03:24:57 +03:00
|
|
|
if (IS_ERR(disk)) {
|
|
|
|
|
zso->zvo_disk = NULL;
|
|
|
|
|
blk_mq_free_tag_set(&zso->tag_set);
|
|
|
|
|
return (1);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
zso->zvo_disk = disk;
|
|
|
|
|
zso->zvo_queue = zso->zvo_disk->queue;
|
|
|
|
|
zso->zvo_disk->minors = ZVOL_MINORS;
|
2022-06-09 17:10:38 +03:00
|
|
|
#else
|
|
|
|
|
zso->zvo_disk = alloc_disk(ZVOL_MINORS);
|
|
|
|
|
if (zso->zvo_disk == NULL) {
|
|
|
|
|
blk_cleanup_queue(zso->zvo_queue);
|
|
|
|
|
blk_mq_free_tag_set(&zso->tag_set);
|
|
|
|
|
return (1);
|
|
|
|
|
}
|
|
|
|
|
/* Allocate queue */
|
|
|
|
|
zso->zvo_queue = blk_mq_init_queue(&zso->tag_set);
|
|
|
|
|
if (IS_ERR(zso->zvo_queue)) {
|
|
|
|
|
blk_mq_free_tag_set(&zso->tag_set);
|
|
|
|
|
return (1);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Our queue is now created, assign it to our disk */
|
|
|
|
|
zso->zvo_disk->queue = zso->zvo_queue;
|
|
|
|
|
#endif
|
2024-07-31 07:35:48 +03:00
|
|
|
|
|
|
|
|
zvol_queue_limits_apply(limits, zso->zvo_queue);
|
|
|
|
|
|
2022-06-09 17:10:38 +03:00
|
|
|
return (0);
|
|
|
|
|
}
|
|
|
|
|
|
2019-09-25 19:20:30 +03:00
|
|
|
/*
|
|
|
|
|
* Allocate memory for a new zvol_state_t and setup the required
|
|
|
|
|
* request queue and generic disk structures for the block device.
|
|
|
|
|
*/
|
2025-08-06 17:10:52 +03:00
|
|
|
static int
|
|
|
|
|
zvol_alloc(dev_t dev, const char *name, uint64_t volsize, uint64_t volblocksize,
|
|
|
|
|
zvol_state_t **zvp)
|
2019-09-25 19:20:30 +03:00
|
|
|
{
|
|
|
|
|
zvol_state_t *zv;
|
2020-04-09 19:16:46 +03:00
|
|
|
struct zvol_state_os *zso;
|
2019-09-25 19:20:30 +03:00
|
|
|
uint64_t volmode;
|
2022-06-09 17:10:38 +03:00
|
|
|
int ret;
|
2019-09-25 19:20:30 +03:00
|
|
|
|
2025-08-06 17:10:52 +03:00
|
|
|
ret = dsl_prop_get_integer(name, "volmode", &volmode, NULL);
|
|
|
|
|
if (ret)
|
|
|
|
|
return (ret);
|
2019-09-25 19:20:30 +03:00
|
|
|
|
|
|
|
|
if (volmode == ZFS_VOLMODE_DEFAULT)
|
|
|
|
|
volmode = zvol_volmode;
|
|
|
|
|
|
|
|
|
|
if (volmode == ZFS_VOLMODE_NONE)
|
2025-08-06 17:10:52 +03:00
|
|
|
return (0);
|
2019-09-25 19:20:30 +03:00
|
|
|
|
|
|
|
|
zv = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
|
2020-04-09 19:16:46 +03:00
|
|
|
zso = kmem_zalloc(sizeof (struct zvol_state_os), KM_SLEEP);
|
|
|
|
|
zv->zv_zso = zso;
|
2020-11-17 20:50:52 +03:00
|
|
|
zv->zv_volmode = volmode;
|
2025-08-06 17:10:52 +03:00
|
|
|
zv->zv_volsize = volsize;
|
2024-08-16 00:29:50 +03:00
|
|
|
zv->zv_volblocksize = volblocksize;
|
2019-09-25 19:20:30 +03:00
|
|
|
|
|
|
|
|
list_link_init(&zv->zv_next);
|
|
|
|
|
mutex_init(&zv->zv_state_lock, NULL, MUTEX_DEFAULT, NULL);
|
zvol: ensure device minors are properly cleaned up
Currently, if a minor is in use when we try to remove it, we'll skip it
and never come back to it again. Since the zvol state is hung off the
minor in the kernel, this can get us into weird situations if something
tries to use it after the removal fails. It's even worse at pool export,
as there's now a vestigial zvol state with no pool under it. It's
weirder again if the pool is subsequently reimported, as the zvol code
(reasonably) assumes the zvol state has been properly setup, when it's
actually left over from the previous import of the pool.
This commit attempts to tackle that by setting a flag on the zvol if its
minor can't be removed, and then checking that flag when a request is
made and rejecting it, thus stopping new work coming in.
The flag also causes a condvar to be signaled when the last client
finishes. For the case where a single minor is being removed (eg
changing volmode), it will wait for this signal before proceeding.
Meanwhile, when removing all minors, a background task is created for
each minor that couldn't be removed on the spot, and those tasks then
wake and clean up.
Since any new tasks are queued on to the pool's spa_zvol_taskq,
spa_export_common() will continue to wait at export until all minors are
removed.
Reviewed-by: Tony Hutter <hutter2@llnl.gov>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Sponsored-by: Klara, Inc.
Sponsored-by: Wasabi Technology, Inc.
Signed-off-by: Rob Norris <rob.norris@klarasystems.com>
Closes #14872
Closes #16364
2024-07-18 06:24:05 +03:00
|
|
|
cv_init(&zv->zv_removing_cv, NULL, CV_DEFAULT, NULL);
|
2019-09-25 19:20:30 +03:00
|
|
|
|
2022-06-09 17:10:38 +03:00
|
|
|
zv->zv_zso->use_blk_mq = zvol_use_blk_mq;
|
2021-07-24 01:28:03 +03:00
|
|
|
|
2024-05-28 04:32:07 +03:00
|
|
|
zvol_queue_limits_t limits;
|
|
|
|
|
zvol_queue_limits_init(&limits, zv, zv->zv_zso->use_blk_mq);
|
|
|
|
|
|
2022-06-09 17:10:38 +03:00
|
|
|
/*
|
|
|
|
|
* The block layer has 3 interfaces for getting BIOs:
|
|
|
|
|
*
|
|
|
|
|
* 1. blk-mq request queues (new)
|
|
|
|
|
* 2. submit_bio() (oldest)
|
|
|
|
|
* 3. regular request queues (old).
|
|
|
|
|
*
|
|
|
|
|
* Each of those interfaces has two permutations:
|
|
|
|
|
*
|
|
|
|
|
* a) We have blk_alloc_disk()/blk_mq_alloc_disk(), which allocates
|
|
|
|
|
* both the disk and its queue (5.14 kernel or newer)
|
|
|
|
|
*
|
|
|
|
|
* b) We don't have blk_*alloc_disk(), and have to allocate the
|
|
|
|
|
* disk and the queue separately. (5.13 kernel or older)
|
|
|
|
|
*/
|
|
|
|
|
if (zv->zv_zso->use_blk_mq) {
|
2024-05-28 04:32:07 +03:00
|
|
|
ret = zvol_alloc_blk_mq(zv, &limits);
|
2025-05-30 17:25:09 +03:00
|
|
|
if (ret != 0)
|
|
|
|
|
goto out_kmem;
|
2022-06-09 17:10:38 +03:00
|
|
|
zso->zvo_disk->fops = &zvol_ops_blk_mq;
|
|
|
|
|
} else {
|
2024-05-28 04:32:07 +03:00
|
|
|
ret = zvol_alloc_non_blk_mq(zso, &limits);
|
2025-05-30 17:25:09 +03:00
|
|
|
if (ret != 0)
|
|
|
|
|
goto out_kmem;
|
2022-06-09 17:10:38 +03:00
|
|
|
zso->zvo_disk->fops = &zvol_ops;
|
2021-07-24 01:28:03 +03:00
|
|
|
}
|
2019-09-25 19:20:30 +03:00
|
|
|
|
|
|
|
|
/* Limit read-ahead to a single page to prevent over-prefetching. */
|
2020-04-09 19:16:46 +03:00
|
|
|
blk_queue_set_read_ahead(zso->zvo_queue, 1);
|
2019-09-25 19:20:30 +03:00
|
|
|
|
2022-06-09 17:10:38 +03:00
|
|
|
if (!zv->zv_zso->use_blk_mq) {
|
|
|
|
|
/* Disable write merging in favor of the ZIO pipeline. */
|
|
|
|
|
blk_queue_flag_set(QUEUE_FLAG_NOMERGES, zso->zvo_queue);
|
|
|
|
|
}
|
2019-09-25 19:20:30 +03:00
|
|
|
|
2020-04-09 19:16:46 +03:00
|
|
|
zso->zvo_queue->queuedata = zv;
|
|
|
|
|
zso->zvo_dev = dev;
|
2019-09-25 19:20:30 +03:00
|
|
|
zv->zv_open_count = 0;
|
2023-11-28 00:16:59 +03:00
|
|
|
strlcpy(zv->zv_name, name, sizeof (zv->zv_name));
|
2019-09-25 19:20:30 +03:00
|
|
|
|
2019-10-04 01:54:29 +03:00
|
|
|
zfs_rangelock_init(&zv->zv_rangelock, NULL, NULL);
|
2019-09-25 19:20:30 +03:00
|
|
|
rw_init(&zv->zv_suspend_lock, NULL, RW_DEFAULT, NULL);
|
|
|
|
|
|
2020-04-09 19:16:46 +03:00
|
|
|
zso->zvo_disk->major = zvol_major;
|
|
|
|
|
zso->zvo_disk->events = DISK_EVENT_MEDIA_CHANGE;
|
2019-09-25 19:20:30 +03:00
|
|
|
|
2022-04-19 20:38:04 +03:00
|
|
|
/*
|
|
|
|
|
* Setting ZFS_VOLMODE_DEV disables partitioning on ZVOL devices.
|
|
|
|
|
* This is accomplished by limiting the number of minors for the
|
|
|
|
|
* device to one and explicitly disabling partition scanning.
|
|
|
|
|
*/
|
2019-09-25 19:20:30 +03:00
|
|
|
if (volmode == ZFS_VOLMODE_DEV) {
|
2020-04-09 19:16:46 +03:00
|
|
|
zso->zvo_disk->minors = 1;
|
2024-08-24 15:06:22 +03:00
|
|
|
zso->zvo_disk->flags &= ~GENHD_FL_EXT_DEVT;
|
|
|
|
|
zso->zvo_disk->flags |= GENHD_FL_NO_PART;
|
2019-09-25 19:20:30 +03:00
|
|
|
}
|
2022-04-19 20:38:04 +03:00
|
|
|
|
2020-04-09 19:16:46 +03:00
|
|
|
zso->zvo_disk->first_minor = (dev & MINORMASK);
|
|
|
|
|
zso->zvo_disk->private_data = zv;
|
|
|
|
|
snprintf(zso->zvo_disk->disk_name, DISK_NAME_LEN, "%s%d",
|
2019-09-25 19:20:30 +03:00
|
|
|
ZVOL_DEV_NAME, (dev & MINORMASK));
|
|
|
|
|
|
2025-08-06 17:10:52 +03:00
|
|
|
*zvp = zv;
|
|
|
|
|
return (ret);
|
2019-09-25 19:20:30 +03:00
|
|
|
|
|
|
|
|
out_kmem:
|
2020-04-09 19:16:46 +03:00
|
|
|
kmem_free(zso, sizeof (struct zvol_state_os));
|
2019-09-25 19:20:30 +03:00
|
|
|
kmem_free(zv, sizeof (zvol_state_t));
|
2025-08-06 17:10:52 +03:00
|
|
|
return (ret);
|
2019-09-25 19:20:30 +03:00
|
|
|
}
|
|
|
|
|
|
zvol: remove the OS-side minor before freeing the zvol
When destroying a zvol, it is not "unpublished" from the system (that
is, /dev/zd* node removed) until zvol_os_free(). Under Linux, at the
time del_gendisk() and put_disk() are called, the device node may still
be have an active hold, from a userspace program or something inside the
kernel (a partition probe). As it is currently, this can lead to calls
to zvol_open() or zvol_release() while the zvol_state_t is partially or
fully freed. zvol_open() has some protection against this by checking
that private_data is NULL, but zvol_release does not.
This implements a better ordering for all of this by adding a new
OS-side method, zvol_os_remove_minor(), which is responsible for fully
decoupling the "private" (OS-side) objects from the zvol_state_t. For
Linux, that means calling put_disk(), nulling private_data, and freeing
zv_zso.
This takes the place of zvol_os_clear_private(), which was a nod in that
direction but did not do enough, and did not do it early enough.
Equivalent changes are made on the FreeBSD side to follow the API
change.
Sponsored-by: Klara, Inc.
Sponsored-by: Railway Corporation
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Fedor Uporov <fuporov.vstack@gmail.com>
Signed-off-by: Rob Norris <rob.norris@klarasystems.com>
Closes #17625
2025-08-05 06:43:17 +03:00
|
|
|
void
|
|
|
|
|
zvol_os_remove_minor(zvol_state_t *zv)
|
|
|
|
|
{
|
|
|
|
|
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
|
|
|
|
|
ASSERT0(zv->zv_open_count);
|
|
|
|
|
ASSERT0(atomic_read(&zv->zv_suspend_ref));
|
|
|
|
|
ASSERT(zv->zv_flags & ZVOL_REMOVING);
|
|
|
|
|
|
|
|
|
|
struct zvol_state_os *zso = zv->zv_zso;
|
|
|
|
|
zv->zv_zso = NULL;
|
|
|
|
|
|
|
|
|
|
/* Clearing private_data will make new callers return immediately. */
|
zvol: stop using zvol_state_lock to protect OS-side private data
zvol_state_lock is intended to protect access to the global name->zvol
lists (zvol_find_by_name()), but has also been used to control access to
OS-side private data, accessed through whatever kernel object is used to
represent the volume (gendisk, geom, etc).
This appears to have been necessary to some degree because the OS-side
object is what's used to get a handle on zvol_state_t, so zv_state_lock
and zv_suspend_lock can't be used to manage access, but also, with the
private object and the zvol_state_t being shutdown and destroyed at the
same time in zvol_os_free(), we must ensure that the private object
pointer only ever corresponds to a real zvol_state_t, not one in partial
destruction. Taking the global lock seems like a convenient way to
ensure this.
The problem with this is that zvol_state_lock does not actually protect
access to the zvol_state_t internals, so we need to take zv_state_lock
and/or zv_suspend_lock. If those are contended, this can then cause
OS-side operations (eg zvol_open()) to sleep to wait for them while hold
zvol_state_lock. This then blocks out all other OS-side operations which
want to get the private data, and any ZFS-side control operations that
would take the write half of the lock. It's even worse if ZFS-side
operations induce OS-side calls back into the zvol (eg creating a zvol
triggers a partition probe inside the kernel, and also a userspace
access from udev to set up device links). And it gets even works again
if anything decides to defer those ops to a task and wait on them, which
zvol_remove_minors_impl() will do under high load.
However, since the previous commit, we have a guarantee that the private
data pointer will always be NULL'd out in zvol_os_remove_minor()
_before_ the zvol_state_t is made invalid, but it won't happen until all
users are ejected. So, if we make access to the private object pointer
atomic, we remove the need to take a global lockout to access it, and so
we can remove all acquisitions of zvol_state_lock from the OS side.
While here, I've rewritten much of the locking theory comment at the top
of zvol.c. It wasn't wrong, but it hadn't been followed exactly, so I've
tried to describe the purpose of each lock in a little more detail, and
in particular describe where it should and shouldn't be used.
Sponsored-by: Klara, Inc.
Sponsored-by: Railway Corporation
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Fedor Uporov <fuporov.vstack@gmail.com>
Signed-off-by: Rob Norris <rob.norris@klarasystems.com>
Closes #17625
2025-08-05 07:19:24 +03:00
|
|
|
atomic_store_ptr(&zso->zvo_disk->private_data, NULL);
|
zvol: remove the OS-side minor before freeing the zvol
When destroying a zvol, it is not "unpublished" from the system (that
is, /dev/zd* node removed) until zvol_os_free(). Under Linux, at the
time del_gendisk() and put_disk() are called, the device node may still
be have an active hold, from a userspace program or something inside the
kernel (a partition probe). As it is currently, this can lead to calls
to zvol_open() or zvol_release() while the zvol_state_t is partially or
fully freed. zvol_open() has some protection against this by checking
that private_data is NULL, but zvol_release does not.
This implements a better ordering for all of this by adding a new
OS-side method, zvol_os_remove_minor(), which is responsible for fully
decoupling the "private" (OS-side) objects from the zvol_state_t. For
Linux, that means calling put_disk(), nulling private_data, and freeing
zv_zso.
This takes the place of zvol_os_clear_private(), which was a nod in that
direction but did not do enough, and did not do it early enough.
Equivalent changes are made on the FreeBSD side to follow the API
change.
Sponsored-by: Klara, Inc.
Sponsored-by: Railway Corporation
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Fedor Uporov <fuporov.vstack@gmail.com>
Signed-off-by: Rob Norris <rob.norris@klarasystems.com>
Closes #17625
2025-08-05 06:43:17 +03:00
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Drop the state lock before calling del_gendisk(). There may be
|
|
|
|
|
* callers waiting to acquire it, but del_gendisk() will block until
|
|
|
|
|
* they exit, which would deadlock.
|
|
|
|
|
*/
|
|
|
|
|
mutex_exit(&zv->zv_state_lock);
|
|
|
|
|
|
|
|
|
|
del_gendisk(zso->zvo_disk);
|
|
|
|
|
#if defined(HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS) && \
|
|
|
|
|
(defined(HAVE_BLK_ALLOC_DISK) || defined(HAVE_BLK_ALLOC_DISK_2ARG))
|
|
|
|
|
#if defined(HAVE_BLK_CLEANUP_DISK)
|
|
|
|
|
blk_cleanup_disk(zso->zvo_disk);
|
|
|
|
|
#else
|
|
|
|
|
put_disk(zso->zvo_disk);
|
|
|
|
|
#endif
|
|
|
|
|
#else
|
|
|
|
|
blk_cleanup_queue(zso->zvo_queue);
|
|
|
|
|
put_disk(zso->zvo_disk);
|
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
|
if (zso->use_blk_mq)
|
|
|
|
|
blk_mq_free_tag_set(&zso->tag_set);
|
|
|
|
|
|
|
|
|
|
ida_simple_remove(&zvol_ida, MINOR(zso->zvo_dev) >> ZVOL_MINOR_BITS);
|
|
|
|
|
|
|
|
|
|
kmem_free(zso, sizeof (struct zvol_state_os));
|
|
|
|
|
|
|
|
|
|
mutex_enter(&zv->zv_state_lock);
|
|
|
|
|
}
|
|
|
|
|
|
2022-02-07 21:24:38 +03:00
|
|
|
void
|
|
|
|
|
zvol_os_free(zvol_state_t *zv)
|
2019-09-25 19:20:30 +03:00
|
|
|
{
|
|
|
|
|
|
|
|
|
|
ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
|
|
|
|
|
ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
|
2020-10-21 20:23:08 +03:00
|
|
|
ASSERT0(zv->zv_open_count);
|
zvol: remove the OS-side minor before freeing the zvol
When destroying a zvol, it is not "unpublished" from the system (that
is, /dev/zd* node removed) until zvol_os_free(). Under Linux, at the
time del_gendisk() and put_disk() are called, the device node may still
be have an active hold, from a userspace program or something inside the
kernel (a partition probe). As it is currently, this can lead to calls
to zvol_open() or zvol_release() while the zvol_state_t is partially or
fully freed. zvol_open() has some protection against this by checking
that private_data is NULL, but zvol_release does not.
This implements a better ordering for all of this by adding a new
OS-side method, zvol_os_remove_minor(), which is responsible for fully
decoupling the "private" (OS-side) objects from the zvol_state_t. For
Linux, that means calling put_disk(), nulling private_data, and freeing
zv_zso.
This takes the place of zvol_os_clear_private(), which was a nod in that
direction but did not do enough, and did not do it early enough.
Equivalent changes are made on the FreeBSD side to follow the API
change.
Sponsored-by: Klara, Inc.
Sponsored-by: Railway Corporation
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Fedor Uporov <fuporov.vstack@gmail.com>
Signed-off-by: Rob Norris <rob.norris@klarasystems.com>
Closes #17625
2025-08-05 06:43:17 +03:00
|
|
|
ASSERT0P(zv->zv_zso);
|
|
|
|
|
|
|
|
|
|
ASSERT0P(zv->zv_objset);
|
|
|
|
|
ASSERT0P(zv->zv_zilog);
|
|
|
|
|
ASSERT0P(zv->zv_dn);
|
2019-09-25 19:20:30 +03:00
|
|
|
|
|
|
|
|
rw_destroy(&zv->zv_suspend_lock);
|
2019-10-04 01:54:29 +03:00
|
|
|
zfs_rangelock_fini(&zv->zv_rangelock);
|
2019-09-25 19:20:30 +03:00
|
|
|
|
zvol: ensure device minors are properly cleaned up
Currently, if a minor is in use when we try to remove it, we'll skip it
and never come back to it again. Since the zvol state is hung off the
minor in the kernel, this can get us into weird situations if something
tries to use it after the removal fails. It's even worse at pool export,
as there's now a vestigial zvol state with no pool under it. It's
weirder again if the pool is subsequently reimported, as the zvol code
(reasonably) assumes the zvol state has been properly setup, when it's
actually left over from the previous import of the pool.
This commit attempts to tackle that by setting a flag on the zvol if its
minor can't be removed, and then checking that flag when a request is
made and rejecting it, thus stopping new work coming in.
The flag also causes a condvar to be signaled when the last client
finishes. For the case where a single minor is being removed (eg
changing volmode), it will wait for this signal before proceeding.
Meanwhile, when removing all minors, a background task is created for
each minor that couldn't be removed on the spot, and those tasks then
wake and clean up.
Since any new tasks are queued on to the pool's spa_zvol_taskq,
spa_export_common() will continue to wait at export until all minors are
removed.
Reviewed-by: Tony Hutter <hutter2@llnl.gov>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Sponsored-by: Klara, Inc.
Sponsored-by: Wasabi Technology, Inc.
Signed-off-by: Rob Norris <rob.norris@klarasystems.com>
Closes #14872
Closes #16364
2024-07-18 06:24:05 +03:00
|
|
|
cv_destroy(&zv->zv_removing_cv);
|
2019-09-25 19:20:30 +03:00
|
|
|
mutex_destroy(&zv->zv_state_lock);
|
2020-06-06 03:17:02 +03:00
|
|
|
dataset_kstats_destroy(&zv->zv_kstat);
|
2019-09-25 19:20:30 +03:00
|
|
|
|
|
|
|
|
kmem_free(zv, sizeof (zvol_state_t));
|
|
|
|
|
}
|
|
|
|
|
|
2020-11-17 20:50:52 +03:00
|
|
|
void
|
|
|
|
|
zvol_wait_close(zvol_state_t *zv)
|
|
|
|
|
{
|
|
|
|
|
}
|
|
|
|
|
|
2024-06-28 19:52:03 +03:00
|
|
|
struct add_disk_work {
|
|
|
|
|
struct delayed_work work;
|
|
|
|
|
struct gendisk *disk;
|
|
|
|
|
int error;
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
__zvol_os_add_disk(struct gendisk *disk)
|
|
|
|
|
{
|
|
|
|
|
int error = 0;
|
|
|
|
|
#ifdef HAVE_ADD_DISK_RET
|
2025-08-09 03:04:01 +03:00
|
|
|
error = -add_disk(disk);
|
|
|
|
|
if (error)
|
|
|
|
|
error = SET_ERROR(error);
|
2024-06-28 19:52:03 +03:00
|
|
|
#else
|
|
|
|
|
add_disk(disk);
|
|
|
|
|
#endif
|
|
|
|
|
return (error);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#if defined(HAVE_BDEV_FILE_OPEN_BY_PATH)
|
|
|
|
|
static void
|
|
|
|
|
zvol_os_add_disk_work(struct work_struct *work)
|
|
|
|
|
{
|
|
|
|
|
struct add_disk_work *add_disk_work;
|
|
|
|
|
add_disk_work = container_of(work, struct add_disk_work, work.work);
|
|
|
|
|
add_disk_work->error = __zvol_os_add_disk(add_disk_work->disk);
|
|
|
|
|
}
|
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* SPECIAL CASE:
|
|
|
|
|
*
|
|
|
|
|
* This function basically calls add_disk() from a workqueue. You may be
|
|
|
|
|
* thinking: why not just call add_disk() directly?
|
|
|
|
|
*
|
|
|
|
|
* When you call add_disk(), the zvol appears to the world. When this happens,
|
|
|
|
|
* the kernel calls disk_scan_partitions() on the zvol, which behaves
|
|
|
|
|
* differently on the 6.9+ kernels:
|
|
|
|
|
*
|
|
|
|
|
* - 6.8 and older kernels -
|
|
|
|
|
* disk_scan_partitions()
|
|
|
|
|
* handle = bdev_open_by_dev(
|
|
|
|
|
* zvol_open()
|
|
|
|
|
* bdev_release(handle);
|
|
|
|
|
* zvol_release()
|
|
|
|
|
*
|
|
|
|
|
*
|
|
|
|
|
* - 6.9+ kernels -
|
|
|
|
|
* disk_scan_partitions()
|
|
|
|
|
* file = bdev_file_open_by_dev()
|
|
|
|
|
* zvol_open()
|
|
|
|
|
* fput(file)
|
|
|
|
|
* < wait for return to userspace >
|
|
|
|
|
* zvol_release()
|
|
|
|
|
*
|
|
|
|
|
* The difference is that the bdev_release() from the 6.8 kernel is synchronous
|
|
|
|
|
* while the fput() from the 6.9 kernel is async. Or more specifically it's
|
|
|
|
|
* async that has to wait until we return to userspace (since it adds the fput
|
|
|
|
|
* into the caller's work queue with the TWA_RESUME flag set). This is not the
|
|
|
|
|
* behavior we want, since we want do things like create+destroy a zvol within
|
|
|
|
|
* a single ZFS_IOC_CREATE ioctl, and the "create" part needs to release the
|
|
|
|
|
* reference to the zvol while we're in the IOCTL, which can't wait until we
|
|
|
|
|
* return to userspace.
|
|
|
|
|
*
|
|
|
|
|
* We can get around this since fput() has a special codepath for when it's
|
|
|
|
|
* running in a kernel thread or interrupt. In those cases, it just puts the
|
|
|
|
|
* fput into the system workqueue, which we can force to run with
|
|
|
|
|
* __flush_workqueue(). That is why we call add_disk() from a workqueue - so it
|
|
|
|
|
* run from a kernel thread and "tricks" the fput() codepaths.
|
|
|
|
|
*
|
|
|
|
|
* Note that __flush_workqueue() is slowly getting deprecated. This may be ok
|
|
|
|
|
* though, since our IOCTL will spin on EBUSY waiting for the zvol release (via
|
|
|
|
|
* fput) to happen, which it eventually, naturally, will from the system_wq
|
|
|
|
|
* without us explicitly calling __flush_workqueue().
|
|
|
|
|
*/
|
|
|
|
|
static int
|
|
|
|
|
zvol_os_add_disk(struct gendisk *disk)
|
|
|
|
|
{
|
|
|
|
|
#if defined(HAVE_BDEV_FILE_OPEN_BY_PATH) /* 6.9+ kernel */
|
|
|
|
|
struct add_disk_work add_disk_work;
|
|
|
|
|
|
|
|
|
|
INIT_DELAYED_WORK(&add_disk_work.work, zvol_os_add_disk_work);
|
|
|
|
|
add_disk_work.disk = disk;
|
|
|
|
|
add_disk_work.error = 0;
|
|
|
|
|
|
|
|
|
|
/* Use *_delayed_work functions since they're not GPL'd */
|
|
|
|
|
schedule_delayed_work(&add_disk_work.work, 0);
|
|
|
|
|
flush_delayed_work(&add_disk_work.work);
|
|
|
|
|
|
|
|
|
|
__flush_workqueue(system_wq);
|
|
|
|
|
return (add_disk_work.error);
|
|
|
|
|
#else /* <= 6.8 kernel */
|
|
|
|
|
return (__zvol_os_add_disk(disk));
|
|
|
|
|
#endif
|
|
|
|
|
}
|
|
|
|
|
|
2019-09-25 19:20:30 +03:00
|
|
|
/*
|
|
|
|
|
* Create a block device minor node and setup the linkage between it
|
|
|
|
|
* and the specified volume. Once this function returns the block
|
|
|
|
|
* device is live and ready for use.
|
|
|
|
|
*/
|
2022-02-07 21:24:38 +03:00
|
|
|
int
|
async zvol minor node creation interferes with receive
When we finish a zfs receive, dmu_recv_end_sync() calls
zvol_create_minors(async=TRUE). This kicks off some other threads that
create the minor device nodes (in /dev/zvol/poolname/...). These async
threads call zvol_prefetch_minors_impl() and zvol_create_minor(), which
both call dmu_objset_own(), which puts a "long hold" on the dataset.
Since the zvol minor node creation is asynchronous, this can happen
after the `ZFS_IOC_RECV[_NEW]` ioctl and `zfs receive` process have
completed.
After the first receive ioctl has completed, userland may attempt to do
another receive into the same dataset (e.g. the next incremental
stream). This second receive and the asynchronous minor node creation
can interfere with one another in several different ways, because they
both require exclusive access to the dataset:
1. When the second receive is finishing up, dmu_recv_end_check() does
dsl_dataset_handoff_check(), which can fail with EBUSY if the async
minor node creation already has a "long hold" on this dataset. This
causes the 2nd receive to fail.
2. The async udev rule can fail if zvol_id and/or systemd-udevd try to
open the device while the the second receive's async attempt at minor
node creation owns the dataset (via zvol_prefetch_minors_impl). This
causes the minor node (/dev/zd*) to exist, but the udev-generated
/dev/zvol/... to not exist.
3. The async minor node creation can silently fail with EBUSY if the
first receive's zvol_create_minor() trys to own the dataset while the
second receive's zvol_prefetch_minors_impl already owns the dataset.
To address these problems, this change synchronously creates the minor
node. To avoid the lock ordering problems that the asynchrony was
introduced to fix (see #3681), we create the minor nodes from open
context, with no locks held, rather than from syncing contex as was
originally done.
Implementation notes:
We generally do not need to traverse children or prefetch anything (e.g.
when running the recv, snapshot, create, or clone subcommands of zfs).
We only need recursion when importing/opening a pool and when loading
encryption keys. The existing recursive, asynchronous, prefetching code
is preserved for use in these cases.
Channel programs may need to create zvol minor nodes, when creating a
snapshot of a zvol with the snapdev property set. We figure out what
snapshots are created when running the LUA program in syncing context.
In this case we need to remember what snapshots were created, and then
try to create their minor nodes from open context, after the LUA code
has completed.
There are additional zvol use cases that asynchronously own the dataset,
which can cause similar problems. E.g. changing the volmode or snapdev
properties. These are less problematic because they are not recursive
and don't touch datasets that are not involved in the operation, there
is still potential for interference with subsequent operations. In the
future, these cases should be similarly converted to create the zvol
minor node synchronously from open context.
The async tasks of removing and renaming minors do not own the objset,
so they do not have this problem. However, it may make sense to also
convert these operations to happen synchronously from open context, in
the future.
Reviewed-by: Paul Dagnelie <pcd@delphix.com>
Reviewed-by: Prakash Surya <prakash.surya@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
External-issue: DLPX-65948
Closes #7863
Closes #9885
2020-02-03 20:33:14 +03:00
|
|
|
zvol_os_create_minor(const char *name)
|
2019-09-25 19:20:30 +03:00
|
|
|
{
|
2025-08-06 17:10:52 +03:00
|
|
|
zvol_state_t *zv = NULL;
|
2019-09-25 19:20:30 +03:00
|
|
|
objset_t *os;
|
|
|
|
|
dmu_object_info_t *doi;
|
|
|
|
|
uint64_t volsize;
|
|
|
|
|
uint64_t len;
|
|
|
|
|
unsigned minor = 0;
|
|
|
|
|
int error = 0;
|
|
|
|
|
int idx;
|
|
|
|
|
uint64_t hash = zvol_name_hash(name);
|
2023-10-25 00:53:27 +03:00
|
|
|
uint64_t volthreading;
|
2022-11-08 23:38:08 +03:00
|
|
|
bool replayed_zil = B_FALSE;
|
2019-09-25 19:20:30 +03:00
|
|
|
|
|
|
|
|
if (zvol_inhibit_dev)
|
|
|
|
|
return (0);
|
|
|
|
|
|
|
|
|
|
idx = ida_simple_get(&zvol_ida, 0, 0, kmem_flags_convert(KM_SLEEP));
|
|
|
|
|
if (idx < 0)
|
|
|
|
|
return (SET_ERROR(-idx));
|
|
|
|
|
minor = idx << ZVOL_MINOR_BITS;
|
2024-03-30 00:37:40 +03:00
|
|
|
if (MINOR(minor) != minor) {
|
|
|
|
|
/* too many partitions can cause an overflow */
|
|
|
|
|
zfs_dbgmsg("zvol: create minor overflow: %s, minor %u/%u",
|
|
|
|
|
name, minor, MINOR(minor));
|
|
|
|
|
ida_simple_remove(&zvol_ida, idx);
|
|
|
|
|
return (SET_ERROR(EINVAL));
|
|
|
|
|
}
|
2019-09-25 19:20:30 +03:00
|
|
|
|
|
|
|
|
zv = zvol_find_by_name_hash(name, hash, RW_NONE);
|
|
|
|
|
if (zv) {
|
|
|
|
|
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
|
|
|
|
|
mutex_exit(&zv->zv_state_lock);
|
|
|
|
|
ida_simple_remove(&zvol_ida, idx);
|
|
|
|
|
return (SET_ERROR(EEXIST));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);
|
|
|
|
|
|
|
|
|
|
error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, B_TRUE, FTAG, &os);
|
|
|
|
|
if (error)
|
|
|
|
|
goto out_doi;
|
|
|
|
|
|
|
|
|
|
error = dmu_object_info(os, ZVOL_OBJ, doi);
|
|
|
|
|
if (error)
|
|
|
|
|
goto out_dmu_objset_disown;
|
|
|
|
|
|
|
|
|
|
error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
|
|
|
|
|
if (error)
|
|
|
|
|
goto out_dmu_objset_disown;
|
|
|
|
|
|
2025-08-06 17:10:52 +03:00
|
|
|
error = zvol_alloc(MKDEV(zvol_major, minor), name,
|
|
|
|
|
volsize, doi->doi_data_block_size, &zv);
|
|
|
|
|
if (error || zv == NULL)
|
2019-09-25 19:20:30 +03:00
|
|
|
goto out_dmu_objset_disown;
|
2025-08-06 17:10:52 +03:00
|
|
|
|
2019-09-25 19:20:30 +03:00
|
|
|
zv->zv_hash = hash;
|
|
|
|
|
|
|
|
|
|
if (dmu_objset_is_snapshot(os))
|
|
|
|
|
zv->zv_flags |= ZVOL_RDONLY;
|
|
|
|
|
|
|
|
|
|
zv->zv_objset = os;
|
|
|
|
|
|
2023-10-25 00:53:27 +03:00
|
|
|
/* Default */
|
|
|
|
|
zv->zv_threading = B_TRUE;
|
|
|
|
|
if (dsl_prop_get_integer(name, "volthreading", &volthreading, NULL)
|
|
|
|
|
== 0)
|
|
|
|
|
zv->zv_threading = volthreading;
|
|
|
|
|
|
2019-09-25 19:20:30 +03:00
|
|
|
set_capacity(zv->zv_zso->zvo_disk, zv->zv_volsize >> 9);
|
|
|
|
|
|
2022-05-27 20:51:55 +03:00
|
|
|
#ifdef QUEUE_FLAG_DISCARD
|
2019-09-25 19:20:30 +03:00
|
|
|
blk_queue_flag_set(QUEUE_FLAG_DISCARD, zv->zv_zso->zvo_queue);
|
2022-05-27 20:51:55 +03:00
|
|
|
#endif
|
2019-09-25 19:20:30 +03:00
|
|
|
#ifdef QUEUE_FLAG_NONROT
|
|
|
|
|
blk_queue_flag_set(QUEUE_FLAG_NONROT, zv->zv_zso->zvo_queue);
|
|
|
|
|
#endif
|
|
|
|
|
#ifdef QUEUE_FLAG_ADD_RANDOM
|
|
|
|
|
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, zv->zv_zso->zvo_queue);
|
|
|
|
|
#endif
|
|
|
|
|
/* This flag was introduced in kernel version 4.12. */
|
|
|
|
|
#ifdef QUEUE_FLAG_SCSI_PASSTHROUGH
|
|
|
|
|
blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, zv->zv_zso->zvo_queue);
|
|
|
|
|
#endif
|
|
|
|
|
|
2025-08-04 13:00:19 +03:00
|
|
|
ASSERT0P(zv->zv_kstat.dk_kstats);
|
2022-07-21 03:14:06 +03:00
|
|
|
error = dataset_kstats_create(&zv->zv_kstat, zv->zv_objset);
|
|
|
|
|
if (error)
|
|
|
|
|
goto out_dmu_objset_disown;
|
2025-08-04 13:00:19 +03:00
|
|
|
ASSERT0P(zv->zv_zilog);
|
2022-07-21 03:14:06 +03:00
|
|
|
zv->zv_zilog = zil_open(os, zvol_get_data, &zv->zv_kstat.dk_zil_sums);
|
2019-09-25 19:20:30 +03:00
|
|
|
if (spa_writeable(dmu_objset_spa(os))) {
|
|
|
|
|
if (zil_replay_disable)
|
2022-11-08 23:38:08 +03:00
|
|
|
replayed_zil = zil_destroy(zv->zv_zilog, B_FALSE);
|
2019-09-25 19:20:30 +03:00
|
|
|
else
|
2022-11-08 23:38:08 +03:00
|
|
|
replayed_zil = zil_replay(os, zv, zvol_replay_vector);
|
2019-09-25 19:20:30 +03:00
|
|
|
}
|
2022-11-08 23:38:08 +03:00
|
|
|
if (replayed_zil)
|
|
|
|
|
zil_close(zv->zv_zilog);
|
zvol: call zil_replaying() during replay
zil_replaying(zil, tx) has the side-effect of informing the ZIL that an
entry has been replayed in the (still open) tx. The ZIL uses that
information to record the replay progress in the ZIL header when that
tx's txg syncs.
ZPL log entries are not idempotent and logically dependent and thus
calling zil_replaying() is necessary for correctness.
For ZVOLs the question of correctness is more nuanced: ZVOL logs only
TX_WRITE and TX_TRUNCATE, both of which are idempotent. Logical
dependencies between two records exist only if the write or discard
request had sync semantics or if the ranges affected by the records
overlap.
Thus, at a first glance, it would be correct to restart replay from
the beginning if we crash before replay completes. But this does not
address the following scenario:
Assume one log record per LWB.
The chain on disk is
HDR -> 1:W(1, "A") -> 2:W(1, "B") -> 3:W(2, "X") -> 4:W(3, "Z")
where N:W(O, C) represents log entry number N which is a TX_WRITE of C
to offset A.
We replay 1, 2 and 3 in one txg, sync that txg, then crash.
Bit flips corrupt 2, 3, and 4.
We come up again and restart replay from the beginning because
we did not call zil_replaying() during replay.
We replay 1 again, then interpret 2's invalid checksum as the end
of the ZIL chain and call replay done.
The replayed zvol content is "AX".
If we had called zil_replaying() the HDR would have pointed to 3
and our resumed replay would not have replayed anything because
3 was corrupted, resulting in zvol content "BX".
If 3 logically depends on 2 then the replay corrupted the ZVOL_OBJ's
contents.
This patch adds the zil_replaying() calls to the replay functions.
Since the callbacks in the replay function need the zilog_t* pointer
so that they can call zil_replaying() we open the ZIL while
replaying in zvol_create_minor(). We also verify that replay has
been done when on-demand-opening the ZIL on the first modifying
bio.
Reviewed-by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Christian Schwarz <me@cschwarz.com>
Closes #11667
2021-03-07 20:49:58 +03:00
|
|
|
zv->zv_zilog = NULL;
|
2019-09-25 19:20:30 +03:00
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* When udev detects the addition of the device it will immediately
|
|
|
|
|
* invoke blkid(8) to determine the type of content on the device.
|
|
|
|
|
* Prefetching the blocks commonly scanned by blkid(8) will speed
|
|
|
|
|
* up this process.
|
|
|
|
|
*/
|
2022-09-27 03:02:38 +03:00
|
|
|
len = MIN(zvol_prefetch_bytes, SPA_MAXBLOCKSIZE);
|
2019-09-25 19:20:30 +03:00
|
|
|
if (len > 0) {
|
|
|
|
|
dmu_prefetch(os, ZVOL_OBJ, 0, 0, len, ZIO_PRIORITY_SYNC_READ);
|
|
|
|
|
dmu_prefetch(os, ZVOL_OBJ, 0, volsize - len, len,
|
|
|
|
|
ZIO_PRIORITY_SYNC_READ);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
zv->zv_objset = NULL;
|
|
|
|
|
out_dmu_objset_disown:
|
|
|
|
|
dmu_objset_disown(os, B_TRUE, FTAG);
|
|
|
|
|
out_doi:
|
|
|
|
|
kmem_free(doi, sizeof (dmu_object_info_t));
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Keep in mind that once add_disk() is called, the zvol is
|
|
|
|
|
* announced to the world, and zvol_open()/zvol_release() can
|
|
|
|
|
* be called at any time. Incidentally, add_disk() itself calls
|
|
|
|
|
* zvol_open()->zvol_first_open() and zvol_release()->zvol_last_close()
|
|
|
|
|
* directly as well.
|
|
|
|
|
*/
|
2025-08-06 17:10:52 +03:00
|
|
|
if (error == 0 && zv) {
|
2019-09-25 19:20:30 +03:00
|
|
|
rw_enter(&zvol_state_lock, RW_WRITER);
|
|
|
|
|
zvol_insert(zv);
|
|
|
|
|
rw_exit(&zvol_state_lock);
|
2024-06-28 19:52:03 +03:00
|
|
|
error = zvol_os_add_disk(zv->zv_zso->zvo_disk);
|
2019-09-25 19:20:30 +03:00
|
|
|
} else {
|
|
|
|
|
ida_simple_remove(&zvol_ida, idx);
|
|
|
|
|
}
|
|
|
|
|
|
async zvol minor node creation interferes with receive
When we finish a zfs receive, dmu_recv_end_sync() calls
zvol_create_minors(async=TRUE). This kicks off some other threads that
create the minor device nodes (in /dev/zvol/poolname/...). These async
threads call zvol_prefetch_minors_impl() and zvol_create_minor(), which
both call dmu_objset_own(), which puts a "long hold" on the dataset.
Since the zvol minor node creation is asynchronous, this can happen
after the `ZFS_IOC_RECV[_NEW]` ioctl and `zfs receive` process have
completed.
After the first receive ioctl has completed, userland may attempt to do
another receive into the same dataset (e.g. the next incremental
stream). This second receive and the asynchronous minor node creation
can interfere with one another in several different ways, because they
both require exclusive access to the dataset:
1. When the second receive is finishing up, dmu_recv_end_check() does
dsl_dataset_handoff_check(), which can fail with EBUSY if the async
minor node creation already has a "long hold" on this dataset. This
causes the 2nd receive to fail.
2. The async udev rule can fail if zvol_id and/or systemd-udevd try to
open the device while the the second receive's async attempt at minor
node creation owns the dataset (via zvol_prefetch_minors_impl). This
causes the minor node (/dev/zd*) to exist, but the udev-generated
/dev/zvol/... to not exist.
3. The async minor node creation can silently fail with EBUSY if the
first receive's zvol_create_minor() trys to own the dataset while the
second receive's zvol_prefetch_minors_impl already owns the dataset.
To address these problems, this change synchronously creates the minor
node. To avoid the lock ordering problems that the asynchrony was
introduced to fix (see #3681), we create the minor nodes from open
context, with no locks held, rather than from syncing contex as was
originally done.
Implementation notes:
We generally do not need to traverse children or prefetch anything (e.g.
when running the recv, snapshot, create, or clone subcommands of zfs).
We only need recursion when importing/opening a pool and when loading
encryption keys. The existing recursive, asynchronous, prefetching code
is preserved for use in these cases.
Channel programs may need to create zvol minor nodes, when creating a
snapshot of a zvol with the snapdev property set. We figure out what
snapshots are created when running the LUA program in syncing context.
In this case we need to remember what snapshots were created, and then
try to create their minor nodes from open context, after the LUA code
has completed.
There are additional zvol use cases that asynchronously own the dataset,
which can cause similar problems. E.g. changing the volmode or snapdev
properties. These are less problematic because they are not recursive
and don't touch datasets that are not involved in the operation, there
is still potential for interference with subsequent operations. In the
future, these cases should be similarly converted to create the zvol
minor node synchronously from open context.
The async tasks of removing and renaming minors do not own the objset,
so they do not have this problem. However, it may make sense to also
convert these operations to happen synchronously from open context, in
the future.
Reviewed-by: Paul Dagnelie <pcd@delphix.com>
Reviewed-by: Prakash Surya <prakash.surya@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
External-issue: DLPX-65948
Closes #7863
Closes #9885
2020-02-03 20:33:14 +03:00
|
|
|
return (error);
|
2019-09-25 19:20:30 +03:00
|
|
|
}
|
|
|
|
|
|
2025-08-06 17:10:52 +03:00
|
|
|
int
|
2022-02-07 21:24:38 +03:00
|
|
|
zvol_os_rename_minor(zvol_state_t *zv, const char *newname)
|
2019-09-25 19:20:30 +03:00
|
|
|
{
|
|
|
|
|
int readonly = get_disk_ro(zv->zv_zso->zvo_disk);
|
|
|
|
|
|
|
|
|
|
ASSERT(RW_LOCK_HELD(&zvol_state_lock));
|
|
|
|
|
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
|
|
|
|
|
|
|
|
|
|
strlcpy(zv->zv_name, newname, sizeof (zv->zv_name));
|
|
|
|
|
|
|
|
|
|
/* move to new hashtable entry */
|
2024-04-26 00:24:52 +03:00
|
|
|
zv->zv_hash = zvol_name_hash(newname);
|
2019-09-25 19:20:30 +03:00
|
|
|
hlist_del(&zv->zv_hlink);
|
|
|
|
|
hlist_add_head(&zv->zv_hlink, ZVOL_HT_HEAD(zv->zv_hash));
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* The block device's read-only state is briefly changed causing
|
|
|
|
|
* a KOBJ_CHANGE uevent to be issued. This ensures udev detects
|
|
|
|
|
* the name change and fixes the symlinks. This does not change
|
|
|
|
|
* ZVOL_RDONLY in zv->zv_flags so the actual read-only state never
|
|
|
|
|
* changes. This would normally be done using kobject_uevent() but
|
|
|
|
|
* that is a GPL-only symbol which is why we need this workaround.
|
|
|
|
|
*/
|
|
|
|
|
set_disk_ro(zv->zv_zso->zvo_disk, !readonly);
|
|
|
|
|
set_disk_ro(zv->zv_zso->zvo_disk, readonly);
|
2023-11-07 22:34:50 +03:00
|
|
|
|
|
|
|
|
dataset_kstats_rename(&zv->zv_kstat, newname);
|
2025-08-06 17:10:52 +03:00
|
|
|
|
|
|
|
|
return (0);
|
2019-09-25 19:20:30 +03:00
|
|
|
}
|
|
|
|
|
|
2022-02-07 21:24:38 +03:00
|
|
|
void
|
|
|
|
|
zvol_os_set_disk_ro(zvol_state_t *zv, int flags)
|
2019-09-25 19:20:30 +03:00
|
|
|
{
|
|
|
|
|
|
|
|
|
|
set_disk_ro(zv->zv_zso->zvo_disk, flags);
|
|
|
|
|
}
|
|
|
|
|
|
2022-02-07 21:24:38 +03:00
|
|
|
void
|
|
|
|
|
zvol_os_set_capacity(zvol_state_t *zv, uint64_t capacity)
|
2019-09-25 19:20:30 +03:00
|
|
|
{
|
|
|
|
|
|
|
|
|
|
set_capacity(zv->zv_zso->zvo_disk, capacity);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
int
|
|
|
|
|
zvol_init(void)
|
|
|
|
|
{
|
|
|
|
|
int error;
|
2022-06-09 17:10:38 +03:00
|
|
|
|
2025-05-08 22:25:40 +03:00
|
|
|
error = zvol_init_impl();
|
|
|
|
|
if (error) {
|
|
|
|
|
printk(KERN_INFO "ZFS: zvol_init_impl() failed %d\n", error);
|
|
|
|
|
return (error);
|
2022-06-09 17:10:38 +03:00
|
|
|
}
|
2019-09-25 19:20:30 +03:00
|
|
|
|
2025-08-09 03:04:01 +03:00
|
|
|
error = -register_blkdev(zvol_major, ZVOL_DRIVER);
|
2019-09-25 19:20:30 +03:00
|
|
|
if (error) {
|
|
|
|
|
printk(KERN_INFO "ZFS: register_blkdev() failed %d\n", error);
|
2025-08-09 03:04:01 +03:00
|
|
|
return (SET_ERROR(error));
|
2019-09-25 19:20:30 +03:00
|
|
|
}
|
2022-06-09 17:10:38 +03:00
|
|
|
|
|
|
|
|
if (zvol_blk_mq_queue_depth == 0) {
|
|
|
|
|
zvol_actual_blk_mq_queue_depth = BLKDEV_DEFAULT_RQ;
|
|
|
|
|
} else {
|
|
|
|
|
zvol_actual_blk_mq_queue_depth =
|
|
|
|
|
MAX(zvol_blk_mq_queue_depth, BLKDEV_MIN_RQ);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (zvol_blk_mq_threads == 0) {
|
|
|
|
|
zvol_blk_mq_actual_threads = num_online_cpus();
|
|
|
|
|
} else {
|
|
|
|
|
zvol_blk_mq_actual_threads = MIN(MAX(zvol_blk_mq_threads, 1),
|
|
|
|
|
1024);
|
|
|
|
|
}
|
2024-08-24 14:33:35 +03:00
|
|
|
|
2019-09-25 19:20:30 +03:00
|
|
|
ida_init(&zvol_ida);
|
|
|
|
|
return (0);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
zvol_fini(void)
|
|
|
|
|
{
|
|
|
|
|
unregister_blkdev(zvol_major, ZVOL_DRIVER);
|
2024-04-04 04:21:25 +03:00
|
|
|
|
2025-05-08 22:25:40 +03:00
|
|
|
zvol_fini_impl();
|
2024-04-04 04:21:25 +03:00
|
|
|
|
2019-09-25 19:20:30 +03:00
|
|
|
ida_destroy(&zvol_ida);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
module_param(zvol_major, uint, 0444);
|
|
|
|
|
MODULE_PARM_DESC(zvol_major, "Major number for zvol device");
|
|
|
|
|
|
|
|
|
|
module_param(zvol_max_discard_blocks, ulong, 0444);
|
|
|
|
|
MODULE_PARM_DESC(zvol_max_discard_blocks, "Max number of blocks to discard");
|
|
|
|
|
|
2023-10-24 00:39:59 +03:00
|
|
|
module_param(zvol_blk_mq_queue_depth, uint, 0644);
|
|
|
|
|
MODULE_PARM_DESC(zvol_blk_mq_queue_depth, "Default blk-mq queue depth");
|
|
|
|
|
|
|
|
|
|
module_param(zvol_use_blk_mq, uint, 0644);
|
|
|
|
|
MODULE_PARM_DESC(zvol_use_blk_mq, "Use the blk-mq API for zvols");
|
|
|
|
|
|
|
|
|
|
module_param(zvol_blk_mq_blocks_per_thread, uint, 0644);
|
|
|
|
|
MODULE_PARM_DESC(zvol_blk_mq_blocks_per_thread,
|
2024-12-04 09:49:09 +03:00
|
|
|
"Process volblocksize blocks per thread");
|
2023-10-24 00:39:59 +03:00
|
|
|
|
2023-06-30 21:34:05 +03:00
|
|
|
#ifndef HAVE_BLKDEV_GET_ERESTARTSYS
|
|
|
|
|
module_param(zvol_open_timeout_ms, uint, 0644);
|
|
|
|
|
MODULE_PARM_DESC(zvol_open_timeout_ms, "Timeout for ZVOL open retries");
|
|
|
|
|
#endif
|