/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

Improve ZVOL sync write performance by using a taskq

== Summary ==

Prior to this change, sync writes to a zvol are processed serially.
This commit makes zvols process concurrently-outstanding sync writes in
parallel, similar to how reads and async writes are already handled.
The result is that the throughput of sync writes is tripled.

== Background ==

When a write comes in for a zvol (e.g. over iscsi), it is processed by
calling `zvol_request()` to initiate the operation. ZFS is expected to
later call `BIO_END_IO()` when the operation completes (possibly from a
different thread). There are a limited number of threads that are
available to call `zvol_request()` - one per iscsi client (unless using
MC/S). Therefore, to ensure good performance, the latency of
`zvol_request()` is important, so that many i/o operations to the zvol
can be processed concurrently. In other words, if the client has
multiple outstanding requests to the zvol, the zvol should have multiple
outstanding requests to the storage hardware (i.e. issue multiple
concurrent `zio_t`'s).

For reads and async writes (i.e. writes which can be acknowledged before
the data reaches stable storage), `zvol_request()` achieves low latency
by dispatching the bulk of the work (including waiting for i/o to disk)
to a taskq. The taskq callback (`zvol_read()` or `zvol_write()`) blocks
while waiting for the i/o to disk to complete. The `zvol_taskq` has 32
threads (by default), so we can have up to 32 concurrent i/os to disk in
service of requests to zvols.

However, for sync writes (i.e. writes which must be persisted to stable
storage before they can be acknowledged, by calling `zil_commit()`),
`zvol_request()` does not use `zvol_taskq`. Instead it blocks while
waiting for the ZIL write to disk to complete. This has the effect of
serializing sync writes to each zvol. In other words, each zvol will
only process one sync write at a time, waiting for it to be written to
the ZIL before accepting the next request.

The same issue applies to FLUSH operations, for which `zvol_request()`
calls `zil_commit()` directly.

== Description of change ==

This commit changes `zvol_request()` to use
`taskq_dispatch_ent(zvol_taskq)` for sync writes and FLUSH operations.
Therefore we can have up to 32 threads (the taskq threads)
simultaneously calling `zil_commit()`, for a theoretical performance
improvement of up to 32x.

To avoid the locking issue described in the comment (which this commit
removes), we acquire the rangelock from the taskq callback (e.g.
`zvol_write()`) rather than from `zvol_request()`. This applies to all
writes (sync and async), reads, and discard operations. This means that
multiple simultaneously-outstanding i/o's which access the same block
can complete in any order. This was previously thought to be incorrect,
but a review of the block device interface requirements revealed that
this is fine - the order is inherently not defined. The shorter hold
time of the rangelock should also provide a slight performance
improvement.

For an additional slight performance improvement, we use
`taskq_dispatch_ent()` instead of `taskq_dispatch()`, which avoids a
`kmem_alloc()` and eliminates a failure mode. This applies to all writes
(sync and async), reads, and discard operations.

== Performance results ==

We used a zvol as an iscsi target (server) for a Windows initiator
(client), with a single connection (the default - i.e. not MC/S).

We used `diskspd` to generate a workload with 4 threads, doing 1MB
writes to random offsets in the zvol. Without this change we get
231MB/s, and with the change we get 728MB/s, which is 3.15x the original
performance.

We ran a real-world workload, restoring an MSSQL database, and saw
throughput 2.5x the original.

We saw more modest performance wins (typically 1.5x-2x) when using MC/S
with 4 connections, and with different numbers of client threads (1, 8,
32).

Reviewed-by: Tony Nguyen <tony.nguyen@delphix.com>
Reviewed-by: Pavel Zakharov <pavel.zakharov@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
Closes #10163
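
The following is a minimal sketch (not the literal upstream
`zvol_request()` code) of the dispatch pattern the commit describes,
assuming the `zv_request_t`/`zv_request_task_t` helpers, `zvol_taskq`,
and `zvol_write_task()` defined later in this file; the wrapper name
`zvol_dispatch_write_sketch()` is hypothetical.

static void
zvol_dispatch_write_sketch(zvol_state_t *zv, struct bio *bio)
{
	zv_request_t zvr = {
		.zv = zv,
		.bio = bio,
	};

	/* Wrap the request; the embedded taskq_ent_t is set up in advance. */
	zv_request_task_t *task = kmem_alloc(sizeof (*task), KM_SLEEP);
	taskq_init_ent(&task->ent);
	task->zvr = zvr;

	/*
	 * Hand the work, including any zil_commit() needed for sync
	 * semantics, to a taskq thread and return immediately, so up to
	 * zvol_threads requests can be serviced concurrently across zvols.
	 */
	taskq_dispatch_ent(zvol_taskq, zvol_write_task, task, 0, &task->ent);
}

Because the `taskq_ent_t` is embedded in the preallocated task,
`taskq_dispatch_ent()` itself allocates nothing and cannot fail, which
is the advantage over `taskq_dispatch()` noted above.
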
/*
 * Copyright (c) 2012, 2020 by Delphix. All rights reserved.
 */

#include <sys/dataset_kstats.h>
#include <sys/dbuf.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/zil_impl.h>
#include <sys/dmu_tx.h>
#include <sys/zio.h>
#include <sys/zfs_rlock.h>
#include <sys/spa_impl.h>
#include <sys/zvol.h>
#include <sys/zvol_impl.h>

#include <linux/blkdev_compat.h>
#include <linux/task_io_accounting_ops.h>

unsigned int zvol_major = ZVOL_MAJOR;
unsigned int zvol_request_sync = 0;
unsigned int zvol_prefetch_bytes = (128 * 1024);
unsigned long zvol_max_discard_blocks = 16384;
unsigned int zvol_threads = 32;

struct zvol_state_os {
	struct gendisk *zvo_disk;	/* generic disk */
	struct request_queue *zvo_queue;	/* request queue */
	dev_t zvo_dev;			/* device id */
};

taskq_t *zvol_taskq;
static struct ida zvol_ida;

typedef struct zv_request_stack {
	zvol_state_t *zv;
	struct bio *bio;
} zv_request_t;

typedef struct zv_request_task {
	zv_request_t zvr;
	taskq_ent_t ent;
} zv_request_task_t;

static zv_request_task_t *
zv_request_task_create(zv_request_t zvr)
{
	zv_request_task_t *task;
	task = kmem_alloc(sizeof (zv_request_task_t), KM_SLEEP);
	taskq_init_ent(&task->ent);
	task->zvr = zvr;
	return (task);
}

static void
zv_request_task_free(zv_request_task_t *task)
{
	kmem_free(task, sizeof (*task));
}

/*
 * Given a path, return TRUE if path is a ZVOL.
 */
static boolean_t
zvol_is_zvol_impl(const char *path)
{
	dev_t dev = 0;

	if (vdev_lookup_bdev(path, &dev) != 0)
		return (B_FALSE);

	if (MAJOR(dev) == zvol_major)
		return (B_TRUE);

	return (B_FALSE);
}

static void
zvol_write(zv_request_t *zvr)
{
	struct bio *bio = zvr->bio;
	int error = 0;
	zfs_uio_t uio;

	zfs_uio_bvec_init(&uio, bio);

	zvol_state_t *zv = zvr->zv;
	ASSERT3P(zv, !=, NULL);
	ASSERT3U(zv->zv_open_count, >, 0);
	ASSERT3P(zv->zv_zilog, !=, NULL);

	/* A bio marked as FLUSH needs to flush before the write. */
	if (bio_is_flush(bio))
		zil_commit(zv->zv_zilog, ZVOL_OBJ);

	/* Some requests are just for flush and nothing else. */
	if (uio.uio_resid == 0) {
		rw_exit(&zv->zv_suspend_lock);
		BIO_END_IO(bio, 0);
		return;
	}

	struct request_queue *q = zv->zv_zso->zvo_queue;
	struct gendisk *disk = zv->zv_zso->zvo_disk;
	ssize_t start_resid = uio.uio_resid;
	unsigned long start_time;

	boolean_t acct = blk_queue_io_stat(q);
	if (acct)
		start_time = blk_generic_start_io_acct(q, disk, WRITE, bio);

	boolean_t sync =
	    bio_is_fua(bio) || zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;

	zfs_locked_range_t *lr = zfs_rangelock_enter(&zv->zv_rangelock,
	    uio.uio_loffset, uio.uio_resid, RL_WRITER);

	uint64_t volsize = zv->zv_volsize;
	while (uio.uio_resid > 0 && uio.uio_loffset < volsize) {
		uint64_t bytes = MIN(uio.uio_resid, DMU_MAX_ACCESS >> 1);
		uint64_t off = uio.uio_loffset;
		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);

		if (bytes > volsize - off)	/* don't write past the end */
			bytes = volsize - off;

		dmu_tx_hold_write_by_dnode(tx, zv->zv_dn, off, bytes);

		/* This will only fail for ENOSPC */
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			break;
		}
		error = dmu_write_uio_dnode(zv->zv_dn, &uio, bytes, tx);
		if (error == 0) {
			zvol_log_write(zv, tx, off, bytes, sync);
		}
		dmu_tx_commit(tx);

		if (error)
			break;
	}
	zfs_rangelock_exit(lr);

	int64_t nwritten = start_resid - uio.uio_resid;
	dataset_kstats_update_write_kstats(&zv->zv_kstat, nwritten);
	task_io_account_write(nwritten);

	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);

	rw_exit(&zv->zv_suspend_lock);

	if (acct)
		blk_generic_end_io_acct(q, disk, WRITE, bio, start_time);

	BIO_END_IO(bio, -error);
}

static void
zvol_write_task(void *arg)
{
	zv_request_task_t *task = arg;
	zvol_write(&task->zvr);
	zv_request_task_free(task);
}

static void
zvol_discard(zv_request_t *zvr)
{
	struct bio *bio = zvr->bio;
	zvol_state_t *zv = zvr->zv;
	uint64_t start = BIO_BI_SECTOR(bio) << 9;
	uint64_t size = BIO_BI_SIZE(bio);
	uint64_t end = start + size;
	boolean_t sync;
	int error = 0;
	dmu_tx_t *tx;

	ASSERT3P(zv, !=, NULL);
	ASSERT3U(zv->zv_open_count, >, 0);
	ASSERT3P(zv->zv_zilog, !=, NULL);

	struct request_queue *q = zv->zv_zso->zvo_queue;
	struct gendisk *disk = zv->zv_zso->zvo_disk;
	unsigned long start_time;

	boolean_t acct = blk_queue_io_stat(q);
	if (acct)
		start_time = blk_generic_start_io_acct(q, disk, WRITE, bio);

	sync = bio_is_fua(bio) || zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;

	if (end > zv->zv_volsize) {
		error = SET_ERROR(EIO);
		goto unlock;
	}

	/*
	 * Align the request to volume block boundaries when a secure erase is
	 * not required. This will prevent dnode_free_range() from zeroing out
	 * the unaligned parts, which is slow (read-modify-write) and useless
	 * since we are not freeing any space by doing so.
	 */
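	/*
	 * For illustration (hypothetical numbers, assuming an 8K
	 * volblocksize): a discard of bytes [4K, 20K) is trimmed to
	 * [8K, 16K), i.e. start = 8K, end = 16K, size = 8K, so only
	 * whole blocks are freed.
	 */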
	if (!bio_is_secure_erase(bio)) {
		start = P2ROUNDUP(start, zv->zv_volblocksize);
		end = P2ALIGN(end, zv->zv_volblocksize);
		size = end - start;
	}

	if (start >= end)
		goto unlock;

	zfs_locked_range_t *lr = zfs_rangelock_enter(&zv->zv_rangelock,
	    start, size, RL_WRITER);

	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_mark_netfree(tx);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error != 0) {
		dmu_tx_abort(tx);
	} else {
		zvol_log_truncate(zv, tx, start, size, B_TRUE);
		dmu_tx_commit(tx);
		error = dmu_free_long_range(zv->zv_objset,
		    ZVOL_OBJ, start, size);
	}
	zfs_rangelock_exit(lr);

	if (error == 0 && sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);

unlock:
	rw_exit(&zv->zv_suspend_lock);

	if (acct)
		blk_generic_end_io_acct(q, disk, WRITE, bio, start_time);

	BIO_END_IO(bio, -error);
}

static void
zvol_discard_task(void *arg)
{
	zv_request_task_t *task = arg;
	zvol_discard(&task->zvr);
	zv_request_task_free(task);
}

static void
zvol_read(zv_request_t *zvr)
{
	struct bio *bio = zvr->bio;
	int error = 0;
	zfs_uio_t uio;

	zfs_uio_bvec_init(&uio, bio);

	zvol_state_t *zv = zvr->zv;
	ASSERT3P(zv, !=, NULL);
	ASSERT3U(zv->zv_open_count, >, 0);

	struct request_queue *q = zv->zv_zso->zvo_queue;
	struct gendisk *disk = zv->zv_zso->zvo_disk;
	ssize_t start_resid = uio.uio_resid;
	unsigned long start_time;

	boolean_t acct = blk_queue_io_stat(q);
	if (acct)
		start_time = blk_generic_start_io_acct(q, disk, READ, bio);

	zfs_locked_range_t *lr = zfs_rangelock_enter(&zv->zv_rangelock,
	    uio.uio_loffset, uio.uio_resid, RL_READER);

	uint64_t volsize = zv->zv_volsize;
	while (uio.uio_resid > 0 && uio.uio_loffset < volsize) {
		uint64_t bytes = MIN(uio.uio_resid, DMU_MAX_ACCESS >> 1);

		/* don't read past the end */
		if (bytes > volsize - uio.uio_loffset)
			bytes = volsize - uio.uio_loffset;

		error = dmu_read_uio_dnode(zv->zv_dn, &uio, bytes);
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}
	}
	zfs_rangelock_exit(lr);

	int64_t nread = start_resid - uio.uio_resid;
	dataset_kstats_update_read_kstats(&zv->zv_kstat, nread);
	task_io_account_read(nread);

	rw_exit(&zv->zv_suspend_lock);

	if (acct)
		blk_generic_end_io_acct(q, disk, READ, bio, start_time);

	BIO_END_IO(bio, -error);
}

static void
zvol_read_task(void *arg)
{
	zv_request_task_t *task = arg;
	zvol_read(&task->zvr);
	zv_request_task_free(task);
}

#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
static blk_qc_t
zvol_submit_bio(struct bio *bio)
#else
static MAKE_REQUEST_FN_RET
zvol_request(struct request_queue *q, struct bio *bio)
#endif
{
#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
#if defined(HAVE_BIO_BDEV_DISK)
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
#else
	struct request_queue *q = bio->bi_disk->queue;
#endif
#endif
	zvol_state_t *zv = q->queuedata;
	fstrans_cookie_t cookie = spl_fstrans_mark();
	uint64_t offset = BIO_BI_SECTOR(bio) << 9;
	uint64_t size = BIO_BI_SIZE(bio);
	int rw = bio_data_dir(bio);

	if (bio_has_data(bio) && offset + size > zv->zv_volsize) {
		printk(KERN_INFO
		    "%s: bad access: offset=%llu, size=%lu\n",
		    zv->zv_zso->zvo_disk->disk_name,
		    (long long unsigned)offset,
		    (long unsigned)size);

		BIO_END_IO(bio, -SET_ERROR(EIO));
		goto out;
	}

	zv_request_t zvr = {
		.zv = zv,
		.bio = bio,
	};
	zv_request_task_t *task;

	if (rw == WRITE) {
		if (unlikely(zv->zv_flags & ZVOL_RDONLY)) {
			BIO_END_IO(bio, -SET_ERROR(EROFS));
			goto out;
		}

		/*
		 * Prevents the zvol from being suspended, or the ZIL being
		 * concurrently opened. Will be released after the i/o
		 * completes.
		 */
		rw_enter(&zv->zv_suspend_lock, RW_READER);

		/*
		 * Open a ZIL if this is the first time we have written to this
		 * zvol. We protect zv->zv_zilog with zv_suspend_lock rather
		 * than zv_state_lock so that we don't need to acquire an
		 * additional lock in this path.
		 */
		if (zv->zv_zilog == NULL) {
			rw_exit(&zv->zv_suspend_lock);
			rw_enter(&zv->zv_suspend_lock, RW_WRITER);
			if (zv->zv_zilog == NULL) {
				zv->zv_zilog = zil_open(zv->zv_objset,
				    zvol_get_data);
				zv->zv_flags |= ZVOL_WRITTEN_TO;
zvol: call zil_replaying() during replay

zil_replaying(zil, tx) has the side effect of informing the ZIL that an
entry has been replayed in the (still open) tx. The ZIL uses that
information to record the replay progress in the ZIL header when that
tx's txg syncs.

ZPL log entries are not idempotent and are logically dependent on one
another, so calling zil_replaying() is necessary for correctness.

For ZVOLs the question of correctness is more nuanced: ZVOLs log only
TX_WRITE and TX_TRUNCATE, both of which are idempotent. Logical
dependencies between two records exist only if the write or discard
request had sync semantics or if the ranges affected by the records
overlap.

Thus, at first glance, it would seem correct to restart replay from the
beginning if we crash before replay completes. But that does not
address the following scenario:

Assume one log record per LWB. The chain on disk is

  HDR -> 1:W(1, "A") -> 2:W(1, "B") -> 3:W(2, "X") -> 4:W(3, "Z")

where N:W(O, C) denotes log entry number N, which is a TX_WRITE of
content C to offset O.

We replay 1, 2, and 3 in one txg, sync that txg, then crash. Bit flips
corrupt 2, 3, and 4. We come up again and restart replay from the
beginning because we did not call zil_replaying() during replay. We
replay 1 again, then interpret 2's invalid checksum as the end of the
ZIL chain and call replay done. The replayed zvol content is "AX".

If we had called zil_replaying(), the HDR would have pointed to 3, and
our resumed replay would not have replayed anything because 3 was
corrupted, resulting in zvol content "BX".

If 3 logically depends on 2, then the replay corrupted the ZVOL_OBJ's
contents.

This patch adds the zil_replaying() calls to the replay functions.
Since the callbacks in the replay functions need the zilog_t* pointer
so that they can call zil_replaying(), we open the ZIL while replaying
in zvol_create_minor(). We also verify that replay has been done when
on-demand-opening the ZIL on the first modifying bio.

Reviewed-by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Christian Schwarz <me@cschwarz.com>
Closes #11667
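To make the above concrete, here is a hedged sketch (shown out of line here, not part of the surrounding function) of what a TX_WRITE replay callback that records its progress via zil_replaying() can look like. The name zvol_replay_write_sketch and the simplified error handling are assumptions for illustration only; zil_replaying(), dmu_tx_create(), dmu_tx_hold_write(), dmu_tx_assign(), dmu_write(), and dmu_tx_commit() are existing interfaces.

static int
zvol_replay_write_sketch(void *arg1, void *arg2, boolean_t byteswap)
{
	zvol_state_t *zv = arg1;
	lr_write_t *lr = arg2;
	objset_t *os = zv->zv_objset;
	uint64_t offset = lr->lr_offset;
	uint64_t length = lr->lr_length;
	char *data = (char *)(lr + 1);	/* payload follows the record */
	dmu_tx_t *tx;
	int error;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error != 0) {
		dmu_tx_abort(tx);
		return (error);
	}
	dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
	/*
	 * Record in this still-open tx that the entry has been
	 * replayed, so the replay progress in the ZIL header advances
	 * when the txg syncs.
	 */
	(void) zil_replaying(zv->zv_zilog, tx);
	dmu_tx_commit(tx);

	return (0);
}

The important detail is that zil_replaying() is called with the still-open tx, so the recorded replay progress lands in the same txg as the replayed write itself.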
				/* replay / destroy done in zvol_create_minor */
				VERIFY0((zv->zv_zilog->zl_header->zh_flags &
				    ZIL_REPLAY_NEEDED));
			}
			rw_downgrade(&zv->zv_suspend_lock);
		}

		/*
		 * We don't want this thread to be blocked waiting for i/o to
		 * complete, so we instead wait from a taskq callback. The
		 * i/o may be a ZIL write (via zil_commit()), or a read of an
		 * indirect block, or a read of a data block (if this is a
		 * partial-block write). We will indicate that the i/o is
		 * complete by calling BIO_END_IO() from the taskq callback.
		 *
		 * This design allows the calling thread to continue and
		 * initiate more concurrent operations by calling
		 * zvol_request() again. There are typically only a small
		 * number of threads available to call zvol_request() (e.g.
		 * one per iSCSI target), so keeping the latency of
		 * zvol_request() low is important for performance.
		 *
		 * The zvol_request_sync module parameter allows this
		 * behavior to be altered, for performance evaluation
		 * purposes. If the callback blocks, setting
		 * zvol_request_sync=1 will result in much worse performance.
		 *
		 * We can have up to zvol_threads concurrent i/o's being
		 * processed for all zvols on the system. This is typically
		 * a vast improvement over the zvol_request_sync=1 behavior
		 * of one i/o at a time per zvol. However, an even better
		 * design would be for zvol_request() to initiate the zio
		 * directly, and then be notified by the zio_done callback,
		 * which would call BIO_END_IO(). Unfortunately, the DMU/ZIL
		 * interfaces lack this functionality (they block waiting for
		 * the i/o to complete).
		 */
		if (bio_is_discard(bio) || bio_is_secure_erase(bio)) {
			if (zvol_request_sync) {
				zvol_discard(&zvr);
			} else {
				task = zv_request_task_create(zvr);
				taskq_dispatch_ent(zvol_taskq,
				    zvol_discard_task, task, 0, &task->ent);
			}
		} else {
			if (zvol_request_sync) {
				zvol_write(&zvr);
			} else {
				task = zv_request_task_create(zvr);
				taskq_dispatch_ent(zvol_taskq,
				    zvol_write_task, task, 0, &task->ent);
			}
		}
	} else {
		/*
		 * The SCST driver, and possibly others, may issue READ I/Os
		 * with a length of zero bytes. These empty I/Os contain no
		 * data and require no additional handling.
		 */
		if (size == 0) {
			BIO_END_IO(bio, 0);
			goto out;
		}

		rw_enter(&zv->zv_suspend_lock, RW_READER);

		/* See comment in WRITE case above. */
		if (zvol_request_sync) {
			zvol_read(&zvr);
		} else {
			task = zv_request_task_create(zvr);
			taskq_dispatch_ent(zvol_taskq,
			    zvol_read_task, task, 0, &task->ent);
}
|
2019-09-25 19:20:30 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
out:
|
|
|
|
spl_fstrans_unmark(cookie);
|
2020-08-09 19:12:25 +03:00
|
|
|
#if defined(HAVE_MAKE_REQUEST_FN_RET_QC) || \
|
|
|
|
defined(HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS)
|
2019-09-25 19:20:30 +03:00
|
|
|
return (BLK_QC_T_NONE);
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
zvol_open(struct block_device *bdev, fmode_t flag)
|
|
|
|
{
|
|
|
|
zvol_state_t *zv;
|
|
|
|
int error = 0;
|
|
|
|
boolean_t drop_suspend = B_TRUE;
|
|
|
|
|
|
|
|
rw_enter(&zvol_state_lock, RW_READER);
|
|
|
|
/*
|
|
|
|
* Obtain a copy of private_data under the zvol_state_lock to make
|
|
|
|
* sure that either the result of zvol free code path setting
|
|
|
|
* bdev->bd_disk->private_data to NULL is observed, or zvol_free()
|
|
|
|
* is not called on this zv because of the positive zv_open_count.
|
|
|
|
*/
|
|
|
|
zv = bdev->bd_disk->private_data;
|
|
|
|
if (zv == NULL) {
|
|
|
|
rw_exit(&zvol_state_lock);
|
|
|
|
return (SET_ERROR(-ENXIO));
|
|
|
|
}
|
|
|
|
|
|
|
|
mutex_enter(&zv->zv_state_lock);
|
|
|
|
/*
|
|
|
|
* make sure zvol is not suspended during first open
|
|
|
|
* (hold zv_suspend_lock) and respect proper lock acquisition
|
|
|
|
* ordering - zv_suspend_lock before zv_state_lock
|
|
|
|
*/
|
|
|
|
if (zv->zv_open_count == 0) {
|
|
|
|
if (!rw_tryenter(&zv->zv_suspend_lock, RW_READER)) {
|
|
|
|
mutex_exit(&zv->zv_state_lock);
|
|
|
|
rw_enter(&zv->zv_suspend_lock, RW_READER);
|
|
|
|
mutex_enter(&zv->zv_state_lock);
|
|
|
|
/* check to see if zv_suspend_lock is needed */
|
|
|
|
if (zv->zv_open_count != 0) {
|
|
|
|
rw_exit(&zv->zv_suspend_lock);
|
|
|
|
drop_suspend = B_FALSE;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
drop_suspend = B_FALSE;
|
|
|
|
}
|
|
|
|
rw_exit(&zvol_state_lock);
|
|
|
|
|
|
|
|
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
|
|
|
|
|
|
|
|
if (zv->zv_open_count == 0) {
|
2020-10-21 20:23:08 +03:00
|
|
|
ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
|
2019-09-25 19:20:30 +03:00
|
|
|
error = -zvol_first_open(zv, !(flag & FMODE_WRITE));
|
|
|
|
if (error)
|
|
|
|
goto out_mutex;
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((flag & FMODE_WRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
|
|
|
|
error = -EROFS;
|
|
|
|
goto out_open_count;
|
|
|
|
}
|
|
|
|
|
|
|
|
zv->zv_open_count++;
|
|
|
|
|
|
|
|
mutex_exit(&zv->zv_state_lock);
|
|
|
|
if (drop_suspend)
|
|
|
|
rw_exit(&zv->zv_suspend_lock);
|
|
|
|
|
2020-10-18 19:54:21 +03:00
|
|
|
zfs_check_media_change(bdev);
|
2019-09-25 19:20:30 +03:00
|
|
|
|
|
|
|
return (0);
|
|
|
|
|
|
|
|
out_open_count:
|
|
|
|
if (zv->zv_open_count == 0)
|
|
|
|
zvol_last_close(zv);
|
|
|
|
|
|
|
|
out_mutex:
|
|
|
|
mutex_exit(&zv->zv_state_lock);
|
|
|
|
if (drop_suspend)
|
|
|
|
rw_exit(&zv->zv_suspend_lock);
|
|
|
|
if (error == -EINTR) {
|
|
|
|
error = -ERESTARTSYS;
|
|
|
|
schedule();
|
|
|
|
}
|
|
|
|
return (SET_ERROR(error));
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
zvol_release(struct gendisk *disk, fmode_t mode)
|
|
|
|
{
|
|
|
|
zvol_state_t *zv;
|
|
|
|
boolean_t drop_suspend = B_TRUE;
|
|
|
|
|
|
|
|
rw_enter(&zvol_state_lock, RW_READER);
|
|
|
|
zv = disk->private_data;
|
|
|
|
|
|
|
|
mutex_enter(&zv->zv_state_lock);
|
2020-10-21 20:23:08 +03:00
|
|
|
ASSERT3U(zv->zv_open_count, >, 0);
|
2019-09-25 19:20:30 +03:00
|
|
|
/*
|
|
|
|
* make sure zvol is not suspended during last close
|
|
|
|
* (hold zv_suspend_lock) and respect proper lock acquisition
|
|
|
|
* ordering - zv_suspend_lock before zv_state_lock
|
|
|
|
*/
|
|
|
|
if (zv->zv_open_count == 1) {
|
|
|
|
if (!rw_tryenter(&zv->zv_suspend_lock, RW_READER)) {
|
|
|
|
mutex_exit(&zv->zv_state_lock);
|
|
|
|
rw_enter(&zv->zv_suspend_lock, RW_READER);
|
|
|
|
mutex_enter(&zv->zv_state_lock);
|
|
|
|
/* check to see if zv_suspend_lock is needed */
|
|
|
|
if (zv->zv_open_count != 1) {
|
|
|
|
rw_exit(&zv->zv_suspend_lock);
|
|
|
|
drop_suspend = B_FALSE;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
drop_suspend = B_FALSE;
|
|
|
|
}
|
|
|
|
rw_exit(&zvol_state_lock);
|
|
|
|
|
|
|
|
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
|
|
|
|
|
|
|
|
zv->zv_open_count--;
|
2020-10-21 20:23:08 +03:00
|
|
|
if (zv->zv_open_count == 0) {
|
|
|
|
ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
|
2019-09-25 19:20:30 +03:00
|
|
|
zvol_last_close(zv);
|
2020-10-21 20:23:08 +03:00
|
|
|
}
|
2019-09-25 19:20:30 +03:00
|
|
|
|
|
|
|
mutex_exit(&zv->zv_state_lock);
|
|
|
|
|
|
|
|
if (drop_suspend)
|
|
|
|
rw_exit(&zv->zv_suspend_lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
zvol_ioctl(struct block_device *bdev, fmode_t mode,
|
|
|
|
unsigned int cmd, unsigned long arg)
|
|
|
|
{
|
|
|
|
zvol_state_t *zv = bdev->bd_disk->private_data;
|
|
|
|
int error = 0;
|
|
|
|
|
|
|
|
ASSERT3U(zv->zv_open_count, >, 0);
|
|
|
|
|
|
|
|
switch (cmd) {
|
|
|
|
case BLKFLSBUF:
|
|
|
|
fsync_bdev(bdev);
|
|
|
|
invalidate_bdev(bdev);
|
|
|
|
rw_enter(&zv->zv_suspend_lock, RW_READER);
|
|
|
|
|
|
|
|
if (!(zv->zv_flags & ZVOL_RDONLY))
|
|
|
|
txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
|
|
|
|
|
|
|
|
rw_exit(&zv->zv_suspend_lock);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case BLKZNAME:
|
|
|
|
mutex_enter(&zv->zv_state_lock);
|
|
|
|
error = copy_to_user((void *)arg, zv->zv_name, MAXNAMELEN);
|
|
|
|
mutex_exit(&zv->zv_state_lock);
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
error = -ENOTTY;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return (SET_ERROR(error));
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_COMPAT
|
|
|
|
static int
|
|
|
|
zvol_compat_ioctl(struct block_device *bdev, fmode_t mode,
|
|
|
|
unsigned cmd, unsigned long arg)
|
|
|
|
{
|
|
|
|
return (zvol_ioctl(bdev, mode, cmd, arg));
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
#define zvol_compat_ioctl NULL
|
|
|
|
#endif
|
|
|
|
|
|
|
|
static unsigned int
|
|
|
|
zvol_check_events(struct gendisk *disk, unsigned int clearing)
|
|
|
|
{
|
|
|
|
unsigned int mask = 0;
|
|
|
|
|
|
|
|
rw_enter(&zvol_state_lock, RW_READER);
|
|
|
|
|
|
|
|
zvol_state_t *zv = disk->private_data;
|
|
|
|
if (zv != NULL) {
|
|
|
|
mutex_enter(&zv->zv_state_lock);
|
|
|
|
mask = zv->zv_changed ? DISK_EVENT_MEDIA_CHANGE : 0;
|
|
|
|
zv->zv_changed = 0;
|
|
|
|
mutex_exit(&zv->zv_state_lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
rw_exit(&zvol_state_lock);
|
|
|
|
|
|
|
|
return (mask);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
zvol_revalidate_disk(struct gendisk *disk)
|
|
|
|
{
|
|
|
|
rw_enter(&zvol_state_lock, RW_READER);
|
|
|
|
|
|
|
|
zvol_state_t *zv = disk->private_data;
|
|
|
|
if (zv != NULL) {
|
|
|
|
mutex_enter(&zv->zv_state_lock);
|
|
|
|
set_capacity(zv->zv_zso->zvo_disk,
|
|
|
|
zv->zv_volsize >> SECTOR_BITS);
|
|
|
|
mutex_exit(&zv->zv_state_lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
rw_exit(&zvol_state_lock);
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
2020-06-15 21:30:37 +03:00
|
|
|
static int
|
2019-09-25 19:20:30 +03:00
|
|
|
zvol_update_volsize(zvol_state_t *zv, uint64_t volsize)
|
|
|
|
{
|
2020-12-18 20:36:19 +03:00
|
|
|
struct gendisk *disk = zv->zv_zso->zvo_disk;
|
2019-09-25 19:20:30 +03:00
|
|
|
|
2020-12-23 00:53:25 +03:00
|
|
|
#if defined(HAVE_REVALIDATE_DISK_SIZE)
|
2020-12-18 20:36:19 +03:00
|
|
|
revalidate_disk_size(disk, zvol_revalidate_disk(disk) == 0);
|
2020-12-23 00:53:25 +03:00
|
|
|
#elif defined(HAVE_REVALIDATE_DISK)
|
2020-12-18 20:36:19 +03:00
|
|
|
revalidate_disk(disk);
|
2020-12-23 00:53:25 +03:00
|
|
|
#else
|
|
|
|
zvol_revalidate_disk(disk);
|
2020-10-18 20:06:18 +03:00
|
|
|
#endif
|
2019-09-25 19:20:30 +03:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
zvol_clear_private(zvol_state_t *zv)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Cleared while holding zvol_state_lock as a writer
|
|
|
|
* which will prevent zvol_open() from opening it.
|
|
|
|
*/
|
|
|
|
zv->zv_zso->zvo_disk->private_data = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Provide a simple virtual geometry for legacy compatibility. For devices
|
|
|
|
* smaller than 1 MiB a small head and sector count is used to allow very
|
|
|
|
* tiny devices. For devices over 1 MiB a standard head and sector count
|
|
|
|
* is used to keep the cylinders count reasonable.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
zvol_getgeo(struct block_device *bdev, struct hd_geometry *geo)
|
|
|
|
{
|
|
|
|
zvol_state_t *zv = bdev->bd_disk->private_data;
|
|
|
|
sector_t sectors;
|
|
|
|
|
|
|
|
ASSERT3U(zv->zv_open_count, >, 0);
|
|
|
|
|
|
|
|
sectors = get_capacity(zv->zv_zso->zvo_disk);
|
|
|
|
|
|
|
|
if (sectors > 2048) {
|
|
|
|
geo->heads = 16;
|
|
|
|
geo->sectors = 63;
|
|
|
|
} else {
|
|
|
|
geo->heads = 2;
|
|
|
|
geo->sectors = 4;
|
|
|
|
}
|
|
|
|
|
|
|
|
geo->start = 0;
|
|
|
|
geo->cylinders = sectors / (geo->heads * geo->sectors);
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct block_device_operations zvol_ops = {
|
|
|
|
.open = zvol_open,
|
|
|
|
.release = zvol_release,
|
|
|
|
.ioctl = zvol_ioctl,
|
|
|
|
.compat_ioctl = zvol_compat_ioctl,
|
|
|
|
.check_events = zvol_check_events,
|
2021-05-12 05:53:02 +03:00
|
|
|
#ifdef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK
|
2019-09-25 19:20:30 +03:00
|
|
|
.revalidate_disk = zvol_revalidate_disk,
|
2021-05-12 05:53:02 +03:00
|
|
|
#endif
|
2019-09-25 19:20:30 +03:00
|
|
|
.getgeo = zvol_getgeo,
|
|
|
|
.owner = THIS_MODULE,
|
2020-08-09 19:12:25 +03:00
|
|
|
#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
|
2021-07-24 01:28:03 +03:00
|
|
|
.submit_bio = zvol_submit_bio,
|
2020-08-09 19:12:25 +03:00
|
|
|
#endif
|
2019-09-25 19:20:30 +03:00
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Allocate memory for a new zvol_state_t and setup the required
|
|
|
|
* request queue and generic disk structures for the block device.
|
|
|
|
*/
|
|
|
|
static zvol_state_t *
|
|
|
|
zvol_alloc(dev_t dev, const char *name)
|
|
|
|
{
|
|
|
|
zvol_state_t *zv;
|
2020-04-09 19:16:46 +03:00
|
|
|
struct zvol_state_os *zso;
|
2019-09-25 19:20:30 +03:00
|
|
|
uint64_t volmode;
|
|
|
|
|
|
|
|
if (dsl_prop_get_integer(name, "volmode", &volmode, NULL) != 0)
|
|
|
|
return (NULL);
|
|
|
|
|
|
|
|
if (volmode == ZFS_VOLMODE_DEFAULT)
|
|
|
|
volmode = zvol_volmode;
|
|
|
|
|
|
|
|
if (volmode == ZFS_VOLMODE_NONE)
|
|
|
|
return (NULL);
|
|
|
|
|
|
|
|
zv = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
|
2020-04-09 19:16:46 +03:00
|
|
|
zso = kmem_zalloc(sizeof (struct zvol_state_os), KM_SLEEP);
|
|
|
|
zv->zv_zso = zso;
|
2020-11-17 20:50:52 +03:00
|
|
|
zv->zv_volmode = volmode;
|
2019-09-25 19:20:30 +03:00
|
|
|
|
|
|
|
list_link_init(&zv->zv_next);
|
|
|
|
mutex_init(&zv->zv_state_lock, NULL, MUTEX_DEFAULT, NULL);
|
|
|
|
|
2020-08-09 19:12:25 +03:00
|
|
|
#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
|
2021-07-24 01:28:03 +03:00
|
|
|
#ifdef HAVE_BLK_ALLOC_DISK
|
|
|
|
zso->zvo_disk = blk_alloc_disk(NUMA_NO_NODE);
|
|
|
|
if (zso->zvo_disk == NULL)
|
|
|
|
goto out_kmem;
|
|
|
|
|
|
|
|
zso->zvo_disk->minors = ZVOL_MINORS;
|
|
|
|
zso->zvo_queue = zso->zvo_disk->queue;
|
|
|
|
#else
|
2020-08-09 19:12:25 +03:00
|
|
|
zso->zvo_queue = blk_alloc_queue(NUMA_NO_NODE);
|
2021-07-24 01:28:03 +03:00
|
|
|
if (zso->zvo_queue == NULL)
|
|
|
|
goto out_kmem;
|
|
|
|
|
|
|
|
zso->zvo_disk = alloc_disk(ZVOL_MINORS);
|
|
|
|
if (zso->zvo_disk == NULL) {
|
|
|
|
blk_cleanup_queue(zso->zvo_queue);
|
|
|
|
goto out_kmem;
|
|
|
|
}
|
|
|
|
|
|
|
|
zso->zvo_disk->queue = zso->zvo_queue;
|
|
|
|
#endif /* HAVE_BLK_ALLOC_DISK */
|
2020-08-09 19:12:25 +03:00
|
|
|
#else
|
2020-04-09 19:16:46 +03:00
|
|
|
zso->zvo_queue = blk_generic_alloc_queue(zvol_request, NUMA_NO_NODE);
|
|
|
|
if (zso->zvo_queue == NULL)
|
2019-09-25 19:20:30 +03:00
|
|
|
goto out_kmem;
|
|
|
|
|
2021-07-24 01:28:03 +03:00
|
|
|
zso->zvo_disk = alloc_disk(ZVOL_MINORS);
|
|
|
|
if (zso->zvo_disk == NULL) {
|
|
|
|
blk_cleanup_queue(zso->zvo_queue);
|
|
|
|
goto out_kmem;
|
|
|
|
}
|
|
|
|
|
|
|
|
zso->zvo_disk->queue = zso->zvo_queue;
|
|
|
|
#endif /* HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS */
|
|
|
|
|
2020-04-09 19:16:46 +03:00
|
|
|
blk_queue_set_write_cache(zso->zvo_queue, B_TRUE, B_TRUE);
|
2019-09-25 19:20:30 +03:00
|
|
|
|
|
|
|
/* Limit read-ahead to a single page to prevent over-prefetching. */
|
2020-04-09 19:16:46 +03:00
|
|
|
blk_queue_set_read_ahead(zso->zvo_queue, 1);
|
2019-09-25 19:20:30 +03:00
|
|
|
|
|
|
|
/* Disable write merging in favor of the ZIO pipeline. */
|
2020-04-09 19:16:46 +03:00
|
|
|
blk_queue_flag_set(QUEUE_FLAG_NOMERGES, zso->zvo_queue);
|
2019-09-25 19:20:30 +03:00
|
|
|
|
2021-08-06 00:35:34 +03:00
|
|
|
/* Enable /proc/diskstats */
|
|
|
|
blk_queue_flag_set(QUEUE_FLAG_IO_STAT, zso->zvo_queue);
|
|
|
|
|
2020-04-09 19:16:46 +03:00
|
|
|
zso->zvo_queue->queuedata = zv;
|
|
|
|
zso->zvo_dev = dev;
|
2019-09-25 19:20:30 +03:00
|
|
|
zv->zv_open_count = 0;
|
|
|
|
strlcpy(zv->zv_name, name, MAXNAMELEN);
|
|
|
|
|
2019-10-04 01:54:29 +03:00
|
|
|
zfs_rangelock_init(&zv->zv_rangelock, NULL, NULL);
|
2019-09-25 19:20:30 +03:00
|
|
|
rw_init(&zv->zv_suspend_lock, NULL, RW_DEFAULT, NULL);
|
|
|
|
|
2020-04-09 19:16:46 +03:00
|
|
|
zso->zvo_disk->major = zvol_major;
|
|
|
|
zso->zvo_disk->events = DISK_EVENT_MEDIA_CHANGE;
|
2019-09-25 19:20:30 +03:00
|
|
|
|
|
|
|
if (volmode == ZFS_VOLMODE_DEV) {
|
|
|
|
/*
|
|
|
|
* ZFS_VOLMODE_DEV disables partitioning on ZVOL devices: set
|
|
|
|
* gendisk->minors = 1 as noted in include/linux/genhd.h.
|
|
|
|
* Also disable extended partition numbers (GENHD_FL_EXT_DEVT)
|
|
|
|
* and suppress partition scanning (GENHD_FL_NO_PART_SCAN) by
|
|
|
|
* setting gendisk->flags accordingly.
|
|
|
|
*/
|
2020-04-09 19:16:46 +03:00
|
|
|
zso->zvo_disk->minors = 1;
|
2019-09-25 19:20:30 +03:00
|
|
|
#if defined(GENHD_FL_EXT_DEVT)
|
2020-04-09 19:16:46 +03:00
|
|
|
zso->zvo_disk->flags &= ~GENHD_FL_EXT_DEVT;
|
2019-09-25 19:20:30 +03:00
|
|
|
#endif
|
|
|
|
#if defined(GENHD_FL_NO_PART_SCAN)
|
2020-04-09 19:16:46 +03:00
|
|
|
zso->zvo_disk->flags |= GENHD_FL_NO_PART_SCAN;
|
2019-09-25 19:20:30 +03:00
|
|
|
#endif
|
|
|
|
}
|
2020-04-09 19:16:46 +03:00
|
|
|
zso->zvo_disk->first_minor = (dev & MINORMASK);
|
|
|
|
zso->zvo_disk->fops = &zvol_ops;
|
|
|
|
zso->zvo_disk->private_data = zv;
|
|
|
|
snprintf(zso->zvo_disk->disk_name, DISK_NAME_LEN, "%s%d",
|
2019-09-25 19:20:30 +03:00
|
|
|
ZVOL_DEV_NAME, (dev & MINORMASK));
|
|
|
|
|
|
|
|
return (zv);
|
|
|
|
|
|
|
|
out_kmem:
|
2020-04-09 19:16:46 +03:00
|
|
|
kmem_free(zso, sizeof (struct zvol_state_os));
|
2019-09-25 19:20:30 +03:00
|
|
|
kmem_free(zv, sizeof (zvol_state_t));
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Cleanup then free a zvol_state_t which was created by zvol_alloc().
|
|
|
|
* At this time, the structure is not opened by anyone, is taken off
|
|
|
|
* the zvol_state_list, and has its private data set to NULL.
|
|
|
|
* The zvol_state_lock is dropped.
|
Timeout waiting for ZVOL device to be created
We've seen cases where after creating a ZVOL, the ZVOL device node in
"/dev" isn't generated after 20 seconds of waiting, which is the point
at which our application gives up on waiting and reports an error.
The workload when this occurs is to "refresh" 400+ ZVOLs roughly at the
same time, based on a policy set by the user. This refresh operation
will destroy the ZVOL, and re-create it based on a snapshot.
When this occurs, we see many hundreds of entries on the "z_zvol" taskq
(based on inspection of the /proc/spl/taskq-all file). Many of the
entries on the taskq end up in the "zvol_remove_minors_impl" function,
and I've measured the latency of that function:
Function = zvol_remove_minors_impl
msecs : count distribution
0 -> 1 : 0 | |
2 -> 3 : 0 | |
4 -> 7 : 1 | |
8 -> 15 : 0 | |
16 -> 31 : 0 | |
32 -> 63 : 0 | |
64 -> 127 : 1 | |
128 -> 255 : 45 |****************************************|
256 -> 511 : 5 |**** |
That data is from a 10 second sample, using the BCC "funclatency" tool.
As we can see, in this 10 second sample, most calls took at least 128ms.
Thus, some basic math tells us that in any 20 second interval,
we could only process at most about 150 removals, which is much less
than the 400+ that'll occur based on the workload.
As a result of this, and since all ZVOL minor operations will go through
the single threaded "z_zvol" taskq, the latency for creating a single
ZVOL device can be unreasonably large due to other ZVOL activity on the
system. In our case, it's large enough to cause the application to
generate an error and fail the operation.
When profiling the "zvol_remove_minors_impl" function, I saw that most
of the time in the function was spent off-cpu, blocked in the function
"taskq_wait_outstanding". How this works, is "zvol_remove_minors_impl"
will dispatch calls to "zvol_free" using the "system_taskq", and then
the "taskq_wait_outstanding" function is used to wait for all of those
dispatched calls to occur before "zvol_remove_minors_impl" will return.
As far as I can tell, "zvol_remove_minors_impl" doesn't necessarily have
to wait for all calls to "zvol_free" to occur before it returns. Thus,
this change removes the call to "taskq_wait_outstanding", so that calls
to "zvol_free" don't affect the latency of "zvol_remove_minors_impl".
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: John Gallagher <john.gallagher@delphix.com>
Signed-off-by: Prakash Surya <prakash.surya@delphix.com>
Closes #9380
2019-10-01 22:33:12 +03:00
|
|
|
*
|
|
|
|
* This function may take many milliseconds to complete (e.g. we've seen
|
|
|
|
* it take over 256ms), due to the calls to "blk_cleanup_queue" and
|
|
|
|
* "del_gendisk". Thus, consumers need to be careful to account for this
|
|
|
|
* latency when calling this function.
|
2019-09-25 19:20:30 +03:00
|
|
|
*/
|
|
|
|
static void
|
|
|
|
zvol_free(zvol_state_t *zv)
|
|
|
|
{
|
|
|
|
|
|
|
|
ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
|
|
|
|
ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
|
2020-10-21 20:23:08 +03:00
|
|
|
ASSERT0(zv->zv_open_count);
|
|
|
|
ASSERT3P(zv->zv_zso->zvo_disk->private_data, ==, NULL);
|
2019-09-25 19:20:30 +03:00
|
|
|
|
|
|
|
rw_destroy(&zv->zv_suspend_lock);
|
2019-10-04 01:54:29 +03:00
|
|
|
zfs_rangelock_fini(&zv->zv_rangelock);
|
2019-09-25 19:20:30 +03:00
|
|
|
|
|
|
|
del_gendisk(zv->zv_zso->zvo_disk);
|
2021-07-24 01:28:03 +03:00
|
|
|
#if defined(HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS) && \
|
|
|
|
defined(HAVE_BLK_ALLOC_DISK)
|
|
|
|
blk_cleanup_disk(zv->zv_zso->zvo_disk);
|
|
|
|
#else
|
2019-09-25 19:20:30 +03:00
|
|
|
blk_cleanup_queue(zv->zv_zso->zvo_queue);
|
|
|
|
put_disk(zv->zv_zso->zvo_disk);
|
2021-07-24 01:28:03 +03:00
|
|
|
#endif
|
2019-09-25 19:20:30 +03:00
|
|
|
|
|
|
|
ida_simple_remove(&zvol_ida,
|
|
|
|
MINOR(zv->zv_zso->zvo_dev) >> ZVOL_MINOR_BITS);
|
|
|
|
|
|
|
|
mutex_destroy(&zv->zv_state_lock);
|
2020-06-06 03:17:02 +03:00
|
|
|
dataset_kstats_destroy(&zv->zv_kstat);
|
2019-09-25 19:20:30 +03:00
|
|
|
|
|
|
|
kmem_free(zv->zv_zso, sizeof (struct zvol_state_os));
|
|
|
|
kmem_free(zv, sizeof (zvol_state_t));
|
|
|
|
}
|
|
|
|
|
2020-11-17 20:50:52 +03:00
|
|
|
void
|
|
|
|
zvol_wait_close(zvol_state_t *zv)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2019-09-25 19:20:30 +03:00
|
|
|
/*
|
|
|
|
* Create a block device minor node and setup the linkage between it
|
|
|
|
* and the specified volume. Once this function returns the block
|
|
|
|
* device is live and ready for use.
|
|
|
|
*/
|
|
|
|
static int
|
async zvol minor node creation interferes with receive
When we finish a zfs receive, dmu_recv_end_sync() calls
zvol_create_minors(async=TRUE). This kicks off some other threads that
create the minor device nodes (in /dev/zvol/poolname/...). These async
threads call zvol_prefetch_minors_impl() and zvol_create_minor(), which
both call dmu_objset_own(), which puts a "long hold" on the dataset.
Since the zvol minor node creation is asynchronous, this can happen
after the `ZFS_IOC_RECV[_NEW]` ioctl and `zfs receive` process have
completed.
After the first receive ioctl has completed, userland may attempt to do
another receive into the same dataset (e.g. the next incremental
stream). This second receive and the asynchronous minor node creation
can interfere with one another in several different ways, because they
both require exclusive access to the dataset:
1. When the second receive is finishing up, dmu_recv_end_check() does
dsl_dataset_handoff_check(), which can fail with EBUSY if the async
minor node creation already has a "long hold" on this dataset. This
causes the 2nd receive to fail.
2. The async udev rule can fail if zvol_id and/or systemd-udevd try to
open the device while the second receive's async attempt at minor
node creation owns the dataset (via zvol_prefetch_minors_impl). This
causes the minor node (/dev/zd*) to exist, but the udev-generated
/dev/zvol/... to not exist.
3. The async minor node creation can silently fail with EBUSY if the
first receive's zvol_create_minor() tries to own the dataset while the
second receive's zvol_prefetch_minors_impl already owns the dataset.
To address these problems, this change synchronously creates the minor
node. To avoid the lock ordering problems that the asynchrony was
introduced to fix (see #3681), we create the minor nodes from open
context, with no locks held, rather than from syncing context as was
originally done.
Implementation notes:
We generally do not need to traverse children or prefetch anything (e.g.
when running the recv, snapshot, create, or clone subcommands of zfs).
We only need recursion when importing/opening a pool and when loading
encryption keys. The existing recursive, asynchronous, prefetching code
is preserved for use in these cases.
Channel programs may need to create zvol minor nodes, when creating a
snapshot of a zvol with the snapdev property set. We figure out what
snapshots are created when running the LUA program in syncing context.
In this case we need to remember what snapshots were created, and then
try to create their minor nodes from open context, after the LUA code
has completed.
There are additional zvol use cases that asynchronously own the dataset,
which can cause similar problems. E.g. changing the volmode or snapdev
properties. These are less problematic because they are not recursive
and don't touch datasets that are not involved in the operation, but there
is still potential for interference with subsequent operations. In the
future, these cases should be similarly converted to create the zvol
minor node synchronously from open context.
The async tasks of removing and renaming minors do not own the objset,
so they do not have this problem. However, it may make sense to also
convert these operations to happen synchronously from open context, in
the future.
Reviewed-by: Paul Dagnelie <pcd@delphix.com>
Reviewed-by: Prakash Surya <prakash.surya@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
External-issue: DLPX-65948
Closes #7863
Closes #9885
2020-02-03 20:33:14 +03:00
|
|
|
zvol_os_create_minor(const char *name)
|
2019-09-25 19:20:30 +03:00
|
|
|
{
|
|
|
|
zvol_state_t *zv;
|
|
|
|
objset_t *os;
|
|
|
|
dmu_object_info_t *doi;
|
|
|
|
uint64_t volsize;
|
|
|
|
uint64_t len;
|
|
|
|
unsigned minor = 0;
|
|
|
|
int error = 0;
|
|
|
|
int idx;
|
|
|
|
uint64_t hash = zvol_name_hash(name);
|
|
|
|
|
|
|
|
if (zvol_inhibit_dev)
|
|
|
|
return (0);
|
|
|
|
|
|
|
|
idx = ida_simple_get(&zvol_ida, 0, 0, kmem_flags_convert(KM_SLEEP));
|
|
|
|
if (idx < 0)
|
|
|
|
return (SET_ERROR(-idx));
|
|
|
|
minor = idx << ZVOL_MINOR_BITS;
|
|
|
|
|
|
|
|
zv = zvol_find_by_name_hash(name, hash, RW_NONE);
|
|
|
|
if (zv) {
|
|
|
|
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
|
|
|
|
mutex_exit(&zv->zv_state_lock);
|
|
|
|
ida_simple_remove(&zvol_ida, idx);
|
|
|
|
return (SET_ERROR(EEXIST));
|
|
|
|
}
|
|
|
|
|
|
|
|
doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);
|
|
|
|
|
|
|
|
error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, B_TRUE, FTAG, &os);
|
|
|
|
if (error)
|
|
|
|
goto out_doi;
|
|
|
|
|
|
|
|
error = dmu_object_info(os, ZVOL_OBJ, doi);
|
|
|
|
if (error)
|
|
|
|
goto out_dmu_objset_disown;
|
|
|
|
|
|
|
|
error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
|
|
|
|
if (error)
|
|
|
|
goto out_dmu_objset_disown;
|
|
|
|
|
|
|
|
zv = zvol_alloc(MKDEV(zvol_major, minor), name);
|
|
|
|
if (zv == NULL) {
|
|
|
|
error = SET_ERROR(EAGAIN);
|
|
|
|
goto out_dmu_objset_disown;
|
|
|
|
}
|
|
|
|
zv->zv_hash = hash;
|
|
|
|
|
|
|
|
if (dmu_objset_is_snapshot(os))
|
|
|
|
zv->zv_flags |= ZVOL_RDONLY;
|
|
|
|
|
|
|
|
zv->zv_volblocksize = doi->doi_data_block_size;
|
|
|
|
zv->zv_volsize = volsize;
|
|
|
|
zv->zv_objset = os;
|
|
|
|
|
|
|
|
set_capacity(zv->zv_zso->zvo_disk, zv->zv_volsize >> 9);
|
|
|
|
|
|
|
|
blk_queue_max_hw_sectors(zv->zv_zso->zvo_queue,
|
|
|
|
(DMU_MAX_ACCESS / 4) >> 9);
|
|
|
|
blk_queue_max_segments(zv->zv_zso->zvo_queue, UINT16_MAX);
|
|
|
|
blk_queue_max_segment_size(zv->zv_zso->zvo_queue, UINT_MAX);
|
|
|
|
blk_queue_physical_block_size(zv->zv_zso->zvo_queue,
|
|
|
|
zv->zv_volblocksize);
|
|
|
|
blk_queue_io_opt(zv->zv_zso->zvo_queue, zv->zv_volblocksize);
|
|
|
|
blk_queue_max_discard_sectors(zv->zv_zso->zvo_queue,
|
|
|
|
(zvol_max_discard_blocks * zv->zv_volblocksize) >> 9);
|
|
|
|
blk_queue_discard_granularity(zv->zv_zso->zvo_queue,
|
|
|
|
zv->zv_volblocksize);
|
|
|
|
blk_queue_flag_set(QUEUE_FLAG_DISCARD, zv->zv_zso->zvo_queue);
|
|
|
|
#ifdef QUEUE_FLAG_NONROT
|
|
|
|
blk_queue_flag_set(QUEUE_FLAG_NONROT, zv->zv_zso->zvo_queue);
|
|
|
|
#endif
|
|
|
|
#ifdef QUEUE_FLAG_ADD_RANDOM
|
|
|
|
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, zv->zv_zso->zvo_queue);
|
|
|
|
#endif
|
|
|
|
/* This flag was introduced in kernel version 4.12. */
|
|
|
|
#ifdef QUEUE_FLAG_SCSI_PASSTHROUGH
|
|
|
|
blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, zv->zv_zso->zvo_queue);
|
|
|
|
#endif
|
|
|
|
|
zvol: call zil_replaying() during replay
zil_replaying(zil, tx) has the side-effect of informing the ZIL that an
entry has been replayed in the (still open) tx. The ZIL uses that
information to record the replay progress in the ZIL header when that
tx's txg syncs.
ZPL log entries are not idempotent and are logically dependent, and thus
calling zil_replaying() is necessary for correctness.
For ZVOLs the question of correctness is more nuanced: ZVOL logs only
TX_WRITE and TX_TRUNCATE, both of which are idempotent. Logical
dependencies between two records exist only if the write or discard
request had sync semantics or if the ranges affected by the records
overlap.
Thus, at a first glance, it would be correct to restart replay from
the beginning if we crash before replay completes. But this does not
address the following scenario:
Assume one log record per LWB.
The chain on disk is
HDR -> 1:W(1, "A") -> 2:W(1, "B") -> 3:W(2, "X") -> 4:W(3, "Z")
where N:W(O, C) represents log entry number N, which is a TX_WRITE of C
to offset O.
We replay 1, 2 and 3 in one txg, sync that txg, then crash.
Bit flips corrupt 2, 3, and 4.
We come up again and restart replay from the beginning because
we did not call zil_replaying() during replay.
We replay 1 again, then interpret 2's invalid checksum as the end
of the ZIL chain and call replay done.
The replayed zvol content is "AX".
If we had called zil_replaying() the HDR would have pointed to 3
and our resumed replay would not have replayed anything because
3 was corrupted, resulting in zvol content "BX".
If 3 logically depends on 2 then the replay corrupted the ZVOL_OBJ's
contents.
This patch adds the zil_replaying() calls to the replay functions.
Since the callbacks in the replay function need the zilog_t* pointer
so that they can call zil_replaying() we open the ZIL while
replaying in zvol_create_minor(). We also verify that replay has
been done when on-demand-opening the ZIL on the first modifying
bio.
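As a minimal sketch of the replay-callback pattern described here (the
body is illustrative, not the exact OpenZFS callback), the write is
applied inside an assigned tx and zil_replaying() is called on that same
tx, so the ZIL header records the replay progress when the txg syncs:

	static int
	zvol_replay_write_sketch(void *arg1, void *arg2, boolean_t byteswap)
	{
		zvol_state_t *zv = arg1;
		lr_write_t *lr = arg2;
		objset_t *os = zv->zv_objset;
		uint64_t offset = lr->lr_offset;
		uint64_t length = lr->lr_length;
		dmu_tx_t *tx;
		int error;

		if (byteswap)
			byteswap_uint64_array(lr, sizeof (*lr));

		tx = dmu_tx_create(os);
		dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error != 0) {
			dmu_tx_abort(tx);
			return (error);
		}
		dmu_write(os, ZVOL_OBJ, offset, length, lr + 1, tx);
		/* Side effect: records replay progress in the ZIL header. */
		(void) zil_replaying(zv->zv_zilog, tx);
		dmu_tx_commit(tx);
		return (0);
	}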
Reviewed-by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Christian Schwarz <me@cschwarz.com>
Closes #11667
2021-03-07 20:49:58 +03:00
|
|
|
ASSERT3P(zv->zv_zilog, ==, NULL);
|
|
|
|
zv->zv_zilog = zil_open(os, zvol_get_data);
|
2019-09-25 19:20:30 +03:00
|
|
|
if (spa_writeable(dmu_objset_spa(os))) {
|
|
|
|
if (zil_replay_disable)
|
2021-03-07 20:49:58 +03:00
|
|
|
zil_destroy(zv->zv_zilog, B_FALSE);
|
2019-09-25 19:20:30 +03:00
|
|
|
else
|
|
|
|
zil_replay(os, zv, zvol_replay_vector);
|
|
|
|
}
|
2021-03-07 20:49:58 +03:00
|
|
|
zil_close(zv->zv_zilog);
|
|
|
|
zv->zv_zilog = NULL;
|
2020-06-06 03:17:02 +03:00
|
|
|
ASSERT3P(zv->zv_kstat.dk_kstats, ==, NULL);
|
|
|
|
dataset_kstats_create(&zv->zv_kstat, zv->zv_objset);
|
2019-09-25 19:20:30 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* When udev detects the addition of the device it will immediately
|
|
|
|
* invoke blkid(8) to determine the type of content on the device.
|
|
|
|
* Prefetching the blocks commonly scanned by blkid(8) will speed
|
|
|
|
* up this process.
|
|
|
|
*/
|
|
|
|
len = MIN(MAX(zvol_prefetch_bytes, 0), SPA_MAXBLOCKSIZE);
|
|
|
|
if (len > 0) {
|
|
|
|
dmu_prefetch(os, ZVOL_OBJ, 0, 0, len, ZIO_PRIORITY_SYNC_READ);
|
|
|
|
dmu_prefetch(os, ZVOL_OBJ, 0, volsize - len, len,
|
|
|
|
ZIO_PRIORITY_SYNC_READ);
|
|
|
|
}
|
|
|
|
|
|
|
|
zv->zv_objset = NULL;
|
|
|
|
out_dmu_objset_disown:
|
|
|
|
dmu_objset_disown(os, B_TRUE, FTAG);
|
|
|
|
out_doi:
|
|
|
|
kmem_free(doi, sizeof (dmu_object_info_t));
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Keep in mind that once add_disk() is called, the zvol is
|
|
|
|
* announced to the world, and zvol_open()/zvol_release() can
|
|
|
|
* be called at any time. Incidentally, add_disk() itself calls
|
|
|
|
* zvol_open()->zvol_first_open() and zvol_release()->zvol_last_close()
|
|
|
|
* directly as well.
|
|
|
|
*/
|
|
|
|
if (error == 0) {
|
|
|
|
rw_enter(&zvol_state_lock, RW_WRITER);
|
|
|
|
zvol_insert(zv);
|
|
|
|
rw_exit(&zvol_state_lock);
|
|
|
|
add_disk(zv->zv_zso->zvo_disk);
|
|
|
|
} else {
|
|
|
|
ida_simple_remove(&zvol_ida, idx);
|
|
|
|
}
|
|
|
|
|
2020-02-03 20:33:14 +03:00
|
|
|
return (error);
|
2019-09-25 19:20:30 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
zvol_rename_minor(zvol_state_t *zv, const char *newname)
|
|
|
|
{
|
|
|
|
int readonly = get_disk_ro(zv->zv_zso->zvo_disk);
|
|
|
|
|
|
|
|
ASSERT(RW_LOCK_HELD(&zvol_state_lock));
|
|
|
|
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
|
|
|
|
|
|
|
|
strlcpy(zv->zv_name, newname, sizeof (zv->zv_name));
|
|
|
|
|
|
|
|
/* move to new hashtable entry */
|
|
|
|
zv->zv_hash = zvol_name_hash(zv->zv_name);
|
|
|
|
hlist_del(&zv->zv_hlink);
|
|
|
|
hlist_add_head(&zv->zv_hlink, ZVOL_HT_HEAD(zv->zv_hash));
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The block device's read-only state is briefly changed causing
|
|
|
|
* a KOBJ_CHANGE uevent to be issued. This ensures udev detects
|
|
|
|
* the name change and fixes the symlinks. This does not change
|
|
|
|
* ZVOL_RDONLY in zv->zv_flags so the actual read-only state never
|
|
|
|
* changes. This would normally be done using kobject_uevent() but
|
|
|
|
* that is a GPL-only symbol which is why we need this workaround.
|
|
|
|
*/
|
|
|
|
set_disk_ro(zv->zv_zso->zvo_disk, !readonly);
|
|
|
|
set_disk_ro(zv->zv_zso->zvo_disk, readonly);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
zvol_set_disk_ro_impl(zvol_state_t *zv, int flags)
|
|
|
|
{
|
|
|
|
|
|
|
|
set_disk_ro(zv->zv_zso->zvo_disk, flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
zvol_set_capacity_impl(zvol_state_t *zv, uint64_t capacity)
|
|
|
|
{
|
|
|
|
|
|
|
|
set_capacity(zv->zv_zso->zvo_disk, capacity);
|
|
|
|
}
|
|
|
|
|
|
|
|
const static zvol_platform_ops_t zvol_linux_ops = {
|
|
|
|
.zv_free = zvol_free,
|
|
|
|
.zv_rename_minor = zvol_rename_minor,
|
2020-02-03 20:33:14 +03:00
|
|
|
.zv_create_minor = zvol_os_create_minor,
|
2019-09-25 19:20:30 +03:00
|
|
|
.zv_update_volsize = zvol_update_volsize,
|
|
|
|
.zv_clear_private = zvol_clear_private,
|
|
|
|
.zv_is_zvol = zvol_is_zvol_impl,
|
|
|
|
.zv_set_disk_ro = zvol_set_disk_ro_impl,
|
|
|
|
.zv_set_capacity = zvol_set_capacity_impl,
|
|
|
|
};
|
|
|
|
|
|
|
|
int
|
|
|
|
zvol_init(void)
|
|
|
|
{
|
|
|
|
int error;
|
|
|
|
int threads = MIN(MAX(zvol_threads, 1), 1024);
|
|
|
|
|
|
|
|
error = register_blkdev(zvol_major, ZVOL_DRIVER);
|
|
|
|
if (error) {
|
|
|
|
printk(KERN_INFO "ZFS: register_blkdev() failed %d\n", error);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
zvol_taskq = taskq_create(ZVOL_DRIVER, threads, maxclsyspri,
|
|
|
|
threads * 2, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
|
|
|
|
if (zvol_taskq == NULL) {
|
|
|
|
unregister_blkdev(zvol_major, ZVOL_DRIVER);
|
|
|
|
return (-ENOMEM);
|
|
|
|
}
|
|
|
|
zvol_init_impl();
|
|
|
|
ida_init(&zvol_ida);
|
|
|
|
zvol_register_ops(&zvol_linux_ops);
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
zvol_fini(void)
|
|
|
|
{
|
|
|
|
zvol_fini_impl();
|
|
|
|
unregister_blkdev(zvol_major, ZVOL_DRIVER);
|
|
|
|
taskq_destroy(zvol_taskq);
|
|
|
|
ida_destroy(&zvol_ida);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* BEGIN CSTYLED */
|
|
|
|
module_param(zvol_inhibit_dev, uint, 0644);
|
|
|
|
MODULE_PARM_DESC(zvol_inhibit_dev, "Do not create zvol device nodes");
|
|
|
|
|
|
|
|
module_param(zvol_major, uint, 0444);
|
|
|
|
MODULE_PARM_DESC(zvol_major, "Major number for zvol device");
|
|
|
|
|
|
|
|
module_param(zvol_threads, uint, 0444);
|
|
|
|
MODULE_PARM_DESC(zvol_threads, "Max number of threads to handle I/O requests");
|
|
|
|
|
|
|
|
module_param(zvol_request_sync, uint, 0644);
|
|
|
|
MODULE_PARM_DESC(zvol_request_sync, "Synchronously handle bio requests");
|
|
|
|
|
|
|
|
module_param(zvol_max_discard_blocks, ulong, 0444);
|
|
|
|
MODULE_PARM_DESC(zvol_max_discard_blocks, "Max number of blocks to discard");
|
|
|
|
|
|
|
|
module_param(zvol_prefetch_bytes, uint, 0644);
|
|
|
|
MODULE_PARM_DESC(zvol_prefetch_bytes, "Prefetch N bytes at zvol start+end");
|
|
|
|
|
|
|
|
module_param(zvol_volmode, uint, 0644);
|
|
|
|
MODULE_PARM_DESC(zvol_volmode, "Default volmode property value");
|
|
|
|
/* END CSTYLED */
|