/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2015 by Chunwei Chen. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc.
 * Copyright (c) 2021, 2022 by Pawel Jakub Dawidek
 */

/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/vfs.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_crypt.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/policy.h>
#include <sys/zfeature.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_quota.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_znode.h>

/*
 * Enable the experimental block cloning feature. If this setting is 0, then
 * even if feature@block_cloning is enabled, attempts to clone blocks will act
 * as though the feature is disabled.
 */
int zfs_bclone_enabled = 1;
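
/*
 * Runtime tuning sketch (assuming the usual OpenZFS module-parameter
 * plumbing): on Linux this tunable is typically exposed as
 *
 *	/sys/module/zfs/parameters/zfs_bclone_enabled
 *
 * and on FreeBSD as a vfs.zfs sysctl, so block cloning can be switched off
 * without rebuilding the module.
 */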

/*
 * When set, zfs_clone_range() waits for dirty data to be written to disk.
 * This allows the clone operation to reliably succeed when a file is modified
 * and then immediately cloned. For small files this may be slower than making
 * a copy of the file and is therefore not the default. However, in certain
 * scenarios this behavior may be desirable, so a tunable is provided.
 */
static int zfs_bclone_wait_dirty = 0;
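
/*
 * Example of the scenario this tunable addresses (a hedged sketch, assuming
 * the Linux FICLONE ioctl as the cloning interface):
 *
 *	write(src_fd, buf, len);	   a dirty range not yet on disk
 *	ioctl(dst_fd, FICLONE, src_fd);	   may not succeed while dirty
 *
 * Setting zfs_bclone_wait_dirty=1 makes the clone wait for the dirty data to
 * be written out; an explicit fsync(src_fd) before cloning has a similar
 * effect without the tunable.
 */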
/*
|
|
|
|
* Enable Direct I/O. If this setting is 0, then all I/O requests will be
|
|
|
|
* directed through the ARC acting as though the dataset property direct was
|
|
|
|
* set to disabled.
|
|
|
|
*/
|
2024-10-03 04:24:29 +03:00
|
|
|
static int zfs_dio_enabled = 0;
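
/*
 * Usage sketch (assuming the "direct" dataset property and the O_DIRECT
 * semantics described in zfs_setup_direct() below): with zfs_dio_enabled=1 a
 * request is considered for the Direct I/O path when the application opens
 * the file with
 *
 *	fd = open(path, O_RDWR | O_DIRECT);
 *
 * or when the dataset is configured with
 *
 *	zfs set direct=always <pool>/<dataset>
 *
 * Alignment and mmap restrictions are then evaluated in zfs_setup_direct().
 */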

/*
 * Maximum bytes to read per chunk in zfs_read().
 */
static uint64_t zfs_vnops_read_chunk_size = 1024 * 1024;
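
/*
 * Worked example (buffered read, default 1 MiB chunk size): a 2 MiB read at
 * offset 1536 KiB is issued to the DMU as three chunks of 512 KiB, 1 MiB and
 * 512 KiB, because each chunk is trimmed at the next chunk-size boundary
 * (see the P2PHASE() calculation in zfs_read() below).
 */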

int
zfs_fsync(znode_t *zp, int syncflag, cred_t *cr)
{
	int error = 0;
	zfsvfs_t *zfsvfs = ZTOZSB(zp);

	if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
		if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
			return (error);
		atomic_inc_32(&zp->z_sync_writes_cnt);
		zil_commit(zfsvfs->z_log, zp->z_id);
		atomic_dec_32(&zp->z_sync_writes_cnt);
		zfs_exit(zfsvfs, FTAG);
	}
	return (error);
}

#if defined(SEEK_HOLE) && defined(SEEK_DATA)
/*
 * Lseek support for finding holes (cmd == SEEK_HOLE) and
 * data (cmd == SEEK_DATA). "off" is an in/out parameter.
 */
static int
zfs_holey_common(znode_t *zp, ulong_t cmd, loff_t *off)
{
	zfs_locked_range_t *lr;
	uint64_t noff = (uint64_t)*off; /* new offset */
	uint64_t file_sz;
	int error;
	boolean_t hole;

	file_sz = zp->z_size;
	if (noff >= file_sz) {
		return (SET_ERROR(ENXIO));
	}

	if (cmd == F_SEEK_HOLE)
		hole = B_TRUE;
	else
		hole = B_FALSE;

	/* Flush any mmap()'d data to disk */
	if (zn_has_cached_data(zp, 0, file_sz - 1))
		zn_flush_cached_data(zp, B_TRUE);

	lr = zfs_rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_READER);
	error = dmu_offset_next(ZTOZSB(zp)->z_os, zp->z_id, hole, &noff);
	zfs_rangelock_exit(lr);

	if (error == ESRCH)
		return (SET_ERROR(ENXIO));

	/* File was dirty, so fall back to using generic logic */
	if (error == EBUSY) {
		if (hole)
			*off = file_sz;

		return (0);
	}

	/*
	 * We could find a hole that begins after the logical end-of-file,
	 * because dmu_offset_next() only works on whole blocks. If the
	 * EOF falls mid-block, then indicate that the "virtual hole"
	 * at the end of the file begins at the logical EOF, rather than
	 * at the end of the last block.
	 */
	if (noff > file_sz) {
		ASSERT(hole);
		noff = file_sz;
	}

	if (noff < *off)
		return (error);
	*off = noff;
	return (error);
}

int
zfs_holey(znode_t *zp, ulong_t cmd, loff_t *off)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	int error;

	if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
		return (error);

	error = zfs_holey_common(zp, cmd, off);

	zfs_exit(zfsvfs, FTAG);
	return (error);
}
#endif /* SEEK_HOLE && SEEK_DATA */

int
zfs_access(znode_t *zp, int mode, int flag, cred_t *cr)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	int error;

	if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
		return (error);

	if (flag & V_ACE_MASK)
#if defined(__linux__)
		error = zfs_zaccess(zp, mode, flag, B_FALSE, cr,
		    zfs_init_idmap);
#else
		error = zfs_zaccess(zp, mode, flag, B_FALSE, cr,
		    NULL);
#endif
	else
#if defined(__linux__)
		error = zfs_zaccess_rwx(zp, mode, flag, cr, zfs_init_idmap);
#else
		error = zfs_zaccess_rwx(zp, mode, flag, cr, NULL);
#endif

	zfs_exit(zfsvfs, FTAG);
	return (error);
}

/*
 * Determine if Direct I/O has been requested (either via the O_DIRECT flag or
 * the "direct" dataset property). When Direct I/O is inherited from the
 * property, only apply the O_DIRECT flag to correctly aligned I/O requests.
 * The rationale for this is that it allows the property to be safely set on a
 * dataset without forcing all of the applications to be aware of the
 * alignment restrictions. When O_DIRECT is explicitly requested by an
 * application, return EINVAL if the request is unaligned. In all cases, if
 * the range for this request has been mmap'ed then we will perform buffered
 * I/O to keep the mapped region synchronized with the ARC.
 *
 * It is possible that a file's pages could be mmap'ed after it is checked
 * here. If so, that is handled accordingly in zfs_write(). See comments in
 * the following area for how this is handled:
 * zfs_write() -> update_pages()
 */
static int
zfs_setup_direct(struct znode *zp, zfs_uio_t *uio, zfs_uio_rw_t rw,
    int *ioflagp)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	objset_t *os = zfsvfs->z_os;
	int ioflag = *ioflagp;
	int error = 0;

	if (!zfs_dio_enabled || os->os_direct == ZFS_DIRECT_DISABLED ||
	    zn_has_cached_data(zp, zfs_uio_offset(uio),
	    zfs_uio_offset(uio) + zfs_uio_resid(uio) - 1)) {
		/*
		 * Direct I/O is disabled or the region is mmap'ed. In either
		 * case the I/O request will simply be directed through the
		 * ARC.
		 */
		ioflag &= ~O_DIRECT;
		goto out;
	} else if (os->os_direct == ZFS_DIRECT_ALWAYS &&
	    zfs_uio_page_aligned(uio) &&
	    zfs_uio_aligned(uio, PAGE_SIZE)) {
		if ((rw == UIO_WRITE && zfs_uio_resid(uio) >= zp->z_blksz) ||
		    (rw == UIO_READ)) {
			ioflag |= O_DIRECT;
		}
	} else if (os->os_direct == ZFS_DIRECT_ALWAYS && (ioflag & O_DIRECT)) {
		/*
		 * Direct I/O was requested through direct=always, but the
		 * request is not properly PAGE_SIZE aligned. The request will
		 * be directed through the ARC.
		 */
		ioflag &= ~O_DIRECT;
	}

	if (ioflag & O_DIRECT) {
		if (!zfs_uio_page_aligned(uio) ||
		    !zfs_uio_aligned(uio, PAGE_SIZE)) {
			error = SET_ERROR(EINVAL);
			goto out;
		}

		error = zfs_uio_get_dio_pages_alloc(uio, rw);
		if (error) {
			goto out;
		}
	}

	IMPLY(ioflag & O_DIRECT, uio->uio_extflg & UIO_DIRECT);
	ASSERT0(error);

out:
	*ioflagp = ioflag;
	return (error);
}
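
/*
 * Decision examples for zfs_setup_direct() (illustrative, assuming 4 KiB
 * pages, a region that is not mmap'ed, and direct=standard unless noted):
 *
 *   - O_DIRECT read with offset, length and buffer all PAGE_SIZE aligned:
 *     the request keeps O_DIRECT and its pages are pinned for UIO_DIRECT.
 *   - O_DIRECT read or write with an unaligned offset, length or buffer:
 *     EINVAL is returned to the caller.
 *   - direct=always without O_DIRECT, request aligned (and, for writes, at
 *     least one block in size): O_DIRECT is applied implicitly.
 *   - direct=always with an unaligned request: the request silently falls
 *     back to the ARC rather than failing.
 */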

/*
 * Read bytes from specified file into supplied buffer.
 *
 *	IN:	zp	- inode of file to be read from.
 *		uio	- structure supplying read location, range info,
 *			  and return buffer.
 *		ioflag	- O_SYNC flags; used to provide FRSYNC semantics.
 *			  O_DIRECT flag; used to bypass page cache.
 *		cr	- credentials of caller.
 *
 *	OUT:	uio	- updated offset and range, buffer filled.
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Side Effects:
 *	inode - atime updated if byte count > 0
 */
int
zfs_read(struct znode *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
{
	(void) cr;
	int error = 0;
	boolean_t frsync = B_FALSE;

	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
		return (error);

	if (zp->z_pflags & ZFS_AV_QUARANTINED) {
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(EACCES));
	}

	/* We don't copy out anything useful for directories. */
	if (Z_ISDIR(ZTOTYPE(zp))) {
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(EISDIR));
	}

	/*
	 * Validate file offset
	 */
	if (zfs_uio_offset(uio) < (offset_t)0) {
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Fasttrack empty reads
	 */
	if (zfs_uio_resid(uio) == 0) {
		zfs_exit(zfsvfs, FTAG);
		return (0);
	}

#ifdef FRSYNC
	/*
	 * If we're in FRSYNC mode, sync out this znode before reading it.
	 * Only do this for non-snapshots.
	 *
	 * Some platforms do not support FRSYNC and instead map it
	 * to O_SYNC, which results in unnecessary calls to zil_commit. We
	 * only honor FRSYNC requests on platforms which support it.
	 */
	frsync = !!(ioflag & FRSYNC);
#endif
	if (zfsvfs->z_log &&
	    (frsync || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS))
		zil_commit(zfsvfs->z_log, zp->z_id);

	/*
	 * Lock the range against changes.
	 */
	zfs_locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock,
	    zfs_uio_offset(uio), zfs_uio_resid(uio), RL_READER);

	/*
	 * If we are reading past end-of-file we can skip
	 * to the end; but we might still need to set atime.
	 */
	if (zfs_uio_offset(uio) >= zp->z_size) {
		error = 0;
		goto out;
	}
	ASSERT(zfs_uio_offset(uio) < zp->z_size);

	/*
	 * Setting up Direct I/O if requested.
	 */
	error = zfs_setup_direct(zp, uio, UIO_READ, &ioflag);
	if (error) {
		goto out;
	}

#if defined(__linux__)
	ssize_t start_offset = zfs_uio_offset(uio);
#endif
	ssize_t chunk_size = zfs_vnops_read_chunk_size;
	ssize_t n = MIN(zfs_uio_resid(uio), zp->z_size - zfs_uio_offset(uio));
	ssize_t start_resid = n;
	ssize_t dio_remaining_resid = 0;

	if (uio->uio_extflg & UIO_DIRECT) {
		/*
		 * All pages for an O_DIRECT request have already been mapped
		 * so there's no compelling reason to handle this uio in
		 * smaller chunks.
		 */
		chunk_size = DMU_MAX_ACCESS;

		/*
		 * In the event that the O_DIRECT request is reading the entire
		 * file, it is possible the file's length is not page-size
		 * aligned. However, lower layers expect that the Direct I/O
		 * request is page-aligned. In this case, as much of the file
		 * as possible is read using Direct I/O and the remaining
		 * amount will be read through the ARC.
		 *
		 * This is still consistent with the semantics of Direct I/O in
		 * ZFS as at a minimum the I/O request must be page-aligned.
		 */
		dio_remaining_resid = n - P2ALIGN_TYPED(n, PAGE_SIZE, ssize_t);
		if (dio_remaining_resid != 0)
			n -= dio_remaining_resid;
	}
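
	/*
	 * Worked example (assuming 4 KiB pages): an O_DIRECT read of an
	 * entire 10000-byte file starts with n = 10000, so
	 * dio_remaining_resid = 10000 - 8192 = 1808; the first 8192 bytes go
	 * down the Direct I/O path and the final 1808 bytes are read through
	 * the ARC after the loop below.
	 */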

	while (n > 0) {
		ssize_t nbytes = MIN(n, chunk_size -
		    P2PHASE(zfs_uio_offset(uio), chunk_size));
#ifdef UIO_NOCOPY
		if (zfs_uio_segflg(uio) == UIO_NOCOPY)
			error = mappedread_sf(zp, nbytes, uio);
		else
#endif
		if (zn_has_cached_data(zp, zfs_uio_offset(uio),
		    zfs_uio_offset(uio) + nbytes - 1)) {
			error = mappedread(zp, nbytes, uio);
		} else {
			error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
			    uio, nbytes);
		}

		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);

#if defined(__linux__)
			/*
			 * if we actually read some bytes, bubbling EFAULT
			 * up to become EAGAIN isn't what we want here...
			 *
			 * ...on Linux, at least. On FBSD, doing this breaks.
			 */
			if (error == EFAULT &&
			    (zfs_uio_offset(uio) - start_offset) != 0)
				error = 0;
#endif
			break;
		}

		n -= nbytes;
	}

	if (error == 0 && (uio->uio_extflg & UIO_DIRECT) &&
	    dio_remaining_resid != 0) {
		/*
		 * Temporarily remove the UIO_DIRECT flag from the UIO so the
		 * remainder of the file can be read using the ARC.
		 */
		uio->uio_extflg &= ~UIO_DIRECT;

		if (zn_has_cached_data(zp, zfs_uio_offset(uio),
		    zfs_uio_offset(uio) + dio_remaining_resid - 1)) {
			error = mappedread(zp, dio_remaining_resid, uio);
		} else {
			error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl), uio,
			    dio_remaining_resid);
		}
		uio->uio_extflg |= UIO_DIRECT;

		if (error != 0)
			n += dio_remaining_resid;
	} else if (error && (uio->uio_extflg & UIO_DIRECT)) {
		n += dio_remaining_resid;
	}
	int64_t nread = start_resid - n;
|
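        /* Record the number of bytes actually read in the dataset kstats. */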
        dataset_kstats_update_read_kstats(&zfsvfs->z_kstat, nread);
out:
        zfs_rangelock_exit(lr);

        /*
         * Cleanup for Direct I/O if requested.
         */
        if (uio->uio_extflg & UIO_DIRECT)
                zfs_uio_free_dio_pages(uio, UIO_READ);

        ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
        zfs_exit(zfsvfs, FTAG);
        return (error);
}

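/*
 * Helper for zfs_write(): clear the Set-UID/Set-GID bits on a successful
 * write by an unprivileged caller and log the mode change so it is also
 * applied when the ZIL is replayed.  At most one TX_SETATTR record is
 * logged per transaction group (tracked via *clear_setid_bits_txgp).
 */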
static void
zfs_clear_setid_bits_if_necessary(zfsvfs_t *zfsvfs, znode_t *zp, cred_t *cr,
    uint64_t *clear_setid_bits_txgp, dmu_tx_t *tx)
{
        zilog_t *zilog = zfsvfs->z_log;
        const uint64_t uid = KUID_TO_SUID(ZTOUID(zp));

        ASSERT(clear_setid_bits_txgp != NULL);
        ASSERT(tx != NULL);

        /*
         * Clear Set-UID/Set-GID bits on successful write if not
         * privileged and at least one of the execute bits is set.
         *
         * It would be nice to do this after all writes have
         * been done, but that would still expose the ISUID/ISGID
         * to another app after the partial write is committed.
         *
         * Note: we don't call zfs_fuid_map_id() here because
         * user 0 is not an ephemeral uid.
         */
        mutex_enter(&zp->z_acl_lock);
        if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) | (S_IXUSR >> 6))) != 0 &&
            (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
            secpolicy_vnode_setid_retain(zp, cr,
            ((zp->z_mode & S_ISUID) != 0 && uid == 0)) != 0) {
                uint64_t newmode;

                zp->z_mode &= ~(S_ISUID | S_ISGID);
                newmode = zp->z_mode;
                (void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
                    (void *)&newmode, sizeof (uint64_t), tx);

                mutex_exit(&zp->z_acl_lock);

                /*
                 * Make sure the SUID/SGID bits will be removed when we replay
                 * the log.  If the setid bits keep coming back, don't log more
                 * than one TX_SETATTR per transaction group.
                 */
                if (*clear_setid_bits_txgp != dmu_tx_get_txg(tx)) {
                        vattr_t va = {0};

                        va.va_mask = ATTR_MODE;
                        va.va_nodeid = zp->z_id;
                        va.va_mode = newmode;
                        zfs_log_setattr(zilog, tx, TX_SETATTR, zp, &va,
                            ATTR_MODE, NULL);
                        *clear_setid_bits_txgp = dmu_tx_get_txg(tx);
                }
        } else {
                mutex_exit(&zp->z_acl_lock);
        }
}

/*
 * Write the bytes to a file.
 *
 *      IN:     zp      - znode of file to be written to.
 *              uio     - structure supplying write location, range info,
 *                        and data buffer.
 *              ioflag  - O_APPEND flag set if in append mode.
 *                        O_DIRECT flag; used to bypass page cache.
 *              cr      - credentials of caller.
 *
 *      OUT:    uio     - updated offset and range.
 *
 *      RETURN: 0 if success
 *              error code if failure
 *
 * Timestamps:
 *      ip - ctime|mtime updated if byte count > 0
 */
int
zfs_write(znode_t *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
{
        int error = 0, error1;
        ssize_t start_resid = zfs_uio_resid(uio);
        uint64_t clear_setid_bits_txg = 0;
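        /*
         * Set when the UIO_DIRECT flag is temporarily dropped so that the
         * file's block size can be grown through the ARC; Direct I/O is
         * restored once the block size has been grown (see the comment
         * ahead of the write loop below).
         */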
        boolean_t o_direct_defer = B_FALSE;

        /*
         * Fasttrack empty write
         */
        ssize_t n = start_resid;
        if (n == 0)
                return (0);

        zfsvfs_t *zfsvfs = ZTOZSB(zp);
        if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
                return (error);

        sa_bulk_attr_t bulk[4];
        int count = 0;
        uint64_t mtime[2], ctime[2];
        SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
        SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
        SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
            &zp->z_size, 8);
        SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
            &zp->z_pflags, 8);

        /*
         * Callers might not be able to properly detect that we are read-only,
         * so check it explicitly here.
         */
        if (zfs_is_readonly(zfsvfs)) {
                zfs_exit(zfsvfs, FTAG);
                return (SET_ERROR(EROFS));
        }

        /*
         * If the file is immutable, or append-only and this is not an
         * appending write, return EPERM.
         * Intentionally allow ZFS_READONLY through here.
         * See zfs_zaccess_common()
         */
        if ((zp->z_pflags & ZFS_IMMUTABLE) ||
            ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & O_APPEND) &&
            (zfs_uio_offset(uio) < zp->z_size))) {
                zfs_exit(zfsvfs, FTAG);
                return (SET_ERROR(EPERM));
        }

        /*
         * Validate file offset
         */
        offset_t woff = ioflag & O_APPEND ? zp->z_size : zfs_uio_offset(uio);
        if (woff < 0) {
                zfs_exit(zfsvfs, FTAG);
                return (SET_ERROR(EINVAL));
        }

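        /*
         * Whether this request actually uses Direct I/O depends on the
         * dataset's "direct" property and on the O_DIRECT alignment rules:
         * offsets and sizes must be at least PAGE_SIZE aligned.  With
         * direct=standard a misaligned O_DIRECT request fails with EINVAL,
         * direct=always falls back to the ARC instead of failing, and
         * direct=disabled treats the request as buffered I/O.
         * zfs_setup_direct() applies that policy and marks the uio for
         * Direct I/O when it applies.
         */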
        /*
         * Set up Direct I/O if requested.
         */
        error = zfs_setup_direct(zp, uio, UIO_WRITE, &ioflag);
        if (error) {
                zfs_exit(zfsvfs, FTAG);
                return (SET_ERROR(error));
        }

        /*
         * Pre-fault the pages to ensure slow (e.g. NFS) pages
         * don't hold up the txg.
         */
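        /*
         * Only up to DMU_MAX_ACCESS / 2 bytes are pre-faulted here; if a
         * later chunk needs more than has been pre-faulted so far, the
         * write loop below faults in additional pages on demand.
         */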
        ssize_t pfbytes = MIN(n, DMU_MAX_ACCESS >> 1);
        if (zfs_uio_prefaultpages(pfbytes, uio)) {
                zfs_exit(zfsvfs, FTAG);
                return (SET_ERROR(EFAULT));
        }

        /*
         * If in append mode, set the io offset pointer to eof.
         */
        zfs_locked_range_t *lr;
        if (ioflag & O_APPEND) {
                /*
                 * Obtain an appending range lock to guarantee file append
                 * semantics.  We reset the write offset once we have the lock.
                 */
                lr = zfs_rangelock_enter(&zp->z_rangelock, 0, n, RL_APPEND);
                woff = lr->lr_offset;
                if (lr->lr_length == UINT64_MAX) {
                        /*
                         * We overlocked the file because this write will cause
                         * the file block size to increase.
                         * Note that zp_size cannot change with this lock held.
                         */
                        woff = zp->z_size;
                }
                zfs_uio_setoffset(uio, woff);
                /*
                 * We need to update the starting offset as well because it is
                 * set previously in the ZPL (Linux) and VNOPS (FreeBSD)
                 * layers.
                 */
                zfs_uio_setsoffset(uio, woff);
        } else {
                /*
                 * Note that if the file block size will change as a result of
                 * this write, then this range lock will lock the entire file
                 * so that we can re-write the block safely.
                 */
                lr = zfs_rangelock_enter(&zp->z_rangelock, woff, n, RL_WRITER);
        }

        if (zn_rlimit_fsize_uio(zp, uio)) {
                zfs_rangelock_exit(lr);
                zfs_exit(zfsvfs, FTAG);
                return (SET_ERROR(EFBIG));
        }

        const rlim64_t limit = MAXOFFSET_T;

        if (woff >= limit) {
                zfs_rangelock_exit(lr);
                zfs_exit(zfsvfs, FTAG);
                return (SET_ERROR(EFBIG));
        }

        if (n > limit - woff)
                n = limit - woff;

        uint64_t end_size = MAX(zp->z_size, woff + n);
        zilog_t *zilog = zfsvfs->z_log;
        boolean_t commit = (ioflag & (O_SYNC | O_DSYNC)) ||
            (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS);

        const uint64_t uid = KUID_TO_SUID(ZTOUID(zp));
        const uint64_t gid = KGID_TO_SGID(ZTOGID(zp));
        const uint64_t projid = zp->z_projid;

        /*
         * In the event we are increasing the file block size
         * (lr_length == UINT64_MAX), we will direct the write to the ARC.
         * Because zfs_grow_blocksize() will read from the ARC in order to
         * grow the dbuf, we avoid doing Direct I/O here as that would cause
         * data written to disk to be overwritten by data in the ARC during
         * the sync phase.  Besides writing data twice to disk, we also
         * want to avoid consistency concerns between data in the ARC and
         * on disk while growing the file's blocksize.
         *
         * We will only temporarily remove Direct I/O and put it back after
         * we have grown the blocksize.  We do this in the event a request
         * is larger than max_blksz, so further requests to
         * dmu_write_uio_dbuf() will still issue the requests using Direct
         * I/O.
         *
         * As an example:
         * The first block of the file is being written as a 4k request with
         * a recordsize of 1K.  The first 1K issued in the loop below will go
         * through the ARC; however, the following 3 1K requests will
         * use Direct I/O.
         */
        if (uio->uio_extflg & UIO_DIRECT && lr->lr_length == UINT64_MAX) {
                uio->uio_extflg &= ~UIO_DIRECT;
                o_direct_defer = B_TRUE;
        }

        /*
         * Write the file in reasonable size chunks.  Each chunk is written
         * in a separate transaction; this keeps the intent log records small
         * and allows us to do more fine-grained space accounting.
         */
        while (n > 0) {
                woff = zfs_uio_offset(uio);

                if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT, uid) ||
                    zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT, gid) ||
                    (projid != ZFS_DEFAULT_PROJID &&
                    zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
                    projid))) {
                        error = SET_ERROR(EDQUOT);
                        break;
                }

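                /*
                 * Pick the block size for this chunk.  While the range lock
                 * still covers the whole file the block size may grow, up to
                 * the dataset's recordsize (or the next power of two if it is
                 * already larger), but no further than end_size requires;
                 * otherwise the file's current block size is used as-is.
                 */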
                uint64_t blksz;
                if (lr->lr_length == UINT64_MAX && zp->z_size <= zp->z_blksz) {
                        if (zp->z_blksz > zfsvfs->z_max_blksz &&
                            !ISP2(zp->z_blksz)) {
                                /*
                                 * File's blocksize is already larger than the
                                 * "recordsize" property.  Only let it grow to
                                 * the next power of 2.
                                 */
                                blksz = 1 << highbit64(zp->z_blksz);
                        } else {
                                blksz = zfsvfs->z_max_blksz;
                        }
                        blksz = MIN(blksz, P2ROUNDUP(end_size,
                            SPA_MINBLOCKSIZE));
                        blksz = MAX(blksz, zp->z_blksz);
                } else {
                        blksz = zp->z_blksz;
                }

                arc_buf_t *abuf = NULL;
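                /*
                 * Decide whether to "borrow" an ARC buffer for this chunk.
                 * Borrowing is skipped for Direct I/O and, when the block
                 * size is smaller than SPA_OLD_MAXBLOCKSIZE, for requests at
                 * least 4x the block size, so that several blocks can be
                 * written in a single large transaction instead of one
                 * transaction per block.
                 */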
                ssize_t nbytes = n;
                if (n >= blksz && woff >= zp->z_size &&
                    P2PHASE(woff, blksz) == 0 &&
                    !(uio->uio_extflg & UIO_DIRECT) &&
                    (blksz >= SPA_OLD_MAXBLOCKSIZE || n < 4 * blksz)) {
                        /*
                         * This write covers a full block.  "Borrow" a buffer
                         * from the dmu so that we can fill it before we enter
                         * a transaction.  This avoids the possibility of
                         * holding up the transaction if the data copy hangs
                         * up on a pagefault (e.g., from an NFS server mapping).
                         */
                        abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
                            blksz);
                        ASSERT(abuf != NULL);
                        ASSERT(arc_buf_size(abuf) == blksz);
                        if ((error = zfs_uiocopy(abuf->b_data, blksz,
                            UIO_WRITE, uio, &nbytes))) {
                                dmu_return_arcbuf(abuf);
                                break;
                        }
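                        /* zfs_uiocopy() must have filled exactly one block. */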
                        ASSERT3S(nbytes, ==, blksz);
                } else {
                        nbytes = MIN(n, (DMU_MAX_ACCESS >> 1) -
                            P2PHASE(woff, blksz));
                        if (pfbytes < nbytes) {
                                if (zfs_uio_prefaultpages(nbytes, uio)) {
                                        error = SET_ERROR(EFAULT);
                                        break;
                                }
                                pfbytes = nbytes;
                        }
                }

                /*
                 * Start a transaction.
                 */
                dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
                dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
                dmu_buf_impl_t *db = (dmu_buf_impl_t *)sa_get_db(zp->z_sa_hdl);
                DB_DNODE_ENTER(db);
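                /*
                 * Reserve the data range being written on the file's dnode so
                 * dmu_tx_assign() can account for the space this chunk will
                 * consume.
                 */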
                dmu_tx_hold_write_by_dnode(tx, DB_DNODE(db), woff, nbytes);
                DB_DNODE_EXIT(db);
                zfs_sa_upgrade_txholds(tx, zp);
                error = dmu_tx_assign(tx, TXG_WAIT);
                if (error) {
                        dmu_tx_abort(tx);
                        if (abuf != NULL)
                                dmu_return_arcbuf(abuf);
                        break;
                }

                /*
                 * NB: We must call zfs_clear_setid_bits_if_necessary before
                 * committing the transaction!
                 */

                /*
                 * If rangelock_enter() over-locked we grow the blocksize
                 * and then reduce the lock range.  This will only happen
                 * on the first iteration since rangelock_reduce() will
                 * shrink down lr_length to the appropriate size.
                 */
                if (lr->lr_length == UINT64_MAX) {
			zfs_grow_blocksize(zp, blksz, tx);
			zfs_rangelock_reduce(lr, woff, n);
		}

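		/*
		 * abuf == NULL means the data is copied from the uio straight
		 * into dbufs under this transaction.  Otherwise a previously
		 * borrowed ARC buffer already holds a full block of data and
		 * is assigned directly to a dbuf below.
		 */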
		ssize_t tx_bytes;
		if (abuf == NULL) {
			tx_bytes = zfs_uio_resid(uio);
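			/*
			 * Disable page faults while copying in from the uio
			 * so we never fault with the transaction open; on
			 * Linux an EFAULT from the copy is handled below by
			 * committing the partial write and prefaulting on the
			 * next loop iteration.
			 */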
			zfs_uio_fault_disable(uio, B_TRUE);
			error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
			    uio, nbytes, tx);
			zfs_uio_fault_disable(uio, B_FALSE);
#ifdef __linux__
			if (error == EFAULT) {
				zfs_clear_setid_bits_if_necessary(zfsvfs, zp,
				    cr, &clear_setid_bits_txg, tx);
				dmu_tx_commit(tx);
				/*
				 * Account for partial writes before
				 * continuing the loop.
				 * Update needs to occur before the next
				 * zfs_uio_prefaultpages, or prefaultpages may
				 * error, and we may break the loop early.
				 */
				n -= tx_bytes - zfs_uio_resid(uio);
				pfbytes -= tx_bytes - zfs_uio_resid(uio);
				continue;
			}
#endif
			/*
			 * On FreeBSD, EFAULT should be propagated back to the
			 * VFS, which will handle faulting and will retry.
			 */
			if (error != 0 && error != EFAULT) {
				zfs_clear_setid_bits_if_necessary(zfsvfs, zp,
				    cr, &clear_setid_bits_txg, tx);
				dmu_tx_commit(tx);
				break;
			}
			tx_bytes -= zfs_uio_resid(uio);
		} else {
			/*
			 * Thus, we're writing a full block at a block-aligned
			 * offset and extending the file past EOF.
			 *
			 * dmu_assign_arcbuf_by_dbuf() will directly assign the
			 * arc buffer to a dbuf.
			 */
			error = dmu_assign_arcbuf_by_dbuf(
			    sa_get_db(zp->z_sa_hdl), woff, abuf, tx);
			if (error != 0) {
				/*
				 * XXX This might not be necessary if
				 * dmu_assign_arcbuf_by_dbuf is guaranteed
				 * to be atomic.
				 */
				zfs_clear_setid_bits_if_necessary(zfsvfs, zp,
				    cr, &clear_setid_bits_txg, tx);
				dmu_return_arcbuf(abuf);
				dmu_tx_commit(tx);
				break;
			}
			ASSERT3S(nbytes, <=, zfs_uio_resid(uio));
			zfs_uioskip(uio, nbytes);
			tx_bytes = nbytes;
		}
		/*
		 * There is a window where a file's pages can be mmap'ed after
		 * zfs_setup_direct() is called. This is due to the fact that
		 * the rangelock in this function is acquired after calling
		 * zfs_setup_direct(). This is done so that
		 * zfs_uio_prefaultpages() does not attempt to fault in pages
		 * on Linux for Direct I/O requests. This is not necessary as
		 * the pages are pinned in memory and cannot be faulted out.
		 * Ideally, the rangelock would be held before calling
		 * zfs_setup_direct() and zfs_uio_prefaultpages(); however,
		 * this can lead to a deadlock as zfs_getpage() also acquires
		 * the rangelock as a RL_WRITER and prefaulting the pages can
		 * lead to zfs_getpage() being called.
		 *
		 * In the case of the pages being mapped after
		 * zfs_setup_direct() is called, the call to update_pages()
		 * will still be made to make sure there is consistency between
		 * the ARC and the Linux page cache. This is an unfortunate
		 * situation as the data will be read back into the ARC after
		 * the Direct I/O write has completed, but this is the penalty
		 * for writing to an mmap'ed region of a file using Direct I/O.
		 */
		if (tx_bytes &&
		    zn_has_cached_data(zp, woff, woff + tx_bytes - 1)) {
			update_pages(zp, woff, tx_bytes, zfsvfs->z_os);
		}

		/*
		 * If we made no progress, we're done. If we made even
		 * partial progress, update the znode and ZIL accordingly.
		 */
		if (tx_bytes == 0) {
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
			    (void *)&zp->z_size, sizeof (uint64_t), tx);
			dmu_tx_commit(tx);
			ASSERT(error != 0);
			break;
		}

		zfs_clear_setid_bits_if_necessary(zfsvfs, zp, cr,
		    &clear_setid_bits_txg, tx);

		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);

		/*
		 * Update the file size (zp_size) if it has changed;
		 * account for possible concurrent updates.
		 */
		while ((end_size = zp->z_size) < zfs_uio_offset(uio)) {
			(void) atomic_cas_64(&zp->z_size, end_size,
			    zfs_uio_offset(uio));
			ASSERT(error == 0 || error == EFAULT);
		}
		/*
		 * If we are replaying and eof is non zero then force
		 * the file size to the specified eof. Note, there's no
		 * concurrency during replay.
		 */
		if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0)
			zp->z_size = zfsvfs->z_replay_eof;

		error1 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
		if (error1 != 0)
			/* Avoid clobbering EFAULT. */
			error = error1;

		/*
		 * NB: During replay, the TX_SETATTR record logged by
		 * zfs_clear_setid_bits_if_necessary must precede any of
		 * the TX_WRITE records logged here.
		 */
		zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, commit,
		    uio->uio_extflg & UIO_DIRECT ? B_TRUE : B_FALSE, NULL,
		    NULL);

		dmu_tx_commit(tx);

		/*
		 * Direct I/O was deferred in order to grow the first block.
		 * At this point it can be re-enabled for subsequent writes.
		 */
		if (o_direct_defer) {
			ASSERT(ioflag & O_DIRECT);
			uio->uio_extflg |= UIO_DIRECT;
			o_direct_defer = B_FALSE;
		}

		if (error != 0)
			break;
		ASSERT3S(tx_bytes, ==, nbytes);
		n -= nbytes;
		pfbytes -= nbytes;
	}

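	/*
	 * If the write loop exited while Direct I/O was still deferred,
	 * restore the UIO_DIRECT flag now so the cleanup below releases the
	 * pinned user pages.
	 */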
	if (o_direct_defer) {
		ASSERT(ioflag & O_DIRECT);
		uio->uio_extflg |= UIO_DIRECT;
		o_direct_defer = B_FALSE;
	}

	zfs_znode_update_vfs(zp);
	zfs_rangelock_exit(lr);

	/*
	 * Cleanup for Direct I/O if requested.
	 */
	if (uio->uio_extflg & UIO_DIRECT)
		zfs_uio_free_dio_pages(uio, UIO_WRITE);

	/*
	 * If we're in replay mode, or we made no progress, or the
	 * uio data is inaccessible, return an error.  Otherwise, it's
	 * at least a partial write, so it's successful.
	 */
	if (zfsvfs->z_replay || zfs_uio_resid(uio) == start_resid ||
	    error == EFAULT) {
		zfs_exit(zfsvfs, FTAG);
		return (error);
	}

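	/*
	 * If the caller requested a synchronous write, commit the ZIL up to
	 * this file before returning so the data is on stable storage.
	 */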
	if (commit)
		zil_commit(zilog, zp->z_id);

	int64_t nwritten = start_resid - zfs_uio_resid(uio);
	dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, nwritten);

	zfs_exit(zfsvfs, FTAG);
	return (0);
}

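/*
 * Retrieve the ACL (vsecattr) for zp.  When ATTR_NOACLCHECK is set in
 * flag, the ACL permission check in zfs_getacl() is skipped.
 */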
int
zfs_getsecattr(znode_t *zp, vsecattr_t *vsecp, int flag, cred_t *cr)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	int error;
	boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;

	if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
		return (error);
	error = zfs_getacl(zp, vsecp, skipaclchk, cr);
	zfs_exit(zfsvfs, FTAG);

	return (error);
}

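/*
 * Set the ACL for zp.  If the dataset is configured with sync=always,
 * the ZIL is committed before returning so the ACL change is on stable
 * storage.
 */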
int
zfs_setsecattr(znode_t *zp, vsecattr_t *vsecp, int flag, cred_t *cr)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	int error;
	boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
	zilog_t *zilog;

	if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
		return (error);
	zilog = zfsvfs->z_log;
	error = zfs_setacl(zp, vsecp, skipaclchk, cr);

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	zfs_exit(zfsvfs, FTAG);
	return (error);
}

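/*
 * Fault injection hook: when zil_fault_io is set (e.g. from a debugger),
 * the next indirect zfs_get_data() call fails with EIO and the flag is
 * cleared again, exercising the ZIL error handling path.
 */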
#ifdef ZFS_DEBUG
static int zil_fault_io = 0;
#endif

static void zfs_get_done(zgd_t *zgd, int error);

/*
 * Get data to generate a TX_WRITE intent log record.
 */
int
zfs_get_data(void *arg, uint64_t gen, lr_write_t *lr, char *buf,
    struct lwb *lwb, zio_t *zio)
{
	zfsvfs_t *zfsvfs = arg;
	objset_t *os = zfsvfs->z_os;
	znode_t *zp;
	uint64_t object = lr->lr_foid;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;
	zgd_t *zgd;
	int error = 0;
	uint64_t zp_gen;

	ASSERT3P(lwb, !=, NULL);
	ASSERT3U(size, !=, 0);

	/*
	 * Nothing to do if the file has been removed
	 */
	if (zfs_zget(zfsvfs, object, &zp) != 0)
		return (SET_ERROR(ENOENT));
	if (zp->z_unlinked) {
		/*
		 * Release the vnode asynchronously as we currently have the
		 * txg stopped from syncing.
		 */
		zfs_zrele_async(zp);
		return (SET_ERROR(ENOENT));
	}
	/* check if generation number matches */
	if (sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs), &zp_gen,
	    sizeof (zp_gen)) != 0) {
		zfs_zrele_async(zp);
		return (SET_ERROR(EIO));
	}
	if (zp_gen != gen) {
		zfs_zrele_async(zp);
		return (SET_ERROR(ENOENT));
	}

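	/*
	 * Allocate the zgd that tracks the range lock taken below until
	 * zfs_get_done() releases it.
	 */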
	zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_lwb = lwb;
	zgd->zgd_private = zp;

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
	if (buf != NULL) { /* immediate write */
		zgd->zgd_lr = zfs_rangelock_enter(&zp->z_rangelock, offset,
		    size, RL_READER);
		/* test for truncation needs to be done while range locked */
		if (offset >= zp->z_size) {
			error = SET_ERROR(ENOENT);
		} else {
			error = dmu_read(os, object, offset, size, buf,
			    DMU_READ_NO_PREFETCH);
		}
		ASSERT(error == 0 || error == ENOENT);
	} else { /* indirect write */
		ASSERT3P(zio, !=, NULL);
		/*
		 * Have to lock the whole block to ensure when it's
		 * written out and its checksum is being calculated
		 * that no one can change the data. We need to re-check
		 * blocksize after we get the lock in case it's changed!
		 */
		for (;;) {
			uint64_t blkoff;
			size = zp->z_blksz;
			blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
			offset -= blkoff;
			zgd->zgd_lr = zfs_rangelock_enter(&zp->z_rangelock,
			    offset, size, RL_READER);
			if (zp->z_blksz == size)
				break;
			offset += blkoff;
			zfs_rangelock_exit(zgd->zgd_lr);
		}
		/* test for truncation needs to be done while range locked */
		if (lr->lr_offset >= zp->z_size)
			error = SET_ERROR(ENOENT);
#ifdef ZFS_DEBUG
		if (zil_fault_io) {
			error = SET_ERROR(EIO);
			zil_fault_io = 0;
		}
#endif

		dmu_buf_t *dbp;
		if (error == 0)
			error = dmu_buf_hold_noread(os, object, offset, zgd,
2024-09-14 23:47:59 +03:00
|
|
|
&dbp);
|
2020-11-02 23:07:07 +03:00
|
|
|
|
|
|
|
if (error == 0) {
|
2024-09-14 23:47:59 +03:00
|
|
|
zgd->zgd_db = dbp;
|
|
|
|
dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbp;
|
|
|
|
boolean_t direct_write = B_FALSE;
|
|
|
|
mutex_enter(&db->db_mtx);
|
|
|
|
dbuf_dirty_record_t *dr =
|
|
|
|
dbuf_find_dirty_eq(db, lr->lr_common.lrc_txg);
|
|
|
|
if (dr != NULL && dr->dt.dl.dr_diowrite)
|
|
|
|
direct_write = B_TRUE;
|
|
|
|
mutex_exit(&db->db_mtx);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* All Direct I/O writes will have already completed and
|
|
|
|
* the block pointer can be immediately stored in the
|
|
|
|
* log record.
|
|
|
|
*/
|
|
|
|
if (direct_write) {
|
|
|
|
/*
|
|
|
|
* A Direct I/O write always covers an entire
|
|
|
|
* block.
|
|
|
|
*/
|
|
|
|
ASSERT3U(dbp->db_size, ==, zp->z_blksz);
|
|
|
|
lr->lr_blkptr = dr->dt.dl.dr_overridden_by;
|
|
|
|
zfs_get_done(zgd, 0);
|
|
|
|
return (0);
|
|
|
|
}
|
2020-11-02 23:07:07 +03:00
|
|
|
|
2024-09-14 23:47:59 +03:00
|
|
|
blkptr_t *bp = &lr->lr_blkptr;
|
2020-11-02 23:07:07 +03:00
|
|
|
zgd->zgd_bp = bp;
|
|
|
|
|
2024-09-14 23:47:59 +03:00
|
|
|
ASSERT3U(dbp->db_offset, ==, offset);
|
|
|
|
ASSERT3U(dbp->db_size, ==, size);
|
2020-11-02 23:07:07 +03:00
|
|
|
|
|
|
|
error = dmu_sync(zio, lr->lr_common.lrc_txg,
|
|
|
|
zfs_get_done, zgd);
|
|
|
|
ASSERT(error || lr->lr_length <= size);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* On success, we need to wait for the write I/O
|
|
|
|
* initiated by dmu_sync() to complete before we can
|
|
|
|
* release this dbuf. We will finish everything up
|
|
|
|
* in the zfs_get_done() callback.
|
|
|
|
*/
|
|
|
|
if (error == 0)
|
|
|
|
return (0);
|
|
|
|
|
|
|
|
if (error == EALREADY) {
|
|
|
|
lr->lr_common.lrc_txtype = TX_WRITE2;
|
|
|
|
/*
|
|
|
|
* TX_WRITE2 relies on the data previously
|
|
|
|
* written by the TX_WRITE that caused
|
|
|
|
* EALREADY. We zero out the BP because
|
|
|
|
* it is the old, currently-on-disk BP.
|
|
|
|
*/
|
|
|
|
zgd->zgd_bp = NULL;
|
|
|
|
BP_ZERO(bp);
|
|
|
|
error = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
zfs_get_done(zgd, error);
|
|
|
|
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
zfs_get_done(zgd_t *zgd, int error)
|
|
|
|
{
|
2022-02-16 04:38:43 +03:00
|
|
|
(void) error;
|
2020-11-02 23:07:07 +03:00
|
|
|
znode_t *zp = zgd->zgd_private;
|
|
|
|
|
|
|
|
if (zgd->zgd_db)
|
|
|
|
dmu_buf_rele(zgd->zgd_db, zgd);
|
|
|
|
|
|
|
|
zfs_rangelock_exit(zgd->zgd_lr);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Release the vnode asynchronously as we currently have the
|
|
|
|
* txg stopped from syncing.
|
|
|
|
*/
|
|
|
|
zfs_zrele_async(zp);
|
|
|
|
|
|
|
|
kmem_free(zgd, sizeof (zgd_t));
|
|
|
|
}
|
|
|
|
|
2023-03-10 22:59:53 +03:00
|
|
|
static int
|
|
|
|
zfs_enter_two(zfsvfs_t *zfsvfs1, zfsvfs_t *zfsvfs2, const char *tag)
|
|
|
|
{
|
|
|
|
int error;
|
|
|
|
|
|
|
|
/* Swap. Not sure if the order of zfs_enter()s is important. */
|
|
|
|
if (zfsvfs1 > zfsvfs2) {
|
|
|
|
zfsvfs_t *tmpzfsvfs;
|
|
|
|
|
|
|
|
tmpzfsvfs = zfsvfs2;
|
|
|
|
zfsvfs2 = zfsvfs1;
|
|
|
|
zfsvfs1 = tmpzfsvfs;
|
|
|
|
}
|
|
|
|
|
|
|
|
error = zfs_enter(zfsvfs1, tag);
|
|
|
|
if (error != 0)
|
|
|
|
return (error);
|
|
|
|
if (zfsvfs1 != zfsvfs2) {
|
|
|
|
error = zfs_enter(zfsvfs2, tag);
|
|
|
|
if (error != 0) {
|
|
|
|
zfs_exit(zfsvfs1, tag);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
zfs_exit_two(zfsvfs_t *zfsvfs1, zfsvfs_t *zfsvfs2, const char *tag)
|
|
|
|
{
|
|
|
|
|
|
|
|
zfs_exit(zfsvfs1, tag);
|
|
|
|
if (zfsvfs1 != zfsvfs2)
|
|
|
|
zfs_exit(zfsvfs2, tag);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We split each clone request in chunks that can fit into a single ZIL
|
|
|
|
* log entry. Each ZIL log entry can fit 130816 bytes for a block cloning
|
|
|
|
* operation (see zil_max_log_data() and zfs_log_clone_range()). This gives
|
|
|
|
* us room for storing 1022 block pointers.
|
|
|
|
*
|
|
|
|
* On success, the function returns the number of bytes copied in *lenp.
|
|
|
|
* Note that it does not return how many bytes are left to be copied.
|
2023-08-08 19:37:06 +03:00
|
|
|
* On errors caused by file system limitations or
|
|
|
|
* BRT limitations, `EINVAL` is returned. In most cases the user
|
|
|
|
* requested bad parameters: it could be possible to clone the file, but
|
|
|
|
* some of the parameters don't match the requirements.
|
2023-03-10 22:59:53 +03:00
|
|
|
*/
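/*
 * Quick arithmetic check of the figure above, assuming the on-disk block
 * pointer (blkptr_t) is 128 bytes: 130816 / 128 = 1022 block pointers.
 */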
|
|
|
|
int
|
|
|
|
zfs_clone_range(znode_t *inzp, uint64_t *inoffp, znode_t *outzp,
|
|
|
|
uint64_t *outoffp, uint64_t *lenp, cred_t *cr)
|
|
|
|
{
|
|
|
|
zfsvfs_t *inzfsvfs, *outzfsvfs;
|
|
|
|
objset_t *inos, *outos;
|
|
|
|
zfs_locked_range_t *inlr, *outlr;
|
|
|
|
dmu_buf_impl_t *db;
|
|
|
|
dmu_tx_t *tx;
|
|
|
|
zilog_t *zilog;
|
|
|
|
uint64_t inoff, outoff, len, done;
|
|
|
|
uint64_t outsize, size;
|
|
|
|
int error;
|
|
|
|
int count = 0;
|
|
|
|
sa_bulk_attr_t bulk[3];
|
|
|
|
uint64_t mtime[2], ctime[2];
|
|
|
|
uint64_t uid, gid, projid;
|
|
|
|
blkptr_t *bps;
|
|
|
|
size_t maxblocks, nbps;
|
|
|
|
uint_t inblksz;
|
|
|
|
uint64_t clear_setid_bits_txg = 0;
|
2024-02-06 03:44:45 +03:00
|
|
|
uint64_t last_synced_txg = 0;
|
2023-03-10 22:59:53 +03:00
|
|
|
|
|
|
|
inoff = *inoffp;
|
|
|
|
outoff = *outoffp;
|
|
|
|
len = *lenp;
|
|
|
|
done = 0;
|
|
|
|
|
|
|
|
inzfsvfs = ZTOZSB(inzp);
|
|
|
|
outzfsvfs = ZTOZSB(outzp);
|
2023-05-03 10:24:47 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* We need to call zfs_enter() potentially on two different datasets,
|
|
|
|
* so we need a dedicated function for that.
|
|
|
|
*/
|
|
|
|
error = zfs_enter_two(inzfsvfs, outzfsvfs, FTAG);
|
|
|
|
if (error != 0)
|
|
|
|
return (error);
|
|
|
|
|
2023-03-10 22:59:53 +03:00
|
|
|
inos = inzfsvfs->z_os;
|
|
|
|
outos = outzfsvfs->z_os;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Both source and destination have to belong to the same storage pool.
|
|
|
|
*/
|
|
|
|
if (dmu_objset_spa(inos) != dmu_objset_spa(outos)) {
|
|
|
|
zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
|
|
|
|
return (SET_ERROR(EXDEV));
|
|
|
|
}
|
|
|
|
|
2023-07-27 21:32:34 +03:00
|
|
|
/*
|
|
|
|
* outos and inos belong to the same storage pool, as checked
|
|
|
|
* a few lines above, so only one feature check is needed.
|
|
|
|
*/
|
|
|
|
if (!spa_feature_is_enabled(dmu_objset_spa(outos),
|
|
|
|
SPA_FEATURE_BLOCK_CLONING)) {
|
|
|
|
zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
|
|
|
|
return (SET_ERROR(EOPNOTSUPP));
|
|
|
|
}
|
|
|
|
|
2023-03-10 22:59:53 +03:00
|
|
|
ASSERT(!outzfsvfs->z_replay);
|
|
|
|
|
2023-10-31 23:49:41 +03:00
|
|
|
/*
|
|
|
|
* Block cloning from an unencrypted dataset into an encrypted
|
|
|
|
* dataset and vice versa is not supported.
|
|
|
|
*/
|
|
|
|
if (inos->os_encrypted != outos->os_encrypted) {
|
|
|
|
zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
|
|
|
|
return (SET_ERROR(EXDEV));
|
|
|
|
}
|
|
|
|
|
2023-12-05 22:03:48 +03:00
|
|
|
/*
|
|
|
|
* Cloning across encrypted datasets is possible only if they
|
|
|
|
* share the same master key.
|
|
|
|
*/
|
|
|
|
if (inos != outos && inos->os_encrypted &&
|
|
|
|
!dmu_objset_crypto_key_equal(inos, outos)) {
|
|
|
|
zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
|
|
|
|
return (SET_ERROR(EXDEV));
|
|
|
|
}
|
|
|
|
|
2023-03-10 22:59:53 +03:00
|
|
|
error = zfs_verify_zp(inzp);
|
|
|
|
if (error == 0)
|
|
|
|
error = zfs_verify_zp(outzp);
|
|
|
|
if (error != 0) {
|
|
|
|
zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We don't copy the source file's flags, which is why we don't allow cloning
|
|
|
|
* files that are in quarantine.
|
|
|
|
*/
|
|
|
|
if (inzp->z_pflags & ZFS_AV_QUARANTINED) {
|
|
|
|
zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
|
|
|
|
return (SET_ERROR(EACCES));
|
|
|
|
}
|
|
|
|
|
|
|
|
if (inoff >= inzp->z_size) {
|
|
|
|
*lenp = 0;
|
|
|
|
zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
if (len > inzp->z_size - inoff) {
|
|
|
|
len = inzp->z_size - inoff;
|
|
|
|
}
|
|
|
|
if (len == 0) {
|
|
|
|
*lenp = 0;
|
|
|
|
zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Callers might not be able to detect properly that we are read-only,
|
|
|
|
* so check it explicitly here.
|
|
|
|
*/
|
|
|
|
if (zfs_is_readonly(outzfsvfs)) {
|
|
|
|
zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
|
|
|
|
return (SET_ERROR(EROFS));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If immutable or not appending then return EPERM.
|
|
|
|
* Intentionally allow ZFS_READONLY through here.
|
|
|
|
* See zfs_zaccess_common()
|
|
|
|
*/
|
|
|
|
if ((outzp->z_pflags & ZFS_IMMUTABLE) != 0) {
|
|
|
|
zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
|
|
|
|
return (SET_ERROR(EPERM));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* No overlapping if we are cloning within the same file.
|
|
|
|
*/
|
|
|
|
if (inzp == outzp) {
|
|
|
|
if (inoff < outoff + len && outoff < inoff + len) {
|
|
|
|
zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
|
|
|
|
return (SET_ERROR(EINVAL));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-03-26 00:56:49 +03:00
|
|
|
/* Flush any mmap()'d data to disk */
|
|
|
|
if (zn_has_cached_data(inzp, inoff, inoff + len - 1))
|
|
|
|
zn_flush_cached_data(inzp, B_TRUE);
|
|
|
|
|
2023-03-10 22:59:53 +03:00
|
|
|
/*
|
|
|
|
* Maintain predictable lock order.
|
|
|
|
*/
|
|
|
|
if (inzp < outzp || (inzp == outzp && inoff < outoff)) {
|
|
|
|
inlr = zfs_rangelock_enter(&inzp->z_rangelock, inoff, len,
|
|
|
|
RL_READER);
|
|
|
|
outlr = zfs_rangelock_enter(&outzp->z_rangelock, outoff, len,
|
|
|
|
RL_WRITER);
|
|
|
|
} else {
|
|
|
|
outlr = zfs_rangelock_enter(&outzp->z_rangelock, outoff, len,
|
|
|
|
RL_WRITER);
|
|
|
|
inlr = zfs_rangelock_enter(&inzp->z_rangelock, inoff, len,
|
|
|
|
RL_READER);
|
|
|
|
}
|
|
|
|
|
|
|
|
inblksz = inzp->z_blksz;
|
|
|
|
|
|
|
|
/*
|
2024-01-09 20:46:43 +03:00
|
|
|
* We cannot clone into a file with a different block size if we can't
|
|
|
|
* grow it (block size is already bigger, has more than one block, or
|
|
|
|
* not locked for growth). There are other possible reasons for the
|
|
|
|
* grow to fail, but we cover what we can before opening the transaction
|
|
|
|
* and detect the rest after we try to do it.
|
2023-03-10 22:59:53 +03:00
|
|
|
*/
|
2024-01-09 20:46:43 +03:00
|
|
|
if (inblksz < outzp->z_blksz) {
|
|
|
|
error = SET_ERROR(EINVAL);
|
|
|
|
goto unlock;
|
|
|
|
}
|
2023-09-09 20:22:36 +03:00
|
|
|
if (inblksz != outzp->z_blksz && (outzp->z_size > outzp->z_blksz ||
|
2024-01-09 20:46:43 +03:00
|
|
|
outlr->lr_length != UINT64_MAX)) {
|
2023-09-09 20:22:36 +03:00
|
|
|
error = SET_ERROR(EINVAL);
|
|
|
|
goto unlock;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Block size must be a power of 2 if the destination offset != 0.
|
|
|
|
* There cannot be multiple blocks of non-power-of-2 size.
|
|
|
|
*/
|
|
|
|
if (outoff != 0 && !ISP2(inblksz)) {
|
2023-08-08 19:37:06 +03:00
|
|
|
error = SET_ERROR(EINVAL);
|
2023-03-10 22:59:53 +03:00
|
|
|
goto unlock;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Offsets and len must be at block boundaries.
|
|
|
|
*/
|
|
|
|
if ((inoff % inblksz) != 0 || (outoff % inblksz) != 0) {
|
2023-08-08 19:37:06 +03:00
|
|
|
error = SET_ERROR(EINVAL);
|
2023-03-10 22:59:53 +03:00
|
|
|
goto unlock;
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* Length must be a multiple of blksz, except at the end of the file.
|
|
|
|
*/
|
|
|
|
if ((len % inblksz) != 0 &&
|
|
|
|
(len < inzp->z_size - inoff || len < outzp->z_size - outoff)) {
|
2023-08-08 19:37:06 +03:00
|
|
|
error = SET_ERROR(EINVAL);
|
2023-03-10 22:59:53 +03:00
|
|
|
goto unlock;
|
|
|
|
}
|
|
|
|
|
2023-09-29 18:22:46 +03:00
|
|
|
/*
|
|
|
|
* If we are copying only one block and it is smaller than the recordsize
|
|
|
|
* property, do not allow the destination to grow beyond one block if it
|
|
|
|
* is not there yet. Otherwise the destination will get stuck with
|
|
|
|
* that block size forever, which can be as small as 512 bytes, no
|
|
|
|
* matter how big the destination grows later.
|
|
|
|
*/
|
|
|
|
if (len <= inblksz && inblksz < outzfsvfs->z_max_blksz &&
|
|
|
|
outzp->z_size <= inblksz && outoff + len > inblksz) {
|
|
|
|
error = SET_ERROR(EINVAL);
|
|
|
|
goto unlock;
|
|
|
|
}
|
|
|
|
|
2023-03-10 22:59:53 +03:00
|
|
|
error = zn_rlimit_fsize(outoff + len);
|
|
|
|
if (error != 0) {
|
|
|
|
goto unlock;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (inoff >= MAXOFFSET_T || outoff >= MAXOFFSET_T) {
|
|
|
|
error = SET_ERROR(EFBIG);
|
|
|
|
goto unlock;
|
|
|
|
}
|
|
|
|
|
|
|
|
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(outzfsvfs), NULL,
|
|
|
|
&mtime, 16);
|
|
|
|
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(outzfsvfs), NULL,
|
|
|
|
&ctime, 16);
|
|
|
|
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(outzfsvfs), NULL,
|
|
|
|
&outzp->z_size, 8);
|
|
|
|
|
|
|
|
zilog = outzfsvfs->z_log;
|
|
|
|
maxblocks = zil_max_log_data(zilog, sizeof (lr_clone_range_t)) /
|
|
|
|
sizeof (bps[0]);
|
|
|
|
|
|
|
|
uid = KUID_TO_SUID(ZTOUID(outzp));
|
|
|
|
gid = KGID_TO_SGID(ZTOGID(outzp));
|
|
|
|
projid = outzp->z_projid;
|
|
|
|
|
2023-06-22 06:44:00 +03:00
|
|
|
bps = vmem_alloc(sizeof (bps[0]) * maxblocks, KM_SLEEP);
|
2023-03-10 22:59:53 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Clone the file in reasonable size chunks. Each chunk is cloned
|
|
|
|
* in a separate transaction; this keeps the intent log records small
|
|
|
|
* and allows us to do more fine-grained space accounting.
|
|
|
|
*/
|
|
|
|
while (len > 0) {
|
|
|
|
size = MIN(inblksz * maxblocks, len);
|
|
|
|
|
|
|
|
if (zfs_id_overblockquota(outzfsvfs, DMU_USERUSED_OBJECT,
|
|
|
|
uid) ||
|
|
|
|
zfs_id_overblockquota(outzfsvfs, DMU_GROUPUSED_OBJECT,
|
|
|
|
gid) ||
|
|
|
|
(projid != ZFS_DEFAULT_PROJID &&
|
|
|
|
zfs_id_overblockquota(outzfsvfs, DMU_PROJECTUSED_OBJECT,
|
|
|
|
projid))) {
|
|
|
|
error = SET_ERROR(EDQUOT);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
nbps = maxblocks;
|
2024-02-06 03:44:45 +03:00
|
|
|
last_synced_txg = spa_last_synced_txg(dmu_objset_spa(inos));
|
2023-05-03 01:46:14 +03:00
|
|
|
error = dmu_read_l0_bps(inos, inzp->z_id, inoff, size, bps,
|
2023-03-10 22:59:53 +03:00
|
|
|
&nbps);
|
|
|
|
if (error != 0) {
|
|
|
|
/*
|
2023-08-15 03:34:14 +03:00
|
|
|
* If we are trying to clone a block that was created
|
2024-02-06 03:44:45 +03:00
|
|
|
* in the current transaction group, the error will be
|
|
|
|
* EAGAIN here. Based on zfs_bclone_wait_dirty, either
|
|
|
|
* return a shortened range to the caller so it can
|
|
|
|
* fall back, or wait for the next TXG and check again.
|
2023-03-10 22:59:53 +03:00
|
|
|
*/
|
2024-02-06 03:44:45 +03:00
|
|
|
if (error == EAGAIN && zfs_bclone_wait_dirty) {
|
|
|
|
txg_wait_synced(dmu_objset_pool(inos),
|
|
|
|
last_synced_txg + 1);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2023-03-10 22:59:53 +03:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2023-05-03 01:46:14 +03:00
|
|
|
/*
|
|
|
|
* Start a transaction.
|
|
|
|
*/
|
|
|
|
tx = dmu_tx_create(outos);
|
2023-03-10 22:59:53 +03:00
|
|
|
dmu_tx_hold_sa(tx, outzp->z_sa_hdl, B_FALSE);
|
|
|
|
db = (dmu_buf_impl_t *)sa_get_db(outzp->z_sa_hdl);
|
|
|
|
DB_DNODE_ENTER(db);
|
|
|
|
dmu_tx_hold_clone_by_dnode(tx, DB_DNODE(db), outoff, size);
|
|
|
|
DB_DNODE_EXIT(db);
|
|
|
|
zfs_sa_upgrade_txholds(tx, outzp);
|
|
|
|
error = dmu_tx_assign(tx, TXG_WAIT);
|
|
|
|
if (error != 0) {
|
|
|
|
dmu_tx_abort(tx);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2024-01-09 20:46:43 +03:00
|
|
|
* Copy source znode's block size. This is done only if the
|
|
|
|
* whole znode is locked (see zfs_rangelock_cb()) and only
|
|
|
|
* on the first iteration since zfs_rangelock_reduce() will
|
|
|
|
* shrink down lr_length to the appropriate size.
|
2023-03-10 22:59:53 +03:00
|
|
|
*/
|
|
|
|
if (outlr->lr_length == UINT64_MAX) {
|
|
|
|
zfs_grow_blocksize(outzp, inblksz, tx);
|
2024-01-09 20:46:43 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Block growth may fail for many reasons we cannot
|
|
|
|
* predict here. If it happens, the cloning is doomed.
|
|
|
|
*/
|
|
|
|
if (inblksz != outzp->z_blksz) {
|
|
|
|
error = SET_ERROR(EINVAL);
|
|
|
|
dmu_tx_abort(tx);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2023-03-10 22:59:53 +03:00
|
|
|
/*
|
|
|
|
* Round range lock up to the block boundary, so we
|
|
|
|
* prevent appends until we are done.
|
|
|
|
*/
|
|
|
|
zfs_rangelock_reduce(outlr, outoff,
|
|
|
|
((len - 1) / inblksz + 1) * inblksz);
|
|
|
|
}
|
|
|
|
|
2023-05-03 00:24:43 +03:00
|
|
|
error = dmu_brt_clone(outos, outzp->z_id, outoff, size, tx,
|
2023-11-29 21:51:34 +03:00
|
|
|
bps, nbps);
|
2023-05-03 00:24:43 +03:00
|
|
|
if (error != 0) {
|
|
|
|
dmu_tx_commit(tx);
|
|
|
|
break;
|
|
|
|
}
|
2023-03-10 22:59:53 +03:00
|
|
|
|
2024-01-17 19:51:07 +03:00
|
|
|
if (zn_has_cached_data(outzp, outoff, outoff + size - 1)) {
|
|
|
|
update_pages(outzp, outoff, size, outos);
|
|
|
|
}
|
|
|
|
|
2023-03-10 22:59:53 +03:00
|
|
|
zfs_clear_setid_bits_if_necessary(outzfsvfs, outzp, cr,
|
|
|
|
&clear_setid_bits_txg, tx);
|
|
|
|
|
|
|
|
zfs_tstamp_update_setup(outzp, CONTENT_MODIFIED, mtime, ctime);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Update the file size (zp_size) if it has changed;
|
|
|
|
* account for possible concurrent updates.
|
|
|
|
*/
|
|
|
|
while ((outsize = outzp->z_size) < outoff + size) {
|
|
|
|
(void) atomic_cas_64(&outzp->z_size, outsize,
|
|
|
|
outoff + size);
|
|
|
|
}
|
|
|
|
|
|
|
|
error = sa_bulk_update(outzp->z_sa_hdl, bulk, count, tx);
|
|
|
|
|
|
|
|
zfs_log_clone_range(zilog, tx, TX_CLONE_RANGE, outzp, outoff,
|
|
|
|
size, inblksz, bps, nbps);
|
|
|
|
|
|
|
|
dmu_tx_commit(tx);
|
|
|
|
|
|
|
|
if (error != 0)
|
|
|
|
break;
|
|
|
|
|
|
|
|
inoff += size;
|
|
|
|
outoff += size;
|
|
|
|
len -= size;
|
|
|
|
done += size;
|
2024-05-25 04:45:09 +03:00
|
|
|
|
2024-05-29 20:49:11 +03:00
|
|
|
if (issig()) {
|
2024-05-25 04:45:09 +03:00
|
|
|
error = SET_ERROR(EINTR);
|
|
|
|
break;
|
|
|
|
}
|
2023-03-10 22:59:53 +03:00
|
|
|
}
|
|
|
|
|
2023-06-22 06:44:00 +03:00
|
|
|
vmem_free(bps, sizeof (bps[0]) * maxblocks);
|
2023-03-10 22:59:53 +03:00
|
|
|
zfs_znode_update_vfs(outzp);
|
|
|
|
|
|
|
|
unlock:
|
|
|
|
zfs_rangelock_exit(outlr);
|
|
|
|
zfs_rangelock_exit(inlr);
|
|
|
|
|
|
|
|
if (done > 0) {
|
|
|
|
/*
|
|
|
|
* If we have made at least partial progress, reset the error.
|
|
|
|
*/
|
|
|
|
error = 0;
|
|
|
|
|
|
|
|
ZFS_ACCESSTIME_STAMP(inzfsvfs, inzp);
|
|
|
|
|
|
|
|
if (outos->os_sync == ZFS_SYNC_ALWAYS) {
|
|
|
|
zil_commit(zilog, outzp->z_id);
|
|
|
|
}
|
|
|
|
|
|
|
|
*inoffp += done;
|
|
|
|
*outoffp += done;
|
|
|
|
*lenp = done;
|
2023-09-09 20:22:36 +03:00
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* If we made no progress, there must be a good reason.
|
|
|
|
* EOF is handled explicitly above, before the loop.
|
|
|
|
*/
|
|
|
|
ASSERT3S(error, !=, 0);
|
2023-03-10 22:59:53 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
|
|
|
|
|
|
|
|
return (error);
|
|
|
|
}
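For context rather than as part of this file: on Linux, user-space callers typically reach block cloning through copy_file_range(2), which the kernel may service with zfs_clone_range() when block cloning is enabled. A minimal sketch, assuming hypothetical paths on the same pool:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int infd = open("/tank/fs/src.bin", O_RDONLY);
	int outfd = open("/tank/fs/dst.bin", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	off_t inoff = 0, outoff = 0;

	if (infd == -1 || outfd == -1)
		return (1);

	/* Clone (or copy) up to 1 MiB starting at offset 0 in both files. */
	ssize_t done = copy_file_range(infd, &inoff, outfd, &outoff,
	    1024 * 1024, 0);
	if (done == -1)
		perror("copy_file_range");
	else
		printf("cloned/copied %zd bytes\n", done);

	close(infd);
	close(outfd);
	return (0);
}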
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The usual pattern would be to call zfs_clone_range() from zfs_replay_clone(),
|
|
|
|
* but we cannot do that, because when replaying we don't have the source znode
|
|
|
|
* available. This is why we need a dedicated replay function.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
zfs_clone_range_replay(znode_t *zp, uint64_t off, uint64_t len, uint64_t blksz,
|
|
|
|
const blkptr_t *bps, size_t nbps)
|
|
|
|
{
|
|
|
|
zfsvfs_t *zfsvfs;
|
|
|
|
dmu_buf_impl_t *db;
|
|
|
|
dmu_tx_t *tx;
|
|
|
|
int error;
|
|
|
|
int count = 0;
|
|
|
|
sa_bulk_attr_t bulk[3];
|
|
|
|
uint64_t mtime[2], ctime[2];
|
|
|
|
|
|
|
|
ASSERT3U(off, <, MAXOFFSET_T);
|
|
|
|
ASSERT3U(len, >, 0);
|
|
|
|
ASSERT3U(nbps, >, 0);
|
|
|
|
|
|
|
|
zfsvfs = ZTOZSB(zp);
|
|
|
|
|
|
|
|
ASSERT(spa_feature_is_enabled(dmu_objset_spa(zfsvfs->z_os),
|
|
|
|
SPA_FEATURE_BLOCK_CLONING));
|
|
|
|
|
|
|
|
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
|
|
|
|
return (error);
|
|
|
|
|
|
|
|
ASSERT(zfsvfs->z_replay);
|
|
|
|
ASSERT(!zfs_is_readonly(zfsvfs));
|
|
|
|
|
|
|
|
if ((off % blksz) != 0) {
|
|
|
|
zfs_exit(zfsvfs, FTAG);
|
|
|
|
return (SET_ERROR(EINVAL));
|
|
|
|
}
|
|
|
|
|
|
|
|
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
|
|
|
|
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
|
|
|
|
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
|
|
|
|
&zp->z_size, 8);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Start a transaction.
|
|
|
|
*/
|
|
|
|
tx = dmu_tx_create(zfsvfs->z_os);
|
|
|
|
|
|
|
|
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
|
|
|
|
db = (dmu_buf_impl_t *)sa_get_db(zp->z_sa_hdl);
|
|
|
|
DB_DNODE_ENTER(db);
|
|
|
|
dmu_tx_hold_clone_by_dnode(tx, DB_DNODE(db), off, len);
|
|
|
|
DB_DNODE_EXIT(db);
|
|
|
|
zfs_sa_upgrade_txholds(tx, zp);
|
|
|
|
error = dmu_tx_assign(tx, TXG_WAIT);
|
|
|
|
if (error != 0) {
|
|
|
|
dmu_tx_abort(tx);
|
|
|
|
zfs_exit(zfsvfs, FTAG);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (zp->z_blksz < blksz)
|
|
|
|
zfs_grow_blocksize(zp, blksz, tx);
|
|
|
|
|
2023-11-29 21:51:34 +03:00
|
|
|
dmu_brt_clone(zfsvfs->z_os, zp->z_id, off, len, tx, bps, nbps);
|
2023-03-10 22:59:53 +03:00
|
|
|
|
|
|
|
zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
|
|
|
|
|
|
|
|
if (zp->z_size < off + len)
|
|
|
|
zp->z_size = off + len;
|
|
|
|
|
|
|
|
error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* zil_replaying() not only checks if we are replaying the ZIL, but also
|
|
|
|
* updates the ZIL header to record replay progress.
|
|
|
|
*/
|
|
|
|
VERIFY(zil_replaying(zfsvfs->z_log, tx));
|
|
|
|
|
|
|
|
dmu_tx_commit(tx);
|
|
|
|
|
|
|
|
zfs_znode_update_vfs(zp);
|
|
|
|
|
|
|
|
zfs_exit(zfsvfs, FTAG);
|
|
|
|
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2020-10-31 19:40:08 +03:00
|
|
|
EXPORT_SYMBOL(zfs_access);
|
2020-10-22 00:08:06 +03:00
|
|
|
EXPORT_SYMBOL(zfs_fsync);
|
2020-10-31 19:40:08 +03:00
|
|
|
EXPORT_SYMBOL(zfs_holey);
|
2020-10-22 00:08:06 +03:00
|
|
|
EXPORT_SYMBOL(zfs_read);
|
|
|
|
EXPORT_SYMBOL(zfs_write);
|
|
|
|
EXPORT_SYMBOL(zfs_getsecattr);
|
|
|
|
EXPORT_SYMBOL(zfs_setsecattr);
|
2023-03-10 22:59:53 +03:00
|
|
|
EXPORT_SYMBOL(zfs_clone_range);
|
|
|
|
EXPORT_SYMBOL(zfs_clone_range_replay);
|
2020-10-22 00:08:06 +03:00
|
|
|
|
Cleanup: 64-bit kernel module parameters should use fixed width types
Various module parameters such as `zfs_arc_max` were originally
`uint64_t` on OpenSolaris/Illumos, but were changed to `unsigned long`
for Linux compatibility because Linux's kernel default module parameter
implementation did not support 64-bit types on 32-bit platforms. This
caused problems when porting OpenZFS to Windows because its LLP64 memory
model made `unsigned long` a 32-bit type on 64-bit builds, which created the
undesirable situation that parameters that should accept 64-bit values
could not do so on 64-bit Windows.
Upon inspection, it turns out that the Linux kernel module parameter
interface is extensible, such that we are allowed to define our own
types. Rather than maintaining the original type change via hacks to
continue shrinking module parameters on 32-bit Linux, we implement
support for 64-bit module parameters on Linux.
After doing a review of all 64-bit kernel parameters (found via the man
page and also proposed changes by Andrew Innes), the kernel module
parameters fell into a few groups:
Parameters that were originally 64-bit on Illumos:
* dbuf_cache_max_bytes
* dbuf_metadata_cache_max_bytes
* l2arc_feed_min_ms
* l2arc_feed_secs
* l2arc_headroom
* l2arc_headroom_boost
* l2arc_write_boost
* l2arc_write_max
* metaslab_aliquot
* metaslab_force_ganging
* zfetch_array_rd_sz
* zfs_arc_max
* zfs_arc_meta_limit
* zfs_arc_meta_min
* zfs_arc_min
* zfs_async_block_max_blocks
* zfs_condense_max_obsolete_bytes
* zfs_condense_min_mapping_bytes
* zfs_deadman_checktime_ms
* zfs_deadman_synctime_ms
* zfs_initialize_chunk_size
* zfs_initialize_value
* zfs_lua_max_instrlimit
* zfs_lua_max_memlimit
* zil_slog_bulk
Parameters that were originally 32-bit on Illumos:
* zfs_per_txg_dirty_frees_percent
Parameters that were originally `ssize_t` on Illumos:
* zfs_immediate_write_sz
Note that `ssize_t` is `int32_t` on 32-bit and `int64_t` on 64-bit. It
has been upgraded to 64-bit.
Parameters that were `long`/`unsigned long` because of Linux/FreeBSD
influence:
* l2arc_rebuild_blocks_min_l2size
* zfs_key_max_salt_uses
* zfs_max_log_walking
* zfs_max_logsm_summary_length
* zfs_metaslab_max_size_cache_sec
* zfs_min_metaslabs_to_flush
* zfs_multihost_interval
* zfs_unflushed_log_block_max
* zfs_unflushed_log_block_min
* zfs_unflushed_log_block_pct
* zfs_unflushed_max_mem_amt
* zfs_unflushed_max_mem_ppm
New parameters that do not exist in Illumos:
* l2arc_trim_ahead
* vdev_file_logical_ashift
* vdev_file_physical_ashift
* zfs_arc_dnode_limit
* zfs_arc_dnode_limit_percent
* zfs_arc_dnode_reduce_percent
* zfs_arc_meta_limit_percent
* zfs_arc_sys_free
* zfs_deadman_ziotime_ms
* zfs_delete_blocks
* zfs_history_output_max
* zfs_livelist_max_entries
* zfs_max_async_dedup_frees
* zfs_max_nvlist_src_size
* zfs_rebuild_max_segment
* zfs_rebuild_vdev_limit
* zfs_unflushed_log_txg_max
* zfs_vdev_max_auto_ashift
* zfs_vdev_min_auto_ashift
* zfs_vnops_read_chunk_size
* zvol_max_discard_blocks
Rather than clutter the lists with commentary, the module parameters
that need comments are repeated below.
A few parameters were defined in Linux/FreeBSD specific code, where the
use of ulong/long is not an issue for portability, so we leave them
alone:
* zfs_delete_blocks
* zfs_key_max_salt_uses
* zvol_max_discard_blocks
The documentation for a few parameters was found to be incorrect:
* zfs_deadman_checktime_ms - incorrectly documented as int
* zfs_delete_blocks - not documented as Linux only
* zfs_history_output_max - incorrectly documented as int
* zfs_vnops_read_chunk_size - incorrectly documented as long
* zvol_max_discard_blocks - incorrectly documented as ulong
The documentation for these has been fixed, alongside the changes to
document the switch to fixed width types.
In addition, several kernel module parameters were percentages or held
ashift values, so being 64-bit never made sense for them. They have been
downgraded to 32-bit:
* vdev_file_logical_ashift
* vdev_file_physical_ashift
* zfs_arc_dnode_limit_percent
* zfs_arc_dnode_reduce_percent
* zfs_arc_meta_limit_percent
* zfs_per_txg_dirty_frees_percent
* zfs_unflushed_log_block_pct
* zfs_vdev_max_auto_ashift
* zfs_vdev_min_auto_ashift
Of special note are `zfs_vdev_max_auto_ashift` and
`zfs_vdev_min_auto_ashift`, which were already defined as `uint64_t`,
and passed to the kernel as `ulong`. This is inherently buggy on big
endian 32-bit Linux, since the values would not be written to the
correct locations. 32-bit FreeBSD was unaffected because its sysctl code
correctly treated this as a `uint64_t`.
Lastly, a code comment suggests that `zfs_arc_sys_free` is
Linux-specific, but there is nothing to indicate to me that it is
Linux-specific. Nothing was done about that.
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Reviewed-by: Ryan Moeller <ryan@iXsystems.com>
Reviewed-by: Alexander Motin <mav@FreeBSD.org>
Original-patch-by: Andrew Innes <andrew.c12@gmail.com>
Original-patch-by: Jorgen Lundman <lundman@lundman.net>
Signed-off-by: Richard Yao <richard.yao@alumni.stonybrook.edu>
Closes #13984
Closes #14004
2022-10-03 22:06:54 +03:00
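As a rough sketch of the extensibility described above (hypothetical names; not the actual ZFS_MODULE_PARAM plumbing), a 64-bit parameter can be wired up through the stock kernel_param_ops/module_param_cb interface:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static u64 example_tunable = 0;

static int
example_tunable_set(const char *val, const struct kernel_param *kp)
{
	/* Parse the full 64-bit range regardless of sizeof (long). */
	return (kstrtou64(val, 0, (u64 *)kp->arg));
}

static int
example_tunable_get(char *buf, const struct kernel_param *kp)
{
	return (scnprintf(buf, PAGE_SIZE, "%llu\n",
	    (unsigned long long)*(u64 *)kp->arg));
}

static const struct kernel_param_ops example_tunable_ops = {
	.set = example_tunable_set,
	.get = example_tunable_get,
};

module_param_cb(example_tunable, &example_tunable_ops, &example_tunable, 0644);
MODULE_PARM_DESC(example_tunable, "Example 64-bit tunable");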
|
|
|
ZFS_MODULE_PARAM(zfs_vnops, zfs_vnops_, read_chunk_size, U64, ZMOD_RW,
|
2020-10-22 00:08:06 +03:00
|
|
|
"Bytes to read per chunk");
|
2024-02-06 03:44:45 +03:00
|
|
|
|
|
|
|
ZFS_MODULE_PARAM(zfs, zfs_, bclone_enabled, INT, ZMOD_RW,
|
|
|
|
"Enable block cloning");
|
|
|
|
|
|
|
|
ZFS_MODULE_PARAM(zfs, zfs_, bclone_wait_dirty, INT, ZMOD_RW,
|
|
|
|
"Wait for dirty blocks when cloning");
|
2024-09-14 23:47:59 +03:00
|
|
|
|
|
|
|
ZFS_MODULE_PARAM(zfs, zfs_, dio_enabled, INT, ZMOD_RW,
|
|
|
|
"Enable Direct I/O");
|