/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2015 by Chunwei Chen. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc.
 */

/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/vfs.h>
#include <sys/uio_impl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/policy.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_quota.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_znode.h>

static ulong_t zfs_fsync_sync_cnt = 4;
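
/*
 * Flush dirty data for the given znode out to stable storage by
 * committing the ZIL, unless the dataset has sync=disabled.
 */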
int
zfs_fsync(znode_t *zp, int syncflag, cred_t *cr)
{
	int error = 0;
	zfsvfs_t *zfsvfs = ZTOZSB(zp);

	(void) tsd_set(zfs_fsyncer_key, (void *)(uintptr_t)zfs_fsync_sync_cnt);

	if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
		if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
			goto out;
		atomic_inc_32(&zp->z_sync_writes_cnt);
		zil_commit(zfsvfs->z_log, zp->z_id);
		atomic_dec_32(&zp->z_sync_writes_cnt);
		zfs_exit(zfsvfs, FTAG);
	}
out:
	tsd_set(zfs_fsyncer_key, NULL);

	return (error);
}

#if defined(SEEK_HOLE) && defined(SEEK_DATA)
/*
 * Lseek support for finding holes (cmd == SEEK_HOLE) and
 * data (cmd == SEEK_DATA). "off" is an in/out parameter.
 */
static int
zfs_holey_common(znode_t *zp, ulong_t cmd, loff_t *off)
{
	zfs_locked_range_t *lr;
	uint64_t noff = (uint64_t)*off; /* new offset */
	uint64_t file_sz;
	int error;
	boolean_t hole;

	file_sz = zp->z_size;
	if (noff >= file_sz) {
		return (SET_ERROR(ENXIO));
	}

	if (cmd == F_SEEK_HOLE)
		hole = B_TRUE;
	else
		hole = B_FALSE;

	/* Flush any mmap()'d data to disk */
	if (zn_has_cached_data(zp))
		zn_flush_cached_data(zp, B_FALSE);

	lr = zfs_rangelock_enter(&zp->z_rangelock, 0, file_sz, RL_READER);
	error = dmu_offset_next(ZTOZSB(zp)->z_os, zp->z_id, hole, &noff);
	zfs_rangelock_exit(lr);

	if (error == ESRCH)
		return (SET_ERROR(ENXIO));

	/* File was dirty, so fall back to using generic logic */
	if (error == EBUSY) {
		if (hole)
			*off = file_sz;

		return (0);
	}

	/*
	 * We could find a hole that begins after the logical end-of-file,
	 * because dmu_offset_next() only works on whole blocks.  If the
	 * EOF falls mid-block, then indicate that the "virtual hole"
	 * at the end of the file begins at the logical EOF, rather than
	 * at the end of the last block.
	 */
	if (noff > file_sz) {
		ASSERT(hole);
		noff = file_sz;
	}

	if (noff < *off)
		return (error);
	*off = noff;
	return (error);
}

int
zfs_holey(znode_t *zp, ulong_t cmd, loff_t *off)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	int error;

	if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
		return (error);

	error = zfs_holey_common(zp, cmd, off);

	zfs_exit(zfsvfs, FTAG);
	return (error);
}
#endif /* SEEK_HOLE && SEEK_DATA */
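
/*
 * Userspace view (illustrative sketch only, not part of this file):
 * the routines above back lseek(2)'s SEEK_HOLE/SEEK_DATA, e.g.
 *
 *	off_t hole = lseek(fd, 0, SEEK_HOLE);     // first hole at/after 0
 *	off_t data = lseek(fd, hole, SEEK_DATA);  // next data after it
 *
 * with ENXIO returned once the offset is at or beyond EOF.
 */

/*
 * Check whether the credentials in "cr" permit the requested access
 * "mode" on the given znode; "flag" selects ACE-mask semantics.
 */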
int
zfs_access(znode_t *zp, int mode, int flag, cred_t *cr)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	int error;

	if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
		return (error);

	if (flag & V_ACE_MASK)
#if defined(__linux__)
		error = zfs_zaccess(zp, mode, flag, B_FALSE, cr,
		    kcred->user_ns);
#else
		error = zfs_zaccess(zp, mode, flag, B_FALSE, cr,
		    NULL);
#endif
	else
#if defined(__linux__)
		error = zfs_zaccess_rwx(zp, mode, flag, cr, kcred->user_ns);
#else
		error = zfs_zaccess_rwx(zp, mode, flag, cr, NULL);
#endif

	zfs_exit(zfsvfs, FTAG);
	return (error);
}

static uint64_t zfs_vnops_read_chunk_size = 1024 * 1024; /* Tunable */
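
/*
 * zfs_read() below issues I/O in chunks of at most
 * zfs_vnops_read_chunk_size bytes, trimmed so that each chunk ends on a
 * chunk-size boundary.  For example, with the default 1 MiB chunk, a
 * read starting at offset 0x180000 first issues 512 KiB, since
 * P2PHASE(0x180000, 1 MiB) == 512 KiB.
 */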

/*
 * Read bytes from specified file into supplied buffer.
 *
 *	IN:	zp	- inode of file to be read from.
 *		uio	- structure supplying read location, range info,
 *			  and return buffer.
 *		ioflag	- O_SYNC flags; used to provide FRSYNC semantics.
 *			  O_DIRECT flag; used to bypass page cache.
 *		cr	- credentials of caller.
 *
 *	OUT:	uio	- updated offset and range, buffer filled.
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Side Effects:
 *	inode - atime updated if byte count > 0
 */
int
zfs_read(struct znode *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
{
	(void) cr;
	int error = 0;
	boolean_t frsync = B_FALSE;

	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
		return (error);

	if (zp->z_pflags & ZFS_AV_QUARANTINED) {
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(EACCES));
	}

	/* We don't copy out anything useful for directories. */
	if (Z_ISDIR(ZTOTYPE(zp))) {
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(EISDIR));
	}

	/*
	 * Validate file offset
	 */
	if (zfs_uio_offset(uio) < (offset_t)0) {
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Fasttrack empty reads
	 */
	if (zfs_uio_resid(uio) == 0) {
		zfs_exit(zfsvfs, FTAG);
		return (0);
	}

#ifdef FRSYNC
	/*
	 * If we're in FRSYNC mode, sync out this znode before reading it.
	 * Only do this for non-snapshots.
	 *
	 * Some platforms do not support FRSYNC and instead map it
	 * to O_SYNC, which results in unnecessary calls to zil_commit. We
	 * only honor FRSYNC requests on platforms which support it.
	 */
	frsync = !!(ioflag & FRSYNC);
#endif
	if (zfsvfs->z_log &&
	    (frsync || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS))
		zil_commit(zfsvfs->z_log, zp->z_id);

	/*
	 * Lock the range against changes.
	 */
	zfs_locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock,
	    zfs_uio_offset(uio), zfs_uio_resid(uio), RL_READER);

	/*
	 * If we are reading past end-of-file we can skip
	 * to the end; but we might still need to set atime.
	 */
	if (zfs_uio_offset(uio) >= zp->z_size) {
		error = 0;
		goto out;
	}

	ASSERT(zfs_uio_offset(uio) < zp->z_size);
#if defined(__linux__)
	ssize_t start_offset = zfs_uio_offset(uio);
#endif
	ssize_t n = MIN(zfs_uio_resid(uio), zp->z_size - zfs_uio_offset(uio));
	ssize_t start_resid = n;

	while (n > 0) {
		ssize_t nbytes = MIN(n, zfs_vnops_read_chunk_size -
		    P2PHASE(zfs_uio_offset(uio), zfs_vnops_read_chunk_size));
#ifdef UIO_NOCOPY
		if (zfs_uio_segflg(uio) == UIO_NOCOPY)
			error = mappedread_sf(zp, nbytes, uio);
		else
#endif
		if (zn_has_cached_data(zp) && !(ioflag & O_DIRECT)) {
			error = mappedread(zp, nbytes, uio);
		} else {
			error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
			    uio, nbytes);
		}

		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);

#if defined(__linux__)
			/*
			 * if we actually read some bytes, bubbling EFAULT
			 * up to become EAGAIN isn't what we want here...
			 *
			 * ...on Linux, at least. On FBSD, doing this breaks.
			 */
			if (error == EFAULT &&
			    (zfs_uio_offset(uio) - start_offset) != 0)
				error = 0;
#endif
			break;
		}

		n -= nbytes;
	}

	int64_t nread = start_resid - n;
	dataset_kstats_update_read_kstats(&zfsvfs->z_kstat, nread);
	task_io_account_read(nread);

out:
	zfs_rangelock_exit(lr);

	ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
	zfs_exit(zfsvfs, FTAG);
	return (error);
}
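
/*
 * Clear the SUID/SGID bits of a file being written by an unprivileged
 * caller, logging at most one TX_SETATTR per txg.  Must be called with
 * the transaction still open, before dmu_tx_commit().
 */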
static void
zfs_clear_setid_bits_if_necessary(zfsvfs_t *zfsvfs, znode_t *zp, cred_t *cr,
    uint64_t *clear_setid_bits_txgp, dmu_tx_t *tx)
{
	zilog_t *zilog = zfsvfs->z_log;
	const uint64_t uid = KUID_TO_SUID(ZTOUID(zp));

	ASSERT(clear_setid_bits_txgp != NULL);
	ASSERT(tx != NULL);

	/*
	 * Clear Set-UID/Set-GID bits on successful write if not
	 * privileged and at least one of the execute bits is set.
	 *
	 * It would be nice to do this after all writes have
	 * been done, but that would still expose the ISUID/ISGID
	 * to another app after the partial write is committed.
	 *
	 * Note: we don't call zfs_fuid_map_id() here because
	 * user 0 is not an ephemeral uid.
	 */
	mutex_enter(&zp->z_acl_lock);
	if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) | (S_IXUSR >> 6))) != 0 &&
	    (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
	    secpolicy_vnode_setid_retain(zp, cr,
	    ((zp->z_mode & S_ISUID) != 0 && uid == 0)) != 0) {
		uint64_t newmode;

		zp->z_mode &= ~(S_ISUID | S_ISGID);
		newmode = zp->z_mode;
		(void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
		    (void *)&newmode, sizeof (uint64_t), tx);

		mutex_exit(&zp->z_acl_lock);

		/*
		 * Make sure SUID/SGID bits will be removed when we replay the
		 * log.  If the setid bits keep coming back, don't log more
		 * than one TX_SETATTR per transaction group.
		 */
		if (*clear_setid_bits_txgp != dmu_tx_get_txg(tx)) {
			vattr_t va = {0};

			va.va_mask = ATTR_MODE;
			va.va_nodeid = zp->z_id;
			va.va_mode = newmode;
			zfs_log_setattr(zilog, tx, TX_SETATTR, zp, &va,
			    ATTR_MODE, NULL);
			*clear_setid_bits_txgp = dmu_tx_get_txg(tx);
		}
	} else {
		mutex_exit(&zp->z_acl_lock);
	}
}

/*
 * Write the bytes to a file.
 *
 *	IN:	zp	- znode of file to be written to.
 *		uio	- structure supplying write location, range info,
 *			  and data buffer.
 *		ioflag	- O_APPEND flag set if in append mode.
 *			  O_DIRECT flag; used to bypass page cache.
 *		cr	- credentials of caller.
 *
 *	OUT:	uio	- updated offset and range.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	ip - ctime|mtime updated if byte count > 0
 */
int
zfs_write(znode_t *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
{
	int error = 0, error1;
	ssize_t start_resid = zfs_uio_resid(uio);
	uint64_t clear_setid_bits_txg = 0;

	/*
	 * Fasttrack empty write
	 */
	ssize_t n = start_resid;
	if (n == 0)
		return (0);

	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
		return (error);

	sa_bulk_attr_t bulk[4];
	int count = 0;
	uint64_t mtime[2], ctime[2];
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
	    &zp->z_size, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &zp->z_pflags, 8);

	/*
	 * Callers might not be able to detect properly that we are read-only,
	 * so check it explicitly here.
	 */
	if (zfs_is_readonly(zfsvfs)) {
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(EROFS));
	}

	/*
	 * If immutable or not appending then return EPERM.
	 * Intentionally allow ZFS_READONLY through here.
	 * See zfs_zaccess_common()
	 */
	if ((zp->z_pflags & ZFS_IMMUTABLE) ||
	    ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & O_APPEND) &&
	    (zfs_uio_offset(uio) < zp->z_size))) {
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(EPERM));
	}

	/*
	 * Validate file offset
	 */
	offset_t woff = ioflag & O_APPEND ? zp->z_size : zfs_uio_offset(uio);
	if (woff < 0) {
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(EINVAL));
	}

	const uint64_t max_blksz = zfsvfs->z_max_blksz;

	/*
	 * Pre-fault the pages to ensure slow (eg NFS) pages
	 * don't hold up txg.
	 * Skip this if uio contains loaned arc_buf.
	 */
	if (zfs_uio_prefaultpages(MIN(n, max_blksz), uio)) {
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(EFAULT));
	}

	/*
	 * If in append mode, set the io offset pointer to eof.
	 */
	zfs_locked_range_t *lr;
	if (ioflag & O_APPEND) {
		/*
		 * Obtain an appending range lock to guarantee file append
		 * semantics.  We reset the write offset once we have the lock.
		 */
		lr = zfs_rangelock_enter(&zp->z_rangelock, 0, n, RL_APPEND);
		woff = lr->lr_offset;
		if (lr->lr_length == UINT64_MAX) {
			/*
			 * We overlocked the file because this write will cause
			 * the file block size to increase.
			 * Note that zp_size cannot change with this lock held.
			 */
			woff = zp->z_size;
		}
		zfs_uio_setoffset(uio, woff);
	} else {
		/*
		 * Note that if the file block size will change as a result of
		 * this write, then this range lock will lock the entire file
		 * so that we can re-write the block safely.
		 */
		lr = zfs_rangelock_enter(&zp->z_rangelock, woff, n, RL_WRITER);
	}

	if (zn_rlimit_fsize(zp, uio)) {
		zfs_rangelock_exit(lr);
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(EFBIG));
	}

	const rlim64_t limit = MAXOFFSET_T;

	if (woff >= limit) {
		zfs_rangelock_exit(lr);
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(EFBIG));
	}

	if (n > limit - woff)
		n = limit - woff;

	uint64_t end_size = MAX(zp->z_size, woff + n);
	zilog_t *zilog = zfsvfs->z_log;

	const uint64_t uid = KUID_TO_SUID(ZTOUID(zp));
	const uint64_t gid = KGID_TO_SGID(ZTOGID(zp));
	const uint64_t projid = zp->z_projid;

	/*
	 * Write the file in reasonable size chunks.  Each chunk is written
	 * in a separate transaction; this keeps the intent log records small
	 * and allows us to do more fine-grained space accounting.
	 */
	while (n > 0) {
		woff = zfs_uio_offset(uio);
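
		/*
		 * Fail this pass early if the write would push the owning
		 * user, group, or project over its block quota.
		 */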
		if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT, uid) ||
		    zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT, gid) ||
		    (projid != ZFS_DEFAULT_PROJID &&
		    zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
		    projid))) {
			error = SET_ERROR(EDQUOT);
			break;
		}

		arc_buf_t *abuf = NULL;
		if (n >= max_blksz && woff >= zp->z_size &&
		    P2PHASE(woff, max_blksz) == 0 &&
		    zp->z_blksz == max_blksz) {
			/*
			 * This write covers a full block.  "Borrow" a buffer
			 * from the dmu so that we can fill it before we enter
			 * a transaction.  This avoids the possibility of
			 * holding up the transaction if the data copy hangs
			 * up on a pagefault (e.g., from an NFS server mapping).
			 */
			size_t cbytes;

			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
			    max_blksz);
			ASSERT(abuf != NULL);
			ASSERT(arc_buf_size(abuf) == max_blksz);
			if ((error = zfs_uiocopy(abuf->b_data, max_blksz,
			    UIO_WRITE, uio, &cbytes))) {
				dmu_return_arcbuf(abuf);
				break;
			}
			ASSERT3S(cbytes, ==, max_blksz);
		}

		/*
		 * Start a transaction.
		 */
		dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)sa_get_db(zp->z_sa_hdl);
		DB_DNODE_ENTER(db);
		dmu_tx_hold_write_by_dnode(tx, DB_DNODE(db), woff,
		    MIN(n, max_blksz));
		DB_DNODE_EXIT(db);
		zfs_sa_upgrade_txholds(tx, zp);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			if (abuf != NULL)
				dmu_return_arcbuf(abuf);
			break;
		}

		/*
		 * NB: We must call zfs_clear_setid_bits_if_necessary before
		 * committing the transaction!
		 */

		/*
		 * If rangelock_enter() over-locked we grow the blocksize
		 * and then reduce the lock range.  This will only happen
		 * on the first iteration since rangelock_reduce() will
		 * shrink down lr_length to the appropriate size.
		 */
		if (lr->lr_length == UINT64_MAX) {
			uint64_t new_blksz;

			if (zp->z_blksz > max_blksz) {
				/*
				 * File's blocksize is already larger than the
				 * "recordsize" property.  Only let it grow to
				 * the next power of 2.
				 */
				ASSERT(!ISP2(zp->z_blksz));
				new_blksz = MIN(end_size,
				    1 << highbit64(zp->z_blksz));
			} else {
				new_blksz = MIN(end_size, max_blksz);
			}
			zfs_grow_blocksize(zp, new_blksz, tx);
			zfs_rangelock_reduce(lr, woff, n);
		}

		/*
		 * XXX - should we really limit each write to z_max_blksz?
		 * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
		 */
		const ssize_t nbytes =
		    MIN(n, max_blksz - P2PHASE(woff, max_blksz));

		ssize_t tx_bytes;
		if (abuf == NULL) {
			tx_bytes = zfs_uio_resid(uio);
			zfs_uio_fault_disable(uio, B_TRUE);
			error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
			    uio, nbytes, tx);
			zfs_uio_fault_disable(uio, B_FALSE);
#ifdef __linux__
			if (error == EFAULT) {
				zfs_clear_setid_bits_if_necessary(zfsvfs, zp,
				    cr, &clear_setid_bits_txg, tx);
				dmu_tx_commit(tx);
				/*
				 * Account for partial writes before
				 * continuing the loop.
				 * Update needs to occur before the next
				 * zfs_uio_prefaultpages, or prefaultpages may
				 * error, and we may break the loop early.
				 */
				if (tx_bytes != zfs_uio_resid(uio))
					n -= tx_bytes - zfs_uio_resid(uio);
				if (zfs_uio_prefaultpages(MIN(n, max_blksz),
				    uio)) {
					break;
				}
				continue;
			}
#endif
			/*
			 * On FreeBSD, EFAULT should be propagated back to the
			 * VFS, which will handle faulting and will retry.
			 */
			if (error != 0 && error != EFAULT) {
				zfs_clear_setid_bits_if_necessary(zfsvfs, zp,
				    cr, &clear_setid_bits_txg, tx);
				dmu_tx_commit(tx);
				break;
			}
			tx_bytes -= zfs_uio_resid(uio);
		} else {
			/* Implied by abuf != NULL: */
			ASSERT3S(n, >=, max_blksz);
			ASSERT0(P2PHASE(woff, max_blksz));
			/*
			 * We can simplify nbytes to MIN(n, max_blksz) since
			 * P2PHASE(woff, max_blksz) is 0, and knowing
			 * n >= max_blksz lets us simplify further:
			 */
			ASSERT3S(nbytes, ==, max_blksz);
			/*
			 * Thus, we're writing a full block at a block-aligned
			 * offset and extending the file past EOF.
			 *
			 * dmu_assign_arcbuf_by_dbuf() will directly assign the
			 * arc buffer to a dbuf.
			 */
			error = dmu_assign_arcbuf_by_dbuf(
			    sa_get_db(zp->z_sa_hdl), woff, abuf, tx);
			if (error != 0) {
				/*
				 * XXX This might not be necessary if
				 * dmu_assign_arcbuf_by_dbuf is guaranteed
				 * to be atomic.
				 */
				zfs_clear_setid_bits_if_necessary(zfsvfs, zp,
				    cr, &clear_setid_bits_txg, tx);
				dmu_return_arcbuf(abuf);
				dmu_tx_commit(tx);
				break;
			}
			ASSERT3S(nbytes, <=, zfs_uio_resid(uio));
			zfs_uioskip(uio, nbytes);
			tx_bytes = nbytes;
		}
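
		/*
		 * Keep the page cache coherent with the data just written
		 * when the file is also mapped, unless this is O_DIRECT.
		 */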
		if (tx_bytes && zn_has_cached_data(zp) &&
		    !(ioflag & O_DIRECT)) {
			update_pages(zp, woff, tx_bytes, zfsvfs->z_os);
		}

		/*
		 * If we made no progress, we're done.  If we made even
		 * partial progress, update the znode and ZIL accordingly.
		 */
		if (tx_bytes == 0) {
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
			    (void *)&zp->z_size, sizeof (uint64_t), tx);
			dmu_tx_commit(tx);
			ASSERT(error != 0);
			break;
		}

		zfs_clear_setid_bits_if_necessary(zfsvfs, zp, cr,
		    &clear_setid_bits_txg, tx);

		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);

		/*
		 * Update the file size (zp_size) if it has changed;
		 * account for possible concurrent updates.
		 */
		while ((end_size = zp->z_size) < zfs_uio_offset(uio)) {
			(void) atomic_cas_64(&zp->z_size, end_size,
			    zfs_uio_offset(uio));
			ASSERT(error == 0 || error == EFAULT);
		}
		/*
		 * If we are replaying and eof is non zero then force
		 * the file size to the specified eof.  Note, there's no
		 * concurrency during replay.
		 */
		if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0)
			zp->z_size = zfsvfs->z_replay_eof;

		error1 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
		if (error1 != 0)
			/* Avoid clobbering EFAULT. */
			error = error1;

		/*
		 * NB: During replay, the TX_SETATTR record logged by
		 * zfs_clear_setid_bits_if_necessary must precede any of
		 * the TX_WRITE records logged here.
		 */
		zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag,
		    NULL, NULL);

		dmu_tx_commit(tx);

		if (error != 0)
			break;
		ASSERT3S(tx_bytes, ==, nbytes);
		n -= nbytes;

		if (n > 0) {
			if (zfs_uio_prefaultpages(MIN(n, max_blksz), uio)) {
				error = SET_ERROR(EFAULT);
				break;
			}
		}
	}

	zfs_znode_update_vfs(zp);
	zfs_rangelock_exit(lr);

	/*
	 * If we're in replay mode, or we made no progress, or the
	 * uio data is inaccessible return an error.  Otherwise, it's
	 * at least a partial write, so it's successful.
	 */
	if (zfsvfs->z_replay || zfs_uio_resid(uio) == start_resid ||
	    error == EFAULT) {
		zfs_exit(zfsvfs, FTAG);
		return (error);
	}

	if (ioflag & (O_SYNC | O_DSYNC) ||
	    zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, zp->z_id);

	const int64_t nwritten = start_resid - zfs_uio_resid(uio);
	dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, nwritten);
	task_io_account_write(nwritten);

	zfs_exit(zfsvfs, FTAG);
	return (0);
}
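
/*
 * Fetch the ACL of the given znode into *vsecp; ATTR_NOACLCHECK in
 * "flag" skips the access-permission check on the ACL itself.
 */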
int
zfs_getsecattr(znode_t *zp, vsecattr_t *vsecp, int flag, cred_t *cr)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	int error;
	boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;

	if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
		return (error);
	error = zfs_getacl(zp, vsecp, skipaclchk, cr);
	zfs_exit(zfsvfs, FTAG);

	return (error);
}
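
/*
 * Replace the ACL of the given znode with *vsecp, committing the ZIL
 * if the dataset is configured with sync=always.
 */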
int
zfs_setsecattr(znode_t *zp, vsecattr_t *vsecp, int flag, cred_t *cr)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	int error;
	boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
	zilog_t *zilog = zfsvfs->z_log;

	if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
		return (error);

	error = zfs_setacl(zp, vsecp, skipaclchk, cr);

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	zfs_exit(zfsvfs, FTAG);
	return (error);
}
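
/*
 * Debug-only fault injection: setting zil_fault_io nonzero (e.g. from a
 * debugger; mechanism assumed) forces a one-shot EIO in the
 * indirect-write path of zfs_get_data() below.
 */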
#ifdef ZFS_DEBUG
static int zil_fault_io = 0;
#endif

static void zfs_get_done(zgd_t *zgd, int error);

/*
 * Get data to generate a TX_WRITE intent log record.
 */
int
zfs_get_data(void *arg, uint64_t gen, lr_write_t *lr, char *buf,
    struct lwb *lwb, zio_t *zio)
{
	zfsvfs_t *zfsvfs = arg;
	objset_t *os = zfsvfs->z_os;
	znode_t *zp;
	uint64_t object = lr->lr_foid;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error = 0;
	uint64_t zp_gen;

	ASSERT3P(lwb, !=, NULL);
	ASSERT3P(zio, !=, NULL);
	ASSERT3U(size, !=, 0);

	/*
	 * Nothing to do if the file has been removed
	 */
	if (zfs_zget(zfsvfs, object, &zp) != 0)
		return (SET_ERROR(ENOENT));
	if (zp->z_unlinked) {
		/*
		 * Release the vnode asynchronously as we currently have the
		 * txg stopped from syncing.
		 */
		zfs_zrele_async(zp);
		return (SET_ERROR(ENOENT));
	}
	/* check if generation number matches */
	if (sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs), &zp_gen,
	    sizeof (zp_gen)) != 0) {
		zfs_zrele_async(zp);
		return (SET_ERROR(EIO));
	}
	if (zp_gen != gen) {
		zfs_zrele_async(zp);
		return (SET_ERROR(ENOENT));
	}

	zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_lwb = lwb;
	zgd->zgd_private = zp;

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
	if (buf != NULL) { /* immediate write */
		zgd->zgd_lr = zfs_rangelock_enter(&zp->z_rangelock,
		    offset, size, RL_READER);
		/* test for truncation needs to be done while range locked */
		if (offset >= zp->z_size) {
			error = SET_ERROR(ENOENT);
		} else {
			error = dmu_read(os, object, offset, size, buf,
			    DMU_READ_NO_PREFETCH);
		}
		ASSERT(error == 0 || error == ENOENT);
	} else { /* indirect write */
		/*
		 * Have to lock the whole block to ensure when it's
		 * written out and its checksum is being calculated
		 * that no one can change the data.  We need to re-check
		 * blocksize after we get the lock in case it's changed!
		 */
		for (;;) {
			uint64_t blkoff;
			size = zp->z_blksz;
			blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
			offset -= blkoff;
			zgd->zgd_lr = zfs_rangelock_enter(&zp->z_rangelock,
			    offset, size, RL_READER);
			if (zp->z_blksz == size)
				break;
			offset += blkoff;
			zfs_rangelock_exit(zgd->zgd_lr);
		}
		/* test for truncation needs to be done while range locked */
		if (lr->lr_offset >= zp->z_size)
			error = SET_ERROR(ENOENT);
#ifdef ZFS_DEBUG
		if (zil_fault_io) {
			error = SET_ERROR(EIO);
			zil_fault_io = 0;
		}
#endif
		if (error == 0)
			error = dmu_buf_hold(os, object, offset, zgd, &db,
			    DMU_READ_NO_PREFETCH);

		if (error == 0) {
			blkptr_t *bp = &lr->lr_blkptr;

			zgd->zgd_db = db;
			zgd->zgd_bp = bp;

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    zfs_get_done, zgd);
			ASSERT(error || lr->lr_length <= size);

			/*
			 * On success, we need to wait for the write I/O
			 * initiated by dmu_sync() to complete before we can
			 * release this dbuf.  We will finish everything up
			 * in the zfs_get_done() callback.
			 */
			if (error == 0)
				return (0);

			if (error == EALREADY) {
				lr->lr_common.lrc_txtype = TX_WRITE2;
				/*
				 * TX_WRITE2 relies on the data previously
				 * written by the TX_WRITE that caused
				 * EALREADY.  We zero out the BP because
				 * it is the old, currently-on-disk BP.
				 */
				zgd->zgd_bp = NULL;
				BP_ZERO(bp);
				error = 0;
			}
		}
	}

	zfs_get_done(zgd, error);

	return (error);
}
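
/*
 * dmu_sync() completion callback: release the dbuf, the range lock, and
 * the znode reference taken in zfs_get_data(), then free the zgd.
 */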
static void
zfs_get_done(zgd_t *zgd, int error)
{
	(void) error;
	znode_t *zp = zgd->zgd_private;

	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_rangelock_exit(zgd->zgd_lr);

	/*
	 * Release the vnode asynchronously as we currently have the
	 * txg stopped from syncing.
	 */
	zfs_zrele_async(zp);

	kmem_free(zgd, sizeof (zgd_t));
}

EXPORT_SYMBOL(zfs_access);
EXPORT_SYMBOL(zfs_fsync);
EXPORT_SYMBOL(zfs_holey);
EXPORT_SYMBOL(zfs_read);
EXPORT_SYMBOL(zfs_write);
EXPORT_SYMBOL(zfs_getsecattr);
EXPORT_SYMBOL(zfs_setsecattr);

ZFS_MODULE_PARAM(zfs_vnops, zfs_vnops_, read_chunk_size, U64, ZMOD_RW,
	"Bytes to read per chunk");
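
/*
 * The tunable above is writable at runtime; on Linux it typically
 * appears as /sys/module/zfs/parameters/zfs_vnops_read_chunk_size
 * (path assumed), e.g.:
 *
 *	echo 2097152 > /sys/module/zfs/parameters/zfs_vnops_read_chunk_size
 */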