/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/file.h>
#include <sys/vdev_file.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/fs/zfs.h>
#include <sys/fm/fs/zfs.h>
#include <sys/abd.h>
#include <sys/stat.h>

/*
 * Virtual device vector for files.
 */

static taskq_t *vdev_file_taskq;
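
/*
 * Ashift values reported for file vdevs. Both default to
 * SPA_MINBLOCKSHIFT (512-byte sectors) and may be raised via the
 * module parameters declared at the bottom of this file.
 */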
unsigned long vdev_file_logical_ashift = SPA_MINBLOCKSHIFT;
unsigned long vdev_file_physical_ashift = SPA_MINBLOCKSHIFT;
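
/*
 * Create (and in vdev_file_fini(), destroy) the taskq used to run
 * vdev_file_io_strategy(); see the dispatch in vdev_file_io_start().
 */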
void
vdev_file_init(void)
{
	vdev_file_taskq = taskq_create("z_vdev_file", MAX(max_ncpus, 16),
	    minclsyspri, max_ncpus, INT_MAX, 0);
}

void
vdev_file_fini(void)
{
	taskq_destroy(vdev_file_taskq);
}
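
/*
 * Holding and releasing a file vdev is a no-op beyond sanity checking:
 * unlike a disk vdev, there is no underlying device node that must be
 * pinned while the vdev is in use.
 */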
static void
vdev_file_hold(vdev_t *vd)
{
	ASSERT(vd->vdev_path != NULL);
}

static void
vdev_file_rele(vdev_t *vd)
{
	ASSERT(vd->vdev_path != NULL);
}
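
/*
 * Translate the pool's spa_mode_t into open(2)-style flags. O_LARGEFILE
 * is always added so that backing files larger than 2 GiB remain
 * accessible on 32-bit builds.
 */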
static mode_t
vdev_file_open_mode(spa_mode_t spa_mode)
{
	mode_t mode = 0;

	if ((spa_mode & SPA_MODE_READ) && (spa_mode & SPA_MODE_WRITE)) {
		mode = O_RDWR;
	} else if (spa_mode & SPA_MODE_READ) {
		mode = O_RDONLY;
	} else if (spa_mode & SPA_MODE_WRITE) {
		mode = O_WRONLY;
	}

	return (mode | O_LARGEFILE);
}
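
/*
 * Open the backing file and report the size and ashift values that the
 * generic vdev code expects from a leaf vdev's open routine.
 */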
static int
vdev_file_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
    uint64_t *logical_ashift, uint64_t *physical_ashift)
{
	vdev_file_t *vf;
	zfs_file_t *fp;
	zfs_file_attr_t zfa;
	int error;

	/*
	 * Rotational optimizations only make sense on block devices.
	 */
	vd->vdev_nonrot = B_TRUE;

	/*
	 * Allow TRIM on file based vdevs. This may not always be supported,
	 * since it depends on your kernel version and underlying filesystem
	 * type, but it is always safe to attempt.
	 */
	vd->vdev_has_trim = B_TRUE;

	/*
	 * Disable secure TRIM on file based vdevs. There is no way to
	 * request this behavior from the underlying filesystem.
	 */
	vd->vdev_has_securetrim = B_FALSE;

	/*
	 * We must have a pathname, and it must be absolute.
	 */
	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Reopen the device if it's not currently open. Otherwise,
	 * just update the physical size of the device.
	 */
	if (vd->vdev_tsd != NULL) {
		ASSERT(vd->vdev_reopening);
		vf = vd->vdev_tsd;
		goto skip_open;
	}

	vf = vd->vdev_tsd = kmem_zalloc(sizeof (vdev_file_t), KM_SLEEP);

	/*
	 * We always open the files from the root of the global zone, even if
	 * we're in a local zone. If the user has gotten to this point, the
	 * administrator has already decided that the pool should be available
	 * to local zone users, so the underlying devices should be as well.
	 */
	ASSERT(vd->vdev_path != NULL && vd->vdev_path[0] == '/');

	error = zfs_file_open(vd->vdev_path,
	    vdev_file_open_mode(spa_mode(vd->vdev_spa)), 0, &fp);
	if (error) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		return (error);
	}

	vf->vf_file = fp;

#ifdef _KERNEL
	/*
	 * Make sure it's a regular file.
	 */
	if (zfs_file_getattr(fp, &zfa)) {
		return (SET_ERROR(ENODEV));
	}
	if (!S_ISREG(zfa.zfa_mode)) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		return (SET_ERROR(ENODEV));
	}
#endif

skip_open:

	error = zfs_file_getattr(vf->vf_file, &zfa);
	if (error) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		return (error);
	}

	*max_psize = *psize = zfa.zfa_size;
	*logical_ashift = vdev_file_logical_ashift;
	*physical_ashift = vdev_file_physical_ashift;

	return (0);
}
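
/*
 * Close the backing file and free the per-vdev state. During a reopen
 * the existing handle is kept, so this is a no-op while
 * vd->vdev_reopening is set.
 */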
static void
vdev_file_close(vdev_t *vd)
{
	vdev_file_t *vf = vd->vdev_tsd;

	if (vd->vdev_reopening || vf == NULL)
		return;

	if (vf->vf_file != NULL) {
		zfs_file_close(vf->vf_file);
	}

	vd->vdev_delayed_close = B_FALSE;
	kmem_free(vf, sizeof (vdev_file_t));
	vd->vdev_tsd = NULL;
}

/*
 * Implements the interrupt side for file vdev types. This routine will be
 * called when the I/O completes, allowing us to transfer the I/O to the
 * interrupt taskqs. For consistency, the code structure mimics disk vdev
 * types.
 */
static void
vdev_file_io_intr(zio_t *zio)
{
	zio_delay_interrupt(zio);
}
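
/*
 * Synchronous worker run on the z_vdev_file taskq: borrow a linear
 * buffer from the zio's ABD, issue the pread/pwrite against the
 * backing file, then pass the zio on to the interrupt side.
 */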
static void
vdev_file_io_strategy(void *arg)
{
	zio_t *zio = arg;
	vdev_t *vd = zio->io_vd;
	vdev_file_t *vf;
	void *buf;
	ssize_t resid;
	loff_t off;
	ssize_t size;
	int err;

	off = zio->io_offset;
	size = zio->io_size;
	resid = 0;

	vf = vd->vdev_tsd;

	ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE);
	if (zio->io_type == ZIO_TYPE_READ) {
		buf = abd_borrow_buf(zio->io_abd, zio->io_size);
		err = zfs_file_pread(vf->vf_file, buf, size, off, &resid);
		abd_return_buf_copy(zio->io_abd, buf, size);
	} else {
		buf = abd_borrow_buf_copy(zio->io_abd, zio->io_size);
		err = zfs_file_pwrite(vf->vf_file, buf, size, off, &resid);
		abd_return_buf(zio->io_abd, buf, size);
	}

	/*
	 * Propagate the result of the file I/O; a short read or write
	 * (nonzero residual) with no other error is reported as ENOSPC.
	 */
	zio->io_error = err;
	if (resid != 0 && zio->io_error == 0)
		zio->io_error = SET_ERROR(ENOSPC);

	vdev_file_io_intr(zio);
}
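
/*
 * Entry point for I/O on a file vdev. Cache flushes and TRIM are
 * handled inline; reads and writes are dispatched to the taskq, since
 * the synchronous zfs_file_* calls may block.
 */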
static void
vdev_file_io_start(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	vdev_file_t *vf = vd->vdev_tsd;

	if (zio->io_type == ZIO_TYPE_IOCTL) {
		/* XXPOLICY */
		if (!vdev_readable(vd)) {
			zio->io_error = SET_ERROR(ENXIO);
			zio_interrupt(zio);
			return;
		}

		switch (zio->io_cmd) {
		case DKIOCFLUSHWRITECACHE:
			zio->io_error = zfs_file_fsync(vf->vf_file,
			    O_SYNC|O_DSYNC);
			break;
		default:
			zio->io_error = SET_ERROR(ENOTSUP);
		}

		zio_execute(zio);
		return;
	} else if (zio->io_type == ZIO_TYPE_TRIM) {
#ifdef notyet
		int mode = 0;

		ASSERT3U(zio->io_size, !=, 0);

		/* XXX FreeBSD has no fallocate routine in file ops */
		zio->io_error = zfs_file_fallocate(vf->vf_file,
		    mode, zio->io_offset, zio->io_size);
#endif
		zio->io_error = SET_ERROR(ENOTSUP);
		zio_execute(zio);
		return;
	}

	ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE);
	zio->io_target_timestamp = zio_handle_io_delay(zio);

	VERIFY3U(taskq_dispatch(vdev_file_taskq, vdev_file_io_strategy, zio,
	    TQ_SLEEP), !=, 0);
}
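
/*
 * Nothing to do on completion for file vdevs; any error has already
 * been recorded in zio->io_error before the zio reaches this stage.
 */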
/* ARGSUSED */
static void
vdev_file_io_done(zio_t *zio)
{
}
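
/*
 * Operation vector registered with the generic vdev layer for
 * VDEV_TYPE_FILE leaves; operations this vdev type does not implement
 * are left NULL.
 */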
vdev_ops_t vdev_file_ops = {
	.vdev_op_init = NULL,
	.vdev_op_fini = NULL,
	.vdev_op_open = vdev_file_open,
	.vdev_op_close = vdev_file_close,
	.vdev_op_asize = vdev_default_asize,
	.vdev_op_min_asize = vdev_default_min_asize,
	.vdev_op_min_alloc = NULL,
	.vdev_op_io_start = vdev_file_io_start,
	.vdev_op_io_done = vdev_file_io_done,
	.vdev_op_state_change = NULL,
	.vdev_op_need_resilver = NULL,
	.vdev_op_hold = vdev_file_hold,
	.vdev_op_rele = vdev_file_rele,
	.vdev_op_remap = NULL,
	.vdev_op_xlate = vdev_default_xlate,
	.vdev_op_rebuild_asize = NULL,
	.vdev_op_metaslab_init = NULL,
	.vdev_op_config_generate = NULL,
	.vdev_op_nparity = NULL,
	.vdev_op_ndisks = NULL,
	.vdev_op_type = VDEV_TYPE_FILE,	/* name of this vdev type */
	.vdev_op_leaf = B_TRUE		/* leaf vdev */
};

/*
 * From userland we access disks just like files.
 */
#ifndef _KERNEL

vdev_ops_t vdev_disk_ops = {
	.vdev_op_init = NULL,
	.vdev_op_fini = NULL,
	.vdev_op_open = vdev_file_open,
	.vdev_op_close = vdev_file_close,
	.vdev_op_asize = vdev_default_asize,
	.vdev_op_min_asize = vdev_default_min_asize,
	.vdev_op_min_alloc = NULL,
	.vdev_op_io_start = vdev_file_io_start,
	.vdev_op_io_done = vdev_file_io_done,
	.vdev_op_state_change = NULL,
	.vdev_op_need_resilver = NULL,
	.vdev_op_hold = vdev_file_hold,
	.vdev_op_rele = vdev_file_rele,
	.vdev_op_remap = NULL,
	.vdev_op_xlate = vdev_default_xlate,
	.vdev_op_rebuild_asize = NULL,
	.vdev_op_metaslab_init = NULL,
	.vdev_op_config_generate = NULL,
	.vdev_op_nparity = NULL,
	.vdev_op_ndisks = NULL,
	.vdev_op_type = VDEV_TYPE_DISK,	/* name of this vdev type */
	.vdev_op_leaf = B_TRUE		/* leaf vdev */
};

#endif

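/*
 * Example (Linux; assumes the standard zfs module parameter path):
 * report 4 KiB sectors for file vdevs by raising the logical ashift to
 * 12 before creating a file-backed pool:
 *
 *	echo 12 > /sys/module/zfs/parameters/vdev_file_logical_ashift
 */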
ZFS_MODULE_PARAM(zfs_vdev_file, vdev_file_, logical_ashift, ULONG, ZMOD_RW,
	"Logical ashift for file-based devices");
ZFS_MODULE_PARAM(zfs_vdev_file, vdev_file_, physical_ashift, ULONG, ZMOD_RW,
	"Physical ashift for file-based devices");