#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#

#
# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#

#
# Copyright (c) 2013, 2016 by Delphix. All rights reserved.
# Copyright (c) 2020 by Lawrence Livermore National Security, LLC.
#

. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/replacement/replacement.cfg

#
# DESCRIPTION:
# Attaching disks during I/O should pass for supported pools.
#
# STRATEGY:
# 1. Create multidisk pools (stripe/mirror/raidz/draid) and
#    start some random I/O
# 2. Attach a disk to the pool.
# 3. Verify the integrity of the file system and the resilvering.
#
# NOTE: Raidz does not support the sequential resilver (-s) option.
#

verify_runnable "global"
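
# Stop any lingering file_trunc writers, destroy the test pool if it still
# exists, and remove the files created under $TESTDIR.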
function cleanup
{
	if [[ -n "$child_pids" ]]; then
		for wait_pid in $child_pids; do
			kill $wait_pid
		done
	fi

	if poolexists $TESTPOOL1; then
		destroy_pool $TESTPOOL1
	fi

	[[ -e $TESTDIR ]] && log_must rm -rf $TESTDIR/*
}

log_assert "Attaching a disk during I/O completes."
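
# Assemble the option string for file_trunc from any HOLES_* values set in
# replacement.cfg; the -r flag (random offsets) is always appended.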
options=""
options_display="default options"

log_onexit cleanup

[[ -n "$HOLES_FILESIZE" ]] && options=" $options -f $HOLES_FILESIZE "

[[ -n "$HOLES_BLKSIZE" ]] && options="$options -b $HOLES_BLKSIZE "

[[ -n "$HOLES_COUNT" ]] && options="$options -c $HOLES_COUNT "

[[ -n "$HOLES_SEED" ]] && options="$options -s $HOLES_SEED "

[[ -n "$HOLES_FILEOFFSET" ]] && options="$options -o $HOLES_FILEOFFSET "

options="$options -r "

[[ -n "$options" ]] && options_display=$options

child_pids=""
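
# attach_test: kick off file_trunc writers in the background, attach disk2
# to disk1 while that I/O is running, then verify the pool and its data.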
function attach_test
{
	typeset -i iters=2
	typeset -i index=0
	typeset opt=$1
	typeset disk1=$2
	typeset disk2=$3

	typeset i=0
	while [[ $i -lt $iters ]]; do
		log_note "Invoking file_trunc with: $options_display"
		file_trunc $options $TESTDIR/$TESTFILE.$i &
		typeset pid=$!

		sleep 1

		child_pids="$child_pids $pid"
		((i = i + 1))
	done
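
	# -s requests a sequential (rebuild) resilver and -w waits for it
	# to complete before the command returns.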
	log_must zpool attach -sw $opt $TESTPOOL1 $disk1 $disk2

	for wait_pid in $child_pids; do
		kill $wait_pid
	done
	child_pids=""
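
	# Export and re-import the pool, then check the dataset with zdb and
	# the pool with verify_pool to confirm the rebuild left it intact.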
	log_must zpool export $TESTPOOL1
	log_must zpool import -d $TESTDIR $TESTPOOL1
	log_must zfs umount $TESTPOOL1/$TESTFS1
	log_must zdb -cdui $TESTPOOL1/$TESTFS1
	log_must zfs mount $TESTPOOL1/$TESTFS1
	verify_pool $TESTPOOL1
}
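
# Create three file-backed vdevs to build the test pools from.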
specials_list=""
i=0
while [[ $i != 3 ]]; do
	truncate -s $MINVDEVSIZE $TESTDIR/$TESTFILE1.$i
	specials_list="$specials_list $TESTDIR/$TESTFILE1.$i"

	((i = i + 1))
done

#
# Create a replacement disk special file.
#
truncate -s $MINVDEVSIZE $TESTDIR/$REPLACEFILE
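
# Attaching the replacement file to a mirror must succeed while I/O is in
# flight; exercise the attach both with and without -f. For example, with
# op="-f" attach_test effectively runs:
#   zpool attach -sw -f $TESTPOOL1 $TESTDIR/$TESTFILE1.1 $TESTDIR/$REPLACEFILE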
for op in "" "-f"; do
	create_pool $TESTPOOL1 mirror $specials_list
	log_must zfs create $TESTPOOL1/$TESTFS1
	log_must zfs set mountpoint=$TESTDIR1 $TESTPOOL1/$TESTFS1

	attach_test "$op" $TESTDIR/$TESTFILE1.1 $TESTDIR/$REPLACEFILE
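
	# The attached file must now show up in the pool configuration.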
	zpool iostat -v $TESTPOOL1 | grep "$REPLACEFILE"
	if [[ $? -ne 0 ]]; then
		log_fail "$REPLACEFILE is not present."
	fi

	destroy_pool $TESTPOOL1
done

log_note "Verify 'zpool attach' fails with non-mirrors."
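
# None of these pool types is a mirror, so 'zpool attach -s' must be
# rejected for each of them, with and without -f.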
for type in "" "raidz" "raidz1" "draid" "draid1"; do
	for op in "" "-f"; do
		create_pool $TESTPOOL1 $type $specials_list
		log_must zfs create $TESTPOOL1/$TESTFS1
		log_must zfs set mountpoint=$TESTDIR1 $TESTPOOL1/$TESTFS1

		log_mustnot zpool attach -s "$op" $TESTDIR/$TESTFILE1.1 \
		    $TESTDIR/$REPLACEFILE
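
		# After the rejected attach, $REPLACEFILE must not appear in
		# the pool configuration.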
		zpool iostat -v $TESTPOOL1 | grep "$REPLACEFILE"
		if [[ $? -eq 0 ]]; then
			log_fail "$REPLACEFILE should not be present."
		fi

		destroy_pool $TESTPOOL1
	done
done

log_pass