Fix issue in receive_object() during reallocation

When receiving an object into a previously allocated interior slot,
the new object should be "allocated" by setting DMU_NEW_OBJECT,
not "reallocated" with dnode_reallocate().  For resilience, verify
the slot is free as expected, in case the stream is malformed.

Add a test case to generate more realistic incremental send streams
that force reallocation to occur during the receive.

Reviewed-by: Olaf Faaland <faaland1@llnl.gov>
Reviewed-by: Tom Caputi <tcaputi@datto.com>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #8067 
Closes #8614
commit b92f5d9f82
parent 3fa93bb8d3
Author: Brian Behlendorf <behlendorf1@llnl.gov>
Date:   2019-04-12 14:28:04 -07:00

7 changed files with 218 additions and 18 deletions
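
As a rough, illustrative sketch (not part of the commit), the sequence below
approximates the workload the new test generates to force dnode reallocation
during the receive.  The pool and dataset names ("tank/src", "tank/dst"),
file counts, and sizes are placeholders; the real test drives this through
churn_files() in rsend.kshlib.

# Placeholder names; this only approximates the new test's churn strategy.
zfs create tank/src
for i in {0..99}; do
	dd if=/dev/urandom of=/tank/src/file-$i bs=128k count=1
done
zfs snapshot tank/src@snap1
zfs send tank/src@snap1 | zfs recv tank/dst

# Keep atime updates from dirtying the received copy between incrementals
# (the new test does the same with 'zfs set atime=off').
zfs set atime=off tank/dst

# Free a batch of objects, then remount so the next creates are likely
# to land in the freed dnode slots.
rm -f /tank/src/file-{0..49}
zfs unmount tank/src && zfs mount tank/src
for i in {0..49}; do
	dd if=/dev/urandom of=/tank/src/file-$i bs=128k count=1
done

# The incremental stream now carries objects whose dnode slots were freed
# and reused on the sending side.
zfs snapshot tank/src@snap2
zfs send -i tank/src@snap1 tank/src@snap2 | zfs recv tank/dst

The final incremental receive lands objects in previously freed and reused
dnode slots, which is the receive_object() path changed below.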


@@ -1253,7 +1253,12 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
		 * earlier in the stream.
		 */
		txg_wait_synced(dmu_objset_pool(rwa->os), 0);
-		object = drro->drr_object;
+
+		if (dmu_object_info(rwa->os, drro->drr_object, NULL) != ENOENT)
+			return (SET_ERROR(EINVAL));
+
+		/* object was freed and we are about to allocate a new one */
+		object = DMU_NEW_OBJECT;
	} else {
		/* object is free and we are about to allocate a new one */
		object = DMU_NEW_OBJECT;


@@ -805,7 +805,7 @@ tests = ['rsend_001_pos', 'rsend_002_pos', 'rsend_003_pos', 'rsend_004_pos',
    'send-c_embedded_blocks', 'send-c_resume', 'send-cpL_varied_recsize',
    'send-c_recv_dedup', 'send_encrypted_files', 'send_encrypted_heirarchy',
    'send_encrypted_props', 'send_freeobjects', 'send_realloc_dnode_size',
-    'send_holds', 'send_hole_birth', 'send_mixed_raw',
+    'send_realloc_files', 'send_holds', 'send_hole_birth', 'send_mixed_raw',
    'send-wDR_encrypted_zvol']
tags = ['functional', 'rsend']


@@ -41,6 +41,7 @@ dist_pkgdata_SCRIPTS = \
	send-cpL_varied_recsize.ksh \
	send_freeobjects.ksh \
	send_realloc_dnode_size.ksh \
+	send_realloc_files.ksh \
	send_holds.ksh \
	send_hole_birth.ksh \
	send_mixed_raw.ksh \


@@ -457,6 +457,103 @@ function rm_files
	echo Removed $nfiles files of random sizes up to $maxsize bytes
}

#
# Simulate a random set of operations which could be reasonably expected
# to occur on an average filesystem.
#
# $1 Number of files to modify
# $2 Maximum file size
# $3 File system to modify the file on
#
function churn_files
{
	nfiles=$1
	maxsize=$2
	fs=$3

	#
	# Remove roughly half of the files in order to make it more
	# likely that a dnode will be reallocated.
	#
	for ((i=0; i<$nfiles; i=i+1)); do
		file_name="/$fs/file-$i"

		if [[ -e $file_name ]]; then
			if [ $((RANDOM % 2)) -eq 0 ]; then
				rm $file_name || \
				    log_fail "Failed to remove $file_name"
			fi
		fi
	done

	#
	# Remount the filesystem to simulate normal usage.  This resets
	# the last allocated object id allowing for new objects to be
	# reallocated in the locations of previously freed objects.
	#
	log_must zfs unmount $fs
	log_must zfs mount $fs

	for i in {0..$nfiles}; do
		file_name="/$fs/file-$i"
		file_size=$((($RANDOM * $RANDOM % ($maxsize - 1)) + 1))

		#
		# When the file exists, modify it in one of five ways to
		# simulate normal usage:
		# - (20%) Remove and set an extended attribute on the file
		# - (20%) Overwrite the existing file
		# - (20%) Truncate the existing file to a random length
		# - (20%) Truncate the existing file to zero length
		# - (20%) Remove the file
		#
		# Otherwise create the missing file.  20% of the created
		# files will be small and use embedded block pointers, the
		# remainder will have random sizes up to the maximum size.
		# Three extended attributes are attached to all of the files.
		#
		if [[ -e $file_name ]]; then
			value=$((RANDOM % 5))
			if [ $value -eq 0 ]; then
				attrname="testattr$((RANDOM % 3))"
				attr -qr $attrname $file_name || \
				    log_fail "Failed to remove $attrname"
				attr -qs $attrname -V TestValue $file_name || \
				    log_fail "Failed to set $attrname"
			elif [ $value -eq 1 ]; then
				dd if=/dev/urandom of=$file_name \
				    bs=$file_size count=1 >/dev/null 2>&1 || \
				    log_fail "Failed to overwrite $file_name"
			elif [ $value -eq 2 ]; then
				truncate -s $file_size $file_name || \
				    log_fail "Failed to truncate $file_name"
			elif [ $value -eq 3 ]; then
				truncate -s 0 $file_name || \
				    log_fail "Failed to truncate $file_name"
			else
				rm $file_name || \
				    log_fail "Failed to remove $file_name"
			fi
		else
			if [ $((RANDOM % 5)) -eq 0 ]; then
				file_size=$((($RANDOM % 64) + 1))
			fi

			dd if=/dev/urandom of=$file_name \
			    bs=$file_size count=1 >/dev/null 2>&1 || \
			    log_fail "Failed to create $file_name"

			for j in {0..2}; do
				attrname="testattr$j"
				attr -qs $attrname -V TestValue $file_name || \
				    log_fail "Failed to set $attrname"
			done
		fi
	done

	return 0
}

#
# Mess up file contents
#
@@ -677,3 +774,22 @@ function resume_cleanup
	cleanup_pool $POOL2
	rm -f /$sendpool/initial.zsend /$sendpool/incremental.zsend
}

#
# Randomly set the property to one of the enumerated values.
#
function rand_set_prop
{
	typeset dtst=$1
	typeset prop=$2
	shift 2
	typeset value=$(random_get $@)
	log_must eval "zfs set $prop='$value' $dtst"
}

#
# Generate a recursive checksum of a filesystem's contents.  Only file
# data is included in the checksum (no metadata or xattrs).
#
function recursive_cksum
{
	find $1 -type f -exec sha256sum {} \; | \
	    sort -k 2 | awk '{ print $1 }' | sha256sum
}
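
As a usage note (not part of the diff), the two helpers above are meant to be
combined the way the new send_realloc_files test uses them: rand_set_prop
picks one of the listed values at random and sets it, and recursive_cksum
reduces a file tree's data to a single checksum that can be compared across a
send/receive.  A minimal sketch, reusing the test's $POOL/fs and $POOL/newfs
names:

# Vary dataset properties so successive incremental streams differ.
rand_set_prop $POOL/fs compression "off" "lzjb" "gzip" "lz4"
rand_set_prop $POOL/fs recordsize "32K" "128K"

# Checksum the source, send/receive elsewhere, then compare.
expected_cksum=$(recursive_cksum /$POOL/fs)
# ... zfs snapshot, zfs send | zfs recv ...
actual_cksum=$(recursive_cksum /$POOL/newfs)
[[ "$expected_cksum" == "$actual_cksum" ]] || log_fail "Checksums differ"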


@@ -44,16 +44,6 @@
verify_runnable "global"

-function rand_set_prop
-{
-	typeset dtst=$1
-	typeset prop=$2
-	shift 2
-	typeset value=$(random_get $@)
-	log_must eval "zfs set $prop='$value' $dtst"
-}

function edited_prop
{
	typeset behaviour=$1


@@ -57,12 +57,6 @@ function cleanup
}

log_onexit cleanup

-function recursive_cksum
-{
-	find $1 -type f -exec sha256sum {} \; | \
-	    sort -k 2 | awk '{ print $1 }' | sha256sum
-}

log_assert "Verify 'zfs send -w' works with many different file layouts"

typeset keyfile=/$TESTPOOL/pkey


@@ -0,0 +1,94 @@
#!/bin/ksh

#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#

#
# Copyright (c) 2019 by Lawrence Livermore National Security, LLC.
#

. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/rsend/rsend.kshlib

#
# Description:
# Verify incremental receive properly handles dnode reallocation.
#
# Strategy:
# 1. Create a new filesystem.
# 2. Use 'zfs send' to perform a full send of the initial filesystem.
# 3. Repeat the following steps N times to verify incremental receives.
#   a) Randomly change several key dataset properties.
#   b) Modify the contents of the filesystem such that dnode reallocation
#      is likely during the 'zfs receive', and receive_object() exercises
#      as much of its functionality as possible.
#   c) Create a new snapshot and generate an incremental stream.
#   d) Receive the incremental stream and verify the received contents.
#   e) Destroy the incremental stream and old snapshot.
#

log_assert "Verify incremental receive handles reallocation"

function cleanup
{
	rm -f $BACKDIR/fs@*
	destroy_dataset $POOL/fs "-rR"
	destroy_dataset $POOL/newfs "-rR"
}

log_onexit cleanup

log_must zfs create $POOL/fs

last_snap=1
log_must zfs snapshot $POOL/fs@snap${last_snap}
log_must eval "zfs send $POOL/fs@snap${last_snap} >$BACKDIR/fs@snap${last_snap}"
log_must eval "zfs recv $POOL/newfs < $BACKDIR/fs@snap${last_snap}"

# Set atime=off to prevent the recursive_cksum from modifying newfs.
log_must zfs set atime=off $POOL/newfs

for i in {1..5}; do
	# Randomly modify several dataset properties in order to generate
	# more interesting incremental send streams.
	rand_set_prop $POOL/fs checksum "off" "fletcher4" "sha256"
	rand_set_prop $POOL/fs compression "off" "lzjb" "gzip" "lz4"
	rand_set_prop $POOL/fs recordsize "32K" "128K"
	rand_set_prop $POOL/fs dnodesize "legacy" "auto" "4k"
	rand_set_prop $POOL/fs xattr "on" "sa"

	# Churn the filesystem in such a way that we're likely to be both
	# allocating and reallocating objects in the incremental stream.
	log_must churn_files 1000 524288 $POOL/fs
	expected_cksum=$(recursive_cksum /$fs)

	# Create a snapshot and use it to send an incremental stream.
	this_snap=$((last_snap + 1))
	log_must zfs snapshot $POOL/fs@snap${this_snap}
	log_must eval "zfs send -i $POOL/fs@snap${last_snap} \
	    $POOL/fs@snap${this_snap} > $BACKDIR/fs@snap${this_snap}"

	# Receive the incremental stream and verify the received contents.
	log_must eval "zfs recv $POOL/newfs < $BACKDIR/fs@snap${this_snap}"

	actual_cksum=$(recursive_cksum /$POOL/newfs)
	if [[ "$expected_cksum" != "$actual_cksum" ]]; then
		log_fail "Checksums differ ($expected_cksum != $actual_cksum)"
	fi

	# Destroy the incremental stream and old snapshot.
	rm -f $BACKDIR/fs@snap${last_snap}
	log_must zfs destroy $POOL/fs@snap${last_snap}
	log_must zfs destroy $POOL/newfs@snap${last_snap}
	last_snap=$this_snap
done

log_pass "Verify incremental receive handles dnode reallocation"
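
A final side note, not part of the commit: to sanity-check that an incremental
stream produced by this kind of churn actually contains object allocation
records rather than only write records, the stream can be inspected with
zstreamdump.  This assumes zstreamdump's verbose output, which prints the type
of each record in the stream:

# Illustrative only; the grep also matches FREEOBJECTS records.
zfs send -i $POOL/fs@snap1 $POOL/fs@snap2 | zstreamdump -v | grep -c OBJECT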