/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
 * Copyright (c) 2012 Pawel Jakub Dawidek <pawel@dawidek.net>.
 * All rights reserved
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 * Copyright 2015, OmniTI Computer Consulting, Inc. All rights reserved.
 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
 * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
 * Copyright (c) 2019 Datto Inc.
 */

#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <stddef.h>
#include <fcntl.h>
#include <sys/mount.h>
#include <sys/mntent.h>
#include <sys/mnttab.h>
#include <sys/avl.h>
#include <sys/debug.h>
#include <sys/stat.h>
#include <pthread.h>
#include <umem.h>
#include <time.h>

#include <libzfs.h>
#include <libzfs_core.h>
#include <libzutil.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "zfs_fletcher.h"
#include "libzfs_impl.h"
#include <cityhash.h>
#include <zlib.h>
#include <sys/zio_checksum.h>
#include <sys/dsl_crypt.h>
#include <sys/ddt.h>
#include <sys/socket.h>
#include <sys/sha2.h>

static int zfs_receive_impl(libzfs_handle_t *, const char *, const char *,
    recvflags_t *, int, const char *, nvlist_t *, avl_tree_t *, char **,
    const char *, nvlist_t *);
static int guid_to_name_redact_snaps(libzfs_handle_t *hdl, const char *parent,
    uint64_t guid, boolean_t bookmark_ok, uint64_t *redact_snap_guids,
    uint64_t num_redact_snaps, char *name);
static int guid_to_name(libzfs_handle_t *, const char *,
    uint64_t, boolean_t, char *);

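/*
 * Arguments handed to the thread that periodically reports the progress of
 * an in-flight send (the reporting code lives later in this file).
 */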
typedef struct progress_arg {
	zfs_handle_t *pa_zhp;
	int pa_fd;
	boolean_t pa_parsable;
	boolean_t pa_estimate;
	int pa_verbosity;
} progress_arg_t;

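/*
 * Write a single replay record, plus an optional payload, to outfd and fold
 * both into the running fletcher-4 checksum.  For every record other than
 * DRR_BEGIN, the checksum accumulated so far is stored in the record's
 * drr_checksum field before the record is written.  Returns 0 or errno.
 */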
static int
dump_record(dmu_replay_record_t *drr, void *payload, int payload_len,
    zio_cksum_t *zc, int outfd)
{
	ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
	fletcher_4_incremental_native(drr,
	    offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum), zc);
	if (drr->drr_type != DRR_BEGIN) {
		ASSERT(ZIO_CHECKSUM_IS_ZERO(&drr->drr_u.
		    drr_checksum.drr_checksum));
		drr->drr_u.drr_checksum.drr_checksum = *zc;
	}
	fletcher_4_incremental_native(&drr->drr_u.drr_checksum.drr_checksum,
	    sizeof (zio_cksum_t), zc);
	if (write(outfd, drr, sizeof (*drr)) == -1)
		return (errno);
	if (payload_len != 0) {
		fletcher_4_incremental_native(payload, payload_len, zc);
		if (write(outfd, payload, payload_len) == -1)
			return (errno);
	}
	return (0);
}

/*
 * Routines for dealing with the AVL tree of fs-nvlists
 */
typedef struct fsavl_node {
	avl_node_t fn_node;
	nvlist_t *fn_nvfs;
	char *fn_snapname;
	uint64_t fn_guid;
} fsavl_node_t;

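/* Sort fsavl nodes by the guid of the snapshot they describe. */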
static int
fsavl_compare(const void *arg1, const void *arg2)
{
	const fsavl_node_t *fn1 = (const fsavl_node_t *)arg1;
	const fsavl_node_t *fn2 = (const fsavl_node_t *)arg2;

	return (TREE_CMP(fn1->fn_guid, fn2->fn_guid));
}

/*
 * Given the GUID of a snapshot, find its containing filesystem and
 * (optionally) name.
 */
static nvlist_t *
fsavl_find(avl_tree_t *avl, uint64_t snapguid, char **snapname)
{
	fsavl_node_t fn_find;
	fsavl_node_t *fn;

	fn_find.fn_guid = snapguid;

	fn = avl_find(avl, &fn_find, NULL);
	if (fn) {
		if (snapname)
			*snapname = fn->fn_snapname;
		return (fn->fn_nvfs);
	}
	return (NULL);
}
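
/*
 * Free an AVL tree created by fsavl_create(), including every node in it.
 * A NULL tree is tolerated.
 */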
static void
fsavl_destroy(avl_tree_t *avl)
{
	fsavl_node_t *fn;
	void *cookie;

	if (avl == NULL)
		return;

	cookie = NULL;
	while ((fn = avl_destroy_nodes(avl, &cookie)) != NULL)
		free(fn);
	avl_destroy(avl);
	free(avl);
}

/*
 * Given an nvlist, produce an avl tree of snapshots, ordered by guid
 */
static avl_tree_t *
fsavl_create(nvlist_t *fss)
{
	avl_tree_t *fsavl;
	nvpair_t *fselem = NULL;

	if ((fsavl = malloc(sizeof (avl_tree_t))) == NULL)
		return (NULL);

	avl_create(fsavl, fsavl_compare, sizeof (fsavl_node_t),
	    offsetof(fsavl_node_t, fn_node));

	while ((fselem = nvlist_next_nvpair(fss, fselem)) != NULL) {
		nvlist_t *nvfs, *snaps;
		nvpair_t *snapelem = NULL;

		nvfs = fnvpair_value_nvlist(fselem);
		snaps = fnvlist_lookup_nvlist(nvfs, "snaps");

		while ((snapelem =
		    nvlist_next_nvpair(snaps, snapelem)) != NULL) {
			fsavl_node_t *fn;
			uint64_t guid;

			guid = fnvpair_value_uint64(snapelem);
			if ((fn = malloc(sizeof (fsavl_node_t))) == NULL) {
				fsavl_destroy(fsavl);
				return (NULL);
			}
			fn->fn_nvfs = nvfs;
			fn->fn_snapname = nvpair_name(snapelem);
			fn->fn_guid = guid;

			/*
			 * Note: if there are multiple snaps with the
			 * same GUID, we ignore all but one.
			 */
			if (avl_find(fsavl, fn, NULL) == NULL)
				avl_add(fsavl, fn);
			else
				free(fn);
		}
	}

	return (fsavl);
}

/*
 * Routines for dealing with the giant nvlist of fs-nvlists, etc.
 */
typedef struct send_data {
	/*
	 * assigned inside every recursive call,
	 * restored from *_save on return:
	 *
	 * guid of fromsnap snapshot in parent dataset
	 * txg of fromsnap snapshot in current dataset
	 * txg of tosnap snapshot in current dataset
	 */
	uint64_t parent_fromsnap_guid;
	uint64_t fromsnap_txg;
	uint64_t tosnap_txg;

	/* the nvlists get accumulated during depth-first traversal */
	nvlist_t *parent_snaps;
	nvlist_t *fss;
	nvlist_t *snapprops;
	nvlist_t *snapholds;	/* user holds */

	/* send-receive configuration, does not change during traversal */
	const char *fsname;
	const char *fromsnap;
	const char *tosnap;
	boolean_t recursive;
	boolean_t raw;
	boolean_t doall;
	boolean_t replicate;
	boolean_t skipmissing;
	boolean_t verbose;
	boolean_t backup;
	boolean_t seenfrom;
	boolean_t seento;
	boolean_t holds;	/* were holds requested with send -h */
	boolean_t props;

	/*
	 * The header nvlist is of the following format:
	 * {
	 *   "tosnap" -> string
	 *   "fromsnap" -> string (if incremental)
	 *   "fss" -> {
	 *	id -> {
	 *
	 *	 "name" -> string (full name; for debugging)
	 *	 "parentfromsnap" -> number (guid of fromsnap in parent)
	 *
	 *	 "props" -> { name -> value (only if set here) }
	 *	 "snaps" -> { name (lastname) -> number (guid) }
	 *	 "snapprops" -> { name (lastname) -> { name -> value } }
	 *	 "snapholds" -> { name (lastname) -> { holdname -> crtime } }
	 *
	 *	 "origin" -> number (guid) (if clone)
	 *	 "is_encroot" -> boolean
	 *	 "sent" -> boolean (not on-disk)
	 *	}
	 *   }
	 * }
	 *
	 */
} send_data_t;

static void
send_iterate_prop(zfs_handle_t *zhp, boolean_t received_only, nvlist_t *nv);

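/*
 * Collect one snapshot's guid, properties, and (if requested) user holds
 * into the send_data_t accumulators.  Called for each snapshot of a dataset
 * by send_iterate_fs().
 */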
static int
send_iterate_snap(zfs_handle_t *zhp, void *arg)
{
	send_data_t *sd = arg;
	uint64_t guid = zhp->zfs_dmustats.dds_guid;
	uint64_t txg = zhp->zfs_dmustats.dds_creation_txg;
	char *snapname;
	nvlist_t *nv;
	boolean_t isfromsnap, istosnap, istosnapwithnofrom;

	snapname = strrchr(zhp->zfs_name, '@')+1;
	isfromsnap = (sd->fromsnap != NULL &&
	    strcmp(sd->fromsnap, snapname) == 0);
	istosnap = (sd->tosnap != NULL && (strcmp(sd->tosnap, snapname) == 0));
	istosnapwithnofrom = (istosnap && sd->fromsnap == NULL);

	if (sd->tosnap_txg != 0 && txg > sd->tosnap_txg) {
		if (sd->verbose) {
			(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
			    "skipping snapshot %s because it was created "
			    "after the destination snapshot (%s)\n"),
			    zhp->zfs_name, sd->tosnap);
		}
		zfs_close(zhp);
		return (0);
	}

	fnvlist_add_uint64(sd->parent_snaps, snapname, guid);
	/*
	 * NB: if there is no fromsnap here (it's a newly created fs in
	 * an incremental replication), we will substitute the tosnap.
	 */
	if (isfromsnap || (sd->parent_fromsnap_guid == 0 && istosnap)) {
		sd->parent_fromsnap_guid = guid;
	}

	if (!sd->recursive) {

		/*
		 * To allow a doall stream to work properly
		 * with a NULL fromsnap
		 */
		if (sd->doall && sd->fromsnap == NULL && !sd->seenfrom) {
			sd->seenfrom = B_TRUE;
		}

		if (!sd->seenfrom && isfromsnap) {
			sd->seenfrom = B_TRUE;
			zfs_close(zhp);
			return (0);
		}

		if ((sd->seento || !sd->seenfrom) && !istosnapwithnofrom) {
			zfs_close(zhp);
			return (0);
		}

		if (istosnap)
			sd->seento = B_TRUE;
	}

	nv = fnvlist_alloc();
	send_iterate_prop(zhp, sd->backup, nv);
	fnvlist_add_nvlist(sd->snapprops, snapname, nv);
	fnvlist_free(nv);
	if (sd->holds) {
		nvlist_t *holds = fnvlist_alloc();
		int err = lzc_get_holds(zhp->zfs_name, &holds);
		if (err == 0) {
			fnvlist_add_nvlist(sd->snapholds, snapname, holds);
		}
		fnvlist_free(holds);
	}

	zfs_close(zhp);
	return (0);
}
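
/*
 * Copy the sendable properties of zhp into nv: user properties, plus native
 * properties that were set on this dataset (or received), skipping read-only
 * and unknown properties.
 */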
static void
send_iterate_prop(zfs_handle_t *zhp, boolean_t received_only, nvlist_t *nv)
{
	nvlist_t *props = NULL;
	nvpair_t *elem = NULL;

	if (received_only)
		props = zfs_get_recvd_props(zhp);
	else
		props = zhp->zfs_props;

	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		char *propname = nvpair_name(elem);
		zfs_prop_t prop = zfs_name_to_prop(propname);
		nvlist_t *propnv;

		if (!zfs_prop_user(propname)) {
			/*
			 * Realistically, this should never happen. However,
			 * we want the ability to add DSL properties without
			 * needing to make incompatible version changes. We
			 * need to ignore unknown properties to allow older
			 * software to still send datasets containing these
			 * properties, with the unknown properties elided.
			 */
			if (prop == ZPROP_INVAL)
				continue;

			if (zfs_prop_readonly(prop))
				continue;
		}

		verify(nvpair_value_nvlist(elem, &propnv) == 0);
		if (prop == ZFS_PROP_QUOTA || prop == ZFS_PROP_RESERVATION ||
		    prop == ZFS_PROP_REFQUOTA ||
		    prop == ZFS_PROP_REFRESERVATION) {
			char *source;
			uint64_t value;
			verify(nvlist_lookup_uint64(propnv,
			    ZPROP_VALUE, &value) == 0);
			if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT)
				continue;
			/*
			 * May have no source before SPA_VERSION_RECVD_PROPS,
			 * but is still modifiable.
			 */
			if (nvlist_lookup_string(propnv,
			    ZPROP_SOURCE, &source) == 0) {
				if ((strcmp(source, zhp->zfs_name) != 0) &&
				    (strcmp(source,
				    ZPROP_SOURCE_VAL_RECVD) != 0))
					continue;
			}
		} else {
			char *source;
			if (nvlist_lookup_string(propnv,
			    ZPROP_SOURCE, &source) != 0)
				continue;
			if ((strcmp(source, zhp->zfs_name) != 0) &&
			    (strcmp(source, ZPROP_SOURCE_VAL_RECVD) != 0))
				continue;
		}

		if (zfs_prop_user(propname) ||
		    zfs_prop_get_type(prop) == PROP_TYPE_STRING) {
			char *value;
			value = fnvlist_lookup_string(propnv, ZPROP_VALUE);
			fnvlist_add_string(nv, propname, value);
		} else {
			uint64_t value;
			value = fnvlist_lookup_uint64(propnv, ZPROP_VALUE);
			fnvlist_add_uint64(nv, propname, value);
		}
	}
}

/*
 * Returns the snapshot's creation txg, or 0 if the snapshot does not exist.
 */
static uint64_t
get_snap_txg(libzfs_handle_t *hdl, const char *fs, const char *snap)
{
	char name[ZFS_MAX_DATASET_NAME_LEN];
	uint64_t txg = 0;

	if (fs == NULL || fs[0] == '\0' || snap == NULL || snap[0] == '\0')
		return (txg);

	(void) snprintf(name, sizeof (name), "%s@%s", fs, snap);
	if (zfs_dataset_exists(hdl, name, ZFS_TYPE_SNAPSHOT)) {
		zfs_handle_t *zhp = zfs_open(hdl, name, ZFS_TYPE_SNAPSHOT);
		if (zhp != NULL) {
			txg = zfs_prop_get_int(zhp, ZFS_PROP_CREATETXG);
			zfs_close(zhp);
		}
	}

	return (txg);
}

/*
 * Recursively generate nvlists describing datasets.  See the comment on the
 * send_data_t structure above for a description of the nvlist contents.
 */
static int
send_iterate_fs(zfs_handle_t *zhp, void *arg)
{
	send_data_t *sd = arg;
	nvlist_t *nvfs = NULL, *nv = NULL;
	int rv = 0;
	uint64_t min_txg = 0, max_txg = 0;
	uint64_t parent_fromsnap_guid_save = sd->parent_fromsnap_guid;
	uint64_t fromsnap_txg_save = sd->fromsnap_txg;
	uint64_t tosnap_txg_save = sd->tosnap_txg;
	uint64_t txg = zhp->zfs_dmustats.dds_creation_txg;
	uint64_t guid = zhp->zfs_dmustats.dds_guid;
	uint64_t fromsnap_txg, tosnap_txg;
	char guidstring[64];

	fromsnap_txg = get_snap_txg(zhp->zfs_hdl, zhp->zfs_name, sd->fromsnap);
	if (fromsnap_txg != 0)
		sd->fromsnap_txg = fromsnap_txg;

	tosnap_txg = get_snap_txg(zhp->zfs_hdl, zhp->zfs_name, sd->tosnap);
	if (tosnap_txg != 0)
		sd->tosnap_txg = tosnap_txg;

	/*
	 * on the send side, if the current dataset does not have tosnap,
	 * perform two additional checks:
	 *
	 * - skip sending the current dataset if it was created later than
	 *   the parent tosnap
	 * - return error if the current dataset was created earlier than
	 *   the parent tosnap, unless --skip-missing specified. Then
	 *   just print a warning
	 */
	if (sd->tosnap != NULL && tosnap_txg == 0) {
		if (sd->tosnap_txg != 0 && txg > sd->tosnap_txg) {
			if (sd->verbose) {
				(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
				    "skipping dataset %s: snapshot %s does "
				    "not exist\n"), zhp->zfs_name, sd->tosnap);
			}
		} else if (sd->skipmissing) {
			(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
			    "WARNING: skipping dataset %s and its children:"
			    " snapshot %s does not exist\n"),
			    zhp->zfs_name, sd->tosnap);
		} else {
			(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
			    "cannot send %s@%s%s: snapshot %s@%s does not "
			    "exist\n"), sd->fsname, sd->tosnap, sd->recursive ?
			    dgettext(TEXT_DOMAIN, " recursively") : "",
			    zhp->zfs_name, sd->tosnap);
			rv = EZFS_NOENT;
		}
		goto out;
	}

	nvfs = fnvlist_alloc();
	fnvlist_add_string(nvfs, "name", zhp->zfs_name);
	fnvlist_add_uint64(nvfs, "parentfromsnap",
	    sd->parent_fromsnap_guid);

	if (zhp->zfs_dmustats.dds_origin[0]) {
		zfs_handle_t *origin = zfs_open(zhp->zfs_hdl,
		    zhp->zfs_dmustats.dds_origin, ZFS_TYPE_SNAPSHOT);
		if (origin == NULL) {
			rv = -1;
			goto out;
		}
		fnvlist_add_uint64(nvfs, "origin",
		    origin->zfs_dmustats.dds_guid);

		zfs_close(origin);
	}

	/* iterate over props */
	if (sd->props || sd->backup || sd->recursive) {
		nv = fnvlist_alloc();
		send_iterate_prop(zhp, sd->backup, nv);
	}

	if (zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) != ZIO_CRYPT_OFF) {
		boolean_t encroot;

		/* determine if this dataset is an encryption root */
		if (zfs_crypto_get_encryption_root(zhp, &encroot, NULL) != 0) {
			rv = -1;
			goto out;
		}

		if (encroot)
			fnvlist_add_boolean(nvfs, "is_encroot");

		/*
		 * Encrypted datasets can only be sent with properties if
		 * the raw flag is specified because the receive side doesn't
		 * currently have a mechanism for recursively asking the user
		 * for new encryption parameters.
		 */
		if (!sd->raw) {
			(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
			    "cannot send %s@%s: encrypted dataset %s may not "
			    "be sent with properties without the raw flag\n"),
			    sd->fsname, sd->tosnap, zhp->zfs_name);
			rv = -1;
			goto out;
		}

	}

	if (nv != NULL)
		fnvlist_add_nvlist(nvfs, "props", nv);

	/* iterate over snaps, and set sd->parent_fromsnap_guid */
	sd->parent_fromsnap_guid = 0;
	sd->parent_snaps = fnvlist_alloc();
	sd->snapprops = fnvlist_alloc();
	if (sd->holds)
		sd->snapholds = fnvlist_alloc();

	/*
	 * If this is a "doall" send, a replicate send or we're just trying
	 * to gather a list of previous snapshots, iterate through all the
	 * snaps in the txg range. Otherwise just look at the one we're
	 * interested in.
	 */
	if (sd->doall || sd->replicate || sd->tosnap == NULL) {
		if (!sd->replicate && fromsnap_txg != 0)
			min_txg = fromsnap_txg;
		if (!sd->replicate && tosnap_txg != 0)
			max_txg = tosnap_txg;
		(void) zfs_iter_snapshots_sorted(zhp, send_iterate_snap, sd,
		    min_txg, max_txg);
	} else {
		char snapname[MAXPATHLEN] = { 0 };
		zfs_handle_t *snap;

		(void) snprintf(snapname, sizeof (snapname), "%s@%s",
		    zhp->zfs_name, sd->tosnap);
		if (sd->fromsnap != NULL)
			sd->seenfrom = B_TRUE;
		snap = zfs_open(zhp->zfs_hdl, snapname,
		    ZFS_TYPE_SNAPSHOT);
		if (snap != NULL)
			(void) send_iterate_snap(snap, sd);
	}

	fnvlist_add_nvlist(nvfs, "snaps", sd->parent_snaps);
	fnvlist_add_nvlist(nvfs, "snapprops", sd->snapprops);
	if (sd->holds)
		fnvlist_add_nvlist(nvfs, "snapholds", sd->snapholds);
	fnvlist_free(sd->parent_snaps);
	fnvlist_free(sd->snapprops);
	fnvlist_free(sd->snapholds);

	/* Do not allow the size of the properties list to exceed the limit */
	if ((fnvlist_size(nvfs) + fnvlist_size(sd->fss)) >
	    zhp->zfs_hdl->libzfs_max_nvlist) {
		(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
		    "warning: cannot send %s@%s: the size of the list of "
		    "snapshots and properties is too large to be received "
		    "successfully.\n"
		    "Select a smaller number of snapshots to send.\n"),
		    zhp->zfs_name, sd->tosnap);
		rv = EZFS_NOSPC;
		goto out;
	}
	/* add this fs to nvlist */
	(void) snprintf(guidstring, sizeof (guidstring),
	    "0x%llx", (longlong_t)guid);
	fnvlist_add_nvlist(sd->fss, guidstring, nvfs);

	/* iterate over children */
	if (sd->recursive)
		rv = zfs_iter_filesystems(zhp, send_iterate_fs, sd);

out:
	sd->parent_fromsnap_guid = parent_fromsnap_guid_save;
	sd->fromsnap_txg = fromsnap_txg_save;
	sd->tosnap_txg = tosnap_txg_save;
	fnvlist_free(nv);
	fnvlist_free(nvfs);

	zfs_close(zhp);
	return (rv);
}
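
/*
 * Build the "fss" nvlist describing fsname (and, when recursive, its
 * descendants) by walking the datasets with send_iterate_fs().  Optionally
 * also return the guid-indexed snapshot AVL tree through avlp.
 */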
static int
gather_nvlist(libzfs_handle_t *hdl, const char *fsname, const char *fromsnap,
    const char *tosnap, boolean_t recursive, boolean_t raw, boolean_t doall,
    boolean_t replicate, boolean_t skipmissing, boolean_t verbose,
    boolean_t backup, boolean_t holds, boolean_t props, nvlist_t **nvlp,
    avl_tree_t **avlp)
{
	zfs_handle_t *zhp;
	send_data_t sd = { 0 };
	int error;

	zhp = zfs_open(hdl, fsname, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
	if (zhp == NULL)
		return (EZFS_BADTYPE);

	sd.fss = fnvlist_alloc();
	sd.fsname = fsname;
	sd.fromsnap = fromsnap;
	sd.tosnap = tosnap;
	sd.recursive = recursive;
	sd.raw = raw;
	sd.doall = doall;
	sd.replicate = replicate;
	sd.skipmissing = skipmissing;
	sd.verbose = verbose;
	sd.backup = backup;
	sd.holds = holds;
	sd.props = props;

	if ((error = send_iterate_fs(zhp, &sd)) != 0) {
		fnvlist_free(sd.fss);
		if (avlp != NULL)
			*avlp = NULL;
		*nvlp = NULL;
		return (error);
	}

	if (avlp != NULL && (*avlp = fsavl_create(sd.fss)) == NULL) {
		fnvlist_free(sd.fss);
		*nvlp = NULL;
		return (EZFS_NOMEM);
	}

	*nvlp = sd.fss;
	return (0);
}

/*
 * Routines specific to "zfs send"
 */
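/*
 * State threaded through the per-snapshot callbacks of a single
 * "zfs send" invocation.
 */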
typedef struct send_dump_data {
	/* these are all just the short snapname (the part after the @) */
	const char *fromsnap;
	const char *tosnap;
	char prevsnap[ZFS_MAX_DATASET_NAME_LEN];
	uint64_t prevsnap_obj;
	boolean_t seenfrom, seento, replicate, doall, fromorigin;
|
|
|
    boolean_t dryrun, parsable, progress, embed_data, std_out;
    boolean_t large_block, compress, raw, holds;
    int outfd;
    boolean_t err;
    nvlist_t *fss;
    nvlist_t *snapholds;
    avl_tree_t *fsavl;
    snapfilter_cb_t *filter_cb;
    void *filter_cb_arg;
    nvlist_t *debugnv;
    char holdtag[ZFS_MAX_DATASET_NAME_LEN];
    int cleanup_fd;
    int verbosity;
    uint64_t size;
} send_dump_data_t;

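/*
 * Thin wrapper around lzc_send_space(): it asks the kernel for a stream
 * size estimate for snapname (optionally incremental from "from", with
 * the given lzc_send_flags) and converts any failure into the usual
 * libzfs error reporting.
 */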
static int
zfs_send_space(zfs_handle_t *zhp, const char *snapname, const char *from,
    enum lzc_send_flags flags, uint64_t *spacep)
{
    libzfs_handle_t *hdl = zhp->zfs_hdl;
    int error;

    assert(snapname != NULL);
    error = lzc_send_space(snapname, from, flags, spacep);

    if (error != 0) {
        char errbuf[1024];
        (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
            "warning: cannot estimate space for '%s'"), snapname);

        switch (error) {
        case EXDEV:
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "not an earlier snapshot from the same fs"));
            return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf));

        case ENOENT:
            if (zfs_dataset_exists(hdl, snapname,
                ZFS_TYPE_SNAPSHOT)) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "incremental source (%s) does not exist"),
                    snapname);
            }
            return (zfs_error(hdl, EZFS_NOENT, errbuf));

        case EDQUOT:
        case EFBIG:
        case EIO:
        case ENOLINK:
        case ENOSPC:
        case ENOSTR:
        case ENXIO:
        case EPIPE:
        case ERANGE:
        case EFAULT:
        case EROFS:
        case EINVAL:
            zfs_error_aux(hdl, strerror(error));
            return (zfs_error(hdl, EZFS_BADBACKUP, errbuf));

        default:
            return (zfs_standard_error(hdl, error, errbuf));
        }
    }

    return (0);
}

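/*
 * A minimal usage sketch (illustrative only, not part of the library):
 * the snapshot names below are placeholders, and the flags would normally
 * be the same lzc_send_flags later used for the real send.
 *
 *    uint64_t space = 0;
 *    if (zfs_send_space(zhp, "pool/fs@b", "pool/fs@a",
 *        LZC_SEND_FLAG_COMPRESS, &space) == 0)
 *        (void) printf("estimated stream size: %llu\n",
 *            (u_longlong_t)space);
 */
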
/*
 * Dumps a backup of the given snapshot (incremental from fromsnap if it's not
 * NULL) to the file descriptor specified by outfd.
 */
static int
dump_ioctl(zfs_handle_t *zhp, const char *fromsnap, uint64_t fromsnap_obj,
    boolean_t fromorigin, int outfd, enum lzc_send_flags flags,
    nvlist_t *debugnv)
{
    zfs_cmd_t zc = {"\0"};
    libzfs_handle_t *hdl = zhp->zfs_hdl;
    nvlist_t *thisdbg;

    assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT);
    assert(fromsnap_obj == 0 || !fromorigin);

    (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
    zc.zc_cookie = outfd;
    zc.zc_obj = fromorigin;
    zc.zc_sendobj = zfs_prop_get_int(zhp, ZFS_PROP_OBJSETID);
    zc.zc_fromobj = fromsnap_obj;
    zc.zc_flags = flags;

    thisdbg = fnvlist_alloc();
    if (fromsnap && fromsnap[0] != '\0') {
        fnvlist_add_string(thisdbg, "fromsnap", fromsnap);
    }

    if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_SEND, &zc) != 0) {
        char errbuf[1024];
        (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
            "warning: cannot send '%s'"), zhp->zfs_name);

        fnvlist_add_uint64(thisdbg, "error", errno);
        if (debugnv) {
            fnvlist_add_nvlist(debugnv, zhp->zfs_name, thisdbg);
        }
        fnvlist_free(thisdbg);

        switch (errno) {
        case EXDEV:
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "not an earlier snapshot from the same fs"));
            return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf));

        case EACCES:
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "source key must be loaded"));
            return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf));

        case ENOENT:
            if (zfs_dataset_exists(hdl, zc.zc_name,
                ZFS_TYPE_SNAPSHOT)) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "incremental source (@%s) does not exist"),
                    zc.zc_value);
            }
            return (zfs_error(hdl, EZFS_NOENT, errbuf));

        case EDQUOT:
        case EFBIG:
        case EIO:
        case ENOLINK:
        case ENOSPC:
        case ENOSTR:
        case ENXIO:
        case EPIPE:
        case ERANGE:
        case EFAULT:
        case EROFS:
            zfs_error_aux(hdl, strerror(errno));
            return (zfs_error(hdl, EZFS_BADBACKUP, errbuf));

        default:
            return (zfs_standard_error(hdl, errno, errbuf));
        }
    }

    if (debugnv)
        fnvlist_add_nvlist(debugnv, zhp->zfs_name, thisdbg);
    fnvlist_free(thisdbg);

    return (0);
}

static void
gather_holds(zfs_handle_t *zhp, send_dump_data_t *sdd)
{
    assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT);

    /*
     * zfs_send() only sets snapholds for sends that need them,
     * e.g. replication and doall.
     */
    if (sdd->snapholds == NULL)
        return;

    fnvlist_add_string(sdd->snapholds, zhp->zfs_name, sdd->holdtag);
}

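/*
 * The snapholds list filled in by gather_holds() is only built for sends
 * that need it (replication and doall, per the comment above); the
 * expectation is that the zfs_send() path later turns it into temporary
 * user holds tagged with sdd->holdtag so the snapshots cannot be
 * destroyed while the stream is being generated.
 */
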
int
zfs_send_progress(zfs_handle_t *zhp, int fd, uint64_t *bytes_written,
    uint64_t *blocks_visited)
{
    zfs_cmd_t zc = {"\0"};

    (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
    zc.zc_cookie = fd;
    if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_SEND_PROGRESS, &zc) != 0)
        return (errno);
    if (bytes_written != NULL)
        *bytes_written = zc.zc_cookie;
    if (blocks_visited != NULL)
        *blocks_visited = zc.zc_objset_type;
    return (0);
}

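/*
 * Illustrative sketch of polling zfs_send_progress() from outside the
 * send loop; "outfd" stands for the descriptor the stream is being
 * written to and is a placeholder here.
 *
 *    uint64_t bytes, blocks;
 *    if (zfs_send_progress(zhp, outfd, &bytes, &blocks) == 0)
 *        (void) fprintf(stderr, "%llu bytes, %llu blocks so far\n",
 *            (u_longlong_t)bytes, (u_longlong_t)blocks);
 */
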
static void *
send_progress_thread(void *arg)
{
    progress_arg_t *pa = arg;
    zfs_handle_t *zhp = pa->pa_zhp;
    uint64_t bytes;
    uint64_t blocks;
    char buf[16];
    time_t t;
    struct tm *tm;
    boolean_t firstloop = B_TRUE;

    /*
     * Print the progress from ZFS_IOC_SEND_PROGRESS every second.
     */
    for (;;) {
        int err;
        (void) sleep(1);
        if ((err = zfs_send_progress(zhp, pa->pa_fd, &bytes,
            &blocks)) != 0) {
            if (err == EINTR || err == ENOENT)
                return ((void *)0);
            return ((void *)(uintptr_t)err);
        }

        if (firstloop && !pa->pa_parsable) {
            (void) fprintf(stderr,
                "TIME %s %sSNAPSHOT %s\n",
                pa->pa_estimate ? "BYTES" : " SENT",
                pa->pa_verbosity >= 2 ? " BLOCKS " : "",
                zhp->zfs_name);
            firstloop = B_FALSE;
        }

        (void) time(&t);
        tm = localtime(&t);

        if (pa->pa_verbosity >= 2 && pa->pa_parsable) {
            (void) fprintf(stderr,
                "%02d:%02d:%02d\t%llu\t%llu\t%s\n",
                tm->tm_hour, tm->tm_min, tm->tm_sec,
                (u_longlong_t)bytes, (u_longlong_t)blocks,
                zhp->zfs_name);
        } else if (pa->pa_verbosity >= 2) {
            zfs_nicenum(bytes, buf, sizeof (buf));
            (void) fprintf(stderr,
                "%02d:%02d:%02d %5s %8llu %s\n",
                tm->tm_hour, tm->tm_min, tm->tm_sec,
                buf, (u_longlong_t)blocks, zhp->zfs_name);
        } else if (pa->pa_parsable) {
            (void) fprintf(stderr, "%02d:%02d:%02d\t%llu\t%s\n",
                tm->tm_hour, tm->tm_min, tm->tm_sec,
                (u_longlong_t)bytes, zhp->zfs_name);
        } else {
            zfs_nicebytes(bytes, buf, sizeof (buf));
            (void) fprintf(stderr, "%02d:%02d:%02d %5s %s\n",
                tm->tm_hour, tm->tm_min, tm->tm_sec,
                buf, zhp->zfs_name);
        }
    }
}

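/*
 * send_progress_thread() loops until it is cancelled or until
 * ZFS_IOC_SEND_PROGRESS fails: dump_snapshot() below stops it with
 * pthread_cancel() once the send ioctl returns, joins it, and treats any
 * exit status other than 0 or PTHREAD_CANCELED as the errno reported by
 * zfs_send_progress().
 */
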
static void
send_print_verbose(FILE *fout, const char *tosnap, const char *fromsnap,
    uint64_t size, boolean_t parsable)
{
    if (parsable) {
        if (fromsnap != NULL) {
            (void) fprintf(fout, "incremental\t%s\t%s",
                fromsnap, tosnap);
        } else {
            (void) fprintf(fout, "full\t%s",
                tosnap);
        }
    } else {
        if (fromsnap != NULL) {
            if (strchr(fromsnap, '@') == NULL &&
                strchr(fromsnap, '#') == NULL) {
                (void) fprintf(fout, dgettext(TEXT_DOMAIN,
                    "send from @%s to %s"),
                    fromsnap, tosnap);
            } else {
                (void) fprintf(fout, dgettext(TEXT_DOMAIN,
                    "send from %s to %s"),
                    fromsnap, tosnap);
            }
        } else {
            (void) fprintf(fout, dgettext(TEXT_DOMAIN,
                "full send of %s"),
                tosnap);
        }
    }

    if (parsable) {
        (void) fprintf(fout, "\t%llu",
            (longlong_t)size);
    } else if (size != 0) {
        char buf[16];
        zfs_nicebytes(size, buf, sizeof (buf));
        (void) fprintf(fout, dgettext(TEXT_DOMAIN,
            " estimated size is %s"), buf);
    }
    (void) fprintf(fout, "\n");
}

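/*
 * For reference, the two output styles produced above look roughly like
 * this (snapshot names and sizes are placeholders):
 *
 *    parsable:       incremental\ta\tpool/fs@b\t123456
 *    human readable: send from @a to pool/fs@b estimated size is 1.2M
 */
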
static int
dump_snapshot(zfs_handle_t *zhp, void *arg)
{
    send_dump_data_t *sdd = arg;
    progress_arg_t pa = { 0 };
    pthread_t tid;
    char *thissnap;
    enum lzc_send_flags flags = 0;
    int err;
    boolean_t isfromsnap, istosnap, fromorigin;
    boolean_t exclude = B_FALSE;
    FILE *fout = sdd->std_out ? stdout : stderr;

    err = 0;
    thissnap = strchr(zhp->zfs_name, '@') + 1;
    isfromsnap = (sdd->fromsnap != NULL &&
        strcmp(sdd->fromsnap, thissnap) == 0);

    if (!sdd->seenfrom && isfromsnap) {
        gather_holds(zhp, sdd);
        sdd->seenfrom = B_TRUE;
        (void) strlcpy(sdd->prevsnap, thissnap,
            sizeof (sdd->prevsnap));
        sdd->prevsnap_obj = zfs_prop_get_int(zhp, ZFS_PROP_OBJSETID);
        zfs_close(zhp);
        return (0);
    }

    if (sdd->seento || !sdd->seenfrom) {
        zfs_close(zhp);
        return (0);
    }

    istosnap = (strcmp(sdd->tosnap, thissnap) == 0);
    if (istosnap)
        sdd->seento = B_TRUE;

    if (sdd->large_block)
        flags |= LZC_SEND_FLAG_LARGE_BLOCK;
    if (sdd->embed_data)
        flags |= LZC_SEND_FLAG_EMBED_DATA;
    if (sdd->compress)
        flags |= LZC_SEND_FLAG_COMPRESS;
    if (sdd->raw)
        flags |= LZC_SEND_FLAG_RAW;

    if (!sdd->doall && !isfromsnap && !istosnap) {
        if (sdd->replicate) {
            char *snapname;
            nvlist_t *snapprops;
            /*
             * Filter out all intermediate snapshots except origin
             * snapshots needed to replicate clones.
             */
            nvlist_t *nvfs = fsavl_find(sdd->fsavl,
                zhp->zfs_dmustats.dds_guid, &snapname);

            snapprops = fnvlist_lookup_nvlist(nvfs, "snapprops");
            snapprops = fnvlist_lookup_nvlist(snapprops, thissnap);
            exclude = !nvlist_exists(snapprops, "is_clone_origin");
        } else {
            exclude = B_TRUE;
        }
    }

    /*
     * If a filter function exists, call it to determine whether
     * this snapshot will be sent.
     */
    if (exclude || (sdd->filter_cb != NULL &&
        sdd->filter_cb(zhp, sdd->filter_cb_arg) == B_FALSE)) {
        /*
         * This snapshot is filtered out. Don't send it, and don't
         * set prevsnap_obj, so it will be as if this snapshot didn't
         * exist, and the next accepted snapshot will be sent as
         * an incremental from the last accepted one, or as the
         * first (and full) snapshot in the case of a replication,
         * non-incremental send.
         */
        zfs_close(zhp);
        return (0);
    }

    gather_holds(zhp, sdd);
    fromorigin = sdd->prevsnap[0] == '\0' &&
        (sdd->fromorigin || sdd->replicate);

    if (sdd->verbosity != 0) {
        uint64_t size = 0;
        char fromds[ZFS_MAX_DATASET_NAME_LEN];

        if (sdd->prevsnap[0] != '\0') {
            (void) strlcpy(fromds, zhp->zfs_name, sizeof (fromds));
            *(strchr(fromds, '@') + 1) = '\0';
            (void) strlcat(fromds, sdd->prevsnap, sizeof (fromds));
        }
        if (zfs_send_space(zhp, zhp->zfs_name,
            sdd->prevsnap[0] ? fromds : NULL, flags, &size) != 0) {
            size = 0; /* cannot estimate send space */
        } else {
            send_print_verbose(fout, zhp->zfs_name,
                sdd->prevsnap[0] ? sdd->prevsnap : NULL,
                size, sdd->parsable);
        }
        sdd->size += size;
    }

    if (!sdd->dryrun) {
        /*
         * If progress reporting is requested, spawn a new thread to
         * poll ZFS_IOC_SEND_PROGRESS at a regular interval.
         */
        if (sdd->progress) {
            pa.pa_zhp = zhp;
            pa.pa_fd = sdd->outfd;
            pa.pa_parsable = sdd->parsable;
            pa.pa_estimate = B_FALSE;
            pa.pa_verbosity = sdd->verbosity;

            if ((err = pthread_create(&tid, NULL,
                send_progress_thread, &pa)) != 0) {
                zfs_close(zhp);
                return (err);
            }
        }

        err = dump_ioctl(zhp, sdd->prevsnap, sdd->prevsnap_obj,
            fromorigin, sdd->outfd, flags, sdd->debugnv);

        if (sdd->progress) {
            void *status = NULL;
            (void) pthread_cancel(tid);
(void) pthread_join(tid, &status);
|
|
|
|
int error = (int)(uintptr_t)status;
|
|
|
|
if (error != 0 && status != PTHREAD_CANCELED) {
|
|
|
|
char errbuf[1024];
|
|
|
|
(void) snprintf(errbuf, sizeof (errbuf),
|
|
|
|
dgettext(TEXT_DOMAIN,
|
|
|
|
"progress thread exited nonzero"));
|
|
|
|
return (zfs_standard_error(zhp->zfs_hdl, error,
|
|
|
|
errbuf));
|
|
|
|
}
|
2012-05-10 02:05:14 +04:00
|
|
|
}
|
2011-11-17 22:14:36 +04:00
|
|
|
}
|
2008-11-20 23:01:55 +03:00
|
|
|
|
2010-08-27 01:24:34 +04:00
|
|
|
(void) strcpy(sdd->prevsnap, thissnap);
|
|
|
|
sdd->prevsnap_obj = zfs_prop_get_int(zhp, ZFS_PROP_OBJSETID);
|
2008-11-20 23:01:55 +03:00
|
|
|
zfs_close(zhp);
|
|
|
|
return (err);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
dump_filesystem(zfs_handle_t *zhp, void *arg)
|
|
|
|
{
|
|
|
|
int rv = 0;
|
|
|
|
send_dump_data_t *sdd = arg;
|
|
|
|
boolean_t missingfrom = B_FALSE;
|
2013-09-04 16:00:57 +04:00
|
|
|
zfs_cmd_t zc = {"\0"};
|
2019-03-12 23:13:22 +03:00
|
|
|
uint64_t min_txg = 0, max_txg = 0;
|
2008-11-20 23:01:55 +03:00
|
|
|
|
|
|
|
(void) snprintf(zc.zc_name, sizeof (zc.zc_name), "%s@%s",
|
|
|
|
zhp->zfs_name, sdd->tosnap);
|
2019-10-24 03:29:43 +03:00
|
|
|
if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_OBJSET_STATS, &zc) != 0) {
|
2011-11-17 22:14:36 +04:00
|
|
|
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
|
|
|
|
"WARNING: could not send %s@%s: does not exist\n"),
|
2008-11-20 23:01:55 +03:00
|
|
|
zhp->zfs_name, sdd->tosnap);
|
|
|
|
sdd->err = B_TRUE;
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (sdd->replicate && sdd->fromsnap) {
|
|
|
|
/*
|
|
|
|
* If this fs does not have fromsnap, and we're doing
|
|
|
|
* recursive, we need to send a full stream from the
|
|
|
|
* beginning (or an incremental from the origin if this
|
|
|
|
* is a clone). If we're doing non-recursive, then let
|
|
|
|
* them get the error.
|
|
|
|
*/
|
|
|
|
(void) snprintf(zc.zc_name, sizeof (zc.zc_name), "%s@%s",
|
|
|
|
zhp->zfs_name, sdd->fromsnap);
|
2019-10-24 03:29:43 +03:00
|
|
|
if (zfs_ioctl(zhp->zfs_hdl,
|
2008-11-20 23:01:55 +03:00
|
|
|
ZFS_IOC_OBJSET_STATS, &zc) != 0) {
|
|
|
|
missingfrom = B_TRUE;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-05-29 00:45:14 +04:00
|
|
|
sdd->seenfrom = sdd->seento = sdd->prevsnap[0] = 0;
|
2010-08-27 01:24:34 +04:00
|
|
|
sdd->prevsnap_obj = 0;
|
2010-05-29 00:45:14 +04:00
|
|
|
if (sdd->fromsnap == NULL || missingfrom)
|
|
|
|
sdd->seenfrom = B_TRUE;
|
2008-11-20 23:01:55 +03:00
|
|
|
|
2019-03-12 23:13:22 +03:00
|
|
|
|
2019-03-29 01:48:58 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Iterate through all snapshots and process the ones we will be
|
|
|
|
* sending. If we only have a "from" and "to" snapshot to deal
|
|
|
|
* with, we can avoid iterating through all the other snapshots.
|
|
|
|
*/
|
|
|
|
if (sdd->doall || sdd->replicate || sdd->tosnap == NULL) {
|
|
|
|
if (!sdd->replicate && sdd->fromsnap != NULL)
|
|
|
|
min_txg = get_snap_txg(zhp->zfs_hdl, zhp->zfs_name,
|
|
|
|
sdd->fromsnap);
|
|
|
|
if (!sdd->replicate && sdd->tosnap != NULL)
|
|
|
|
max_txg = get_snap_txg(zhp->zfs_hdl, zhp->zfs_name,
|
|
|
|
sdd->tosnap);
|
|
|
|
rv = zfs_iter_snapshots_sorted(zhp, dump_snapshot, arg,
|
|
|
|
min_txg, max_txg);
|
|
|
|
} else {
|
|
|
|
char snapname[MAXPATHLEN] = { 0 };
|
|
|
|
zfs_handle_t *snap;
|
|
|
|
|
|
|
|
if (!sdd->seenfrom) {
|
2019-05-01 05:41:12 +03:00
|
|
|
(void) snprintf(snapname, sizeof (snapname),
|
2019-03-29 01:48:58 +03:00
|
|
|
"%s@%s", zhp->zfs_name, sdd->fromsnap);
|
|
|
|
snap = zfs_open(zhp->zfs_hdl, snapname,
|
|
|
|
ZFS_TYPE_SNAPSHOT);
|
|
|
|
if (snap != NULL)
|
|
|
|
rv = dump_snapshot(snap, sdd);
|
|
|
|
else
|
|
|
|
rv = -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (rv == 0) {
|
2019-05-01 05:41:12 +03:00
|
|
|
(void) snprintf(snapname, sizeof (snapname),
|
2019-03-29 01:48:58 +03:00
|
|
|
"%s@%s", zhp->zfs_name, sdd->tosnap);
|
|
|
|
snap = zfs_open(zhp->zfs_hdl, snapname,
|
|
|
|
ZFS_TYPE_SNAPSHOT);
|
|
|
|
if (snap != NULL)
|
|
|
|
rv = dump_snapshot(snap, sdd);
|
|
|
|
else
|
|
|
|
rv = -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-05-29 00:45:14 +04:00
|
|
|
if (!sdd->seenfrom) {
|
2011-11-17 22:14:36 +04:00
|
|
|
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
|
2010-05-29 00:45:14 +04:00
|
|
|
"WARNING: could not send %s@%s:\n"
|
2011-11-17 22:14:36 +04:00
|
|
|
"incremental source (%s@%s) does not exist\n"),
|
2010-05-29 00:45:14 +04:00
|
|
|
zhp->zfs_name, sdd->tosnap,
|
|
|
|
zhp->zfs_name, sdd->fromsnap);
|
|
|
|
sdd->err = B_TRUE;
|
|
|
|
} else if (!sdd->seento) {
|
|
|
|
if (sdd->fromsnap) {
|
2011-11-17 22:14:36 +04:00
|
|
|
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
|
2008-11-20 23:01:55 +03:00
|
|
|
"WARNING: could not send %s@%s:\n"
|
2010-05-29 00:45:14 +04:00
|
|
|
"incremental source (%s@%s) "
|
2011-11-17 22:14:36 +04:00
|
|
|
"is not earlier than it\n"),
|
2008-11-20 23:01:55 +03:00
|
|
|
zhp->zfs_name, sdd->tosnap,
|
|
|
|
zhp->zfs_name, sdd->fromsnap);
|
|
|
|
} else {
|
2011-11-17 22:14:36 +04:00
|
|
|
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
|
|
|
|
"WARNING: "
|
|
|
|
"could not send %s@%s: does not exist\n"),
|
2010-05-29 00:45:14 +04:00
|
|
|
zhp->zfs_name, sdd->tosnap);
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
2010-05-29 00:45:14 +04:00
|
|
|
sdd->err = B_TRUE;
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
return (rv);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
dump_filesystems(zfs_handle_t *rzhp, void *arg)
|
|
|
|
{
|
|
|
|
send_dump_data_t *sdd = arg;
|
|
|
|
nvpair_t *fspair;
|
|
|
|
boolean_t needagain, progress;
|
|
|
|
|
|
|
|
if (!sdd->replicate)
|
|
|
|
return (dump_filesystem(rzhp, sdd));
|
|
|
|
|
2010-05-29 00:45:14 +04:00
|
|
|
/* Mark the clone origin snapshots. */
|
|
|
|
for (fspair = nvlist_next_nvpair(sdd->fss, NULL); fspair;
|
|
|
|
fspair = nvlist_next_nvpair(sdd->fss, fspair)) {
|
|
|
|
nvlist_t *nvfs;
|
|
|
|
uint64_t origin_guid = 0;
|
|
|
|
|
2021-01-14 20:53:09 +03:00
|
|
|
nvfs = fnvpair_value_nvlist(fspair);
|
2010-05-29 00:45:14 +04:00
|
|
|
(void) nvlist_lookup_uint64(nvfs, "origin", &origin_guid);
|
|
|
|
if (origin_guid != 0) {
|
|
|
|
char *snapname;
|
|
|
|
nvlist_t *origin_nv = fsavl_find(sdd->fsavl,
|
|
|
|
origin_guid, &snapname);
|
|
|
|
if (origin_nv != NULL) {
|
|
|
|
nvlist_t *snapprops;
|
2021-01-14 20:53:09 +03:00
|
|
|
snapprops = fnvlist_lookup_nvlist(origin_nv,
|
|
|
|
"snapprops");
|
|
|
|
snapprops = fnvlist_lookup_nvlist(snapprops,
|
|
|
|
snapname);
|
|
|
|
fnvlist_add_boolean(snapprops,
|
|
|
|
"is_clone_origin");
|
2010-05-29 00:45:14 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2008-11-20 23:01:55 +03:00
|
|
|
again:
|
|
|
|
needagain = progress = B_FALSE;
|
|
|
|
for (fspair = nvlist_next_nvpair(sdd->fss, NULL); fspair;
|
|
|
|
fspair = nvlist_next_nvpair(sdd->fss, fspair)) {
|
2011-11-17 22:14:36 +04:00
|
|
|
nvlist_t *fslist, *parent_nv;
|
2008-11-20 23:01:55 +03:00
|
|
|
char *fsname;
|
|
|
|
zfs_handle_t *zhp;
|
|
|
|
int err;
|
|
|
|
uint64_t origin_guid = 0;
|
2011-11-17 22:14:36 +04:00
|
|
|
uint64_t parent_guid = 0;
|
2008-11-20 23:01:55 +03:00
|
|
|
|
2021-01-14 20:53:09 +03:00
|
|
|
fslist = fnvpair_value_nvlist(fspair);
|
2008-11-20 23:01:55 +03:00
|
|
|
if (nvlist_lookup_boolean(fslist, "sent") == 0)
|
|
|
|
continue;
|
|
|
|
|
2021-01-14 20:53:09 +03:00
|
|
|
fsname = fnvlist_lookup_string(fslist, "name");
|
2008-11-20 23:01:55 +03:00
|
|
|
(void) nvlist_lookup_uint64(fslist, "origin", &origin_guid);
|
2011-11-17 22:14:36 +04:00
|
|
|
(void) nvlist_lookup_uint64(fslist, "parentfromsnap",
|
|
|
|
&parent_guid);
|
|
|
|
|
|
|
|
if (parent_guid != 0) {
|
|
|
|
parent_nv = fsavl_find(sdd->fsavl, parent_guid, NULL);
|
|
|
|
if (!nvlist_exists(parent_nv, "sent")) {
|
|
|
|
/* parent has not been sent; skip this one */
|
|
|
|
needagain = B_TRUE;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
2008-11-20 23:01:55 +03:00
|
|
|
|
2010-05-29 00:45:14 +04:00
|
|
|
if (origin_guid != 0) {
|
|
|
|
nvlist_t *origin_nv = fsavl_find(sdd->fsavl,
|
|
|
|
origin_guid, NULL);
|
|
|
|
if (origin_nv != NULL &&
|
2011-11-17 22:14:36 +04:00
|
|
|
!nvlist_exists(origin_nv, "sent")) {
|
2010-05-29 00:45:14 +04:00
|
|
|
/*
|
|
|
|
* origin has not been sent yet;
|
|
|
|
* skip this clone.
|
|
|
|
*/
|
|
|
|
needagain = B_TRUE;
|
|
|
|
continue;
|
|
|
|
}
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
zhp = zfs_open(rzhp->zfs_hdl, fsname, ZFS_TYPE_DATASET);
|
|
|
|
if (zhp == NULL)
|
|
|
|
return (-1);
|
|
|
|
err = dump_filesystem(zhp, sdd);
|
2021-01-14 20:53:09 +03:00
|
|
|
fnvlist_add_boolean(fslist, "sent");
|
2008-11-20 23:01:55 +03:00
|
|
|
progress = B_TRUE;
|
|
|
|
zfs_close(zhp);
|
|
|
|
if (err)
|
|
|
|
return (err);
|
|
|
|
}
|
|
|
|
if (needagain) {
|
|
|
|
assert(progress);
|
|
|
|
goto again;
|
|
|
|
}
|
2011-11-17 22:14:36 +04:00
|
|
|
|
|
|
|
/* clean out the sent flags in case we reuse this fss */
|
|
|
|
for (fspair = nvlist_next_nvpair(sdd->fss, NULL); fspair;
|
|
|
|
fspair = nvlist_next_nvpair(sdd->fss, fspair)) {
|
|
|
|
nvlist_t *fslist;
|
|
|
|
|
2021-01-14 20:53:09 +03:00
|
|
|
fslist = fnvpair_value_nvlist(fspair);
|
2011-11-17 22:14:36 +04:00
|
|
|
(void) nvlist_remove_all(fslist, "sent");
|
|
|
|
}
|
|
|
|
|
2008-11-20 23:01:55 +03:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
2016-01-07 00:22:48 +03:00
|
|
|
nvlist_t *
|
|
|
|
zfs_send_resume_token_to_nvlist(libzfs_handle_t *hdl, const char *token)
|
|
|
|
{
|
|
|
|
unsigned int version;
|
|
|
|
int nread, i;
|
|
|
|
unsigned long long checksum, packed_len;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Decode token header, which is:
|
|
|
|
* <token version>-<checksum of payload>-<uncompressed payload length>
|
|
|
|
* Note that the only supported token version is 1.
|
|
|
|
*/
|
|
|
|
nread = sscanf(token, "%u-%llx-%llx-",
|
|
|
|
&version, &checksum, &packed_len);
|
|
|
|
if (nread != 3) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"resume token is corrupt (invalid format)"));
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (version != ZFS_SEND_RESUME_TOKEN_VERSION) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"resume token is corrupt (invalid version %u)"),
|
|
|
|
version);
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* convert hexadecimal representation to binary */
|
|
|
|
token = strrchr(token, '-') + 1;
|
|
|
|
int len = strlen(token) / 2;
|
|
|
|
unsigned char *compressed = zfs_alloc(hdl, len);
|
|
|
|
for (i = 0; i < len; i++) {
|
|
|
|
nread = sscanf(token + i * 2, "%2hhx", compressed + i);
|
|
|
|
if (nread != 1) {
|
|
|
|
free(compressed);
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"resume token is corrupt "
|
|
|
|
"(payload is not hex-encoded)"));
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* verify checksum */
|
|
|
|
zio_cksum_t cksum;
|
2016-07-12 18:50:54 +03:00
|
|
|
fletcher_4_native_varsize(compressed, len, &cksum);
|
2016-01-07 00:22:48 +03:00
|
|
|
if (cksum.zc_word[0] != checksum) {
|
|
|
|
free(compressed);
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"resume token is corrupt (incorrect checksum)"));
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* uncompress */
|
|
|
|
void *packed = zfs_alloc(hdl, packed_len);
|
|
|
|
uLongf packed_len_long = packed_len;
|
|
|
|
if (uncompress(packed, &packed_len_long, compressed, len) != Z_OK ||
|
|
|
|
packed_len_long != packed_len) {
|
|
|
|
free(packed);
|
|
|
|
free(compressed);
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"resume token is corrupt (decompression failed)"));
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* unpack nvlist */
|
|
|
|
nvlist_t *nv;
|
|
|
|
int error = nvlist_unpack(packed, packed_len, &nv, KM_SLEEP);
|
|
|
|
free(packed);
|
|
|
|
free(compressed);
|
|
|
|
if (error != 0) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"resume token is corrupt (nvlist_unpack failed)"));
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
return (nv);
|
|
|
|
}
|
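For illustration, a self-contained sketch of the header parse performed by zfs_send_resume_token_to_nvlist() above; the token string is a fabricated example, not a real resume token:

/* Illustrative sketch only; the token below is fabricated. */
#include <stdio.h>

int
main(void)
{
	/* "<version>-<fletcher4 checksum>-<uncompressed length>-<hex payload>" */
	const char *token = "1-abc123def-2e0-789c6364...";
	unsigned int version;
	unsigned long long checksum, packed_len;

	if (sscanf(token, "%u-%llx-%llx-", &version, &checksum,
	    &packed_len) != 3) {
		(void) fprintf(stderr, "resume token header is malformed\n");
		return (1);
	}
	(void) printf("version %u, checksum 0x%llx, "
	    "uncompressed payload %llu bytes\n",
	    version, checksum, packed_len);
	return (0);
}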
2019-06-19 19:48:13 +03:00
|
|
|
static enum lzc_send_flags
|
|
|
|
lzc_flags_from_sendflags(const sendflags_t *flags)
|
|
|
|
{
|
|
|
|
enum lzc_send_flags lzc_flags = 0;
|
|
|
|
if (flags->largeblock)
|
|
|
|
lzc_flags |= LZC_SEND_FLAG_LARGE_BLOCK;
|
|
|
|
if (flags->embed_data)
|
|
|
|
lzc_flags |= LZC_SEND_FLAG_EMBED_DATA;
|
|
|
|
if (flags->compress)
|
|
|
|
lzc_flags |= LZC_SEND_FLAG_COMPRESS;
|
|
|
|
if (flags->raw)
|
|
|
|
lzc_flags |= LZC_SEND_FLAG_RAW;
|
2020-01-10 21:16:58 +03:00
|
|
|
if (flags->saved)
|
|
|
|
lzc_flags |= LZC_SEND_FLAG_SAVED;
|
2019-06-19 19:48:13 +03:00
|
|
|
return (lzc_flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
estimate_size(zfs_handle_t *zhp, const char *from, int fd, sendflags_t *flags,
|
|
|
|
uint64_t resumeobj, uint64_t resumeoff, uint64_t bytes,
|
|
|
|
const char *redactbook, char *errbuf)
|
|
|
|
{
|
|
|
|
uint64_t size;
|
|
|
|
FILE *fout = flags->dryrun ? stdout : stderr;
|
|
|
|
progress_arg_t pa = { 0 };
|
|
|
|
int err = 0;
|
|
|
|
pthread_t ptid;
|
|
|
|
|
|
|
|
if (flags->progress) {
|
|
|
|
pa.pa_zhp = zhp;
|
|
|
|
pa.pa_fd = fd;
|
|
|
|
pa.pa_parsable = flags->parsable;
|
|
|
|
pa.pa_estimate = B_TRUE;
|
|
|
|
pa.pa_verbosity = flags->verbosity;
|
|
|
|
|
|
|
|
err = pthread_create(&ptid, NULL,
|
|
|
|
send_progress_thread, &pa);
|
|
|
|
if (err != 0) {
|
|
|
|
zfs_error_aux(zhp->zfs_hdl, strerror(errno));
|
|
|
|
return (zfs_error(zhp->zfs_hdl,
|
|
|
|
EZFS_THREADCREATEFAILED, errbuf));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
err = lzc_send_space_resume_redacted(zhp->zfs_name, from,
|
|
|
|
lzc_flags_from_sendflags(flags), resumeobj, resumeoff, bytes,
|
|
|
|
redactbook, fd, &size);
|
|
|
|
|
|
|
|
if (flags->progress) {
|
|
|
|
void *status = NULL;
|
|
|
|
(void) pthread_cancel(ptid);
|
|
|
|
(void) pthread_join(ptid, &status);
|
|
|
|
int error = (int)(uintptr_t)status;
|
|
|
|
if (error != 0 && status != PTHREAD_CANCELED) {
|
|
|
|
char errbuf[1024];
|
|
|
|
(void) snprintf(errbuf, sizeof (errbuf),
|
|
|
|
dgettext(TEXT_DOMAIN, "progress thread exited "
|
|
|
|
"nonzero"));
|
|
|
|
return (zfs_standard_error(zhp->zfs_hdl, error,
|
|
|
|
errbuf));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (err != 0) {
|
|
|
|
zfs_error_aux(zhp->zfs_hdl, strerror(err));
|
|
|
|
return (zfs_error(zhp->zfs_hdl, EZFS_BADBACKUP,
|
|
|
|
errbuf));
|
|
|
|
}
|
|
|
|
send_print_verbose(fout, zhp->zfs_name, from, size,
|
|
|
|
flags->parsable);
|
|
|
|
|
|
|
|
if (flags->parsable) {
|
|
|
|
(void) fprintf(fout, "size\t%llu\n", (longlong_t)size);
|
|
|
|
} else {
|
|
|
|
char buf[16];
|
|
|
|
zfs_nicenum(size, buf, sizeof (buf));
|
|
|
|
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
|
|
|
|
"total estimated size is %s\n"), buf);
|
|
|
|
}
|
|
|
|
return (0);
|
|
|
|
}
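As a point of reference, a minimal sketch (placeholder names, no error handling) of asking the kernel for a send-size estimate through the simpler lzc_send_space() entry point, which estimate_size() above generalizes via lzc_send_space_resume_redacted():

/* Illustrative sketch only; dataset names are placeholders. */
#include <stdio.h>
#include <libzfs_core.h>

int
main(void)
{
	uint64_t size = 0;

	if (libzfs_core_init() != 0)
		return (1);

	/* Estimate an incremental send from @from to @to. */
	if (lzc_send_space("pool/data@to", "pool/data@from", 0, &size) == 0)
		(void) printf("estimated stream size: %llu bytes\n",
		    (unsigned long long)size);

	libzfs_core_fini();
	return (0);
}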
|
|
|
|
|
|
|
|
static boolean_t
|
|
|
|
redact_snaps_contains(const uint64_t *snaps, uint64_t num_snaps, uint64_t guid)
|
|
|
|
{
|
|
|
|
for (int i = 0; i < num_snaps; i++) {
|
|
|
|
if (snaps[i] == guid)
|
|
|
|
return (B_TRUE);
|
|
|
|
}
|
|
|
|
return (B_FALSE);
|
|
|
|
}
|
|
|
|
|
|
|
|
static boolean_t
|
|
|
|
redact_snaps_equal(const uint64_t *snaps1, uint64_t num_snaps1,
|
|
|
|
const uint64_t *snaps2, uint64_t num_snaps2)
|
|
|
|
{
|
|
|
|
if (num_snaps1 != num_snaps2)
|
|
|
|
return (B_FALSE);
|
|
|
|
for (int i = 0; i < num_snaps1; i++) {
|
|
|
|
if (!redact_snaps_contains(snaps2, num_snaps2, snaps1[i]))
|
|
|
|
return (B_FALSE);
|
|
|
|
}
|
|
|
|
return (B_TRUE);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check that the list of redaction snapshots in the bookmark matches the send
|
|
|
|
* we're resuming, and return whether or not it's complete.
|
|
|
|
*
|
|
|
|
* Note that the caller needs to free the contents of *bookname with free() if
|
|
|
|
* this function returns successfully.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
find_redact_book(libzfs_handle_t *hdl, const char *path,
|
|
|
|
const uint64_t *redact_snap_guids, int num_redact_snaps,
|
|
|
|
char **bookname)
|
|
|
|
{
|
|
|
|
char errbuf[1024];
|
|
|
|
int error = 0;
|
|
|
|
nvlist_t *props = fnvlist_alloc();
|
|
|
|
nvlist_t *bmarks;
|
|
|
|
|
|
|
|
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
|
|
|
|
"cannot resume send"));
|
|
|
|
|
|
|
|
fnvlist_add_boolean(props, "redact_complete");
|
|
|
|
fnvlist_add_boolean(props, zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS));
|
|
|
|
error = lzc_get_bookmarks(path, props, &bmarks);
|
2021-01-14 20:53:09 +03:00
|
|
|
fnvlist_free(props);
|
2019-06-19 19:48:13 +03:00
|
|
|
if (error != 0) {
|
|
|
|
if (error == ESRCH) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"nonexistent redaction bookmark provided"));
|
|
|
|
} else if (error == ENOENT) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"dataset to be sent no longer exists"));
|
|
|
|
} else {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"unknown error: %s"), strerror(error));
|
|
|
|
}
|
|
|
|
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
|
|
|
|
}
|
|
|
|
nvpair_t *pair;
|
|
|
|
for (pair = nvlist_next_nvpair(bmarks, NULL); pair;
|
|
|
|
pair = nvlist_next_nvpair(bmarks, pair)) {
|
|
|
|
|
|
|
|
nvlist_t *bmark = fnvpair_value_nvlist(pair);
|
|
|
|
nvlist_t *vallist = fnvlist_lookup_nvlist(bmark,
|
|
|
|
zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS));
|
|
|
|
uint_t len = 0;
|
|
|
|
uint64_t *bmarksnaps = fnvlist_lookup_uint64_array(vallist,
|
|
|
|
ZPROP_VALUE, &len);
|
|
|
|
if (redact_snaps_equal(redact_snap_guids,
|
|
|
|
num_redact_snaps, bmarksnaps, len)) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (pair == NULL) {
|
|
|
|
fnvlist_free(bmarks);
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"no appropriate redaction bookmark exists"));
|
|
|
|
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
|
|
|
|
}
|
|
|
|
char *name = nvpair_name(pair);
|
|
|
|
nvlist_t *bmark = fnvpair_value_nvlist(pair);
|
|
|
|
nvlist_t *vallist = fnvlist_lookup_nvlist(bmark, "redact_complete");
|
|
|
|
boolean_t complete = fnvlist_lookup_boolean_value(vallist,
|
|
|
|
ZPROP_VALUE);
|
|
|
|
if (!complete) {
|
|
|
|
fnvlist_free(bmarks);
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"incomplete redaction bookmark provided"));
|
|
|
|
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
|
|
|
|
}
|
|
|
|
*bookname = strndup(name, ZFS_MAX_DATASET_NAME_LEN);
|
|
|
|
ASSERT3P(*bookname, !=, NULL);
|
|
|
|
fnvlist_free(bmarks);
|
|
|
|
return (0);
|
|
|
|
}
|
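A minimal sketch of listing redaction bookmarks the way find_redact_book() above does; it assumes the property name strings "redact_complete" and "redact_snaps" and uses a placeholder dataset name:

/* Illustrative sketch only; property names and dataset are assumptions. */
#include <stdio.h>
#include <libnvpair.h>
#include <libzfs_core.h>

int
main(void)
{
	nvlist_t *props, *bmarks = NULL;
	nvpair_t *pair;

	if (libzfs_core_init() != 0)
		return (1);

	/* Request only the properties the matching logic needs. */
	props = fnvlist_alloc();
	fnvlist_add_boolean(props, "redact_complete");
	fnvlist_add_boolean(props, "redact_snaps");

	if (lzc_get_bookmarks("pool/data", props, &bmarks) == 0) {
		for (pair = nvlist_next_nvpair(bmarks, NULL); pair != NULL;
		    pair = nvlist_next_nvpair(bmarks, pair))
			(void) printf("bookmark: %s\n", nvpair_name(pair));
		fnvlist_free(bmarks);
	}
	fnvlist_free(props);
	libzfs_core_fini();
	return (0);
}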
2016-01-07 00:22:48 +03:00
|
|
|
|
2020-01-10 21:16:58 +03:00
|
|
|
static int
|
|
|
|
zfs_send_resume_impl(libzfs_handle_t *hdl, sendflags_t *flags, int outfd,
|
|
|
|
nvlist_t *resume_nvl)
|
2016-01-07 00:22:48 +03:00
|
|
|
{
|
|
|
|
char errbuf[1024];
|
|
|
|
char *toname;
|
|
|
|
char *fromname = NULL;
|
|
|
|
uint64_t resumeobj, resumeoff, toguid, fromguid, bytes;
|
|
|
|
zfs_handle_t *zhp;
|
|
|
|
int error = 0;
|
2016-06-16 00:28:36 +03:00
|
|
|
char name[ZFS_MAX_DATASET_NAME_LEN];
|
2016-01-07 00:22:48 +03:00
|
|
|
enum lzc_send_flags lzc_flags = 0;
|
2019-06-19 19:48:13 +03:00
|
|
|
FILE *fout = (flags->verbosity > 0 && flags->dryrun) ? stdout : stderr;
|
|
|
|
uint64_t *redact_snap_guids = NULL;
|
|
|
|
int num_redact_snaps = 0;
|
|
|
|
char *redact_book = NULL;
|
2016-01-07 00:22:48 +03:00
|
|
|
|
|
|
|
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
|
|
|
|
"cannot resume send"));
|
|
|
|
|
2019-06-19 19:48:13 +03:00
|
|
|
if (flags->verbosity != 0) {
|
2017-10-11 01:22:05 +03:00
|
|
|
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
|
2016-01-07 00:22:48 +03:00
|
|
|
"resume token contents:\n"));
|
2017-10-11 01:22:05 +03:00
|
|
|
nvlist_print(fout, resume_nvl);
|
2016-01-07 00:22:48 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
if (nvlist_lookup_string(resume_nvl, "toname", &toname) != 0 ||
|
|
|
|
nvlist_lookup_uint64(resume_nvl, "object", &resumeobj) != 0 ||
|
|
|
|
nvlist_lookup_uint64(resume_nvl, "offset", &resumeoff) != 0 ||
|
|
|
|
nvlist_lookup_uint64(resume_nvl, "bytes", &bytes) != 0 ||
|
|
|
|
nvlist_lookup_uint64(resume_nvl, "toguid", &toguid) != 0) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"resume token is corrupt"));
|
|
|
|
return (zfs_error(hdl, EZFS_FAULT, errbuf));
|
|
|
|
}
|
|
|
|
fromguid = 0;
|
|
|
|
(void) nvlist_lookup_uint64(resume_nvl, "fromguid", &fromguid);
|
|
|
|
|
2016-07-11 20:45:52 +03:00
|
|
|
if (flags->largeblock || nvlist_exists(resume_nvl, "largeblockok"))
|
|
|
|
lzc_flags |= LZC_SEND_FLAG_LARGE_BLOCK;
|
2016-01-07 00:22:48 +03:00
|
|
|
if (flags->embed_data || nvlist_exists(resume_nvl, "embedok"))
|
|
|
|
lzc_flags |= LZC_SEND_FLAG_EMBED_DATA;
|
2016-07-11 20:45:52 +03:00
|
|
|
if (flags->compress || nvlist_exists(resume_nvl, "compressok"))
|
|
|
|
lzc_flags |= LZC_SEND_FLAG_COMPRESS;
|
Native Encryption for ZFS on Linux
This change incorporates three major pieces:
The first change is a keystore that manages wrapping
and encryption keys for encrypted datasets. These
commands mostly involve manipulating the new
DSL Crypto Key ZAP Objects that live in the MOS. Each
encrypted dataset has its own DSL Crypto Key that is
protected with a user's key. This level of indirection
allows users to change their keys without re-encrypting
their entire datasets. The change implements the new
subcommands "zfs load-key", "zfs unload-key" and
"zfs change-key" which allow the user to manage their
encryption keys and settings. In addition, several new
flags and properties have been added to allow dataset
creation and to make mounting and unmounting more
convenient.
The second piece of this patch provides the ability to
encrypt, decrypt, and authenticate protected datasets.
Each object set maintains a Merkle tree of Message
Authentication Codes that protect the lower layers,
similarly to how checksums are maintained. This part
impacts the zio layer, which handles the actual
encryption and generation of MACs, as well as the ARC
and DMU, which need to be able to handle encrypted
buffers and protected data.
The last addition is the ability to do raw, encrypted
sends and receives. The idea here is to send raw
encrypted and compressed data and receive it exactly
as is on a backup system. This means that the dataset
on the receiving system is protected using the same
user key that is in use on the sending side. By doing
so, datasets can be efficiently backed up to an
untrusted system without fear of data being
compromised.
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Signed-off-by: Tom Caputi <tcaputi@datto.com>
Closes #494
Closes #5769
2017-08-14 20:36:48 +03:00
|
|
|
if (flags->raw || nvlist_exists(resume_nvl, "rawok"))
|
|
|
|
lzc_flags |= LZC_SEND_FLAG_RAW;
|
2020-01-10 21:16:58 +03:00
|
|
|
if (flags->saved || nvlist_exists(resume_nvl, "savedok"))
|
|
|
|
lzc_flags |= LZC_SEND_FLAG_SAVED;
|
2016-01-07 00:22:48 +03:00
|
|
|
|
2020-01-10 21:16:58 +03:00
|
|
|
if (flags->saved) {
|
|
|
|
(void) strcpy(name, toname);
|
|
|
|
} else {
|
|
|
|
error = guid_to_name(hdl, toname, toguid, B_FALSE, name);
|
|
|
|
if (error != 0) {
|
|
|
|
if (zfs_dataset_exists(hdl, toname, ZFS_TYPE_DATASET)) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"'%s' is no longer the same snapshot "
|
|
|
|
"used in the initial send"), toname);
|
|
|
|
} else {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"'%s' used in the initial send no "
|
|
|
|
"longer exists"), toname);
|
|
|
|
}
|
|
|
|
return (zfs_error(hdl, EZFS_BADPATH, errbuf));
|
2016-01-07 00:22:48 +03:00
|
|
|
}
|
|
|
|
}
|
2020-01-10 21:16:58 +03:00
|
|
|
|
2016-01-07 00:22:48 +03:00
|
|
|
zhp = zfs_open(hdl, name, ZFS_TYPE_DATASET);
|
|
|
|
if (zhp == NULL) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"unable to access '%s'"), name);
|
|
|
|
return (zfs_error(hdl, EZFS_BADPATH, errbuf));
|
|
|
|
}
|
|
|
|
|
2019-06-19 19:48:13 +03:00
|
|
|
if (nvlist_lookup_uint64_array(resume_nvl, "book_redact_snaps",
|
|
|
|
&redact_snap_guids, (uint_t *)&num_redact_snaps) != 0) {
|
|
|
|
num_redact_snaps = -1;
|
|
|
|
}
|
|
|
|
|
2016-01-07 00:22:48 +03:00
|
|
|
if (fromguid != 0) {
|
2019-06-19 19:48:13 +03:00
|
|
|
if (guid_to_name_redact_snaps(hdl, toname, fromguid, B_TRUE,
|
|
|
|
redact_snap_guids, num_redact_snaps, name) != 0) {
|
2016-01-07 00:22:48 +03:00
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"incremental source %#llx no longer exists"),
|
|
|
|
(longlong_t)fromguid);
|
|
|
|
return (zfs_error(hdl, EZFS_BADPATH, errbuf));
|
|
|
|
}
|
|
|
|
fromname = name;
|
|
|
|
}
|
|
|
|
|
2019-06-19 19:48:13 +03:00
|
|
|
redact_snap_guids = NULL;
|
|
|
|
|
|
|
|
if (nvlist_lookup_uint64_array(resume_nvl,
|
|
|
|
zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS), &redact_snap_guids,
|
|
|
|
(uint_t *)&num_redact_snaps) == 0) {
|
|
|
|
char path[ZFS_MAX_DATASET_NAME_LEN];
|
|
|
|
|
|
|
|
(void) strlcpy(path, toname, sizeof (path));
|
|
|
|
char *at = strchr(path, '@');
|
|
|
|
ASSERT3P(at, !=, NULL);
|
|
|
|
|
|
|
|
*at = '\0';
|
|
|
|
|
|
|
|
if ((error = find_redact_book(hdl, path, redact_snap_guids,
|
|
|
|
num_redact_snaps, &redact_book)) != 0) {
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (flags->verbosity != 0) {
|
|
|
|
/*
|
|
|
|
* Some of these may have come from the resume token, set them
|
|
|
|
* here for size estimate purposes.
|
|
|
|
*/
|
|
|
|
sendflags_t tmpflags = *flags;
|
|
|
|
if (lzc_flags & LZC_SEND_FLAG_LARGE_BLOCK)
|
|
|
|
tmpflags.largeblock = B_TRUE;
|
|
|
|
if (lzc_flags & LZC_SEND_FLAG_COMPRESS)
|
|
|
|
tmpflags.compress = B_TRUE;
|
|
|
|
if (lzc_flags & LZC_SEND_FLAG_EMBED_DATA)
|
|
|
|
tmpflags.embed_data = B_TRUE;
|
|
|
|
error = estimate_size(zhp, fromname, outfd, &tmpflags,
|
|
|
|
resumeobj, resumeoff, bytes, redact_book, errbuf);
|
2016-01-07 00:22:48 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
if (!flags->dryrun) {
|
|
|
|
progress_arg_t pa = { 0 };
|
|
|
|
pthread_t tid;
|
|
|
|
/*
|
|
|
|
* If progress reporting is requested, spawn a new thread to
|
|
|
|
* poll ZFS_IOC_SEND_PROGRESS at a regular interval.
|
|
|
|
*/
|
|
|
|
if (flags->progress) {
|
|
|
|
pa.pa_zhp = zhp;
|
|
|
|
pa.pa_fd = outfd;
|
|
|
|
pa.pa_parsable = flags->parsable;
|
2019-06-19 19:48:13 +03:00
|
|
|
pa.pa_estimate = B_FALSE;
|
|
|
|
pa.pa_verbosity = flags->verbosity;
|
2016-01-07 00:22:48 +03:00
|
|
|
|
|
|
|
error = pthread_create(&tid, NULL,
|
|
|
|
send_progress_thread, &pa);
|
|
|
|
if (error != 0) {
|
2019-06-19 19:48:13 +03:00
|
|
|
if (redact_book != NULL)
|
|
|
|
free(redact_book);
|
2016-01-07 00:22:48 +03:00
|
|
|
zfs_close(zhp);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-06-19 19:48:13 +03:00
|
|
|
error = lzc_send_resume_redacted(zhp->zfs_name, fromname, outfd,
|
|
|
|
lzc_flags, resumeobj, resumeoff, redact_book);
|
|
|
|
if (redact_book != NULL)
|
|
|
|
free(redact_book);
|
2016-01-07 00:22:48 +03:00
|
|
|
|
|
|
|
if (flags->progress) {
|
2019-06-19 19:48:13 +03:00
|
|
|
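			/*
			 * Stop the progress reporting thread and pick up any
			 * error it returned.
			 */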
			void *status = NULL;
			(void) pthread_cancel(tid);
			(void) pthread_join(tid, &status);
			int error = (int)(uintptr_t)status;
			if (error != 0 && status != PTHREAD_CANCELED) {
				char errbuf[1024];
				(void) snprintf(errbuf, sizeof (errbuf),
				    dgettext(TEXT_DOMAIN,
				    "progress thread exited nonzero"));
				return (zfs_standard_error(hdl, error, errbuf));
			}
		}

		char errbuf[1024];
		(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
		    "warning: cannot send '%s'"), zhp->zfs_name);

		zfs_close(zhp);

		switch (error) {
		case 0:
			return (0);
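		/*
		 * EACCES here indicates that the encryption key for the
		 * source dataset is not loaded.
		 */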
		case EACCES:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "source key must be loaded"));
			return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf));
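		/*
		 * If the snapshot being sent still exists, ESRCH means the
		 * incremental source could not be found.
		 */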
		case ESRCH:
			if (lzc_exists(zhp->zfs_name)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "incremental source could not be found"));
			}
			return (zfs_error(hdl, EZFS_NOENT, errbuf));

		case EXDEV:
		case ENOENT:
		case EDQUOT:
		case EFBIG:
		case EIO:
		case ENOLINK:
		case ENOSPC:
		case ENOSTR:
		case ENXIO:
		case EPIPE:
		case ERANGE:
		case EFAULT:
		case EROFS:
			zfs_error_aux(hdl, strerror(errno));
			return (zfs_error(hdl, EZFS_BADBACKUP, errbuf));

		default:
			return (zfs_standard_error(hdl, errno, errbuf));
		}
	} else {
		if (redact_book != NULL)
			free(redact_book);
	}

	zfs_close(zhp);

	return (error);
}
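
/*
 * Resume an interrupted send using the opaque token reported by the receiving
 * system (the receive_resume_token property of the partially received
 * dataset).
 *
 * Illustrative use only; "hdl", "flags", "dst_zhp" (a handle for the
 * partially received dataset), and the output descriptor are placeholders:
 *
 *	char token[ZFS_MAXPROPLEN];
 *	if (zfs_prop_get(dst_zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN, token,
 *	    sizeof (token), NULL, NULL, 0, B_TRUE) == 0)
 *		(void) zfs_send_resume(hdl, &flags, fileno(stdout), token);
 */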
int
zfs_send_resume(libzfs_handle_t *hdl, sendflags_t *flags, int outfd,
    const char *resume_token)
{
	int ret;
	char errbuf[1024];
	nvlist_t *resume_nvl;

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot resume send"));

	resume_nvl = zfs_send_resume_token_to_nvlist(hdl, resume_token);
	if (resume_nvl == NULL) {
		/*
		 * zfs_error_aux has already been set by
		 * zfs_send_resume_token_to_nvlist()
		 */
		return (zfs_error(hdl, EZFS_FAULT, errbuf));
	}

	ret = zfs_send_resume_impl(hdl, flags, outfd, resume_nvl);
	fnvlist_free(resume_nvl);

	return (ret);
}
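
/*
 * Send a partially received ("saved") dataset. The resume state is taken from
 * the dataset's own receive_resume_token property; if a "resume_token" is
 * passed in as well, its object/offset/bytes are used as the restart point and
 * its "toguid" must match the dataset's own token.
 */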
int
zfs_send_saved(zfs_handle_t *zhp, sendflags_t *flags, int outfd,
    const char *resume_token)
{
	int ret;
	libzfs_handle_t *hdl = zhp->zfs_hdl;
	nvlist_t *saved_nvl = NULL, *resume_nvl = NULL;
	uint64_t saved_guid = 0, resume_guid = 0;
	uint64_t obj = 0, off = 0, bytes = 0;
	char token_buf[ZFS_MAXPROPLEN];
	char errbuf[1024];

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "saved send failed"));

	ret = zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN,
	    token_buf, sizeof (token_buf), NULL, NULL, 0, B_TRUE);
	if (ret != 0)
		goto out;

	saved_nvl = zfs_send_resume_token_to_nvlist(hdl, token_buf);
	if (saved_nvl == NULL) {
		/*
		 * zfs_error_aux has already been set by
		 * zfs_send_resume_token_to_nvlist()
		 */
		ret = zfs_error(hdl, EZFS_FAULT, errbuf);
		goto out;
	}

	/*
	 * If a resume token is provided we use the object and offset
	 * from that instead of the default, which starts from the
	 * beginning.
	 */
	if (resume_token != NULL) {
		resume_nvl = zfs_send_resume_token_to_nvlist(hdl,
		    resume_token);
		if (resume_nvl == NULL) {
			ret = zfs_error(hdl, EZFS_FAULT, errbuf);
			goto out;
		}

		if (nvlist_lookup_uint64(resume_nvl, "object", &obj) != 0 ||
		    nvlist_lookup_uint64(resume_nvl, "offset", &off) != 0 ||
		    nvlist_lookup_uint64(resume_nvl, "bytes", &bytes) != 0 ||
		    nvlist_lookup_uint64(resume_nvl, "toguid",
		    &resume_guid) != 0) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "provided resume token is corrupt"));
			ret = zfs_error(hdl, EZFS_FAULT, errbuf);
			goto out;
		}

		if (nvlist_lookup_uint64(saved_nvl, "toguid",
		    &saved_guid)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "dataset's resume token is corrupt"));
			ret = zfs_error(hdl, EZFS_FAULT, errbuf);
			goto out;
		}

		if (resume_guid != saved_guid) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "provided resume token does not match dataset"));
			ret = zfs_error(hdl, EZFS_BADBACKUP, errbuf);
			goto out;
		}
	}

	(void) nvlist_remove_all(saved_nvl, "object");
	fnvlist_add_uint64(saved_nvl, "object", obj);

	(void) nvlist_remove_all(saved_nvl, "offset");
	fnvlist_add_uint64(saved_nvl, "offset", off);

	(void) nvlist_remove_all(saved_nvl, "bytes");
	fnvlist_add_uint64(saved_nvl, "bytes", bytes);

	(void) nvlist_remove_all(saved_nvl, "toname");
	fnvlist_add_string(saved_nvl, "toname", zhp->zfs_name);

	ret = zfs_send_resume_impl(hdl, flags, outfd, saved_nvl);

out:
	fnvlist_free(saved_nvl);
	fnvlist_free(resume_nvl);
	return (ret);
}

/*
 * This function informs the target system that the recursive send is complete.
 * The record is also expected in the case of a send -p.
 */
static int
send_conclusion_record(int fd, zio_cksum_t *zc)
{
	dmu_replay_record_t drr = { 0 };
	drr.drr_type = DRR_END;
	if (zc != NULL)
		drr.drr_u.drr_end.drr_checksum = *zc;
	if (write(fd, &drr, sizeof (drr)) == -1) {
		return (errno);
	}
	return (0);
}

/*
 * This function is responsible for sending the records that contain the
 * necessary information for the target system's libzfs to be able to set the
 * properties of the filesystem being received, or to be able to prepare for
 * a recursive receive.
 *
 * The "zhp" argument is the handle of the snapshot we are sending
 * (the "tosnap"). The "from" argument is the short snapshot name (the part
 * after the @) of the incremental source.
 */
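
/*
 * For reference, a sketch of the nvlist packed into the DRR_BEGIN payload
 * when "gather_props" is set (names as used below; values depend on the
 * stream being built):
 *
 *	fromsnap	short name of the incremental source (if any)
 *	tosnap		short name of the snapshot being sent
 *	not_recursive	present when "recursive" is not set
 *	raw		present for raw sends
 *	fss		per-filesystem nvlists from gather_nvlist()
 */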
static int
send_prelim_records(zfs_handle_t *zhp, const char *from, int fd,
    boolean_t gather_props, boolean_t recursive, boolean_t verbose,
    boolean_t dryrun, boolean_t raw, boolean_t replicate, boolean_t skipmissing,
    boolean_t backup, boolean_t holds, boolean_t props, boolean_t doall,
    nvlist_t **fssp, avl_tree_t **fsavlp)
{
	int err = 0;
	char *packbuf = NULL;
	size_t buflen = 0;
	zio_cksum_t zc = { {0} };
	int featureflags = 0;
	/* name of filesystem/volume that contains snapshot we are sending */
	char tofs[ZFS_MAX_DATASET_NAME_LEN];
	/* short name of snap we are sending */
	char *tosnap = "";

	char errbuf[1024];
	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "warning: cannot send '%s'"), zhp->zfs_name);
	if (zhp->zfs_type == ZFS_TYPE_FILESYSTEM && zfs_prop_get_int(zhp,
	    ZFS_PROP_VERSION) >= ZPL_VERSION_SA) {
		featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
	}

	if (holds)
		featureflags |= DMU_BACKUP_FEATURE_HOLDS;

	(void) strlcpy(tofs, zhp->zfs_name, ZFS_MAX_DATASET_NAME_LEN);
	char *at = strchr(tofs, '@');
	if (at != NULL) {
		*at = '\0';
		tosnap = at + 1;
	}

	if (gather_props) {
		nvlist_t *hdrnv = fnvlist_alloc();
		nvlist_t *fss = NULL;

		if (from != NULL)
			fnvlist_add_string(hdrnv, "fromsnap", from);
		fnvlist_add_string(hdrnv, "tosnap", tosnap);
		if (!recursive)
			fnvlist_add_boolean(hdrnv, "not_recursive");

		if (raw) {
			fnvlist_add_boolean(hdrnv, "raw");
		}

		if ((err = gather_nvlist(zhp->zfs_hdl, tofs,
		    from, tosnap, recursive, raw, doall, replicate, skipmissing,
		    verbose, backup, holds, props, &fss, fsavlp)) != 0) {
			return (zfs_error(zhp->zfs_hdl, EZFS_BADBACKUP,
			    errbuf));
		}
		/*
		 * Do not allow the size of the properties list to exceed
		 * the limit
		 */
		if ((fnvlist_size(fss) + fnvlist_size(hdrnv)) >
		    zhp->zfs_hdl->libzfs_max_nvlist) {
			(void) snprintf(errbuf, sizeof (errbuf),
			    dgettext(TEXT_DOMAIN, "warning: cannot send '%s': "
			    "the size of the list of snapshots and properties "
			    "is too large to be received successfully.\n"
			    "Select a smaller number of snapshots to send.\n"),
			    zhp->zfs_name);
			return (zfs_error(zhp->zfs_hdl, EZFS_NOSPC,
			    errbuf));
		}
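		/*
		 * Attach the gathered filesystem nvlists and pack the whole
		 * header payload for the DRR_BEGIN record written below.
		 */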
		fnvlist_add_nvlist(hdrnv, "fss", fss);
		VERIFY0(nvlist_pack(hdrnv, &packbuf, &buflen, NV_ENCODE_XDR,
		    0));
		if (fssp != NULL) {
			*fssp = fss;
		} else {
			fnvlist_free(fss);
		}
		fnvlist_free(hdrnv);
	}

	if (!dryrun) {
		dmu_replay_record_t drr = { 0 };
		/* write first begin record */
		drr.drr_type = DRR_BEGIN;
		drr.drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
		DMU_SET_STREAM_HDRTYPE(drr.drr_u.drr_begin.
		    drr_versioninfo, DMU_COMPOUNDSTREAM);
		DMU_SET_FEATUREFLAGS(drr.drr_u.drr_begin.
		    drr_versioninfo, featureflags);
		if (snprintf(drr.drr_u.drr_begin.drr_toname,
		    sizeof (drr.drr_u.drr_begin.drr_toname), "%s@%s", tofs,
		    tosnap) >= sizeof (drr.drr_u.drr_begin.drr_toname)) {
			return (zfs_error(zhp->zfs_hdl, EZFS_BADBACKUP,
			    errbuf));
		}
		drr.drr_payloadlen = buflen;

		err = dump_record(&drr, packbuf, buflen, &zc, fd);
		free(packbuf);
		if (err != 0) {
			zfs_error_aux(zhp->zfs_hdl, strerror(err));
			return (zfs_error(zhp->zfs_hdl, EZFS_BADBACKUP,
			    errbuf));
		}
		err = send_conclusion_record(fd, &zc);
		if (err != 0) {
			zfs_error_aux(zhp->zfs_hdl, strerror(err));
			return (zfs_error(zhp->zfs_hdl, EZFS_BADBACKUP,
			    errbuf));
		}
	}
	return (0);
}

/*
 * Generate a send stream. The "zhp" argument is the filesystem/volume
 * that contains the snapshot to send. The "fromsnap" argument is the
 * short name (the part after the '@') of the snapshot that is the
 * incremental source to send from (if non-NULL). The "tosnap" argument
 * is the short name of the snapshot to send.
 *
 * The content of the send stream is the snapshot identified by
 * 'tosnap'. Incremental streams are requested in two ways:
 *     - from the snapshot identified by "fromsnap" (if non-null) or
 *     - from the origin of the dataset identified by zhp, which must
 *       be a clone. In this case, "fromsnap" is null and "fromorigin"
 *       is TRUE.
 *
 * The send stream is recursive (i.e. dumps a hierarchy of snapshots) and
 * uses a special header (with a hdrtype field of DMU_COMPOUNDSTREAM)
 * if "replicate" is set. If "doall" is set, dump all the intermediate
 * snapshots. The DMU_COMPOUNDSTREAM header is used in the "doall"
 * case too. If "props" is set, send properties.
 */
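
/*
 * Illustrative call only; the handle, snapshot names, and output descriptor
 * are placeholders (roughly equivalent to "zfs send -R -p"):
 *
 *	sendflags_t flags = { 0 };
 *	flags.replicate = B_TRUE;
 *	flags.props = B_TRUE;
 *	int err = zfs_send(zhp, "monday", "tuesday", &flags,
 *	    fileno(stdout), NULL, NULL, NULL);
 */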
int
zfs_send(zfs_handle_t *zhp, const char *fromsnap, const char *tosnap,
    sendflags_t *flags, int outfd, snapfilter_cb_t filter_func,
    void *cb_arg, nvlist_t **debugnvp)
{
	char errbuf[1024];
	send_dump_data_t sdd = { 0 };
	int err = 0;
	nvlist_t *fss = NULL;
	avl_tree_t *fsavl = NULL;
	static uint64_t holdseq;
	int spa_version;
	int featureflags = 0;
	FILE *fout;

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot send '%s'"), zhp->zfs_name);

	if (fromsnap && fromsnap[0] == '\0') {
		zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
		    "zero-length incremental source"));
		return (zfs_error(zhp->zfs_hdl, EZFS_NOENT, errbuf));
	}

	if (zhp->zfs_type == ZFS_TYPE_FILESYSTEM) {
		uint64_t version;
		version = zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
		if (version >= ZPL_VERSION_SA) {
			featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
		}
	}

	if (flags->holds)
		featureflags |= DMU_BACKUP_FEATURE_HOLDS;

	if (flags->replicate || flags->doall || flags->props ||
	    flags->holds || flags->backup) {
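		/*
		 * Stream types that carry properties, holds, or a full
		 * hierarchy start with the compound-stream header emitted by
		 * send_prelim_records().
		 */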
		char full_tosnap_name[ZFS_MAX_DATASET_NAME_LEN];
		if (snprintf(full_tosnap_name, sizeof (full_tosnap_name),
		    "%s@%s", zhp->zfs_name, tosnap) >=
		    sizeof (full_tosnap_name)) {
			err = EINVAL;
			goto stderr_out;
		}
		zfs_handle_t *tosnap = zfs_open(zhp->zfs_hdl,
		    full_tosnap_name, ZFS_TYPE_SNAPSHOT);
		if (tosnap == NULL) {
			err = -1;
			goto err_out;
		}
		err = send_prelim_records(tosnap, fromsnap, outfd,
		    flags->replicate || flags->props || flags->holds,
		    flags->replicate, flags->verbosity > 0, flags->dryrun,
		    flags->raw, flags->replicate, flags->skipmissing,
		    flags->backup, flags->holds, flags->props, flags->doall,
		    &fss, &fsavl);
		zfs_close(tosnap);
		if (err != 0)
			goto err_out;
	}

	/* dump each stream */
	sdd.fromsnap = fromsnap;
	sdd.tosnap = tosnap;
	sdd.outfd = outfd;
	sdd.replicate = flags->replicate;
	sdd.doall = flags->doall;
	sdd.fromorigin = flags->fromorigin;
	sdd.fss = fss;
	sdd.fsavl = fsavl;
sdd.verbosity = flags->verbosity;
|
2011-11-17 22:14:36 +04:00
|
|
|
sdd.parsable = flags->parsable;
|
2012-05-10 02:05:14 +04:00
|
|
|
sdd.progress = flags->progress;
|
2011-11-17 22:14:36 +04:00
|
|
|
sdd.dryrun = flags->dryrun;
|
2014-11-03 23:15:08 +03:00
|
|
|
sdd.large_block = flags->largeblock;
|
2014-06-06 01:19:08 +04:00
|
|
|
sdd.embed_data = flags->embed_data;
|
2016-07-11 20:45:52 +03:00
|
|
|
sdd.compress = flags->compress;
|
Native Encryption for ZFS on Linux
This change incorporates three major pieces:
The first change is a keystore that manages wrapping
and encryption keys for encrypted datasets. These
commands mostly involve manipulating the new
DSL Crypto Key ZAP Objects that live in the MOS. Each
encrypted dataset has its own DSL Crypto Key that is
protected with a user's key. This level of indirection
allows users to change their keys without re-encrypting
their entire datasets. The change implements the new
subcommands "zfs load-key", "zfs unload-key" and
"zfs change-key" which allow the user to manage their
encryption keys and settings. In addition, several new
flags and properties have been added to allow dataset
creation and to make mounting and unmounting more
convenient.
The second piece of this patch provides the ability to
encrypt, decrypt, and authenticate protected datasets.
Each object set maintains a Merkle tree of Message
Authentication Codes that protect the lower layers,
similarly to how checksums are maintained. This part
impacts the zio layer, which handles the actual
encryption and generation of MACs, as well as the ARC
and DMU, which need to be able to handle encrypted
buffers and protected data.
The last addition is the ability to do raw, encrypted
sends and receives. The idea here is to send raw
encrypted and compressed data and receive it exactly
as is on a backup system. This means that the dataset
on the receiving system is protected using the same
user key that is in use on the sending side. By doing
so, datasets can be efficiently backed up to an
untrusted system without fear of data being
compromised.
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Signed-off-by: Tom Caputi <tcaputi@datto.com>
Closes #494
Closes #5769
2017-08-14 20:36:48 +03:00
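A minimal hedged sketch of the key-management path described above, assuming
the lzc_load_key()/lzc_unload_key() wrappers; the dataset name and the raw key
bytes are placeholders, not a recommended way to handle key material:

#include <string.h>
#include <sys/types.h>
#include <libzfs_core.h>

static int
key_cycle_example(void)
{
	uint8_t wkey[32];
	int err;

	/* Stand-in for 32 bytes of real wrapping-key material. */
	memset(wkey, 0x2a, sizeof (wkey));

	if ((err = libzfs_core_init()) != 0)
		return (err);

	/* Unwrap the dataset's DSL Crypto Key with the user's key. */
	err = lzc_load_key("pool/secure", B_FALSE, wkey, sizeof (wkey));
	if (err == 0) {
		/* ... mount and use the encrypted dataset here ... */
		err = lzc_unload_key("pool/secure");
	}

	libzfs_core_fini();
	return (err);
}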
|
|
|
sdd.raw = flags->raw;
|
2019-02-15 23:41:38 +03:00
|
|
|
sdd.holds = flags->holds;
|
2010-05-29 00:45:14 +04:00
|
|
|
sdd.filter_cb = filter_func;
|
|
|
|
sdd.filter_cb_arg = cb_arg;
|
|
|
|
if (debugnvp)
|
|
|
|
sdd.debugnv = *debugnvp;
|
Implement Redacted Send/Receive
2019-06-19 19:48:13 +03:00
|
|
|
if (sdd.verbosity != 0 && sdd.dryrun)
|
2015-07-11 02:54:52 +03:00
|
|
|
sdd.std_out = B_TRUE;
|
|
|
|
fout = sdd.std_out ? stdout : stderr;
|
2012-07-12 16:32:45 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Some flags require that we place user holds on the datasets that are
|
|
|
|
* being sent so they don't get destroyed during the send. We can skip
|
|
|
|
* this step if the pool is imported read-only since the datasets cannot
|
|
|
|
* be destroyed.
|
|
|
|
*/
|
|
|
|
if (!flags->dryrun && !zpool_get_prop_int(zfs_get_pool_handle(zhp),
|
|
|
|
ZPOOL_PROP_READONLY, NULL) &&
|
|
|
|
zfs_spa_version(zhp, &spa_version) == 0 &&
|
|
|
|
spa_version >= SPA_VERSION_USERREFS &&
|
|
|
|
(flags->doall || flags->replicate)) {
|
2010-08-27 01:24:34 +04:00
|
|
|
++holdseq;
|
|
|
|
(void) snprintf(sdd.holdtag, sizeof (sdd.holdtag),
|
|
|
|
".send-%d-%llu", getpid(), (u_longlong_t)holdseq);
|
2021-04-08 23:17:38 +03:00
|
|
|
sdd.cleanup_fd = open(ZFS_DEV, O_RDWR | O_CLOEXEC);
|
2010-08-27 01:24:34 +04:00
|
|
|
if (sdd.cleanup_fd < 0) {
|
|
|
|
err = errno;
|
|
|
|
goto stderr_out;
|
|
|
|
}
|
2013-05-25 06:06:23 +04:00
|
|
|
sdd.snapholds = fnvlist_alloc();
|
2010-08-27 01:24:34 +04:00
|
|
|
} else {
|
|
|
|
sdd.cleanup_fd = -1;
|
2013-05-25 06:06:23 +04:00
|
|
|
sdd.snapholds = NULL;
|
2010-08-27 01:24:34 +04:00
|
|
|
}
|
2019-02-15 23:41:38 +03:00
|
|
|
|
Implement Redacted Send/Receive
2019-06-19 19:48:13 +03:00
|
|
|
if (flags->verbosity != 0 || sdd.snapholds != NULL) {
|
2011-11-17 22:14:36 +04:00
|
|
|
/*
|
|
|
|
* Do a verbose no-op dry run to get all the verbose output
|
2013-05-25 06:06:23 +04:00
|
|
|
* or to gather snapshot holds before generating any data,
|
|
|
|
* then do a non-verbose real run to generate the streams.
|
2011-11-17 22:14:36 +04:00
|
|
|
*/
|
|
|
|
sdd.dryrun = B_TRUE;
|
|
|
|
err = dump_filesystems(zhp, &sdd);
|
2013-05-25 06:06:23 +04:00
|
|
|
|
|
|
|
if (err != 0)
|
|
|
|
goto stderr_out;
|
|
|
|
|
Implement Redacted Send/Receive
2019-06-19 19:48:13 +03:00
|
|
|
if (flags->verbosity != 0) {
|
2013-05-25 06:06:23 +04:00
|
|
|
if (flags->parsable) {
|
2015-07-11 02:54:52 +03:00
|
|
|
(void) fprintf(fout, "size\t%llu\n",
|
2013-05-25 06:06:23 +04:00
|
|
|
(longlong_t)sdd.size);
|
|
|
|
} else {
|
|
|
|
char buf[16];
|
2017-05-02 23:43:53 +03:00
|
|
|
zfs_nicebytes(sdd.size, buf, sizeof (buf));
|
2015-07-11 02:54:52 +03:00
|
|
|
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
|
2013-05-25 06:06:23 +04:00
|
|
|
"total estimated size is %s\n"), buf);
|
|
|
|
}
|
2011-11-17 22:14:36 +04:00
|
|
|
}
|
2013-05-25 06:06:23 +04:00
|
|
|
|
|
|
|
/* Ensure no snaps found is treated as an error. */
|
|
|
|
if (!sdd.seento) {
|
|
|
|
err = ENOENT;
|
|
|
|
goto err_out;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Skip the second run if dryrun was requested. */
|
|
|
|
if (flags->dryrun)
|
|
|
|
goto err_out;
|
|
|
|
|
|
|
|
if (sdd.snapholds != NULL) {
|
|
|
|
err = zfs_hold_nvl(zhp, sdd.cleanup_fd, sdd.snapholds);
|
|
|
|
if (err != 0)
|
|
|
|
goto stderr_out;
|
|
|
|
|
|
|
|
fnvlist_free(sdd.snapholds);
|
|
|
|
sdd.snapholds = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
sdd.dryrun = B_FALSE;
|
Implement Redacted Send/Receive
2019-06-19 19:48:13 +03:00
|
|
|
sdd.verbosity = 0;
|
2011-11-17 22:14:36 +04:00
|
|
|
}
|
2013-05-25 06:06:23 +04:00
|
|
|
|
2008-11-20 23:01:55 +03:00
|
|
|
err = dump_filesystems(zhp, &sdd);
|
|
|
|
fsavl_destroy(fsavl);
|
2021-01-14 20:53:09 +03:00
|
|
|
fnvlist_free(fss);
|
2008-11-20 23:01:55 +03:00
|
|
|
|
2013-05-25 06:06:23 +04:00
|
|
|
/* Ensure no snaps found is treated as an error. */
|
|
|
|
if (err == 0 && !sdd.seento)
|
|
|
|
err = ENOENT;
|
|
|
|
|
2010-08-27 01:24:34 +04:00
|
|
|
if (sdd.cleanup_fd != -1) {
|
|
|
|
VERIFY(0 == close(sdd.cleanup_fd));
|
|
|
|
sdd.cleanup_fd = -1;
|
|
|
|
}
|
|
|
|
|
2011-11-17 22:14:36 +04:00
|
|
|
if (!flags->dryrun && (flags->replicate || flags->doall ||
|
2019-02-15 23:41:38 +03:00
|
|
|
flags->props || flags->backup || flags->holds)) {
|
2008-11-20 23:01:55 +03:00
|
|
|
/*
|
|
|
|
* write final end record. NB: want to do this even if
|
|
|
|
* there was some error, because it might not be totally
|
|
|
|
* failed.
|
|
|
|
*/
|
Implement Redacted Send/Receive
2019-06-19 19:48:13 +03:00
|
|
|
err = send_conclusion_record(outfd, NULL);
|
|
|
|
if (err != 0)
|
|
|
|
return (zfs_standard_error(zhp->zfs_hdl, err, errbuf));
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
return (err || sdd.err);
|
2010-05-29 00:45:14 +04:00
|
|
|
|
|
|
|
stderr_out:
|
|
|
|
err = zfs_standard_error(zhp->zfs_hdl, err, errbuf);
|
|
|
|
err_out:
|
2013-05-25 06:06:23 +04:00
|
|
|
fsavl_destroy(fsavl);
|
2021-01-14 20:53:09 +03:00
|
|
|
fnvlist_free(fss);
|
2013-05-25 06:06:23 +04:00
|
|
|
fnvlist_free(sdd.snapholds);
|
|
|
|
|
2010-08-27 01:24:34 +04:00
|
|
|
if (sdd.cleanup_fd != -1)
|
|
|
|
VERIFY(0 == close(sdd.cleanup_fd));
|
2010-05-29 00:45:14 +04:00
|
|
|
return (err);
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
|
|
|
|
2020-06-15 21:30:37 +03:00
|
|
|
static zfs_handle_t *
|
Implement Redacted Send/Receive
2019-06-19 19:48:13 +03:00
|
|
|
name_to_dir_handle(libzfs_handle_t *hdl, const char *snapname)
|
|
|
|
{
|
|
|
|
char dirname[ZFS_MAX_DATASET_NAME_LEN];
|
|
|
|
(void) strlcpy(dirname, snapname, ZFS_MAX_DATASET_NAME_LEN);
|
|
|
|
char *c = strchr(dirname, '@');
|
|
|
|
if (c != NULL)
|
|
|
|
*c = '\0';
|
|
|
|
return (zfs_open(hdl, dirname, ZFS_TYPE_DATASET));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Returns B_TRUE if earlier is an earlier snapshot in later's timeline; either
|
|
|
|
* an earlier snapshot in the same filesystem, or a snapshot before later's
|
|
|
|
* origin, or its origin's origin, etc.
|
|
|
|
*/
|
|
|
|
static boolean_t
|
|
|
|
snapshot_is_before(zfs_handle_t *earlier, zfs_handle_t *later)
|
|
|
|
{
|
|
|
|
boolean_t ret;
|
|
|
|
uint64_t later_txg =
|
|
|
|
(later->zfs_type == ZFS_TYPE_FILESYSTEM ||
|
|
|
|
later->zfs_type == ZFS_TYPE_VOLUME ?
|
|
|
|
UINT64_MAX : zfs_prop_get_int(later, ZFS_PROP_CREATETXG));
|
|
|
|
uint64_t earlier_txg = zfs_prop_get_int(earlier, ZFS_PROP_CREATETXG);
|
|
|
|
|
|
|
|
if (earlier_txg >= later_txg)
|
|
|
|
return (B_FALSE);
|
|
|
|
|
|
|
|
zfs_handle_t *earlier_dir = name_to_dir_handle(earlier->zfs_hdl,
|
|
|
|
earlier->zfs_name);
|
|
|
|
zfs_handle_t *later_dir = name_to_dir_handle(later->zfs_hdl,
|
|
|
|
later->zfs_name);
|
|
|
|
|
|
|
|
if (strcmp(earlier_dir->zfs_name, later_dir->zfs_name) == 0) {
|
|
|
|
zfs_close(earlier_dir);
|
|
|
|
zfs_close(later_dir);
|
|
|
|
return (B_TRUE);
|
|
|
|
}
|
|
|
|
|
|
|
|
char clonename[ZFS_MAX_DATASET_NAME_LEN];
|
|
|
|
if (zfs_prop_get(later_dir, ZFS_PROP_ORIGIN, clonename,
|
|
|
|
ZFS_MAX_DATASET_NAME_LEN, NULL, NULL, 0, B_TRUE) != 0) {
|
|
|
|
zfs_close(earlier_dir);
|
|
|
|
zfs_close(later_dir);
|
|
|
|
return (B_FALSE);
|
|
|
|
}
|
|
|
|
|
|
|
|
zfs_handle_t *origin = zfs_open(earlier->zfs_hdl, clonename,
|
|
|
|
ZFS_TYPE_DATASET);
|
|
|
|
uint64_t origin_txg = zfs_prop_get_int(origin, ZFS_PROP_CREATETXG);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If "earlier" is exactly the origin, then
|
|
|
|
* snapshot_is_before(earlier, origin) will return false (because
|
|
|
|
* they're the same).
|
|
|
|
*/
|
|
|
|
if (origin_txg == earlier_txg &&
|
|
|
|
strcmp(origin->zfs_name, earlier->zfs_name) == 0) {
|
|
|
|
zfs_close(earlier_dir);
|
|
|
|
zfs_close(later_dir);
|
|
|
|
zfs_close(origin);
|
|
|
|
return (B_TRUE);
|
|
|
|
}
|
|
|
|
zfs_close(earlier_dir);
|
|
|
|
zfs_close(later_dir);
|
|
|
|
|
|
|
|
ret = snapshot_is_before(earlier, origin);
|
|
|
|
zfs_close(origin);
|
|
|
|
return (ret);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The "zhp" argument is the handle of the dataset to send (typically a
|
|
|
|
* snapshot). The "from" argument is the full name of the snapshot or
|
|
|
|
* bookmark that is the incremental source.
|
|
|
|
*/
|
2013-12-12 02:33:41 +04:00
|
|
|
int
|
Implement Redacted Send/Receive
2019-06-19 19:48:13 +03:00
|
|
|
zfs_send_one(zfs_handle_t *zhp, const char *from, int fd, sendflags_t *flags,
|
|
|
|
const char *redactbook)
|
2013-12-12 02:33:41 +04:00
|
|
|
{
|
Implement Redacted Send/Receive
2019-06-19 19:48:13 +03:00
|
|
|
int err;
|
2013-12-12 02:33:41 +04:00
|
|
|
libzfs_handle_t *hdl = zhp->zfs_hdl;
|
2020-03-18 22:54:12 +03:00
|
|
|
char *name = zhp->zfs_name;
|
Implement Redacted Send/Receive
2019-06-19 19:48:13 +03:00
|
|
|
int orig_fd = fd;
|
2020-04-23 20:06:57 +03:00
|
|
|
pthread_t ptid;
|
Implement Redacted Send/Receive
2019-06-19 19:48:13 +03:00
|
|
|
progress_arg_t pa = { 0 };
|
|
|
|
|
2013-12-12 02:33:41 +04:00
|
|
|
char errbuf[1024];
|
Implement Redacted Send/Receive
2019-06-19 19:48:13 +03:00
|
|
|
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
|
2020-03-18 22:54:12 +03:00
|
|
|
"warning: cannot send '%s'"), name);
|
2017-09-09 01:24:31 +03:00
|
|
|
|
Implement Redacted Send/Receive
2019-06-19 19:48:13 +03:00
|
|
|
if (from != NULL && strchr(from, '@')) {
|
|
|
|
zfs_handle_t *from_zhp = zfs_open(hdl, from,
|
|
|
|
ZFS_TYPE_DATASET);
|
2019-07-09 01:10:23 +03:00
|
|
|
if (from_zhp == NULL)
|
|
|
|
return (-1);
|
Implement Redacted Send/Receive
2019-06-19 19:48:13 +03:00
|
|
|
if (!snapshot_is_before(from_zhp, zhp)) {
|
|
|
|
zfs_close(from_zhp);
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"not an earlier snapshot from the same fs"));
|
|
|
|
return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf));
|
|
|
|
}
|
|
|
|
zfs_close(from_zhp);
|
|
|
|
}
|
2017-09-09 01:24:31 +03:00
|
|
|
|
2020-03-18 22:54:12 +03:00
|
|
|
if (redactbook != NULL) {
|
|
|
|
char bookname[ZFS_MAX_DATASET_NAME_LEN];
|
|
|
|
nvlist_t *redact_snaps;
|
|
|
|
zfs_handle_t *book_zhp;
|
|
|
|
char *at, *pound;
|
|
|
|
int dsnamelen;
|
|
|
|
|
|
|
|
pound = strchr(redactbook, '#');
|
|
|
|
if (pound != NULL)
|
|
|
|
redactbook = pound + 1;
|
|
|
|
at = strchr(name, '@');
|
|
|
|
if (at == NULL) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"cannot do a redacted send to a filesystem"));
|
|
|
|
return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
|
|
|
|
}
|
|
|
|
dsnamelen = at - name;
|
|
|
|
if (snprintf(bookname, sizeof (bookname), "%.*s#%s",
|
|
|
|
dsnamelen, name, redactbook)
|
|
|
|
>= sizeof (bookname)) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"invalid bookmark name"));
|
|
|
|
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
|
|
|
|
}
|
|
|
|
book_zhp = zfs_open(hdl, bookname, ZFS_TYPE_BOOKMARK);
|
|
|
|
if (book_zhp == NULL)
|
|
|
|
return (-1);
|
|
|
|
if (nvlist_lookup_nvlist(book_zhp->zfs_props,
|
|
|
|
zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS),
|
|
|
|
&redact_snaps) != 0 || redact_snaps == NULL) {
|
|
|
|
zfs_close(book_zhp);
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"not a redaction bookmark"));
|
|
|
|
return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
|
|
|
|
}
|
|
|
|
zfs_close(book_zhp);
|
|
|
|
}
|
|
|
|
|
Implement Redacted Send/Receive
2019-06-19 19:48:13 +03:00
|
|
|
/*
|
|
|
|
* Send fs properties
|
|
|
|
*/
|
|
|
|
if (flags->props || flags->holds || flags->backup) {
|
|
|
|
/*
|
|
|
|
* Note: the header generated by send_prelim_records()
|
|
|
|
* assumes that the incremental source is in the same
|
|
|
|
* filesystem/volume as the target (which is a requirement
|
|
|
|
* when doing "zfs send -R"). But that isn't always the
|
|
|
|
* case here (e.g. send from snap in origin, or send from
|
|
|
|
* bookmark). We pass from=NULL, which will omit this
|
|
|
|
* information from the prelim records; it isn't used
|
|
|
|
* when receiving this type of stream.
|
|
|
|
*/
|
|
|
|
err = send_prelim_records(zhp, NULL, fd, B_TRUE, B_FALSE,
|
|
|
|
flags->verbosity > 0, flags->dryrun, flags->raw,
|
2021-04-11 22:05:35 +03:00
|
|
|
flags->replicate, B_FALSE, flags->backup, flags->holds,
|
Implement Redacted Send/Receive
2019-06-19 19:48:13 +03:00
|
|
|
flags->props, flags->doall, NULL, NULL);
|
|
|
|
if (err != 0)
|
|
|
|
return (err);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Perform size estimate if verbose was specified.
|
|
|
|
*/
|
|
|
|
if (flags->verbosity != 0) {
|
|
|
|
err = estimate_size(zhp, from, fd, flags, 0, 0, 0, redactbook,
|
|
|
|
errbuf);
|
|
|
|
if (err != 0)
|
|
|
|
return (err);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (flags->dryrun)
|
|
|
|
return (0);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If progress reporting is requested, spawn a new thread to poll
|
|
|
|
* ZFS_IOC_SEND_PROGRESS at a regular interval.
|
|
|
|
*/
|
|
|
|
if (flags->progress) {
|
|
|
|
pa.pa_zhp = zhp;
|
|
|
|
pa.pa_fd = fd;
|
|
|
|
pa.pa_parsable = flags->parsable;
|
|
|
|
pa.pa_estimate = B_FALSE;
|
|
|
|
pa.pa_verbosity = flags->verbosity;
|
|
|
|
|
|
|
|
err = pthread_create(&ptid, NULL,
|
|
|
|
send_progress_thread, &pa);
|
|
|
|
if (err != 0) {
|
|
|
|
zfs_error_aux(zhp->zfs_hdl, strerror(errno));
|
|
|
|
return (zfs_error(zhp->zfs_hdl,
|
|
|
|
EZFS_THREADCREATEFAILED, errbuf));
|
2017-09-09 01:24:31 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-03-18 22:54:12 +03:00
|
|
|
err = lzc_send_redacted(name, from, fd,
|
Implement Redacted Send/Receive
2019-06-19 19:48:13 +03:00
|
|
|
lzc_flags_from_sendflags(flags), redactbook);
|
2017-09-09 01:24:31 +03:00
|
|
|
|
Implement Redacted Send/Receive
Redacted send/receive allows users to send subsets of their data to
a target system. One possible use case for this feature is to not
transmit sensitive information to a data warehousing, test/dev, or
analytics environment. Another is to save space by not replicating
unimportant data within a given dataset, for example in backup tools
like zrepl.
Redacted send/receive is a three-stage process. First, a clone (or
clones) is made of the snapshot to be sent to the target. In this
clone (or clones), all unnecessary or unwanted data is removed or
modified. This clone is then snapshotted to create the "redaction
snapshot" (or snapshots). Second, the new zfs redact command is used
to create a redaction bookmark. The redaction bookmark stores the
list of blocks in a snapshot that were modified by the redaction
snapshot(s). Finally, the redaction bookmark is passed as a parameter
to zfs send. When sending to the snapshot that was redacted, the
redaction bookmark is used to filter out blocks that contain sensitive
or unwanted information, and those blocks are not included in the send
stream. When sending from the redaction bookmark, the blocks it
contains are considered as candidate blocks in addition to those
blocks in the destination snapshot that were modified since the
creation_txg of the redaction bookmark. This step is necessary to
allow the target to rehydrate data in the case where some blocks are
accidentally or unnecessarily modified in the redaction snapshot.
The changes to bookmarks to enable fast space estimation involve
adding deadlists to bookmarks. There is also logic to manage the
life cycles of these deadlists.
The new size estimation process operates in cases where previously
an accurate estimate could not be provided. In those cases, a send
is performed where no data blocks are read, reducing the runtime
significantly and providing a byte-accurate size estimate.
Reviewed-by: Dan Kimmel <dan.kimmel@delphix.com>
Reviewed-by: Matt Ahrens <mahrens@delphix.com>
Reviewed-by: Prashanth Sreenivasa <pks@delphix.com>
Reviewed-by: John Kennedy <john.kennedy@delphix.com>
Reviewed-by: George Wilson <george.wilson@delphix.com>
Reviewed-by: Chris Williamson <chris.williamson@delphix.com>
Reviewed-by: Pavel Zhakarov <pavel.zakharov@delphix.com>
Reviewed-by: Sebastien Roy <sebastien.roy@delphix.com>
Reviewed-by: Prakash Surya <prakash.surya@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Paul Dagnelie <pcd@delphix.com>
Closes #7958
2019-06-19 19:48:13 +03:00
|
|
|
if (flags->progress) {
|
|
|
|
void *status = NULL;
|
|
|
|
if (err != 0)
|
|
|
|
(void) pthread_cancel(ptid);
|
|
|
|
(void) pthread_join(ptid, &status);
|
|
|
|
int error = (int)(uintptr_t)status;
|
|
|
|
if (error != 0 && status != PTHREAD_CANCELED) {
|
|
|
|
char errbuf[1024];
|
|
|
|
(void) snprintf(errbuf, sizeof (errbuf),
|
|
|
|
dgettext(TEXT_DOMAIN, "progress thread exited "
|
|
|
|
"nonzero"));
|
|
|
|
return (zfs_standard_error(hdl, error, errbuf));
|
|
|
|
}
|
|
|
|
}
|
2013-12-12 02:33:41 +04:00
|
|
|
|
Implement Redacted Send/Receive
Redacted send/receive allows users to send subsets of their data to
a target system. One possible use case for this feature is to not
transmit sensitive information to a data warehousing, test/dev, or
analytics environment. Another is to save space by not replicating
unimportant data within a given dataset, for example in backup tools
like zrepl.
Redacted send/receive is a three-stage process. First, a clone (or
clones) is made of the snapshot to be sent to the target. In this
clone (or clones), all unnecessary or unwanted data is removed or
modified. This clone is then snapshotted to create the "redaction
snapshot" (or snapshots). Second, the new zfs redact command is used
to create a redaction bookmark. The redaction bookmark stores the
list of blocks in a snapshot that were modified by the redaction
snapshot(s). Finally, the redaction bookmark is passed as a parameter
to zfs send. When sending to the snapshot that was redacted, the
redaction bookmark is used to filter out blocks that contain sensitive
or unwanted information, and those blocks are not included in the send
stream. When sending from the redaction bookmark, the blocks it
contains are considered as candidate blocks in addition to those
blocks in the destination snapshot that were modified since the
creation_txg of the redaction bookmark. This step is necessary to
allow the target to rehydrate data in the case where some blocks are
accidentally or unnecessarily modified in the redaction snapshot.
The changes to bookmarks to enable fast space estimation involve
adding deadlists to bookmarks. There is also logic to manage the
life cycles of these deadlists.
The new size estimation process operates in cases where previously
an accurate estimate could not be provided. In those cases, a send
is performed where no data blocks are read, reducing the runtime
significantly and providing a byte-accurate size estimate.
Reviewed-by: Dan Kimmel <dan.kimmel@delphix.com>
Reviewed-by: Matt Ahrens <mahrens@delphix.com>
Reviewed-by: Prashanth Sreenivasa <pks@delphix.com>
Reviewed-by: John Kennedy <john.kennedy@delphix.com>
Reviewed-by: George Wilson <george.wilson@delphix.com>
Reviewed-by: Chris Williamson <chris.williamson@delphix.com>
Reviewed-by: Pavel Zhakarov <pavel.zakharov@delphix.com>
Reviewed-by: Sebastien Roy <sebastien.roy@delphix.com>
Reviewed-by: Prakash Surya <prakash.surya@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Paul Dagnelie <pcd@delphix.com>
Closes #7958
2019-06-19 19:48:13 +03:00
|
|
|
if (flags->props || flags->holds || flags->backup) {
|
|
|
|
/* Write the final end record. */
|
|
|
|
err = send_conclusion_record(orig_fd, NULL);
|
|
|
|
if (err != 0)
|
|
|
|
return (zfs_standard_error(hdl, err, errbuf));
|
|
|
|
}
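
	/*
	 * Map the errno left behind by a failed send onto a more descriptive
	 * libzfs error; anything unrecognized falls through to
	 * zfs_standard_error().
	 */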
	if (err != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "not an earlier snapshot from the same fs"));
			return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf));

		case ENOENT:
		case ESRCH:
			if (lzc_exists(name)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "incremental source (%s) does not exist"),
				    from);
			}
			return (zfs_error(hdl, EZFS_NOENT, errbuf));

		case EACCES:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "dataset key must be loaded"));
			return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf));

		case EBUSY:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "target is busy; if a filesystem, "
			    "it must not be mounted"));
			return (zfs_error(hdl, EZFS_BUSY, errbuf));

		case EDQUOT:
		case EFAULT:
		case EFBIG:
		case EINVAL:
		case EIO:
		case ENOLINK:
		case ENOSPC:
		case ENOSTR:
		case ENXIO:
		case EPIPE:
		case ERANGE:
		case EROFS:
			zfs_error_aux(hdl, strerror(errno));
			return (zfs_error(hdl, EZFS_BADBACKUP, errbuf));

		default:
			return (zfs_standard_error(hdl, errno, errbuf));
		}
	}
	return (err != 0);
}

/*
 * Routines specific to "zfs recv"
 */
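
/*
 * Read exactly 'ilen' bytes from the stream, failing if it ends early.
 * When 'zc' is non-NULL, fold the bytes into the running fletcher-4
 * checksum, using the byteswapped variant when the stream has the
 * opposite endianness.
 */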
static int
recv_read(libzfs_handle_t *hdl, int fd, void *buf, int ilen,
    boolean_t byteswap, zio_cksum_t *zc)
{
	char *cp = buf;
	int rv;
	int len = ilen;

	do {
		rv = read(fd, cp, len);
		cp += rv;
		len -= rv;
	} while (rv > 0);

	if (rv < 0 || len != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "failed to read from stream"));
		return (zfs_error(hdl, EZFS_BADSTREAM, dgettext(TEXT_DOMAIN,
		    "cannot receive")));
	}

	if (zc) {
		if (byteswap)
			fletcher_4_incremental_byteswap(buf, ilen, zc);
		else
			fletcher_4_incremental_native(buf, ilen, zc);
	}
	return (0);
}
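
/*
 * Read a packed nvlist of 'len' bytes from the stream and unpack it into
 * '*nvp', rejecting anything larger than the handle's libzfs_max_nvlist
 * limit.
 */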
static int
recv_read_nvlist(libzfs_handle_t *hdl, int fd, int len, nvlist_t **nvp,
    boolean_t byteswap, zio_cksum_t *zc)
{
	char *buf;
	int err;

	buf = zfs_alloc(hdl, len);
	if (buf == NULL)
		return (ENOMEM);

	if (len > hdl->libzfs_max_nvlist) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "nvlist too large"));
		free(buf);
		return (ENOMEM);
	}

	err = recv_read(hdl, fd, buf, len, byteswap, zc);
	if (err != 0) {
		free(buf);
		return (err);
	}

	err = nvlist_unpack(buf, len, nvp, 0);
	free(buf);
	if (err != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
		    "stream (malformed nvlist)"));
		return (EINVAL);
	}
	return (0);
}

/*
 * Returns the grand origin (origin of origin of origin...) of a given handle.
 * If this dataset is not a clone, it simply returns a copy of the original
 * handle.
 */
static zfs_handle_t *
recv_open_grand_origin(zfs_handle_t *zhp)
{
	char origin[ZFS_MAX_DATASET_NAME_LEN];
	zprop_source_t src;
	zfs_handle_t *ozhp = zfs_handle_dup(zhp);

	while (ozhp != NULL) {
		if (zfs_prop_get(ozhp, ZFS_PROP_ORIGIN, origin,
		    sizeof (origin), &src, NULL, 0, B_FALSE) != 0)
			break;

		(void) zfs_close(ozhp);
		ozhp = zfs_open(zhp->zfs_hdl, origin, ZFS_TYPE_FILESYSTEM);
	}

	return (ozhp);
}

static int
recv_rename_impl(zfs_handle_t *zhp, const char *name, const char *newname)
{
	int err;
	zfs_handle_t *ozhp = NULL;

	/*
	 * Attempt to rename the dataset. If it fails with EACCES we have
	 * attempted to rename the dataset outside of its encryption root.
	 * Force the dataset to become an encryption root and try again.
	 */
	err = lzc_rename(name, newname);
	if (err == EACCES) {
		ozhp = recv_open_grand_origin(zhp);
		if (ozhp == NULL) {
			err = ENOENT;
			goto out;
		}

		err = lzc_change_key(ozhp->zfs_name, DCP_CMD_FORCE_NEW_KEY,
		    NULL, NULL, 0);
		if (err != 0)
			goto out;

		err = lzc_rename(name, newname);
	}

out:
	if (ozhp != NULL)
		zfs_close(ozhp);
	return (err);
}
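
/*
 * Rename the dataset 'name' to 'tryname' if one was given; if that fails
 * (or no tryname was supplied), fall back to a generated temporary
 * "recv-<pid>-<seq>" name under the same parent, reporting the result in
 * 'newname'.
 */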
static int
recv_rename(libzfs_handle_t *hdl, const char *name, const char *tryname,
    int baselen, char *newname, recvflags_t *flags)
{
	static int seq;
	int err;
	prop_changelist_t *clp = NULL;
	zfs_handle_t *zhp = NULL;

	zhp = zfs_open(hdl, name, ZFS_TYPE_DATASET);
	if (zhp == NULL) {
		err = -1;
		goto out;
	}
	clp = changelist_gather(zhp, ZFS_PROP_NAME, 0,
	    flags->force ? MS_FORCE : 0);
	if (clp == NULL) {
		err = -1;
		goto out;
	}
	err = changelist_prefix(clp);
	if (err)
		goto out;

	if (tryname) {
		(void) strcpy(newname, tryname);
		if (flags->verbose) {
			(void) printf("attempting rename %s to %s\n",
			    name, newname);
		}
		err = recv_rename_impl(zhp, name, newname);
		if (err == 0)
			changelist_rename(clp, name, tryname);
	} else {
		err = ENOENT;
	}

	if (err != 0 && strncmp(name + baselen, "recv-", 5) != 0) {
		seq++;

		(void) snprintf(newname, ZFS_MAX_DATASET_NAME_LEN,
		    "%.*srecv-%u-%u", baselen, name, getpid(), seq);

		if (flags->verbose) {
			(void) printf("failed - trying rename %s to %s\n",
			    name, newname);
		}
		err = recv_rename_impl(zhp, name, newname);
		if (err == 0)
			changelist_rename(clp, name, newname);
		if (err && flags->verbose) {
			(void) printf("failed (%u) - "
			    "will try again on next pass\n", errno);
		}
		err = EAGAIN;
	} else if (flags->verbose) {
		if (err == 0)
			(void) printf("success\n");
		else
			(void) printf("failed (%u)\n", errno);
	}

	(void) changelist_postfix(clp);

out:
	if (clp != NULL)
		changelist_free(clp);
	if (zhp != NULL)
		zfs_close(zhp);

	return (err);
}
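
/*
 * Promote 'fsname' over 'origin_fsname'.  If the promotion would pull the
 * dataset out of its encryption root, force a new key on the grand origin
 * and retry.
 */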
static int
recv_promote(libzfs_handle_t *hdl, const char *fsname,
    const char *origin_fsname, recvflags_t *flags)
{
	int err;
	zfs_cmd_t zc = {"\0"};
	zfs_handle_t *zhp = NULL, *ozhp = NULL;

	if (flags->verbose)
		(void) printf("promoting %s\n", fsname);

	(void) strlcpy(zc.zc_value, origin_fsname, sizeof (zc.zc_value));
	(void) strlcpy(zc.zc_name, fsname, sizeof (zc.zc_name));

	/*
	 * Attempt to promote the dataset. If it fails with EACCES the
	 * promotion would cause this dataset to leave its encryption root.
	 * Force the origin to become an encryption root and try again.
	 */
	err = zfs_ioctl(hdl, ZFS_IOC_PROMOTE, &zc);
	if (err == EACCES) {
		zhp = zfs_open(hdl, fsname, ZFS_TYPE_DATASET);
		if (zhp == NULL) {
			err = -1;
			goto out;
		}

		ozhp = recv_open_grand_origin(zhp);
		if (ozhp == NULL) {
			err = -1;
			goto out;
		}

		err = lzc_change_key(ozhp->zfs_name, DCP_CMD_FORCE_NEW_KEY,
		    NULL, NULL, 0);
		if (err != 0)
			goto out;

		err = zfs_ioctl(hdl, ZFS_IOC_PROMOTE, &zc);
	}

out:
	if (zhp != NULL)
		zfs_close(zhp);
	if (ozhp != NULL)
		zfs_close(ozhp);

	return (err);
}
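
/*
 * Destroy 'name', deferring snapshot destruction when the pool supports
 * user holds.  If the destroy fails (or was only deferred), rename the
 * dataset out of the way instead and report the new name in 'newname'.
 */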
static int
recv_destroy(libzfs_handle_t *hdl, const char *name, int baselen,
    char *newname, recvflags_t *flags)
{
	int err = 0;
	prop_changelist_t *clp;
	zfs_handle_t *zhp;
	boolean_t defer = B_FALSE;
	int spa_version;

	zhp = zfs_open(hdl, name, ZFS_TYPE_DATASET);
	if (zhp == NULL)
		return (-1);
	clp = changelist_gather(zhp, ZFS_PROP_NAME, 0,
	    flags->force ? MS_FORCE : 0);
	if (zfs_get_type(zhp) == ZFS_TYPE_SNAPSHOT &&
	    zfs_spa_version(zhp, &spa_version) == 0 &&
	    spa_version >= SPA_VERSION_USERREFS)
		defer = B_TRUE;
	zfs_close(zhp);
	if (clp == NULL)
		return (-1);
	err = changelist_prefix(clp);
	if (err)
		return (err);

	if (flags->verbose)
		(void) printf("attempting destroy %s\n", name);
	if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT) {
		nvlist_t *nv = fnvlist_alloc();
		fnvlist_add_boolean(nv, name);
		err = lzc_destroy_snaps(nv, defer, NULL);
		fnvlist_free(nv);
	} else {
		err = lzc_destroy(name);
	}
	if (err == 0) {
		if (flags->verbose)
			(void) printf("success\n");
		changelist_remove(clp, name);
	}

	(void) changelist_postfix(clp);
	changelist_free(clp);

	/*
	 * Deferred destroy might destroy the snapshot or only mark it to be
	 * destroyed later, and it returns success in either case.
	 */
	if (err != 0 || (defer && zfs_dataset_exists(hdl, name,
	    ZFS_TYPE_SNAPSHOT))) {
		err = recv_rename(hdl, name, NULL, baselen, newname, flags);
	}

	return (err);
}
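
/* State shared with guid_to_name_cb() while searching for a matching guid. */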
typedef struct guid_to_name_data {
	uint64_t guid;
	boolean_t bookmark_ok;
	char *name;
	char *skip;
	uint64_t *redact_snap_guids;
	uint64_t num_redact_snaps;
} guid_to_name_data_t;
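
/*
 * Check whether the redaction snapshot guids recorded in bookmark 'zhp'
 * match, as a set, the guids the caller is searching for.
 */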
static boolean_t
redact_snaps_match(zfs_handle_t *zhp, guid_to_name_data_t *gtnd)
{
	uint64_t *bmark_snaps;
	uint_t bmark_num_snaps;
	nvlist_t *nvl;
	if (zhp->zfs_type != ZFS_TYPE_BOOKMARK)
		return (B_FALSE);

	nvl = fnvlist_lookup_nvlist(zhp->zfs_props,
	    zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS));
	bmark_snaps = fnvlist_lookup_uint64_array(nvl, ZPROP_VALUE,
	    &bmark_num_snaps);
	if (bmark_num_snaps != gtnd->num_redact_snaps)
		return (B_FALSE);
	int i = 0;
	for (; i < bmark_num_snaps; i++) {
		int j = 0;
		for (; j < bmark_num_snaps; j++) {
			if (bmark_snaps[i] == gtnd->redact_snap_guids[j])
				break;
		}
		if (j == bmark_num_snaps)
			break;
	}
	return (i == bmark_num_snaps);
}
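
/*
 * Iteration callback for guid_to_name_redact_snaps(): skip the subtree named
 * by gtnd->skip, and if this dataset or bookmark carries the guid we are
 * looking for, record its name and stop the walk by returning EEXIST.
 */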
static int
guid_to_name_cb(zfs_handle_t *zhp, void *arg)
{
	guid_to_name_data_t *gtnd = arg;
	const char *slash;
	int err;

	if (gtnd->skip != NULL &&
	    (slash = strrchr(zhp->zfs_name, '/')) != NULL &&
	    strcmp(slash + 1, gtnd->skip) == 0) {
		zfs_close(zhp);
		return (0);
	}

	if (zfs_prop_get_int(zhp, ZFS_PROP_GUID) == gtnd->guid &&
	    (gtnd->num_redact_snaps == -1 || redact_snaps_match(zhp, gtnd))) {
		(void) strcpy(gtnd->name, zhp->zfs_name);
		zfs_close(zhp);
		return (EEXIST);
	}

	err = zfs_iter_children(zhp, guid_to_name_cb, gtnd);
	if (err != EEXIST && gtnd->bookmark_ok)
		err = zfs_iter_bookmarks(zhp, guid_to_name_cb, gtnd);
	zfs_close(zhp);
	return (err);
}

/*
 * Attempt to find the local dataset associated with this guid. In the case of
 * multiple matches, we attempt to find the "best" match by searching
 * progressively larger portions of the hierarchy. This allows one to send a
 * tree of datasets individually and guarantee that we will find the source
 * guid within that hierarchy, even if there are multiple matches elsewhere.
 *
 * If num_redact_snaps is not -1, we attempt to find a redaction bookmark with
 * the specified number of redaction snapshots. If num_redact_snaps isn't 0 or
 * -1, then redact_snap_guids will be an array of the guids of the snapshots
 * the redaction bookmark was created with. If num_redact_snaps is -1, then we
 * will attempt to find a snapshot or bookmark (if bookmark_ok is passed) with
 * the given guid. Note that a redaction bookmark can be returned if
 * num_redact_snaps == -1.
 */
|
2008-11-20 23:01:55 +03:00
|
|
|
static int
|
Implement Redacted Send/Receive
Redacted send/receive allows users to send subsets of their data to
a target system. One possible use case for this feature is to not
transmit sensitive information to a data warehousing, test/dev, or
analytics environment. Another is to save space by not replicating
unimportant data within a given dataset, for example in backup tools
like zrepl.
Redacted send/receive is a three-stage process. First, a clone (or
clones) is made of the snapshot to be sent to the target. In this
clone (or clones), all unnecessary or unwanted data is removed or
modified. This clone is then snapshotted to create the "redaction
snapshot" (or snapshots). Second, the new zfs redact command is used
to create a redaction bookmark. The redaction bookmark stores the
list of blocks in a snapshot that were modified by the redaction
snapshot(s). Finally, the redaction bookmark is passed as a parameter
to zfs send. When sending to the snapshot that was redacted, the
redaction bookmark is used to filter out blocks that contain sensitive
or unwanted information, and those blocks are not included in the send
stream. When sending from the redaction bookmark, the blocks it
contains are considered as candidate blocks in addition to those
blocks in the destination snapshot that were modified since the
creation_txg of the redaction bookmark. This step is necessary to
allow the target to rehydrate data in the case where some blocks are
accidentally or unnecessarily modified in the redaction snapshot.
The changes to bookmarks to enable fast space estimation involve
adding deadlists to bookmarks. There is also logic to manage the
life cycles of these deadlists.
The new size estimation process operates in cases where previously
an accurate estimate could not be provided. In those cases, a send
is performed where no data blocks are read, reducing the runtime
significantly and providing a byte-accurate size estimate.
Reviewed-by: Dan Kimmel <dan.kimmel@delphix.com>
Reviewed-by: Matt Ahrens <mahrens@delphix.com>
Reviewed-by: Prashanth Sreenivasa <pks@delphix.com>
Reviewed-by: John Kennedy <john.kennedy@delphix.com>
Reviewed-by: George Wilson <george.wilson@delphix.com>
Reviewed-by: Chris Williamson <chris.williamson@delphix.com>
Reviewed-by: Pavel Zhakarov <pavel.zakharov@delphix.com>
Reviewed-by: Sebastien Roy <sebastien.roy@delphix.com>
Reviewed-by: Prakash Surya <prakash.surya@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Paul Dagnelie <pcd@delphix.com>
Closes #7958
2019-06-19 19:48:13 +03:00
|
|
|
guid_to_name_redact_snaps(libzfs_handle_t *hdl, const char *parent,
|
|
|
|
uint64_t guid, boolean_t bookmark_ok, uint64_t *redact_snap_guids,
|
|
|
|
uint64_t num_redact_snaps, char *name)
|
2008-11-20 23:01:55 +03:00
|
|
|
{
|
2016-06-16 00:28:36 +03:00
|
|
|
char pname[ZFS_MAX_DATASET_NAME_LEN];
|
2008-11-20 23:01:55 +03:00
|
|
|
guid_to_name_data_t gtnd;
|
|
|
|
|
|
|
|
gtnd.guid = guid;
|
2016-01-07 00:22:48 +03:00
|
|
|
gtnd.bookmark_ok = bookmark_ok;
|
2008-11-20 23:01:55 +03:00
|
|
|
gtnd.name = name;
|
2011-11-17 22:14:36 +04:00
|
|
|
gtnd.skip = NULL;
|
Implement Redacted Send/Receive
Redacted send/receive allows users to send subsets of their data to
a target system. One possible use case for this feature is to not
transmit sensitive information to a data warehousing, test/dev, or
analytics environment. Another is to save space by not replicating
unimportant data within a given dataset, for example in backup tools
like zrepl.
Redacted send/receive is a three-stage process. First, a clone (or
clones) is made of the snapshot to be sent to the target. In this
clone (or clones), all unnecessary or unwanted data is removed or
modified. This clone is then snapshotted to create the "redaction
snapshot" (or snapshots). Second, the new zfs redact command is used
to create a redaction bookmark. The redaction bookmark stores the
list of blocks in a snapshot that were modified by the redaction
snapshot(s). Finally, the redaction bookmark is passed as a parameter
to zfs send. When sending to the snapshot that was redacted, the
redaction bookmark is used to filter out blocks that contain sensitive
or unwanted information, and those blocks are not included in the send
stream. When sending from the redaction bookmark, the blocks it
contains are considered as candidate blocks in addition to those
blocks in the destination snapshot that were modified since the
creation_txg of the redaction bookmark. This step is necessary to
allow the target to rehydrate data in the case where some blocks are
accidentally or unnecessarily modified in the redaction snapshot.
The changes to bookmarks to enable fast space estimation involve
adding deadlists to bookmarks. There is also logic to manage the
life cycles of these deadlists.
The new size estimation process operates in cases where previously
an accurate estimate could not be provided. In those cases, a send
is performed where no data blocks are read, reducing the runtime
significantly and providing a byte-accurate size estimate.
Reviewed-by: Dan Kimmel <dan.kimmel@delphix.com>
Reviewed-by: Matt Ahrens <mahrens@delphix.com>
Reviewed-by: Prashanth Sreenivasa <pks@delphix.com>
Reviewed-by: John Kennedy <john.kennedy@delphix.com>
Reviewed-by: George Wilson <george.wilson@delphix.com>
Reviewed-by: Chris Williamson <chris.williamson@delphix.com>
Reviewed-by: Pavel Zhakarov <pavel.zakharov@delphix.com>
Reviewed-by: Sebastien Roy <sebastien.roy@delphix.com>
Reviewed-by: Prakash Surya <prakash.surya@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Paul Dagnelie <pcd@delphix.com>
Closes #7958
2019-06-19 19:48:13 +03:00
|
|
|
gtnd.redact_snap_guids = redact_snap_guids;
|
|
|
|
gtnd.num_redact_snaps = num_redact_snaps;
|
2008-11-20 23:01:55 +03:00
|
|
|
|
2011-11-17 22:14:36 +04:00
|
|
|
/*
|
2016-01-07 00:22:48 +03:00
|
|
|
* Search progressively larger portions of the hierarchy, starting
|
|
|
|
* with the filesystem specified by 'parent'. This will
|
2011-11-17 22:14:36 +04:00
|
|
|
* select the "most local" version of the origin snapshot in the case
|
|
|
|
* that there are multiple matching snapshots in the system.
|
|
|
|
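* For example, with a parent of 'tank/a/b@snap' the search tries tank/a/b,
* then tank/a, then tank.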
*/
|
2016-01-07 00:22:48 +03:00
|
|
|
(void) strlcpy(pname, parent, sizeof (pname));
|
|
|
|
char *cp = strrchr(pname, '@');
|
|
|
|
if (cp == NULL)
|
|
|
|
cp = strchr(pname, '\0');
|
|
|
|
for (; cp != NULL; cp = strrchr(pname, '/')) {
|
2011-11-17 22:14:36 +04:00
|
|
|
/* Chop off the last component and open the parent */
|
2008-11-20 23:01:55 +03:00
|
|
|
*cp = '\0';
|
2016-01-07 00:22:48 +03:00
|
|
|
zfs_handle_t *zhp = make_dataset_handle(hdl, pname);
|
2011-11-17 22:14:36 +04:00
|
|
|
|
|
|
|
if (zhp == NULL)
|
|
|
|
continue;
|
2016-01-07 00:22:48 +03:00
|
|
|
int err = guid_to_name_cb(zfs_handle_dup(zhp), &gtnd);
|
|
|
|
if (err != EEXIST)
|
|
|
|
err = zfs_iter_children(zhp, guid_to_name_cb, &gtnd);
|
|
|
|
if (err != EEXIST && bookmark_ok)
|
|
|
|
err = zfs_iter_bookmarks(zhp, guid_to_name_cb, &gtnd);
|
2008-11-20 23:01:55 +03:00
|
|
|
zfs_close(zhp);
|
2011-11-17 22:14:36 +04:00
|
|
|
if (err == EEXIST)
|
|
|
|
return (0);
|
2008-11-20 23:01:55 +03:00
|
|
|
|
2011-11-17 22:14:36 +04:00
|
|
|
/*
|
2016-01-07 00:22:48 +03:00
|
|
|
* Remember the last portion of the dataset so we skip it next
|
|
|
|
* time through (as we've already searched that portion of the
|
|
|
|
* hierarchy).
|
2011-11-17 22:14:36 +04:00
|
|
|
*/
|
2016-01-07 00:22:48 +03:00
|
|
|
gtnd.skip = strrchr(pname, '/') + 1;
|
2011-11-17 22:14:36 +04:00
|
|
|
}
|
2008-11-20 23:01:55 +03:00
|
|
|
|
2011-11-17 22:14:36 +04:00
|
|
|
return (ENOENT);
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
|
|
|
|
2019-06-19 19:48:13 +03:00
|
|
|
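/*
 * Convenience wrapper around guid_to_name_redact_snaps() for callers that
 * do not care about redaction snapshots.
 */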
static int
|
|
|
|
guid_to_name(libzfs_handle_t *hdl, const char *parent, uint64_t guid,
|
|
|
|
boolean_t bookmark_ok, char *name)
|
|
|
|
{
|
|
|
|
return (guid_to_name_redact_snaps(hdl, parent, guid, bookmark_ok, NULL,
|
|
|
|
-1, name));
|
|
|
|
}
|
|
|
|
|
2008-11-20 23:01:55 +03:00
|
|
|
/*
|
2011-11-17 22:14:36 +04:00
|
|
|
* Return -1 if the snapshot with guid1 was created before the snapshot with
|
|
|
|
* guid2, +1 if it was created after, and 0 if they were created in the same txg.
|
2008-11-20 23:01:55 +03:00
|
|
|
*/
|
|
|
|
static int
|
|
|
|
created_before(libzfs_handle_t *hdl, avl_tree_t *avl,
|
|
|
|
uint64_t guid1, uint64_t guid2)
|
|
|
|
{
|
|
|
|
nvlist_t *nvfs;
|
2015-11-09 23:10:02 +03:00
|
|
|
char *fsname = NULL, *snapname = NULL;
|
2016-06-16 00:28:36 +03:00
|
|
|
char buf[ZFS_MAX_DATASET_NAME_LEN];
|
2008-11-20 23:01:55 +03:00
|
|
|
int rv;
|
2011-11-17 22:14:36 +04:00
|
|
|
zfs_handle_t *guid1hdl, *guid2hdl;
|
|
|
|
uint64_t create1, create2;
|
2008-11-20 23:01:55 +03:00
|
|
|
|
|
|
|
if (guid2 == 0)
|
|
|
|
return (0);
|
|
|
|
if (guid1 == 0)
|
|
|
|
return (1);
|
|
|
|
|
|
|
|
nvfs = fsavl_find(avl, guid1, &snapname);
|
2021-01-14 20:53:09 +03:00
|
|
|
fsname = fnvlist_lookup_string(nvfs, "name");
|
2008-11-20 23:01:55 +03:00
|
|
|
(void) snprintf(buf, sizeof (buf), "%s@%s", fsname, snapname);
|
2011-11-17 22:14:36 +04:00
|
|
|
guid1hdl = zfs_open(hdl, buf, ZFS_TYPE_SNAPSHOT);
|
|
|
|
if (guid1hdl == NULL)
|
2008-11-20 23:01:55 +03:00
|
|
|
return (-1);
|
|
|
|
|
|
|
|
nvfs = fsavl_find(avl, guid2, &snapname);
|
2021-01-14 20:53:09 +03:00
|
|
|
fsname = fnvlist_lookup_string(nvfs, "name");
|
2008-11-20 23:01:55 +03:00
|
|
|
(void) snprintf(buf, sizeof (buf), "%s@%s", fsname, snapname);
|
2011-11-17 22:14:36 +04:00
|
|
|
guid2hdl = zfs_open(hdl, buf, ZFS_TYPE_SNAPSHOT);
|
|
|
|
if (guid2hdl == NULL) {
|
|
|
|
zfs_close(guid1hdl);
|
2008-11-20 23:01:55 +03:00
|
|
|
return (-1);
|
|
|
|
}
|
|
|
|
|
2011-11-17 22:14:36 +04:00
|
|
|
create1 = zfs_prop_get_int(guid1hdl, ZFS_PROP_CREATETXG);
|
|
|
|
create2 = zfs_prop_get_int(guid2hdl, ZFS_PROP_CREATETXG);
|
2008-11-20 23:01:55 +03:00
|
|
|
|
2011-11-17 22:14:36 +04:00
|
|
|
if (create1 < create2)
|
|
|
|
rv = -1;
|
|
|
|
else if (create1 > create2)
|
|
|
|
rv = +1;
|
|
|
|
else
|
|
|
|
rv = 0;
|
|
|
|
|
|
|
|
zfs_close(guid1hdl);
|
|
|
|
zfs_close(guid2hdl);
|
2008-11-20 23:01:55 +03:00
|
|
|
|
|
|
|
return (rv);
|
|
|
|
}
|
|
|
|
|
2017-08-14 20:36:48 +03:00
|
|
|
/*
|
2019-04-15 05:06:34 +03:00
|
|
|
* This function reestablishes the hierarchy of encryption roots after a
|
2017-08-14 20:36:48 +03:00
|
|
|
* recursive incremental receive has completed. This must be done after the
|
|
|
|
* second call to recv_incremental_replication() has renamed and promoted all
|
2019-04-15 05:06:34 +03:00
|
|
|
* sent datasets to their final locations in the dataset hierarchy.
|
2017-08-14 20:36:48 +03:00
|
|
|
*/
|
|
|
|
static int
|
2019-09-26 03:02:33 +03:00
|
|
|
recv_fix_encryption_hierarchy(libzfs_handle_t *hdl, const char *top_zfs,
|
2017-08-14 20:36:48 +03:00
|
|
|
nvlist_t *stream_nv, avl_tree_t *stream_avl)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
nvpair_t *fselem = NULL;
|
|
|
|
nvlist_t *stream_fss;
|
|
|
|
|
2021-01-14 20:53:09 +03:00
|
|
|
stream_fss = fnvlist_lookup_nvlist(stream_nv, "fss");
|
2017-08-14 20:36:48 +03:00
|
|
|
|
|
|
|
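/*
 * Walk every filesystem recorded in the stream's "fss" nvlist and bring
 * its encryption root and key location in line with what was sent.
 */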
while ((fselem = nvlist_next_nvpair(stream_fss, fselem)) != NULL) {
|
|
|
|
zfs_handle_t *zhp = NULL;
|
|
|
|
uint64_t crypt;
|
|
|
|
nvlist_t *snaps, *props, *stream_nvfs = NULL;
|
|
|
|
nvpair_t *snapel = NULL;
|
|
|
|
boolean_t is_encroot, is_clone, stream_encroot;
|
|
|
|
char *cp;
|
|
|
|
char *stream_keylocation = NULL;
|
|
|
|
char keylocation[MAXNAMELEN];
|
|
|
|
char fsname[ZFS_MAX_DATASET_NAME_LEN];
|
|
|
|
|
|
|
|
keylocation[0] = '\0';
|
2021-01-14 20:53:09 +03:00
|
|
|
stream_nvfs = fnvpair_value_nvlist(fselem);
|
|
|
|
snaps = fnvlist_lookup_nvlist(stream_nvfs, "snaps");
|
|
|
|
props = fnvlist_lookup_nvlist(stream_nvfs, "props");
|
2017-08-14 20:36:48 +03:00
|
|
|
stream_encroot = nvlist_exists(stream_nvfs, "is_encroot");
|
|
|
|
|
|
|
|
/* find a snapshot from the stream that exists locally */
|
|
|
|
err = ENOENT;
|
|
|
|
while ((snapel = nvlist_next_nvpair(snaps, snapel)) != NULL) {
|
|
|
|
uint64_t guid;
|
|
|
|
|
2021-01-14 20:53:09 +03:00
|
|
|
guid = fnvpair_value_uint64(snapel);
|
2019-09-26 03:02:33 +03:00
|
|
|
err = guid_to_name(hdl, top_zfs, guid, B_FALSE,
|
2017-08-14 20:36:48 +03:00
|
|
|
fsname);
|
|
|
|
if (err == 0)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (err != 0)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
cp = strchr(fsname, '@');
|
|
|
|
if (cp != NULL)
|
|
|
|
*cp = '\0';
|
|
|
|
|
|
|
|
zhp = zfs_open(hdl, fsname, ZFS_TYPE_DATASET);
|
|
|
|
if (zhp == NULL) {
|
|
|
|
err = ENOENT;
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
crypt = zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION);
|
|
|
|
is_clone = zhp->zfs_dmustats.dds_origin[0] != '\0';
|
|
|
|
(void) zfs_crypto_get_encryption_root(zhp, &is_encroot, NULL);
|
|
|
|
|
2019-06-20 22:29:51 +03:00
|
|
|
/* we don't need to do anything for unencrypted datasets */
|
2017-08-14 20:36:48 +03:00
|
|
|
if (crypt == ZIO_CRYPT_OFF) {
|
|
|
|
zfs_close(zhp);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the dataset is flagged as an encryption root, was not
|
|
|
|
* received as a clone and is not currently an encryption root,
|
|
|
|
* force it to become one. Fixup the keylocation if necessary.
|
|
|
|
*/
|
|
|
|
if (stream_encroot) {
|
|
|
|
if (!is_clone && !is_encroot) {
|
|
|
|
err = lzc_change_key(fsname,
|
|
|
|
DCP_CMD_FORCE_NEW_KEY, NULL, NULL, 0);
|
|
|
|
if (err != 0) {
|
|
|
|
zfs_close(zhp);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-14 20:53:09 +03:00
|
|
|
stream_keylocation = fnvlist_lookup_string(props,
|
|
|
|
zfs_prop_to_name(ZFS_PROP_KEYLOCATION));
|
2017-08-14 20:36:48 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Refresh the properties in case the call to
|
|
|
|
* lzc_change_key() changed the value.
|
|
|
|
*/
|
|
|
|
zfs_refresh_properties(zhp);
|
|
|
|
err = zfs_prop_get(zhp, ZFS_PROP_KEYLOCATION,
|
|
|
|
keylocation, sizeof (keylocation), NULL, NULL,
|
|
|
|
0, B_TRUE);
|
|
|
|
if (err != 0) {
|
|
|
|
zfs_close(zhp);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (strcmp(keylocation, stream_keylocation) != 0) {
|
|
|
|
err = zfs_prop_set(zhp,
|
|
|
|
zfs_prop_to_name(ZFS_PROP_KEYLOCATION),
|
|
|
|
stream_keylocation);
|
|
|
|
if (err != 0) {
|
|
|
|
zfs_close(zhp);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the dataset is not flagged as an encryption root and is
|
|
|
|
* currently an encryption root, force it to inherit from its
|
2017-09-12 23:15:11 +03:00
|
|
|
* parent. The root of a raw send should never be
|
|
|
|
* force-inherited.
|
2017-08-14 20:36:48 +03:00
|
|
|
*/
|
2017-09-12 23:15:11 +03:00
|
|
|
if (!stream_encroot && is_encroot &&
|
|
|
|
strcmp(top_zfs, fsname) != 0) {
|
2017-08-14 20:36:48 +03:00
|
|
|
err = lzc_change_key(fsname, DCP_CMD_FORCE_INHERIT,
|
|
|
|
NULL, NULL, 0);
|
|
|
|
if (err != 0) {
|
|
|
|
zfs_close(zhp);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
zfs_close(zhp);
|
|
|
|
}
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
|
|
|
|
error:
|
|
|
|
return (err);
|
|
|
|
}
|
|
|
|
|
2008-11-20 23:01:55 +03:00
|
|
|
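/*
 * Bring the existing hierarchy under 'tofs' in line with the incoming
 * replication stream: destroy snapshots and filesystems that no longer
 * exist on the sender, and rename or promote those that have moved.
 */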
static int
|
|
|
|
recv_incremental_replication(libzfs_handle_t *hdl, const char *tofs,
|
2011-11-17 22:14:36 +04:00
|
|
|
recvflags_t *flags, nvlist_t *stream_nv, avl_tree_t *stream_avl,
|
2010-05-29 00:45:14 +04:00
|
|
|
nvlist_t *renamed)
|
2008-11-20 23:01:55 +03:00
|
|
|
{
|
2012-12-14 02:03:07 +04:00
|
|
|
nvlist_t *local_nv, *deleted = NULL;
|
2008-11-20 23:01:55 +03:00
|
|
|
avl_tree_t *local_avl;
|
|
|
|
nvpair_t *fselem, *nextfselem;
|
2010-05-29 00:45:14 +04:00
|
|
|
char *fromsnap;
|
2016-06-16 00:28:36 +03:00
|
|
|
char newname[ZFS_MAX_DATASET_NAME_LEN];
|
2012-12-14 02:03:07 +04:00
|
|
|
char guidname[32];
|
2008-11-20 23:01:55 +03:00
|
|
|
int error;
|
2010-05-29 00:45:14 +04:00
|
|
|
boolean_t needagain, progress, recursive;
|
|
|
|
char *s1, *s2;
|
2008-11-20 23:01:55 +03:00
|
|
|
|
2021-01-14 20:53:09 +03:00
|
|
|
fromsnap = fnvlist_lookup_string(stream_nv, "fromsnap");
|
2010-05-29 00:45:14 +04:00
|
|
|
|
|
|
|
recursive = (nvlist_lookup_boolean(stream_nv, "not_recursive") ==
|
|
|
|
ENOENT);
|
2008-11-20 23:01:55 +03:00
|
|
|
|
2011-11-17 22:14:36 +04:00
|
|
|
if (flags->dryrun)
|
2008-11-20 23:01:55 +03:00
|
|
|
return (0);
|
|
|
|
|
|
|
|
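/*
 * Renames, promotions, and destroys can invalidate one another, so keep
 * making passes over the local hierarchy until no further progress is made.
 */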
again:
|
|
|
|
needagain = progress = B_FALSE;
|
|
|
|
|
2021-01-14 20:53:09 +03:00
|
|
|
deleted = fnvlist_alloc();
|
2012-12-14 02:03:07 +04:00
|
|
|
|
2008-11-20 23:01:55 +03:00
|
|
|
if ((error = gather_nvlist(hdl, tofs, fromsnap, NULL,
|
2021-04-11 22:05:35 +03:00
|
|
|
recursive, B_TRUE, B_FALSE, recursive, B_FALSE, B_FALSE, B_FALSE,
|
2019-03-29 01:48:58 +03:00
|
|
|
B_FALSE, B_TRUE, &local_nv, &local_avl)) != 0)
|
2008-11-20 23:01:55 +03:00
|
|
|
return (error);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Process deletes and renames
|
|
|
|
*/
|
|
|
|
for (fselem = nvlist_next_nvpair(local_nv, NULL);
|
|
|
|
fselem; fselem = nextfselem) {
|
|
|
|
nvlist_t *nvfs, *snaps;
|
|
|
|
nvlist_t *stream_nvfs = NULL;
|
|
|
|
nvpair_t *snapelem, *nextsnapelem;
|
|
|
|
uint64_t fromguid = 0;
|
|
|
|
uint64_t originguid = 0;
|
|
|
|
uint64_t stream_originguid = 0;
|
|
|
|
uint64_t parent_fromsnap_guid, stream_parent_fromsnap_guid;
|
|
|
|
char *fsname, *stream_fsname;
|
|
|
|
|
|
|
|
nextfselem = nvlist_next_nvpair(local_nv, fselem);
|
|
|
|
|
2021-01-14 20:53:09 +03:00
|
|
|
nvfs = fnvpair_value_nvlist(fselem);
|
|
|
|
snaps = fnvlist_lookup_nvlist(nvfs, "snaps");
|
|
|
|
fsname = fnvlist_lookup_string(nvfs, "name");
|
|
|
|
parent_fromsnap_guid = fnvlist_lookup_uint64(nvfs,
|
|
|
|
"parentfromsnap");
|
2008-11-20 23:01:55 +03:00
|
|
|
(void) nvlist_lookup_uint64(nvfs, "origin", &originguid);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* First find the stream's fs, so we can check for
|
|
|
|
* a different origin (due to "zfs promote")
|
|
|
|
*/
|
|
|
|
for (snapelem = nvlist_next_nvpair(snaps, NULL);
|
|
|
|
snapelem; snapelem = nvlist_next_nvpair(snaps, snapelem)) {
|
|
|
|
uint64_t thisguid;
|
|
|
|
|
2021-01-14 20:53:09 +03:00
|
|
|
thisguid = fnvpair_value_uint64(snapelem);
|
2008-11-20 23:01:55 +03:00
|
|
|
stream_nvfs = fsavl_find(stream_avl, thisguid, NULL);
|
|
|
|
|
|
|
|
if (stream_nvfs != NULL)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* check for promote */
|
|
|
|
(void) nvlist_lookup_uint64(stream_nvfs, "origin",
|
|
|
|
&stream_originguid);
|
|
|
|
if (stream_nvfs && originguid != stream_originguid) {
|
|
|
|
switch (created_before(hdl, local_avl,
|
|
|
|
stream_originguid, originguid)) {
|
|
|
|
case 1: {
|
|
|
|
/* promote it! */
|
|
|
|
nvlist_t *origin_nvfs;
|
|
|
|
char *origin_fsname;
|
|
|
|
|
|
|
|
origin_nvfs = fsavl_find(local_avl, originguid,
|
|
|
|
NULL);
|
2021-01-14 20:53:09 +03:00
|
|
|
origin_fsname = fnvlist_lookup_string(
|
|
|
|
origin_nvfs, "name");
|
2017-08-14 20:36:48 +03:00
|
|
|
error = recv_promote(hdl, fsname, origin_fsname,
|
|
|
|
flags);
|
2008-11-20 23:01:55 +03:00
|
|
|
if (error == 0)
|
|
|
|
progress = B_TRUE;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
case -1:
|
|
|
|
fsavl_destroy(local_avl);
|
2021-01-14 20:53:09 +03:00
|
|
|
fnvlist_free(local_nv);
|
2008-11-20 23:01:55 +03:00
|
|
|
return (-1);
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* We had/have the wrong origin, therefore our
|
|
|
|
* list of snapshots is wrong. Need to handle
|
|
|
|
* them on the next pass.
|
|
|
|
*/
|
|
|
|
needagain = B_TRUE;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (snapelem = nvlist_next_nvpair(snaps, NULL);
|
|
|
|
snapelem; snapelem = nextsnapelem) {
|
|
|
|
uint64_t thisguid;
|
|
|
|
char *stream_snapname;
|
2008-12-03 23:09:06 +03:00
|
|
|
nvlist_t *found, *props;
|
2008-11-20 23:01:55 +03:00
|
|
|
|
|
|
|
nextsnapelem = nvlist_next_nvpair(snaps, snapelem);
|
|
|
|
|
2021-01-14 20:53:09 +03:00
|
|
|
thisguid = fnvpair_value_uint64(snapelem);
|
2008-11-20 23:01:55 +03:00
|
|
|
found = fsavl_find(stream_avl, thisguid,
|
|
|
|
&stream_snapname);
|
|
|
|
|
|
|
|
/* check for delete */
|
|
|
|
if (found == NULL) {
|
2016-06-16 00:28:36 +03:00
|
|
|
char name[ZFS_MAX_DATASET_NAME_LEN];
|
2008-11-20 23:01:55 +03:00
|
|
|
|
2011-11-17 22:14:36 +04:00
|
|
|
if (!flags->force)
|
2008-11-20 23:01:55 +03:00
|
|
|
continue;
|
|
|
|
|
|
|
|
(void) snprintf(name, sizeof (name), "%s@%s",
|
|
|
|
fsname, nvpair_name(snapelem));
|
|
|
|
|
|
|
|
error = recv_destroy(hdl, name,
|
|
|
|
strlen(fsname)+1, newname, flags);
|
|
|
|
if (error)
|
|
|
|
needagain = B_TRUE;
|
|
|
|
else
|
|
|
|
progress = B_TRUE;
|
2015-05-11 22:05:05 +03:00
|
|
|
sprintf(guidname, "%llu",
|
|
|
|
(u_longlong_t)thisguid);
|
2012-12-14 02:03:07 +04:00
|
|
|
nvlist_add_boolean(deleted, guidname);
|
2008-11-20 23:01:55 +03:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
stream_nvfs = found;
|
|
|
|
|
2008-12-03 23:09:06 +03:00
|
|
|
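/*
 * Restore any received properties recorded for this snapshot in the
 * stream's "snapprops" nvlist.
 */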
if (0 == nvlist_lookup_nvlist(stream_nvfs, "snapprops",
|
|
|
|
&props) && 0 == nvlist_lookup_nvlist(props,
|
|
|
|
stream_snapname, &props)) {
|
2013-09-04 16:00:57 +04:00
|
|
|
zfs_cmd_t zc = {"\0"};
|
2008-12-03 23:09:06 +03:00
|
|
|
|
2010-05-29 00:45:14 +04:00
|
|
|
zc.zc_cookie = B_TRUE; /* received */
|
2008-12-03 23:09:06 +03:00
|
|
|
(void) snprintf(zc.zc_name, sizeof (zc.zc_name),
|
|
|
|
"%s@%s", fsname, nvpair_name(snapelem));
|
|
|
|
if (zcmd_write_src_nvlist(hdl, &zc,
|
|
|
|
props) == 0) {
|
|
|
|
(void) zfs_ioctl(hdl,
|
|
|
|
ZFS_IOC_SET_PROP, &zc);
|
|
|
|
zcmd_free_nvlists(&zc);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-11-20 23:01:55 +03:00
|
|
|
/* check for different snapname */
|
|
|
|
if (strcmp(nvpair_name(snapelem),
|
|
|
|
stream_snapname) != 0) {
|
2016-06-16 00:28:36 +03:00
|
|
|
char name[ZFS_MAX_DATASET_NAME_LEN];
|
|
|
|
char tryname[ZFS_MAX_DATASET_NAME_LEN];
|
2008-11-20 23:01:55 +03:00
|
|
|
|
|
|
|
(void) snprintf(name, sizeof (name), "%s@%s",
|
|
|
|
fsname, nvpair_name(snapelem));
|
|
|
|
(void) snprintf(tryname, sizeof (tryname), "%s@%s",
|
|
|
|
fsname, stream_snapname);
|
|
|
|
|
|
|
|
error = recv_rename(hdl, name, tryname,
|
|
|
|
strlen(fsname)+1, newname, flags);
|
|
|
|
if (error)
|
|
|
|
needagain = B_TRUE;
|
|
|
|
else
|
|
|
|
progress = B_TRUE;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (strcmp(stream_snapname, fromsnap) == 0)
|
|
|
|
fromguid = thisguid;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* check for delete */
|
|
|
|
if (stream_nvfs == NULL) {
|
2011-11-17 22:14:36 +04:00
|
|
|
if (!flags->force)
|
2008-11-20 23:01:55 +03:00
|
|
|
continue;
|
|
|
|
|
|
|
|
error = recv_destroy(hdl, fsname, strlen(tofs)+1,
|
|
|
|
newname, flags);
|
|
|
|
if (error)
|
|
|
|
needagain = B_TRUE;
|
|
|
|
else
|
|
|
|
progress = B_TRUE;
|
2015-05-11 22:05:05 +03:00
|
|
|
sprintf(guidname, "%llu",
|
2016-12-12 21:46:26 +03:00
|
|
|
(u_longlong_t)parent_fromsnap_guid);
|
2012-12-14 02:03:07 +04:00
|
|
|
nvlist_add_boolean(deleted, guidname);
|
2008-11-20 23:01:55 +03:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2010-05-29 00:45:14 +04:00
|
|
|
if (fromguid == 0) {
|
2011-11-17 22:14:36 +04:00
|
|
|
if (flags->verbose) {
|
2010-05-29 00:45:14 +04:00
|
|
|
(void) printf("local fs %s does not have "
|
|
|
|
"fromsnap (%s in stream); must have "
|
|
|
|
"been deleted locally; ignoring\n",
|
|
|
|
fsname, fromsnap);
|
|
|
|
}
|
2008-11-20 23:01:55 +03:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2021-01-14 20:53:09 +03:00
|
|
|
stream_fsname = fnvlist_lookup_string(stream_nvfs, "name");
|
|
|
|
stream_parent_fromsnap_guid = fnvlist_lookup_uint64(
|
|
|
|
stream_nvfs, "parentfromsnap");
|
2008-11-20 23:01:55 +03:00
|
|
|
|
2010-05-29 00:45:14 +04:00
|
|
|
s1 = strrchr(fsname, '/');
|
|
|
|
s2 = strrchr(stream_fsname, '/');
|
|
|
|
|
2012-12-14 02:03:07 +04:00
|
|
|
/*
|
|
|
|
* Check if we're going to rename based on parent guid change
|
|
|
|
* and the current parent guid was also deleted. If it was then
|
|
|
|
* rename will fail and is likely unneeded, so avoid this and
|
|
|
|
* force an early retry to determine the new
|
|
|
|
* parent_fromsnap_guid.
|
|
|
|
*/
|
|
|
|
if (stream_parent_fromsnap_guid != 0 &&
|
|
|
|
parent_fromsnap_guid != 0 &&
|
|
|
|
stream_parent_fromsnap_guid != parent_fromsnap_guid) {
|
2015-05-11 22:05:05 +03:00
|
|
|
sprintf(guidname, "%llu",
|
2016-12-12 21:46:26 +03:00
|
|
|
(u_longlong_t)parent_fromsnap_guid);
|
2012-12-14 02:03:07 +04:00
|
|
|
if (nvlist_exists(deleted, guidname)) {
|
|
|
|
progress = B_TRUE;
|
|
|
|
needagain = B_TRUE;
|
|
|
|
goto doagain;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-05-29 00:45:14 +04:00
|
|
|
/*
|
|
|
|
* Check for rename. If the exact receive path is specified, it
|
|
|
|
* does not count as a rename, but we still need to check the
|
|
|
|
* datasets beneath it.
|
|
|
|
*/
|
2008-11-20 23:01:55 +03:00
|
|
|
if ((stream_parent_fromsnap_guid != 0 &&
|
2010-05-29 00:45:14 +04:00
|
|
|
parent_fromsnap_guid != 0 &&
|
2008-11-20 23:01:55 +03:00
|
|
|
stream_parent_fromsnap_guid != parent_fromsnap_guid) ||
|
2011-11-17 22:14:36 +04:00
|
|
|
((flags->isprefix || strcmp(tofs, fsname) != 0) &&
|
2010-05-29 00:45:14 +04:00
|
|
|
(s1 != NULL) && (s2 != NULL) && strcmp(s1, s2) != 0)) {
|
2008-11-20 23:01:55 +03:00
|
|
|
nvlist_t *parent;
|
2016-06-16 00:28:36 +03:00
|
|
|
char tryname[ZFS_MAX_DATASET_NAME_LEN];
|
2008-11-20 23:01:55 +03:00
|
|
|
|
|
|
|
parent = fsavl_find(local_avl,
|
|
|
|
stream_parent_fromsnap_guid, NULL);
|
|
|
|
/*
|
|
|
|
* NB: parent might not be found if we used the
|
|
|
|
* tosnap for stream_parent_fromsnap_guid,
|
|
|
|
* because the parent is a newly-created fs;
|
|
|
|
* we'll be able to rename it after we recv the
|
|
|
|
* new fs.
|
|
|
|
*/
|
|
|
|
if (parent != NULL) {
|
|
|
|
char *pname;
|
|
|
|
|
2021-01-14 20:53:09 +03:00
|
|
|
pname = fnvlist_lookup_string(parent, "name");
|
2008-11-20 23:01:55 +03:00
|
|
|
(void) snprintf(tryname, sizeof (tryname),
|
|
|
|
"%s%s", pname, strrchr(stream_fsname, '/'));
|
|
|
|
} else {
|
|
|
|
tryname[0] = '\0';
|
2011-11-17 22:14:36 +04:00
|
|
|
if (flags->verbose) {
|
2008-11-20 23:01:55 +03:00
|
|
|
(void) printf("local fs %s new parent "
|
|
|
|
"not found\n", fsname);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-05-29 00:45:14 +04:00
|
|
|
newname[0] = '\0';
|
|
|
|
|
2008-11-20 23:01:55 +03:00
|
|
|
error = recv_rename(hdl, fsname, tryname,
|
|
|
|
strlen(tofs)+1, newname, flags);
|
2010-05-29 00:45:14 +04:00
|
|
|
|
|
|
|
if (renamed != NULL && newname[0] != '\0') {
|
2021-01-14 20:53:09 +03:00
|
|
|
fnvlist_add_boolean(renamed, newname);
|
2010-05-29 00:45:14 +04:00
|
|
|
}
|
|
|
|
|
2008-11-20 23:01:55 +03:00
|
|
|
if (error)
|
|
|
|
needagain = B_TRUE;
|
|
|
|
else
|
|
|
|
progress = B_TRUE;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-12-14 02:03:07 +04:00
|
|
|
doagain:
|
2008-11-20 23:01:55 +03:00
|
|
|
fsavl_destroy(local_avl);
|
2021-01-14 20:53:09 +03:00
|
|
|
fnvlist_free(local_nv);
|
|
|
|
fnvlist_free(deleted);
|
2008-11-20 23:01:55 +03:00
|
|
|
|
|
|
|
if (needagain && progress) {
|
|
|
|
/* do another pass to fix up temporary names */
|
2011-11-17 22:14:36 +04:00
|
|
|
if (flags->verbose)
|
2008-11-20 23:01:55 +03:00
|
|
|
(void) printf("another pass:\n");
|
|
|
|
goto again;
|
|
|
|
}
|
|
|
|
|
2017-08-14 20:36:48 +03:00
|
|
|
return (needagain || error != 0);
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
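/*
 * Receive a DMU_COMPOUNDSTREAM package: read the stream-wide nvlist,
 * reconcile the local hierarchy with the sender's (renames, destroys,
 * promotions), then receive each contained stream in turn.
 */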
static int
|
|
|
|
zfs_receive_package(libzfs_handle_t *hdl, int fd, const char *destname,
|
2011-11-17 22:14:36 +04:00
|
|
|
recvflags_t *flags, dmu_replay_record_t *drr, zio_cksum_t *zc,
|
2020-04-23 20:06:57 +03:00
|
|
|
char **top_zfs, nvlist_t *cmdprops)
|
2008-11-20 23:01:55 +03:00
|
|
|
{
|
|
|
|
nvlist_t *stream_nv = NULL;
|
|
|
|
avl_tree_t *stream_avl = NULL;
|
|
|
|
char *fromsnap = NULL;
|
2016-06-09 22:24:29 +03:00
|
|
|
char *sendsnap = NULL;
|
2010-05-29 00:45:14 +04:00
|
|
|
char *cp;
|
2016-06-16 00:28:36 +03:00
|
|
|
char tofs[ZFS_MAX_DATASET_NAME_LEN];
|
|
|
|
char sendfs[ZFS_MAX_DATASET_NAME_LEN];
|
2008-11-20 23:01:55 +03:00
|
|
|
char errbuf[1024];
|
|
|
|
dmu_replay_record_t drre;
|
|
|
|
int error;
|
|
|
|
boolean_t anyerr = B_FALSE;
|
|
|
|
boolean_t softerr = B_FALSE;
|
2017-08-14 20:36:48 +03:00
|
|
|
boolean_t recursive, raw;
|
2008-11-20 23:01:55 +03:00
|
|
|
|
|
|
|
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
|
|
|
|
"cannot receive"));
|
|
|
|
|
|
|
|
assert(drr->drr_type == DRR_BEGIN);
|
|
|
|
assert(drr->drr_u.drr_begin.drr_magic == DMU_BACKUP_MAGIC);
|
2010-05-29 00:45:14 +04:00
|
|
|
assert(DMU_GET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo) ==
|
|
|
|
DMU_COMPOUNDSTREAM);
|
2008-11-20 23:01:55 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Read in the nvlist from the stream.
|
|
|
|
*/
|
|
|
|
if (drr->drr_payloadlen != 0) {
|
|
|
|
error = recv_read_nvlist(hdl, fd, drr->drr_payloadlen,
|
2011-11-17 22:14:36 +04:00
|
|
|
&stream_nv, flags->byteswap, zc);
|
2008-11-20 23:01:55 +03:00
|
|
|
if (error) {
|
|
|
|
error = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-05-29 00:45:14 +04:00
|
|
|
recursive = (nvlist_lookup_boolean(stream_nv, "not_recursive") ==
|
|
|
|
ENOENT);
|
2017-08-14 20:36:48 +03:00
|
|
|
raw = (nvlist_lookup_boolean(stream_nv, "raw") == 0);
|
2010-05-29 00:45:14 +04:00
|
|
|
|
|
|
|
if (recursive && strchr(destname, '@')) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"cannot specify snapshot name for multi-snapshot stream"));
|
|
|
|
error = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2008-11-20 23:01:55 +03:00
|
|
|
/*
|
|
|
|
* Read in the end record and verify checksum.
|
|
|
|
*/
|
|
|
|
if (0 != (error = recv_read(hdl, fd, &drre, sizeof (drre),
|
2011-11-17 22:14:36 +04:00
|
|
|
flags->byteswap, NULL)))
|
2008-11-20 23:01:55 +03:00
|
|
|
goto out;
|
2011-11-17 22:14:36 +04:00
|
|
|
if (flags->byteswap) {
|
2008-11-20 23:01:55 +03:00
|
|
|
drre.drr_type = BSWAP_32(drre.drr_type);
|
|
|
|
drre.drr_u.drr_end.drr_checksum.zc_word[0] =
|
|
|
|
BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[0]);
|
|
|
|
drre.drr_u.drr_end.drr_checksum.zc_word[1] =
|
|
|
|
BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[1]);
|
|
|
|
drre.drr_u.drr_end.drr_checksum.zc_word[2] =
|
|
|
|
BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[2]);
|
|
|
|
drre.drr_u.drr_end.drr_checksum.zc_word[3] =
|
|
|
|
BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[3]);
|
|
|
|
}
|
|
|
|
if (drre.drr_type != DRR_END) {
|
|
|
|
error = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
if (!ZIO_CHECKSUM_EQUAL(drre.drr_u.drr_end.drr_checksum, *zc)) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"incorrect header checksum"));
|
|
|
|
error = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
(void) nvlist_lookup_string(stream_nv, "fromsnap", &fromsnap);
|
|
|
|
|
|
|
|
if (drr->drr_payloadlen != 0) {
|
|
|
|
nvlist_t *stream_fss;
|
|
|
|
|
2021-01-14 20:53:09 +03:00
|
|
|
stream_fss = fnvlist_lookup_nvlist(stream_nv, "fss");
|
2008-11-20 23:01:55 +03:00
|
|
|
if ((stream_avl = fsavl_create(stream_fss)) == NULL) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"couldn't allocate avl tree"));
|
|
|
|
error = zfs_error(hdl, EZFS_NOMEM, errbuf);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2017-04-07 23:54:29 +03:00
|
|
|
if (fromsnap != NULL && recursive) {
|
2010-05-29 00:45:14 +04:00
|
|
|
nvlist_t *renamed = NULL;
|
|
|
|
nvpair_t *pair = NULL;
|
|
|
|
|
2016-06-16 00:28:36 +03:00
|
|
|
(void) strlcpy(tofs, destname, sizeof (tofs));
|
2011-11-17 22:14:36 +04:00
|
|
|
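/*
 * Work out the local filesystem that corresponds to the top of the
 * stream, taking the isprefix/istail name handling into account.
 */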
if (flags->isprefix) {
|
2010-05-29 00:45:14 +04:00
|
|
|
struct drr_begin *drrb = &drr->drr_u.drr_begin;
|
|
|
|
int i;
|
|
|
|
|
2011-11-17 22:14:36 +04:00
|
|
|
if (flags->istail) {
|
2010-05-29 00:45:14 +04:00
|
|
|
cp = strrchr(drrb->drr_toname, '/');
|
|
|
|
if (cp == NULL) {
|
|
|
|
(void) strlcat(tofs, "/",
|
2016-06-16 00:28:36 +03:00
|
|
|
sizeof (tofs));
|
2010-05-29 00:45:14 +04:00
|
|
|
i = 0;
|
|
|
|
} else {
|
|
|
|
i = (cp - drrb->drr_toname);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
i = strcspn(drrb->drr_toname, "/@");
|
|
|
|
}
|
2008-11-20 23:01:55 +03:00
|
|
|
/* zfs_receive_one() will create_parents() */
|
2010-05-29 00:45:14 +04:00
|
|
|
(void) strlcat(tofs, &drrb->drr_toname[i],
|
2016-06-16 00:28:36 +03:00
|
|
|
sizeof (tofs));
|
2008-11-20 23:01:55 +03:00
|
|
|
*strchr(tofs, '@') = '\0';
|
|
|
|
}
|
2010-05-29 00:45:14 +04:00
|
|
|
|
2017-04-07 23:54:29 +03:00
|
|
|
if (!flags->dryrun && !flags->nomount) {
|
2021-01-14 20:53:09 +03:00
|
|
|
renamed = fnvlist_alloc();
|
2010-05-29 00:45:14 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
softerr = recv_incremental_replication(hdl, tofs, flags,
|
|
|
|
stream_nv, stream_avl, renamed);
|
|
|
|
|
|
|
|
/* Unmount renamed filesystems before receiving. */
|
|
|
|
while ((pair = nvlist_next_nvpair(renamed,
|
|
|
|
pair)) != NULL) {
|
|
|
|
zfs_handle_t *zhp;
|
|
|
|
prop_changelist_t *clp = NULL;
|
|
|
|
|
|
|
|
zhp = zfs_open(hdl, nvpair_name(pair),
|
|
|
|
ZFS_TYPE_FILESYSTEM);
|
|
|
|
if (zhp != NULL) {
|
|
|
|
clp = changelist_gather(zhp,
|
2020-03-17 20:08:32 +03:00
|
|
|
ZFS_PROP_MOUNTPOINT, 0,
|
|
|
|
flags->forceunmount ? MS_FORCE : 0);
|
2010-05-29 00:45:14 +04:00
|
|
|
zfs_close(zhp);
|
|
|
|
if (clp != NULL) {
|
|
|
|
softerr |=
|
|
|
|
changelist_prefix(clp);
|
|
|
|
changelist_free(clp);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-14 20:53:09 +03:00
|
|
|
fnvlist_free(renamed);
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-05-29 00:45:14 +04:00
|
|
|
/*
|
|
|
|
* Get the fs specified by the first path in the stream (the top level
|
|
|
|
* specified by 'zfs send') and pass it to each invocation of
|
|
|
|
* zfs_receive_one().
|
|
|
|
*/
|
|
|
|
(void) strlcpy(sendfs, drr->drr_u.drr_begin.drr_toname,
|
2016-06-16 00:28:36 +03:00
|
|
|
sizeof (sendfs));
|
2016-06-09 22:24:29 +03:00
|
|
|
if ((cp = strchr(sendfs, '@')) != NULL) {
|
2010-05-29 00:45:14 +04:00
|
|
|
*cp = '\0';
|
2016-06-09 22:24:29 +03:00
|
|
|
/*
|
|
|
|
* Find the "sendsnap", the final snapshot in a replication
|
|
|
|
* stream. zfs_receive_one() handles certain errors
|
|
|
|
 * differently, depending on whether the contained stream is the
|
|
|
|
* last one or not.
|
|
|
|
*/
|
|
|
|
sendsnap = (cp + 1);
|
|
|
|
}
|
2008-11-20 23:01:55 +03:00
|
|
|
|
|
|
|
/* Finally, receive each contained stream */
|
|
|
|
do {
|
|
|
|
/*
|
|
|
|
* we should figure out if it has a recoverable
|
|
|
|
* error, in which case do a recv_skip() and drive on.
|
|
|
|
* Note, if we fail due to already having this guid,
|
|
|
|
* zfs_receive_one() will take care of it (ie,
|
|
|
|
* recv_skip() and return 0).
|
|
|
|
*/
|
2015-12-22 04:31:57 +03:00
|
|
|
error = zfs_receive_impl(hdl, destname, NULL, flags, fd,
|
2020-04-23 20:06:57 +03:00
|
|
|
sendfs, stream_nv, stream_avl, top_zfs, sendsnap, cmdprops);
|
2008-11-20 23:01:55 +03:00
|
|
|
if (error == ENODATA) {
|
|
|
|
error = 0;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
anyerr |= error;
|
|
|
|
} while (error == 0);
|
|
|
|
|
2017-04-07 23:54:29 +03:00
|
|
|
if (drr->drr_payloadlen != 0 && recursive && fromsnap != NULL) {
|
2008-11-20 23:01:55 +03:00
|
|
|
/*
|
|
|
|
* Now that we have the fs's they sent us, try the
|
|
|
|
* renames again.
|
|
|
|
*/
|
|
|
|
softerr = recv_incremental_replication(hdl, tofs, flags,
|
2010-05-29 00:45:14 +04:00
|
|
|
stream_nv, stream_avl, NULL);
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
|
|
|
|
2019-09-26 03:02:33 +03:00
|
|
|
if (raw && softerr == 0 && *top_zfs != NULL) {
|
|
|
|
softerr = recv_fix_encryption_hierarchy(hdl, *top_zfs,
|
Native Encryption for ZFS on Linux
This change incorporates three major pieces:
The first change is a keystore that manages wrapping
and encryption keys for encrypted datasets. These
commands mostly involve manipulating the new
DSL Crypto Key ZAP Objects that live in the MOS. Each
encrypted dataset has its own DSL Crypto Key that is
protected with a user's key. This level of indirection
allows users to change their keys without re-encrypting
their entire datasets. The change implements the new
subcommands "zfs load-key", "zfs unload-key" and
"zfs change-key" which allow the user to manage their
encryption keys and settings. In addition, several new
flags and properties have been added to allow dataset
creation and to make mounting and unmounting more
convenient.
The second piece of this patch provides the ability to
encrypt, decrypt, and authenticate protected datasets.
Each object set maintains a Merkle tree of Message
Authentication Codes that protect the lower layers,
similarly to how checksums are maintained. This part
impacts the zio layer, which handles the actual
encryption and generation of MACs, as well as the ARC
and DMU, which need to be able to handle encrypted
buffers and protected data.
The last addition is the ability to do raw, encrypted
sends and receives. The idea here is to send raw
encrypted and compressed data and receive it exactly
as is on a backup system. This means that the dataset
on the receiving system is protected using the same
user key that is in use on the sending side. By doing
so, datasets can be efficiently backed up to an
untrusted system without fear of data being
compromised.
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Signed-off-by: Tom Caputi <tcaputi@datto.com>
Closes #494
Closes #5769
2017-08-14 20:36:48 +03:00
|
|
|
stream_nv, stream_avl);
|
|
|
|
}
|
|
|
|
|
2008-11-20 23:01:55 +03:00
|
|
|
out:
|
|
|
|
fsavl_destroy(stream_avl);
|
2021-01-14 20:53:09 +03:00
|
|
|
fnvlist_free(stream_nv);
|
2008-11-20 23:01:55 +03:00
|
|
|
if (softerr)
|
|
|
|
error = -2;
|
|
|
|
if (anyerr)
|
|
|
|
error = -1;
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2010-05-29 00:45:14 +04:00
|
|
|
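/*
 * Print how many additional properties could not be set (errors that were
 * truncated from the detailed per-property error report).
 */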
static void
|
|
|
|
trunc_prop_errs(int truncated)
|
|
|
|
{
|
|
|
|
ASSERT(truncated != 0);
|
|
|
|
|
|
|
|
if (truncated == 1)
|
|
|
|
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
|
|
|
|
"1 more property could not be set\n"));
|
|
|
|
else
|
|
|
|
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
|
|
|
|
"%d more properties could not be set\n"), truncated);
|
|
|
|
}
|
|
|
|
|
2008-11-20 23:01:55 +03:00
|
|
|
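/*
 * Skip the remainder of a substream: read and discard records (including
 * their payloads) until a DRR_END record is seen, or fail on an invalid
 * record type.
 */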
static int
|
|
|
|
recv_skip(libzfs_handle_t *hdl, int fd, boolean_t byteswap)
|
|
|
|
{
|
|
|
|
dmu_replay_record_t *drr;
|
2014-11-03 23:15:08 +03:00
|
|
|
void *buf = zfs_alloc(hdl, SPA_MAXBLOCKSIZE);
|
2019-09-06 02:22:05 +03:00
|
|
|
uint64_t payload_size;
|
2010-05-29 00:45:14 +04:00
|
|
|
char errbuf[1024];
|
|
|
|
|
|
|
|
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
|
2019-09-06 02:22:05 +03:00
|
|
|
"cannot receive"));
|
2008-11-20 23:01:55 +03:00
|
|
|
|
|
|
|
/* XXX would be great to use lseek if possible... */
|
|
|
|
drr = buf;
|
|
|
|
|
|
|
|
while (recv_read(hdl, fd, drr, sizeof (dmu_replay_record_t),
|
|
|
|
byteswap, NULL) == 0) {
|
|
|
|
if (byteswap)
|
|
|
|
drr->drr_type = BSWAP_32(drr->drr_type);
|
|
|
|
|
|
|
|
switch (drr->drr_type) {
|
|
|
|
case DRR_BEGIN:
|
2010-05-29 00:45:14 +04:00
|
|
|
if (drr->drr_payloadlen != 0) {
|
2016-01-07 00:22:48 +03:00
|
|
|
(void) recv_read(hdl, fd, buf,
|
|
|
|
drr->drr_payloadlen, B_FALSE, NULL);
|
2010-05-29 00:45:14 +04:00
|
|
|
}
|
2008-11-20 23:01:55 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
case DRR_END:
|
|
|
|
free(buf);
|
|
|
|
return (0);
|
|
|
|
|
|
|
|
case DRR_OBJECT:
|
|
|
|
if (byteswap) {
|
|
|
|
drr->drr_u.drr_object.drr_bonuslen =
|
|
|
|
BSWAP_32(drr->drr_u.drr_object.
|
|
|
|
drr_bonuslen);
|
2019-09-06 02:22:05 +03:00
|
|
|
drr->drr_u.drr_object.drr_raw_bonuslen =
|
|
|
|
BSWAP_32(drr->drr_u.drr_object.
|
|
|
|
drr_raw_bonuslen);
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
2019-09-06 02:22:05 +03:00
|
|
|
|
|
|
|
payload_size =
|
|
|
|
DRR_OBJECT_PAYLOAD_SIZE(&drr->drr_u.drr_object);
|
|
|
|
(void) recv_read(hdl, fd, buf, payload_size,
|
2008-11-20 23:01:55 +03:00
|
|
|
B_FALSE, NULL);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DRR_WRITE:
|
|
|
|
if (byteswap) {
|
2016-07-11 20:45:52 +03:00
|
|
|
drr->drr_u.drr_write.drr_logical_size =
|
|
|
|
BSWAP_64(
|
|
|
|
drr->drr_u.drr_write.drr_logical_size);
|
|
|
|
drr->drr_u.drr_write.drr_compressed_size =
|
|
|
|
BSWAP_64(
|
|
|
|
drr->drr_u.drr_write.drr_compressed_size);
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
2019-09-06 02:22:05 +03:00
|
|
|
payload_size =
|
2016-07-11 20:45:52 +03:00
|
|
|
DRR_WRITE_PAYLOAD_SIZE(&drr->drr_u.drr_write);
|
2020-08-25 21:04:20 +03:00
|
|
|
assert(payload_size <= SPA_MAXBLOCKSIZE);
|
2008-11-20 23:01:55 +03:00
|
|
|
(void) recv_read(hdl, fd, buf,
|
2016-07-11 20:45:52 +03:00
|
|
|
payload_size, B_FALSE, NULL);
|
2008-11-20 23:01:55 +03:00
|
|
|
break;
|
2010-05-29 00:45:14 +04:00
|
|
|
case DRR_SPILL:
|
|
|
|
if (byteswap) {
|
2016-05-09 21:22:00 +03:00
|
|
|
drr->drr_u.drr_spill.drr_length =
|
2010-05-29 00:45:14 +04:00
|
|
|
BSWAP_64(drr->drr_u.drr_spill.drr_length);
|
2019-09-06 02:22:05 +03:00
|
|
|
drr->drr_u.drr_spill.drr_compressed_size =
|
|
|
|
BSWAP_64(drr->drr_u.drr_spill.
|
|
|
|
drr_compressed_size);
|
2010-05-29 00:45:14 +04:00
|
|
|
}
|
2019-09-06 02:22:05 +03:00
|
|
|
|
|
|
|
payload_size =
|
|
|
|
DRR_SPILL_PAYLOAD_SIZE(&drr->drr_u.drr_spill);
|
|
|
|
(void) recv_read(hdl, fd, buf, payload_size,
|
|
|
|
B_FALSE, NULL);
|
2010-05-29 00:45:14 +04:00
|
|
|
break;
|
2014-06-06 01:19:08 +04:00
|
|
|
case DRR_WRITE_EMBEDDED:
|
|
|
|
if (byteswap) {
|
|
|
|
drr->drr_u.drr_write_embedded.drr_psize =
|
|
|
|
BSWAP_32(drr->drr_u.drr_write_embedded.
|
|
|
|
drr_psize);
|
|
|
|
}
|
|
|
|
(void) recv_read(hdl, fd, buf,
|
|
|
|
P2ROUNDUP(drr->drr_u.drr_write_embedded.drr_psize,
|
|
|
|
8), B_FALSE, NULL);
|
|
|
|
break;
|
Implement Redacted Send/Receive
Redacted send/receive allows users to send subsets of their data to
a target system. One possible use case for this feature is to not
transmit sensitive information to a data warehousing, test/dev, or
analytics environment. Another is to save space by not replicating
unimportant data within a given dataset, for example in backup tools
like zrepl.
Redacted send/receive is a three-stage process. First, a clone (or
clones) is made of the snapshot to be sent to the target. In this
clone (or clones), all unnecessary or unwanted data is removed or
modified. This clone is then snapshotted to create the "redaction
snapshot" (or snapshots). Second, the new zfs redact command is used
to create a redaction bookmark. The redaction bookmark stores the
list of blocks in a snapshot that were modified by the redaction
snapshot(s). Finally, the redaction bookmark is passed as a parameter
to zfs send. When sending to the snapshot that was redacted, the
redaction bookmark is used to filter out blocks that contain sensitive
or unwanted information, and those blocks are not included in the send
stream. When sending from the redaction bookmark, the blocks it
contains are considered as candidate blocks in addition to those
blocks in the destination snapshot that were modified since the
creation_txg of the redaction bookmark. This step is necessary to
allow the target to rehydrate data in the case where some blocks are
accidentally or unnecessarily modified in the redaction snapshot.
The changes to bookmarks to enable fast space estimation involve
adding deadlists to bookmarks. There is also logic to manage the
life cycles of these deadlists.
The new size estimation process operates in cases where previously
an accurate estimate could not be provided. In those cases, a send
is performed where no data blocks are read, reducing the runtime
significantly and providing a byte-accurate size estimate.
Reviewed-by: Dan Kimmel <dan.kimmel@delphix.com>
Reviewed-by: Matt Ahrens <mahrens@delphix.com>
Reviewed-by: Prashanth Sreenivasa <pks@delphix.com>
Reviewed-by: John Kennedy <john.kennedy@delphix.com>
Reviewed-by: George Wilson <george.wilson@delphix.com>
Reviewed-by: Chris Williamson <chris.williamson@delphix.com>
Reviewed-by: Pavel Zhakarov <pavel.zakharov@delphix.com>
Reviewed-by: Sebastien Roy <sebastien.roy@delphix.com>
Reviewed-by: Prakash Surya <prakash.surya@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Paul Dagnelie <pcd@delphix.com>
Closes #7958
2019-06-19 19:48:13 +03:00
|
|
|
case DRR_OBJECT_RANGE:
|
2010-05-29 00:45:14 +04:00
|
|
|
case DRR_WRITE_BYREF:
|
2008-11-20 23:01:55 +03:00
|
|
|
case DRR_FREEOBJECTS:
|
|
|
|
case DRR_FREE:
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
2010-05-29 00:45:14 +04:00
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"invalid record type"));
|
2016-10-11 20:24:18 +03:00
|
|
|
free(buf);
|
2010-05-29 00:45:14 +04:00
|
|
|
return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
free(buf);
|
|
|
|
return (-1);
|
|
|
|
}
|
|
|
|
|
2016-01-07 00:22:48 +03:00
|
|
|
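/*
 * Set the error-aux message for a failed receive ("checksum mismatch" or
 * "incomplete stream").  If the stream is resumable, also look up the
 * receive_resume_token on the target and tell the user how to generate a
 * resuming stream with "zfs send -t <token>".
 */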
static void
|
|
|
|
recv_ecksum_set_aux(libzfs_handle_t *hdl, const char *target_snap,
|
2020-03-17 20:30:33 +03:00
|
|
|
boolean_t resumable, boolean_t checksum)
|
2016-01-07 00:22:48 +03:00
|
|
|
{
|
2016-06-16 00:28:36 +03:00
|
|
|
char target_fs[ZFS_MAX_DATASET_NAME_LEN];
|
2016-01-07 00:22:48 +03:00
|
|
|
|
2020-03-17 20:30:33 +03:00
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, (checksum ?
|
|
|
|
"checksum mismatch" : "incomplete stream")));
|
2016-01-07 00:22:48 +03:00
|
|
|
|
|
|
|
if (!resumable)
|
|
|
|
return;
|
|
|
|
(void) strlcpy(target_fs, target_snap, sizeof (target_fs));
|
|
|
|
*strchr(target_fs, '@') = '\0';
|
|
|
|
zfs_handle_t *zhp = zfs_open(hdl, target_fs,
|
|
|
|
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
|
|
|
|
if (zhp == NULL)
|
|
|
|
return;
|
|
|
|
|
|
|
|
char token_buf[ZFS_MAXPROPLEN];
|
|
|
|
int error = zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN,
|
|
|
|
token_buf, sizeof (token_buf),
|
|
|
|
NULL, NULL, 0, B_TRUE);
|
|
|
|
if (error == 0) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"checksum mismatch or incomplete stream.\n"
|
|
|
|
"Partially received snapshot is saved.\n"
|
|
|
|
"A resuming stream can be generated on the sending "
|
|
|
|
"system by running:\n"
|
|
|
|
" zfs send -t %s"),
|
|
|
|
token_buf);
|
|
|
|
}
|
|
|
|
zfs_close(zhp);
|
|
|
|
}
|
|
|
|
|
2017-05-10 02:21:09 +03:00
|
|
|
/*
|
|
|
|
* Prepare a new nvlist of properties that are to override (-o) or be excluded
|
|
|
|
* (-x) from the received dataset
|
|
|
|
* recvprops: received properties from the send stream
|
|
|
|
* cmdprops: raw input properties from command line
|
|
|
|
* origprops: properties, both locally-set and received, currently set on the
|
|
|
|
* target dataset if it exists, NULL otherwise.
|
|
|
|
* oxprops: valid output override (-o) and excluded (-x) properties
|
|
|
|
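 *
 * Illustrative example: with "zfs receive -o compression=lz4 -x atime",
 * cmdprops carries a string pair (compression=lz4) and a boolean pair
 * (atime); the string is validated and merged into oxprops as an override,
 * while the boolean requests an explicit inherit of atime on the
 * destination (assuming atime is not already locally set there).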
*/
|
|
|
|
static int
|
2017-10-13 20:09:04 +03:00
|
|
|
zfs_setup_cmdline_props(libzfs_handle_t *hdl, zfs_type_t type,
|
|
|
|
char *fsname, boolean_t zoned, boolean_t recursive, boolean_t newfs,
|
|
|
|
boolean_t raw, boolean_t toplevel, nvlist_t *recvprops, nvlist_t *cmdprops,
|
|
|
|
nvlist_t *origprops, nvlist_t **oxprops, uint8_t **wkeydata_out,
|
|
|
|
uint_t *wkeylen_out, const char *errbuf)
|
2017-05-10 02:21:09 +03:00
|
|
|
{
|
|
|
|
nvpair_t *nvp;
|
|
|
|
nvlist_t *oprops, *voprops;
|
|
|
|
zfs_handle_t *zhp = NULL;
|
|
|
|
zpool_handle_t *zpool_hdl = NULL;
|
2017-10-13 20:09:04 +03:00
|
|
|
char *cp;
|
2017-05-10 02:21:09 +03:00
|
|
|
int ret = 0;
|
2017-10-13 20:09:04 +03:00
|
|
|
char namebuf[ZFS_MAX_DATASET_NAME_LEN];
|
2017-05-10 02:21:09 +03:00
|
|
|
|
|
|
|
if (nvlist_empty(cmdprops))
|
|
|
|
return (0); /* No properties to override or exclude */
|
|
|
|
|
|
|
|
*oxprops = fnvlist_alloc();
|
|
|
|
oprops = fnvlist_alloc();
|
|
|
|
|
2017-10-13 20:09:04 +03:00
|
|
|
strlcpy(namebuf, fsname, ZFS_MAX_DATASET_NAME_LEN);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Get our dataset handle. The target dataset may not exist yet.
|
|
|
|
*/
|
|
|
|
if (zfs_dataset_exists(hdl, namebuf, ZFS_TYPE_DATASET)) {
|
|
|
|
zhp = zfs_open(hdl, namebuf, ZFS_TYPE_DATASET);
|
|
|
|
if (zhp == NULL) {
|
|
|
|
ret = -1;
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* open the zpool handle */
|
|
|
|
cp = strchr(namebuf, '/');
|
|
|
|
if (cp != NULL)
|
|
|
|
*cp = '\0';
|
|
|
|
zpool_hdl = zpool_open(hdl, namebuf);
|
|
|
|
if (zpool_hdl == NULL) {
|
|
|
|
ret = -1;
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* restore namebuf to match fsname for later use */
|
|
|
|
if (cp != NULL)
|
|
|
|
*cp = '/';
|
|
|
|
|
2017-05-10 02:21:09 +03:00
|
|
|
/*
|
|
|
|
* first iteration: process excluded (-x) properties now and gather
|
|
|
|
* added (-o) properties to be later processed by zfs_valid_proplist()
|
|
|
|
*/
|
|
|
|
nvp = NULL;
|
|
|
|
while ((nvp = nvlist_next_nvpair(cmdprops, nvp)) != NULL) {
|
|
|
|
const char *name = nvpair_name(nvp);
|
|
|
|
zfs_prop_t prop = zfs_name_to_prop(name);
|
|
|
|
|
|
|
|
/* "origin" is processed separately, don't handle it here */
|
|
|
|
if (prop == ZFS_PROP_ORIGIN)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* we're trying to override or exclude a property that does not
|
|
|
|
* make sense for this type of dataset, but we don't want to
|
|
|
|
* fail if the receive is recursive: this comes in handy when
|
|
|
|
* the send stream contains, for instance, a child ZVOL and
|
|
|
|
* we're trying to receive it with "-o atime=on"
|
|
|
|
*/
|
|
|
|
if (!zfs_prop_valid_for_type(prop, type, B_FALSE) &&
|
|
|
|
!zfs_prop_user(name)) {
|
|
|
|
if (recursive)
|
|
|
|
continue;
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"property '%s' does not apply to datasets of this "
|
|
|
|
"type"), name);
|
|
|
|
ret = zfs_error(hdl, EZFS_BADPROP, errbuf);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
2017-10-13 20:09:04 +03:00
|
|
|
/* raw streams can't override encryption properties */
|
|
|
|
if ((zfs_prop_encryption_key_param(prop) ||
|
2019-10-24 20:51:01 +03:00
|
|
|
prop == ZFS_PROP_ENCRYPTION) && raw) {
|
2017-10-13 20:09:04 +03:00
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"encryption property '%s' cannot "
|
2019-10-24 20:51:01 +03:00
|
|
|
"be set or excluded for raw streams."), name);
|
|
|
|
ret = zfs_error(hdl, EZFS_BADPROP, errbuf);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* incremental streams can only exclude encryption properties */
|
|
|
|
if ((zfs_prop_encryption_key_param(prop) ||
|
|
|
|
prop == ZFS_PROP_ENCRYPTION) && !newfs &&
|
|
|
|
nvpair_type(nvp) != DATA_TYPE_BOOLEAN) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"encryption property '%s' cannot "
|
|
|
|
"be set for incremental streams."), name);
|
2017-10-13 20:09:04 +03:00
|
|
|
ret = zfs_error(hdl, EZFS_BADPROP, errbuf);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
2017-05-10 02:21:09 +03:00
|
|
|
switch (nvpair_type(nvp)) {
|
|
|
|
case DATA_TYPE_BOOLEAN: /* -x property */
|
|
|
|
/*
|
|
|
|
* DATA_TYPE_BOOLEAN is the way we're asked to "exclude"
|
|
|
|
* a property: this is done by forcing an explicit
|
|
|
|
* inherit on the destination so the effective value is
|
|
|
|
* not the one we received from the send stream.
|
|
|
|
* We do this only if the property is not already
|
|
|
|
* locally-set, in which case its value will take
|
|
|
|
 * priority over the received one anyway.
|
|
|
|
*/
|
|
|
|
if (nvlist_exists(origprops, name)) {
|
|
|
|
nvlist_t *attrs;
|
2019-10-24 20:51:01 +03:00
|
|
|
char *source = NULL;
|
2017-05-10 02:21:09 +03:00
|
|
|
|
|
|
|
attrs = fnvlist_lookup_nvlist(origprops, name);
|
2019-10-24 20:51:01 +03:00
|
|
|
if (nvlist_lookup_string(attrs,
|
|
|
|
ZPROP_SOURCE, &source) == 0 &&
|
|
|
|
strcmp(source, ZPROP_SOURCE_VAL_RECVD) != 0)
|
2017-05-10 02:21:09 +03:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* We can't force an explicit inherit on non-inheritable
|
|
|
|
* properties: if we're asked to exclude this kind of
|
|
|
|
* values we remove them from "recvprops" input nvlist.
|
|
|
|
*/
|
|
|
|
if (!zfs_prop_inheritable(prop) &&
|
|
|
|
!zfs_prop_user(name) && /* can be inherited too */
|
|
|
|
nvlist_exists(recvprops, name))
|
|
|
|
fnvlist_remove(recvprops, name);
|
|
|
|
else
|
|
|
|
fnvlist_add_nvpair(*oxprops, nvp);
|
|
|
|
break;
|
|
|
|
case DATA_TYPE_STRING: /* -o property=value */
|
|
|
|
fnvlist_add_nvpair(oprops, nvp);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"property '%s' must be a string or boolean"), name);
|
|
|
|
ret = zfs_error(hdl, EZFS_BADPROP, errbuf);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (toplevel) {
|
|
|
|
/* convert override strings properties to native */
|
|
|
|
if ((voprops = zfs_valid_proplist(hdl, ZFS_TYPE_DATASET,
|
2017-08-14 20:36:48 +03:00
|
|
|
oprops, zoned, zhp, zpool_hdl, B_FALSE, errbuf)) == NULL) {
|
2017-05-10 02:21:09 +03:00
|
|
|
ret = zfs_error(hdl, EZFS_BADPROP, errbuf);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
2017-10-13 20:09:04 +03:00
|
|
|
/*
|
|
|
|
* zfs_crypto_create() requires the parent name. Get it
|
|
|
|
* by truncating the fsname copy stored in namebuf.
|
|
|
|
*/
|
|
|
|
cp = strrchr(namebuf, '/');
|
|
|
|
if (cp != NULL)
|
|
|
|
*cp = '\0';
|
|
|
|
|
|
|
|
if (!raw && zfs_crypto_create(hdl, namebuf, voprops, NULL,
|
|
|
|
B_FALSE, wkeydata_out, wkeylen_out) != 0) {
|
|
|
|
fnvlist_free(voprops);
|
|
|
|
ret = zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
2017-05-10 02:21:09 +03:00
|
|
|
/* second pass: process "-o" properties */
|
|
|
|
fnvlist_merge(*oxprops, voprops);
|
|
|
|
fnvlist_free(voprops);
|
|
|
|
} else {
|
|
|
|
/* override props on child dataset are inherited */
|
|
|
|
nvp = NULL;
|
|
|
|
while ((nvp = nvlist_next_nvpair(oprops, nvp)) != NULL) {
|
|
|
|
const char *name = nvpair_name(nvp);
|
|
|
|
fnvlist_add_boolean(*oxprops, name);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
error:
|
2017-10-13 20:09:04 +03:00
|
|
|
if (zhp != NULL)
|
|
|
|
zfs_close(zhp);
|
|
|
|
if (zpool_hdl != NULL)
|
|
|
|
zpool_close(zpool_hdl);
|
2017-05-10 02:21:09 +03:00
|
|
|
fnvlist_free(oprops);
|
|
|
|
return (ret);
|
|
|
|
}
|
|
|
|
|
2008-11-20 23:01:55 +03:00
|
|
|
/*
|
|
|
|
* Restores a backup of tosnap from the file descriptor specified by infd.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
zfs_receive_one(libzfs_handle_t *hdl, int infd, const char *tosnap,
|
2015-12-22 04:31:57 +03:00
|
|
|
const char *originsnap, recvflags_t *flags, dmu_replay_record_t *drr,
|
|
|
|
dmu_replay_record_t *drr_noswap, const char *sendfs, nvlist_t *stream_nv,
|
2020-04-23 20:06:57 +03:00
|
|
|
avl_tree_t *stream_avl, char **top_zfs,
|
|
|
|
const char *finalsnap, nvlist_t *cmdprops)
|
2008-11-20 23:01:55 +03:00
|
|
|
{
|
|
|
|
time_t begin_time;
|
2010-05-29 00:45:14 +04:00
|
|
|
int ioctl_err, ioctl_errno, err;
|
2008-11-20 23:01:55 +03:00
|
|
|
char *cp;
|
|
|
|
struct drr_begin *drrb = &drr->drr_u.drr_begin;
|
|
|
|
char errbuf[1024];
|
2010-05-29 00:45:14 +04:00
|
|
|
const char *chopprefix;
|
2008-11-20 23:01:55 +03:00
|
|
|
boolean_t newfs = B_FALSE;
|
2020-10-03 03:47:09 +03:00
|
|
|
boolean_t stream_wantsnewfs, stream_resumingnewfs;
|
2016-06-10 03:04:12 +03:00
|
|
|
boolean_t newprops = B_FALSE;
|
|
|
|
uint64_t read_bytes = 0;
|
|
|
|
uint64_t errflags = 0;
|
2008-11-20 23:01:55 +03:00
|
|
|
uint64_t parent_snapguid = 0;
|
|
|
|
prop_changelist_t *clp = NULL;
|
2008-12-03 23:09:06 +03:00
|
|
|
nvlist_t *snapprops_nvlist = NULL;
|
2019-02-15 23:41:38 +03:00
|
|
|
nvlist_t *snapholds_nvlist = NULL;
|
2010-05-29 00:45:14 +04:00
|
|
|
zprop_errflags_t prop_errflags;
|
2016-06-10 03:04:12 +03:00
|
|
|
nvlist_t *prop_errors = NULL;
|
2010-05-29 00:45:14 +04:00
|
|
|
boolean_t recursive;
|
2016-06-09 22:24:29 +03:00
|
|
|
char *snapname = NULL;
|
2016-06-10 03:04:12 +03:00
|
|
|
char destsnap[MAXPATHLEN * 2];
|
|
|
|
char origin[MAXNAMELEN];
|
|
|
|
char name[MAXPATHLEN];
|
2017-08-14 20:36:48 +03:00
|
|
|
char tmp_keylocation[MAXNAMELEN];
|
2017-05-10 02:21:09 +03:00
|
|
|
nvlist_t *rcvprops = NULL; /* props received from the send stream */
|
|
|
|
nvlist_t *oxprops = NULL; /* override (-o) and exclude (-x) props */
|
|
|
|
nvlist_t *origprops = NULL; /* original props (if destination exists) */
|
|
|
|
zfs_type_t type;
|
2018-01-31 02:54:33 +03:00
|
|
|
boolean_t toplevel = B_FALSE;
|
2017-05-10 02:21:09 +03:00
|
|
|
boolean_t zoned = B_FALSE;
|
2018-02-12 23:28:59 +03:00
|
|
|
boolean_t hastoken = B_FALSE;
|
2019-06-19 19:48:13 +03:00
|
|
|
boolean_t redacted;
|
2017-10-13 20:09:04 +03:00
|
|
|
uint8_t *wkeydata = NULL;
|
|
|
|
uint_t wkeylen = 0;
|
2008-11-20 23:01:55 +03:00
|
|
|
|
|
|
|
begin_time = time(NULL);
|
2016-06-10 03:04:12 +03:00
|
|
|
bzero(origin, MAXNAMELEN);
|
2017-08-14 20:36:48 +03:00
|
|
|
bzero(tmp_keylocation, MAXNAMELEN);
|
2008-11-20 23:01:55 +03:00
|
|
|
|
|
|
|
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
|
|
|
|
"cannot receive"));
|
|
|
|
|
2010-05-29 00:45:14 +04:00
|
|
|
recursive = (nvlist_lookup_boolean(stream_nv, "not_recursive") ==
|
|
|
|
ENOENT);
|
|
|
|
|
2019-02-15 23:41:38 +03:00
|
|
|
/* Did the user request holds be skipped via zfs recv -k? */
|
|
|
|
boolean_t holds = flags->holds && !flags->skipholds;
|
|
|
|
|
2008-11-20 23:01:55 +03:00
|
|
|
if (stream_avl != NULL) {
|
2017-08-14 20:36:48 +03:00
|
|
|
char *keylocation = NULL;
|
2016-10-07 20:05:06 +03:00
|
|
|
nvlist_t *lookup = NULL;
|
2008-12-03 23:09:06 +03:00
|
|
|
nvlist_t *fs = fsavl_find(stream_avl, drrb->drr_toguid,
|
|
|
|
&snapname);
|
2008-11-20 23:01:55 +03:00
|
|
|
|
|
|
|
(void) nvlist_lookup_uint64(fs, "parentfromsnap",
|
|
|
|
&parent_snapguid);
|
2017-05-10 02:21:09 +03:00
|
|
|
err = nvlist_lookup_nvlist(fs, "props", &rcvprops);
|
2016-06-10 03:04:12 +03:00
|
|
|
if (err) {
|
2021-01-14 20:53:09 +03:00
|
|
|
rcvprops = fnvlist_alloc();
|
2016-06-10 03:04:12 +03:00
|
|
|
newprops = B_TRUE;
|
|
|
|
}
|
2008-11-20 23:01:55 +03:00
|
|
|
|
2017-08-14 20:36:48 +03:00
|
|
|
/*
|
|
|
|
* The keylocation property may only be set on encryption roots,
|
|
|
|
* but this dataset might not become an encryption root until
|
2019-04-15 05:06:34 +03:00
|
|
|
* recv_fix_encryption_hierarchy() is called. That function
|
2017-08-14 20:36:48 +03:00
|
|
|
* will fixup the keylocation anyway, so we temporarily unset
|
|
|
|
* the keylocation for now to avoid any errors from the receive
|
|
|
|
* ioctl.
|
|
|
|
*/
|
|
|
|
err = nvlist_lookup_string(rcvprops,
|
|
|
|
zfs_prop_to_name(ZFS_PROP_KEYLOCATION), &keylocation);
|
|
|
|
if (err == 0) {
|
|
|
|
strcpy(tmp_keylocation, keylocation);
|
|
|
|
(void) nvlist_remove_all(rcvprops,
|
|
|
|
zfs_prop_to_name(ZFS_PROP_KEYLOCATION));
|
|
|
|
}
|
|
|
|
|
2011-11-17 22:14:36 +04:00
|
|
|
if (flags->canmountoff) {
|
2021-01-14 20:53:09 +03:00
|
|
|
fnvlist_add_uint64(rcvprops,
|
|
|
|
zfs_prop_to_name(ZFS_PROP_CANMOUNT), 0);
|
2019-02-15 23:41:38 +03:00
|
|
|
} else if (newprops) { /* nothing in rcvprops, eliminate it */
|
2021-01-14 20:53:09 +03:00
|
|
|
fnvlist_free(rcvprops);
|
2019-02-15 23:41:38 +03:00
|
|
|
rcvprops = NULL;
|
|
|
|
newprops = B_FALSE;
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
2016-10-07 20:05:06 +03:00
|
|
|
if (0 == nvlist_lookup_nvlist(fs, "snapprops", &lookup)) {
|
2021-01-14 20:53:09 +03:00
|
|
|
snapprops_nvlist = fnvlist_lookup_nvlist(lookup,
|
|
|
|
snapname);
|
2016-10-07 20:05:06 +03:00
|
|
|
}
|
2019-02-15 23:41:38 +03:00
|
|
|
if (holds) {
|
|
|
|
if (0 == nvlist_lookup_nvlist(fs, "snapholds",
|
|
|
|
&lookup)) {
|
2021-01-14 20:53:09 +03:00
|
|
|
snapholds_nvlist = fnvlist_lookup_nvlist(
|
|
|
|
lookup, snapname);
|
2019-02-15 23:41:38 +03:00
|
|
|
}
|
|
|
|
}
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
|
|
|
|
2010-05-29 00:45:14 +04:00
|
|
|
cp = NULL;
|
|
|
|
|
2008-11-20 23:01:55 +03:00
|
|
|
/*
|
|
|
|
* Determine how much of the snapshot name stored in the stream
|
|
|
|
* we are going to tack on to the name they specified on the
|
|
|
|
* command line, and how much we are going to chop off.
|
|
|
|
*
|
|
|
|
* If they specified a snapshot, chop the entire name stored in
|
|
|
|
* the stream.
|
|
|
|
*/
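/*
 * Illustrative sketch (assuming a simple, non-recursive stream whose
 * snapshot is "poolA/a/b@s" and a target of "poolB/dst"): with -d the
 * result is "poolB/dst/a/b@s" (only the pool name is chopped); with -e it
 * is "poolB/dst/b@s" (only the tail is kept).
 */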
|
2011-11-17 22:14:36 +04:00
|
|
|
if (flags->istail) {
|
2010-05-29 00:45:14 +04:00
|
|
|
/*
|
|
|
|
* A filesystem was specified with -e. We want to tack on only
|
|
|
|
* the tail of the sent snapshot path.
|
|
|
|
*/
|
|
|
|
if (strchr(tosnap, '@')) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
|
|
|
|
"argument - snapshot not allowed with -e"));
|
2016-06-10 03:04:12 +03:00
|
|
|
err = zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
|
|
|
|
goto out;
|
2010-05-29 00:45:14 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
chopprefix = strrchr(sendfs, '/');
|
|
|
|
|
|
|
|
if (chopprefix == NULL) {
|
|
|
|
/*
|
|
|
|
* The tail is the poolname, so we need to
|
|
|
|
* prepend a path separator.
|
|
|
|
*/
|
|
|
|
int len = strlen(drrb->drr_toname);
|
|
|
|
cp = malloc(len + 2);
|
|
|
|
cp[0] = '/';
|
|
|
|
(void) strcpy(&cp[1], drrb->drr_toname);
|
|
|
|
chopprefix = cp;
|
|
|
|
} else {
|
|
|
|
chopprefix = drrb->drr_toname + (chopprefix - sendfs);
|
|
|
|
}
|
2011-11-17 22:14:36 +04:00
|
|
|
} else if (flags->isprefix) {
|
2008-11-20 23:01:55 +03:00
|
|
|
/*
|
2010-05-29 00:45:14 +04:00
|
|
|
* A filesystem was specified with -d. We want to tack on
|
|
|
|
* everything but the first element of the sent snapshot path
|
|
|
|
* (all but the pool name).
|
2008-11-20 23:01:55 +03:00
|
|
|
*/
|
|
|
|
if (strchr(tosnap, '@')) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
|
|
|
|
"argument - snapshot not allowed with -d"));
|
2016-06-10 03:04:12 +03:00
|
|
|
err = zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
|
|
|
|
goto out;
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
2010-05-29 00:45:14 +04:00
|
|
|
|
|
|
|
chopprefix = strchr(drrb->drr_toname, '/');
|
|
|
|
if (chopprefix == NULL)
|
|
|
|
chopprefix = strchr(drrb->drr_toname, '@');
|
2008-11-20 23:01:55 +03:00
|
|
|
} else if (strchr(tosnap, '@') == NULL) {
|
|
|
|
/*
|
2010-05-29 00:45:14 +04:00
|
|
|
* If a filesystem was specified without -d or -e, we want to
|
|
|
|
* tack on everything after the fs specified by 'zfs send'.
|
2008-11-20 23:01:55 +03:00
|
|
|
*/
|
2010-05-29 00:45:14 +04:00
|
|
|
chopprefix = drrb->drr_toname + strlen(sendfs);
|
|
|
|
} else {
|
|
|
|
/* A snapshot was specified as an exact path (no -d or -e). */
|
|
|
|
if (recursive) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"cannot specify snapshot name for multi-snapshot "
|
|
|
|
"stream"));
|
2016-06-10 03:04:12 +03:00
|
|
|
err = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
|
|
|
|
goto out;
|
2010-05-29 00:45:14 +04:00
|
|
|
}
|
|
|
|
chopprefix = drrb->drr_toname + strlen(drrb->drr_toname);
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
2010-05-29 00:45:14 +04:00
|
|
|
|
|
|
|
ASSERT(strstr(drrb->drr_toname, sendfs) == drrb->drr_toname);
|
2018-12-04 20:38:55 +03:00
|
|
|
ASSERT(chopprefix > drrb->drr_toname || strchr(sendfs, '/') == NULL);
|
|
|
|
ASSERT(chopprefix <= drrb->drr_toname + strlen(drrb->drr_toname) ||
|
|
|
|
strchr(sendfs, '/') == NULL);
|
2010-05-29 00:45:14 +04:00
|
|
|
ASSERT(chopprefix[0] == '/' || chopprefix[0] == '@' ||
|
|
|
|
chopprefix[0] == '\0');
|
2008-11-20 23:01:55 +03:00
|
|
|
|
|
|
|
/*
|
2016-06-10 03:04:12 +03:00
|
|
|
* Determine name of destination snapshot.
|
2008-11-20 23:01:55 +03:00
|
|
|
*/
|
2016-10-14 00:02:07 +03:00
|
|
|
(void) strlcpy(destsnap, tosnap, sizeof (destsnap));
|
2016-06-10 03:04:12 +03:00
|
|
|
(void) strlcat(destsnap, chopprefix, sizeof (destsnap));
|
2010-05-29 00:45:14 +04:00
|
|
|
free(cp);
|
2016-06-10 03:04:12 +03:00
|
|
|
if (!zfs_name_valid(destsnap, ZFS_TYPE_SNAPSHOT)) {
|
|
|
|
err = zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
|
|
|
|
goto out;
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2016-06-10 03:04:12 +03:00
|
|
|
* Determine the name of the origin snapshot.
|
2008-11-20 23:01:55 +03:00
|
|
|
*/
|
2017-01-27 22:47:54 +03:00
|
|
|
if (originsnap) {
|
2018-04-04 20:16:47 +03:00
|
|
|
(void) strlcpy(origin, originsnap, sizeof (origin));
|
2017-01-27 22:47:54 +03:00
|
|
|
if (flags->verbose)
|
|
|
|
(void) printf("using provided clone origin %s\n",
|
|
|
|
origin);
|
|
|
|
} else if (drrb->drr_flags & DRR_FLAG_CLONE) {
|
2016-06-10 03:04:12 +03:00
|
|
|
if (guid_to_name(hdl, destsnap,
|
|
|
|
drrb->drr_fromguid, B_FALSE, origin) != 0) {
|
2008-11-20 23:01:55 +03:00
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"local origin for clone %s does not exist"),
|
2016-06-10 03:04:12 +03:00
|
|
|
destsnap);
|
|
|
|
err = zfs_error(hdl, EZFS_NOENT, errbuf);
|
|
|
|
goto out;
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
2011-11-17 22:14:36 +04:00
|
|
|
if (flags->verbose)
|
2016-06-10 03:04:12 +03:00
|
|
|
(void) printf("found clone origin %s\n", origin);
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
|
|
|
|
2020-04-23 20:06:57 +03:00
|
|
|
if ((DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
|
Deprecate deduplicated send streams
Dedup send can only deduplicate over the set of blocks in the send
command being invoked, and it does not take advantage of the dedup table
to do so. This is a very common misconception among not only users, but
developers, and makes the feature seem more useful than it is. As a
result, many users are using the feature but not getting any benefit
from it.
Dedup send requires a nontrivial expenditure of memory and CPU to
operate, especially if the dataset(s) being sent is (are) not already
using a dedup-strength checksum.
Dedup send adds developer burden. It expands the test matrix when
developing new features, causing bugs in released code, and delaying
development efforts by forcing more testing to be done.
As a result, we are deprecating the use of `zfs send -D` and receiving
of such streams. This change adds a warning to the man page, and also
prints the warning whenever dedup send or receive are used.
In a future release, we plan to:
1. remove the kernel code for generating deduplicated streams
2. make `zfs send -D` generate regular, non-deduplicated streams
3. remove the kernel code for receiving deduplicated streams
4. make `zfs receive` of deduplicated streams process them in userland
to "re-duplicate" them, so that they can still be received.
Reviewed-by: Paul Dagnelie <pcd@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: George Melikov <mail@gmelikov.ru>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
Closes #7887
Closes #10117
2020-03-18 23:31:10 +03:00
|
|
|
DMU_BACKUP_FEATURE_DEDUP)) {
|
|
|
|
(void) fprintf(stderr,
|
2020-04-23 20:06:57 +03:00
|
|
|
gettext("ERROR: \"zfs receive\" no longer supports "
|
|
|
|
"deduplicated send streams. Use\n"
|
|
|
|
"the \"zstream redup\" command to convert this stream "
|
|
|
|
"to a regular,\n"
|
|
|
|
"non-deduplicated stream.\n"));
|
|
|
|
err = zfs_error(hdl, EZFS_NOTSUP, errbuf);
|
|
|
|
goto out;
|
2020-03-18 23:31:10 +03:00
|
|
|
}
|
|
|
|
|
2016-01-07 00:22:48 +03:00
|
|
|
boolean_t resuming = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
|
|
|
|
DMU_BACKUP_FEATURE_RESUMING;
|
2017-08-14 20:36:48 +03:00
|
|
|
boolean_t raw = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
|
|
|
|
DMU_BACKUP_FEATURE_RAW;
|
2017-08-24 02:54:24 +03:00
|
|
|
boolean_t embedded = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
|
|
|
|
DMU_BACKUP_FEATURE_EMBED_DATA;
|
2010-08-26 20:52:39 +04:00
|
|
|
stream_wantsnewfs = (drrb->drr_fromguid == 0 ||
|
2016-01-07 00:22:48 +03:00
|
|
|
(drrb->drr_flags & DRR_FLAG_CLONE) || originsnap) && !resuming;
|
2020-10-03 03:47:09 +03:00
|
|
|
stream_resumingnewfs = (drrb->drr_fromguid == 0 ||
|
|
|
|
(drrb->drr_flags & DRR_FLAG_CLONE) || originsnap) && resuming;
|
2008-11-20 23:01:55 +03:00
|
|
|
|
|
|
|
if (stream_wantsnewfs) {
|
|
|
|
/*
|
|
|
|
* if the parent fs does not exist, look for it based on
|
|
|
|
* the parent snap GUID
|
|
|
|
*/
|
|
|
|
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
|
|
|
|
"cannot receive new filesystem stream"));
|
|
|
|
|
2016-06-10 03:04:12 +03:00
|
|
|
(void) strcpy(name, destsnap);
|
|
|
|
cp = strrchr(name, '/');
|
2008-11-20 23:01:55 +03:00
|
|
|
if (cp)
|
|
|
|
*cp = '\0';
|
|
|
|
if (cp &&
|
2016-06-10 03:04:12 +03:00
|
|
|
!zfs_dataset_exists(hdl, name, ZFS_TYPE_DATASET)) {
|
2016-06-16 00:28:36 +03:00
|
|
|
char suffix[ZFS_MAX_DATASET_NAME_LEN];
|
2016-06-10 03:04:12 +03:00
|
|
|
(void) strcpy(suffix, strrchr(destsnap, '/'));
|
|
|
|
if (guid_to_name(hdl, name, parent_snapguid,
|
|
|
|
B_FALSE, destsnap) == 0) {
|
|
|
|
*strchr(destsnap, '@') = '\0';
|
|
|
|
(void) strcat(destsnap, suffix);
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
/*
|
2019-09-03 20:56:55 +03:00
|
|
|
* If the fs does not exist, look for it based on the
|
|
|
|
* fromsnap GUID.
|
2008-11-20 23:01:55 +03:00
|
|
|
*/
|
2019-09-03 20:56:55 +03:00
|
|
|
if (resuming) {
|
|
|
|
(void) snprintf(errbuf, sizeof (errbuf),
|
|
|
|
dgettext(TEXT_DOMAIN,
|
|
|
|
"cannot receive resume stream"));
|
|
|
|
} else {
|
|
|
|
(void) snprintf(errbuf, sizeof (errbuf),
|
|
|
|
dgettext(TEXT_DOMAIN,
|
|
|
|
"cannot receive incremental stream"));
|
|
|
|
}
|
2008-11-20 23:01:55 +03:00
|
|
|
|
2016-06-10 03:04:12 +03:00
|
|
|
(void) strcpy(name, destsnap);
|
|
|
|
*strchr(name, '@') = '\0';
|
2008-11-20 23:01:55 +03:00
|
|
|
|
2010-05-29 00:45:14 +04:00
|
|
|
/*
|
|
|
|
* If the exact receive path was specified and this is the
|
|
|
|
* topmost path in the stream, then if the fs does not exist we
|
|
|
|
* should look no further.
|
|
|
|
*/
|
2011-11-17 22:14:36 +04:00
|
|
|
if ((flags->isprefix || (*(chopprefix = drrb->drr_toname +
|
2010-05-29 00:45:14 +04:00
|
|
|
strlen(sendfs)) != '\0' && *chopprefix != '@')) &&
|
2016-06-10 03:04:12 +03:00
|
|
|
!zfs_dataset_exists(hdl, name, ZFS_TYPE_DATASET)) {
|
2016-06-16 00:28:36 +03:00
|
|
|
char snap[ZFS_MAX_DATASET_NAME_LEN];
|
2016-06-10 03:04:12 +03:00
|
|
|
(void) strcpy(snap, strchr(destsnap, '@'));
|
|
|
|
if (guid_to_name(hdl, name, drrb->drr_fromguid,
|
|
|
|
B_FALSE, destsnap) == 0) {
|
|
|
|
*strchr(destsnap, '@') = '\0';
|
|
|
|
(void) strcat(destsnap, snap);
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-06-10 03:04:12 +03:00
|
|
|
(void) strcpy(name, destsnap);
|
|
|
|
*strchr(name, '@') = '\0';
|
2008-11-20 23:01:55 +03:00
|
|
|
|
2019-06-19 19:48:13 +03:00
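/*
 * A minimal, self-contained sketch of the feature-flag test performed just
 * below: the DRR_BEGIN record carries a 64-bit drr_versioninfo word, part of
 * which is a bitmask of stream features (redaction among them).  The macro
 * and bit value here are illustrative placeholders, not the real
 * DMU_GET_FEATUREFLAGS() or DMU_BACKUP_FEATURE_REDACTED definitions.
 */
#include <stdint.h>
#include <stdio.h>

/* hypothetical layout: low bits of versioninfo hold the feature mask */
#define	EX_GET_FEATUREFLAGS(vi)	((uint64_t)(vi) & 0xffffffULL)
#define	EX_FEATURE_REDACTED	(1ULL << 20)	/* placeholder bit */

int
main(void)
{
	uint64_t versioninfo = EX_FEATURE_REDACTED;	/* as if read from DRR_BEGIN */
	int redacted = (EX_GET_FEATUREFLAGS(versioninfo) &
	    EX_FEATURE_REDACTED) != 0;

	(void) printf("stream is %sredacted\n", redacted ? "" : "not ");
	return (0);
}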
|
|
|
redacted = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
|
|
|
|
DMU_BACKUP_FEATURE_REDACTED;
|
|
|
|
|
2016-06-10 03:04:12 +03:00
|
|
|
if (zfs_dataset_exists(hdl, name, ZFS_TYPE_DATASET)) {
|
|
|
|
zfs_cmd_t zc = {"\0"};
|
2008-11-20 23:01:55 +03:00
|
|
|
zfs_handle_t *zhp;
|
2018-02-21 23:30:11 +03:00
|
|
|
boolean_t encrypted;
|
2010-05-29 00:45:14 +04:00
|
|
|
|
2016-06-10 03:04:12 +03:00
|
|
|
(void) strcpy(zc.zc_name, name);
|
|
|
|
|
2008-11-20 23:01:55 +03:00
|
|
|
/*
|
2016-01-07 00:22:48 +03:00
|
|
|
* Destination fs exists. It must be one of these cases:
|
|
|
|
* - an incremental send stream
|
|
|
|
* - the stream specifies a new fs (full stream or clone)
|
|
|
|
* and they want us to blow away the existing fs (and
|
|
|
|
* have therefore specified -F and removed any snapshots)
|
|
|
|
* - we are resuming a failed receive.
|
2008-11-20 23:01:55 +03:00
|
|
|
*/
|
|
|
|
if (stream_wantsnewfs) {
|
2019-02-09 02:44:15 +03:00
|
|
|
boolean_t is_volume = drrb->drr_type == DMU_OST_ZVOL;
|
2011-11-17 22:14:36 +04:00
|
|
|
if (!flags->force) {
|
2008-11-20 23:01:55 +03:00
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"destination '%s' exists\n"
|
2016-06-10 03:04:12 +03:00
|
|
|
"must specify -F to overwrite it"), name);
|
|
|
|
err = zfs_error(hdl, EZFS_EXISTS, errbuf);
|
|
|
|
goto out;
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
2019-10-24 03:29:43 +03:00
|
|
|
if (zfs_ioctl(hdl, ZFS_IOC_SNAPSHOT_LIST_NEXT,
|
2008-11-20 23:01:55 +03:00
|
|
|
&zc) == 0) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"destination has snapshots (eg. %s)\n"
|
|
|
|
"must destroy them to overwrite it"),
|
2018-12-05 20:33:52 +03:00
|
|
|
zc.zc_name);
|
2016-06-10 03:04:12 +03:00
|
|
|
err = zfs_error(hdl, EZFS_EXISTS, errbuf);
|
|
|
|
goto out;
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
2019-02-09 02:44:15 +03:00
|
|
|
if (is_volume && strrchr(name, '/') == NULL) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"destination %s is the root dataset\n"
|
|
|
|
"cannot overwrite with a ZVOL"),
|
|
|
|
name);
|
|
|
|
err = zfs_error(hdl, EZFS_EXISTS, errbuf);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
if (is_volume &&
|
2019-10-24 03:29:43 +03:00
|
|
|
zfs_ioctl(hdl, ZFS_IOC_DATASET_LIST_NEXT,
|
2019-02-09 02:44:15 +03:00
|
|
|
&zc) == 0) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"destination has children (eg. %s)\n"
|
|
|
|
"cannot overwrite with a ZVOL"),
|
|
|
|
zc.zc_name);
|
|
|
|
err = zfs_error(hdl, EZFS_WRONG_PARENT, errbuf);
|
|
|
|
goto out;
|
|
|
|
}
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
|
|
|
|
2016-06-10 03:04:12 +03:00
|
|
|
if ((zhp = zfs_open(hdl, name,
|
2008-11-20 23:01:55 +03:00
|
|
|
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME)) == NULL) {
|
2016-06-10 03:04:12 +03:00
|
|
|
err = -1;
|
|
|
|
goto out;
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
if (stream_wantsnewfs &&
|
|
|
|
zhp->zfs_dmustats.dds_origin[0]) {
|
|
|
|
zfs_close(zhp);
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"destination '%s' is a clone\n"
|
2016-06-10 03:04:12 +03:00
|
|
|
"must destroy it to overwrite it"), name);
|
|
|
|
err = zfs_error(hdl, EZFS_EXISTS, errbuf);
|
|
|
|
goto out;
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
|
|
|
|
Native Encryption for ZFS on Linux
This change incorporates three major pieces:
The first change is a keystore that manages wrapping
and encryption keys for encrypted datasets. These
commands mostly involve manipulating the new
DSL Crypto Key ZAP Objects that live in the MOS. Each
encrypted dataset has its own DSL Crypto Key that is
protected with a user's key. This level of indirection
allows users to change their keys without re-encrypting
their entire datasets. The change implements the new
subcommands "zfs load-key", "zfs unload-key" and
"zfs change-key" which allow the user to manage their
encryption keys and settings. In addition, several new
flags and properties have been added to allow dataset
creation and to make mounting and unmounting more
convenient.
The second piece of this patch provides the ability to
encrypt, decrypt, and authenticate protected datasets.
Each object set maintains a Merkle tree of Message
Authentication Codes that protect the lower layers,
similarly to how checksums are maintained. This part
impacts the zio layer, which handles the actual
encryption and generation of MACs, as well as the ARC
and DMU, which need to be able to handle encrypted
buffers and protected data.
The last addition is the ability to do raw, encrypted
sends and receives. The idea here is to send raw
encrypted and compressed data and receive it exactly
as is on a backup system. This means that the dataset
on the receiving system is protected using the same
user key that is in use on the sending side. By doing
so, datasets can be efficiently backed up to an
untrusted system without fear of data being
compromised.
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Signed-off-by: Tom Caputi <tcaputi@datto.com>
Closes #494
Closes #5769
2017-08-14 20:36:48 +03:00
|
|
|
/*
|
2018-02-21 23:30:11 +03:00
|
|
|
* Raw sends can not be performed as an incremental on top
|
2019-09-03 03:53:27 +03:00
|
|
|
* of existing unencrypted datasets. zfs recv -F can't be
|
2018-02-21 23:30:11 +03:00
|
|
|
* used to blow away an existing encrypted filesystem. This
|
|
|
|
* is because it would require the dsl dir to point to the
|
|
|
|
* new key (or lack of a key) and the old key at the same
|
|
|
|
* time. The -F flag may still be used for deleting
|
|
|
|
* intermediate snapshots that would otherwise prevent the
|
|
|
|
* receive from working.
|
2017-08-14 20:36:48 +03:00
|
|
|
*/
|
2018-02-21 23:30:11 +03:00
|
|
|
encrypted = zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) !=
|
|
|
|
ZIO_CRYPT_OFF;
|
|
|
|
if (!stream_wantsnewfs && !encrypted && raw) {
|
2017-08-14 20:36:48 +03:00
|
|
|
zfs_close(zhp);
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
2018-02-21 23:30:11 +03:00
|
|
|
"cannot perform raw receive on top of "
|
|
|
|
"existing unencrypted dataset"));
|
2017-08-14 20:36:48 +03:00
|
|
|
err = zfs_error(hdl, EZFS_BADRESTORE, errbuf);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2018-02-21 23:30:11 +03:00
|
|
|
if (stream_wantsnewfs && flags->force &&
|
|
|
|
((raw && !encrypted) || encrypted)) {
|
|
|
|
zfs_close(zhp);
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"zfs receive -F cannot be used to destroy an "
|
|
|
|
"encrypted filesystem or overwrite an "
|
|
|
|
"unencrypted one with an encrypted one"));
|
|
|
|
err = zfs_error(hdl, EZFS_BADRESTORE, errbuf);
|
|
|
|
goto out;
|
|
|
|
}
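/*
 * A condensed sketch of the two encryption checks above, pulled out as a
 * pure predicate for readability.  This is an editor's illustration of the
 * rules the comments describe, not a function that exists in libzfs.
 */
#include <stdbool.h>
#include <stdio.h>

static const char *
recv_encryption_conflict(bool wants_new_fs, bool force, bool raw,
    bool dest_encrypted)
{
	/* raw incremental on top of an existing unencrypted dataset */
	if (!wants_new_fs && !dest_encrypted && raw)
		return ("cannot perform raw receive on top of "
		    "existing unencrypted dataset");
	/* -F may not destroy an encrypted fs or re-encrypt an unencrypted one */
	if (wants_new_fs && force &&
	    (dest_encrypted || (raw && !dest_encrypted)))
		return ("zfs receive -F cannot be used to destroy an "
		    "encrypted filesystem or overwrite an unencrypted "
		    "one with an encrypted one");
	return (NULL);	/* no conflict */
}

int
main(void)
{
	const char *msg = recv_encryption_conflict(false, false, true, false);

	(void) printf("%s\n", msg != NULL ? msg : "ok");
	return (0);
}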
|
2017-08-14 20:36:48 +03:00
|
|
|
|
2011-11-17 22:14:36 +04:00
|
|
|
if (!flags->dryrun && zhp->zfs_type == ZFS_TYPE_FILESYSTEM &&
|
2020-10-03 03:47:09 +03:00
|
|
|
(stream_wantsnewfs || stream_resumingnewfs)) {
|
2008-11-20 23:01:55 +03:00
|
|
|
/* We can't do online recv in this case */
|
2020-03-17 20:08:32 +03:00
|
|
|
clp = changelist_gather(zhp, ZFS_PROP_NAME, 0,
|
|
|
|
flags->forceunmount ? MS_FORCE : 0);
|
2008-11-20 23:01:55 +03:00
|
|
|
if (clp == NULL) {
|
2009-08-18 22:43:27 +04:00
|
|
|
zfs_close(zhp);
|
2016-06-10 03:04:12 +03:00
|
|
|
err = -1;
|
|
|
|
goto out;
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
|
|
|
if (changelist_prefix(clp) != 0) {
|
|
|
|
changelist_free(clp);
|
2009-08-18 22:43:27 +04:00
|
|
|
zfs_close(zhp);
|
2016-06-10 03:04:12 +03:00
|
|
|
err = -1;
|
|
|
|
goto out;
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
|
|
|
}
|
2016-01-07 00:22:48 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If we are resuming a newfs, set newfs here so that we will
|
|
|
|
* mount it if the recv succeeds this time. We can tell
|
|
|
|
* that it was a newfs on the first recv because the fs
|
|
|
|
* itself will be inconsistent (if the fs existed when we
|
|
|
|
* did the first recv, we would have received it into
|
|
|
|
* .../%recv).
|
|
|
|
*/
|
|
|
|
if (resuming && zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT))
|
|
|
|
newfs = B_TRUE;
|
|
|
|
|
2017-05-10 02:21:09 +03:00
|
|
|
/* we want to know if we're zoned when validating -o|-x props */
|
|
|
|
zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);
|
|
|
|
|
2018-02-12 23:28:59 +03:00
|
|
|
/* may need this info later, get it now we have zhp around */
|
|
|
|
if (zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN, NULL, 0,
|
|
|
|
NULL, NULL, 0, B_TRUE) == 0)
|
|
|
|
hastoken = B_TRUE;
|
|
|
|
|
2017-05-10 02:21:09 +03:00
|
|
|
/* gather existing properties on destination */
|
|
|
|
origprops = fnvlist_alloc();
|
|
|
|
fnvlist_merge(origprops, zhp->zfs_props);
|
|
|
|
fnvlist_merge(origprops, zhp->zfs_user_props);
|
|
|
|
|
2008-11-20 23:01:55 +03:00
|
|
|
zfs_close(zhp);
|
|
|
|
} else {
|
2017-08-14 20:36:48 +03:00
|
|
|
zfs_handle_t *zhp;
|
|
|
|
|
2008-11-20 23:01:55 +03:00
|
|
|
/*
|
|
|
|
* Destination filesystem does not exist. Therefore we better
|
|
|
|
* be creating a new filesystem (either from a full backup, or
|
|
|
|
* a clone). It would therefore be invalid if the user
|
|
|
|
* specified only the pool name (i.e. if the destination name
|
|
|
|
* contained no slash character).
|
|
|
|
*/
|
2016-07-19 19:24:24 +03:00
|
|
|
cp = strrchr(name, '/');
|
|
|
|
|
|
|
|
if (!stream_wantsnewfs || cp == NULL) {
|
2008-11-20 23:01:55 +03:00
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
2016-06-10 03:04:12 +03:00
|
|
|
"destination '%s' does not exist"), name);
|
|
|
|
err = zfs_error(hdl, EZFS_NOENT, errbuf);
|
|
|
|
goto out;
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Trim off the final dataset component so we perform the
|
|
|
|
* recvbackup ioctl to the filesystem's parent.
|
|
|
|
*/
|
|
|
|
*cp = '\0';
|
|
|
|
|
2011-11-17 22:14:36 +04:00
|
|
|
if (flags->isprefix && !flags->istail && !flags->dryrun &&
|
2016-06-10 03:04:12 +03:00
|
|
|
create_parents(hdl, destsnap, strlen(tosnap)) != 0) {
|
|
|
|
err = zfs_error(hdl, EZFS_BADRESTORE, errbuf);
|
|
|
|
goto out;
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
|
|
|
|
2019-02-09 02:44:15 +03:00
|
|
|
/* validate parent */
|
|
|
|
zhp = zfs_open(hdl, name, ZFS_TYPE_DATASET);
|
|
|
|
if (zhp == NULL) {
|
|
|
|
err = zfs_error(hdl, EZFS_BADRESTORE, errbuf);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
if (zfs_get_type(zhp) != ZFS_TYPE_FILESYSTEM) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"parent '%s' is not a filesystem"), name);
|
|
|
|
err = zfs_error(hdl, EZFS_WRONG_PARENT, errbuf);
|
|
|
|
zfs_close(zhp);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
zfs_close(zhp);
|
2017-08-14 20:36:48 +03:00
|
|
|
|
2008-11-20 23:01:55 +03:00
|
|
|
newfs = B_TRUE;
|
2017-08-14 20:36:48 +03:00
|
|
|
*cp = '/';
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
|
|
|
|
2011-11-17 22:14:36 +04:00
|
|
|
if (flags->verbose) {
|
2008-11-20 23:01:55 +03:00
|
|
|
(void) printf("%s %s stream of %s into %s\n",
|
2011-11-17 22:14:36 +04:00
|
|
|
flags->dryrun ? "would receive" : "receiving",
|
2008-11-20 23:01:55 +03:00
|
|
|
drrb->drr_fromguid ? "incremental" : "full",
|
2016-06-10 03:04:12 +03:00
|
|
|
drrb->drr_toname, destsnap);
|
2008-11-20 23:01:55 +03:00
|
|
|
(void) fflush(stdout);
|
|
|
|
}
|
|
|
|
|
2019-09-26 03:02:33 +03:00
|
|
|
/*
|
|
|
|
* If this is the top-level dataset, record it so we can use it
|
|
|
|
* for recursive operations later.
|
|
|
|
*/
|
|
|
|
if (top_zfs != NULL &&
|
|
|
|
(*top_zfs == NULL || strcmp(*top_zfs, name) == 0)) {
|
2018-01-31 02:54:33 +03:00
|
|
|
toplevel = B_TRUE;
|
2019-09-26 03:02:33 +03:00
|
|
|
if (*top_zfs == NULL)
|
|
|
|
*top_zfs = zfs_strdup(hdl, name);
|
|
|
|
}
|
|
|
|
|
2017-05-10 02:21:09 +03:00
|
|
|
if (drrb->drr_type == DMU_OST_ZVOL) {
|
|
|
|
type = ZFS_TYPE_VOLUME;
|
|
|
|
} else if (drrb->drr_type == DMU_OST_ZFS) {
|
|
|
|
type = ZFS_TYPE_FILESYSTEM;
|
|
|
|
} else {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"invalid record type: 0x%d"), drrb->drr_type);
|
|
|
|
err = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
|
|
|
|
goto out;
|
|
|
|
}
|
2017-10-13 20:09:04 +03:00
|
|
|
if ((err = zfs_setup_cmdline_props(hdl, type, name, zoned, recursive,
|
|
|
|
stream_wantsnewfs, raw, toplevel, rcvprops, cmdprops, origprops,
|
|
|
|
&oxprops, &wkeydata, &wkeylen, errbuf)) != 0)
|
2017-05-10 02:21:09 +03:00
|
|
|
goto out;
|
|
|
|
|
2019-06-20 22:29:51 +03:00
|
|
|
/*
|
|
|
|
* When sending with properties (zfs send -p), the encryption property
|
|
|
|
* is not included because it is a SETONCE property and therefore
|
|
|
|
* treated as read only. However, we are always able to determine its
|
|
|
|
* value because raw sends will include it in the DRR_BEGIN payload
|
|
|
|
* and non-raw sends with properties are not allowed for encrypted
|
|
|
|
* datasets. Therefore, if this is a non-raw properties stream, we can
|
|
|
|
* infer that the value should be ZIO_CRYPT_OFF and manually add that
|
|
|
|
* to the received properties.
|
|
|
|
*/
|
|
|
|
if (stream_wantsnewfs && !raw && rcvprops != NULL &&
|
|
|
|
!nvlist_exists(cmdprops, zfs_prop_to_name(ZFS_PROP_ENCRYPTION))) {
|
|
|
|
if (oxprops == NULL)
|
|
|
|
oxprops = fnvlist_alloc();
|
|
|
|
fnvlist_add_uint64(oxprops,
|
|
|
|
zfs_prop_to_name(ZFS_PROP_ENCRYPTION), ZIO_CRYPT_OFF);
|
|
|
|
}
|
|
|
|
|
2021-04-12 19:35:55 +03:00
|
|
|
if (flags->dryrun) {
|
|
|
|
void *buf = zfs_alloc(hdl, SPA_MAXBLOCKSIZE);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We have read the DRR_BEGIN record, but we have
|
|
|
|
* not yet read the payload. For non-dryrun sends
|
|
|
|
* this will be done by the kernel, so we must
|
|
|
|
* emulate that here, before attempting to read
|
|
|
|
* more records.
|
|
|
|
*/
|
|
|
|
err = recv_read(hdl, infd, buf, drr->drr_payloadlen,
|
|
|
|
flags->byteswap, NULL);
|
|
|
|
free(buf);
|
|
|
|
if (err != 0)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
err = recv_skip(hdl, infd, flags->byteswap);
|
|
|
|
goto out;
|
|
|
|
}
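/*
 * The dry-run path above must consume the DRR_BEGIN payload itself because
 * the kernel is never invoked.  Below is a generic, self-contained sketch of
 * the read-exactly-N-bytes pattern that a helper like recv_read() relies on
 * (recv_read() additionally folds the data into a running checksum, which is
 * omitted here).
 */
#include <errno.h>
#include <stddef.h>
#include <unistd.h>

static int
read_fully(int fd, void *buf, size_t len)
{
	char *p = buf;

	while (len > 0) {
		ssize_t n = read(fd, p, len);

		if (n < 0) {
			if (errno == EINTR)
				continue;	/* retry interrupted reads */
			return (-1);		/* real I/O error */
		}
		if (n == 0)
			return (-1);		/* unexpected EOF: short stream */
		p += n;
		len -= (size_t)n;
	}
	return (0);
}

int
main(void)
{
	char buf[16];

	/* consume exactly 16 bytes from stdin, as recv_read() would from infd */
	return (read_fully(0, buf, sizeof (buf)) == 0 ? 0 : 1);
}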
|
|
|
|
|
2017-10-13 20:09:04 +03:00
|
|
|
err = ioctl_err = lzc_receive_with_cmdprops(destsnap, rcvprops,
|
|
|
|
oxprops, wkeydata, wkeylen, origin, flags->force, flags->resumable,
|
2020-04-23 20:06:57 +03:00
|
|
|
raw, infd, drr_noswap, -1, &read_bytes, &errflags,
|
|
|
|
NULL, &prop_errors);
|
2016-06-10 03:04:12 +03:00
|
|
|
ioctl_errno = ioctl_err;
|
|
|
|
prop_errflags = errflags;
|
2010-05-29 00:45:14 +04:00
|
|
|
|
|
|
|
if (err == 0) {
|
|
|
|
nvpair_t *prop_err = NULL;
|
|
|
|
|
|
|
|
while ((prop_err = nvlist_next_nvpair(prop_errors,
|
|
|
|
prop_err)) != NULL) {
|
|
|
|
char tbuf[1024];
|
|
|
|
zfs_prop_t prop;
|
|
|
|
int intval;
|
|
|
|
|
|
|
|
prop = zfs_name_to_prop(nvpair_name(prop_err));
|
|
|
|
(void) nvpair_value_int32(prop_err, &intval);
|
|
|
|
if (strcmp(nvpair_name(prop_err),
|
|
|
|
ZPROP_N_MORE_ERRORS) == 0) {
|
|
|
|
trunc_prop_errs(intval);
|
|
|
|
break;
|
2016-06-09 22:24:29 +03:00
|
|
|
} else if (snapname == NULL || finalsnap == NULL ||
|
|
|
|
strcmp(finalsnap, snapname) == 0 ||
|
|
|
|
strcmp(nvpair_name(prop_err),
|
|
|
|
zfs_prop_to_name(ZFS_PROP_REFQUOTA)) != 0) {
|
|
|
|
/*
|
|
|
|
* Skip the special case of, for example,
|
|
|
|
* "refquota", errors on intermediate
|
|
|
|
* snapshots leading up to a final one.
|
|
|
|
* That's why we have all of the checks above.
|
|
|
|
*
|
|
|
|
* See zfs_ioctl.c's extract_delay_props() for
|
|
|
|
* a list of props which can fail on
|
|
|
|
* intermediate snapshots, but shouldn't
|
|
|
|
* affect the overall receive.
|
|
|
|
*/
|
2010-05-29 00:45:14 +04:00
|
|
|
(void) snprintf(tbuf, sizeof (tbuf),
|
|
|
|
dgettext(TEXT_DOMAIN,
|
|
|
|
"cannot receive %s property on %s"),
|
2016-06-10 03:04:12 +03:00
|
|
|
nvpair_name(prop_err), name);
|
2010-05-29 00:45:14 +04:00
|
|
|
zfs_setprop_error(hdl, prop, intval, tbuf);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
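/*
 * A self-contained illustration of the nvlist walk above: the kernel hands
 * back per-property error codes as an nvlist of int32 values keyed by
 * property name, which userland then turns into messages.  The property
 * names and errnos below are made up for illustration; link with -lnvpair.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <libnvpair.h>

int
main(void)
{
	nvlist_t *prop_errors;
	nvpair_t *pair = NULL;

	if (nvlist_alloc(&prop_errors, NV_UNIQUE_NAME, 0) != 0)
		return (1);
	/* pretend two properties could not be applied by the receive */
	(void) nvlist_add_int32(prop_errors, "refquota", ENOSPC);
	(void) nvlist_add_int32(prop_errors, "compression", EINVAL);

	while ((pair = nvlist_next_nvpair(prop_errors, pair)) != NULL) {
		int32_t err;

		(void) nvpair_value_int32(pair, &err);
		(void) printf("cannot receive %s property: %s\n",
		    nvpair_name(pair), strerror(err));
	}
	nvlist_free(prop_errors);
	return (0);
}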
|
|
|
|
|
2008-12-03 23:09:06 +03:00
|
|
|
if (err == 0 && snapprops_nvlist) {
|
2016-06-10 03:04:12 +03:00
|
|
|
zfs_cmd_t zc = {"\0"};
|
2008-12-03 23:09:06 +03:00
|
|
|
|
2016-06-10 03:04:12 +03:00
|
|
|
(void) strcpy(zc.zc_name, destsnap);
|
|
|
|
zc.zc_cookie = B_TRUE; /* received */
|
|
|
|
if (zcmd_write_src_nvlist(hdl, &zc, snapprops_nvlist) == 0) {
|
|
|
|
(void) zfs_ioctl(hdl, ZFS_IOC_SET_PROP, &zc);
|
|
|
|
zcmd_free_nvlists(&zc);
|
2008-12-03 23:09:06 +03:00
|
|
|
}
|
|
|
|
}
|
2019-02-15 23:41:38 +03:00
|
|
|
if (err == 0 && snapholds_nvlist) {
|
|
|
|
nvpair_t *pair;
|
|
|
|
nvlist_t *holds, *errors = NULL;
|
|
|
|
int cleanup_fd = -1;
|
|
|
|
|
|
|
|
VERIFY(0 == nvlist_alloc(&holds, 0, KM_SLEEP));
|
|
|
|
for (pair = nvlist_next_nvpair(snapholds_nvlist, NULL);
|
|
|
|
pair != NULL;
|
|
|
|
pair = nvlist_next_nvpair(snapholds_nvlist, pair)) {
|
2021-01-14 20:53:09 +03:00
|
|
|
fnvlist_add_string(holds, destsnap, nvpair_name(pair));
|
2019-02-15 23:41:38 +03:00
|
|
|
}
|
|
|
|
(void) lzc_hold(holds, cleanup_fd, &errors);
|
2021-01-14 20:53:09 +03:00
|
|
|
fnvlist_free(snapholds_nvlist);
|
|
|
|
fnvlist_free(holds);
|
2019-02-15 23:41:38 +03:00
|
|
|
}
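/*
 * A hedged sketch of the lzc_hold() call used above: holds are requested as
 * an nvlist mapping full snapshot names to hold tags.  The pool, snapshot,
 * and tag names here are invented for illustration; link with -lzfs_core and
 * -lnvpair, and note that placing holds requires privilege on a real pool.
 */
#include <stdio.h>
#include <libnvpair.h>
#include <libzfs_core.h>

int
main(void)
{
	nvlist_t *holds = fnvlist_alloc();
	nvlist_t *errlist = NULL;
	int err;

	if (libzfs_core_init() != 0)
		return (1);

	/* one entry per snapshot: "<snapshot>" -> "<tag>" */
	fnvlist_add_string(holds, "tank/data@backup-2021-01-01", "keep");

	/* -1: no cleanup fd, so the hold persists until explicitly released */
	err = lzc_hold(holds, -1, &errlist);
	(void) printf("lzc_hold returned %d\n", err);

	fnvlist_free(holds);
	if (errlist != NULL)
		fnvlist_free(errlist);
	libzfs_core_fini();
	return (err);
}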
|
2008-12-03 23:09:06 +03:00
|
|
|
|
2010-05-29 00:45:14 +04:00
|
|
|
if (err && (ioctl_errno == ENOENT || ioctl_errno == EEXIST)) {
|
2008-11-20 23:01:55 +03:00
|
|
|
/*
|
|
|
|
* It may be that this snapshot already exists,
|
|
|
|
* in which case we want to consume & ignore it
|
|
|
|
* rather than failing.
|
|
|
|
*/
|
|
|
|
avl_tree_t *local_avl;
|
|
|
|
nvlist_t *local_nv, *fs;
|
2016-06-10 03:04:12 +03:00
|
|
|
cp = strchr(destsnap, '@');
|
2008-11-20 23:01:55 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* XXX Do this faster by just iterating over snaps in
|
|
|
|
* this fs. Also if zc_value does not exist, we will
|
|
|
|
* get a strange "does not exist" error message.
|
|
|
|
*/
|
|
|
|
*cp = '\0';
|
2017-08-14 20:36:48 +03:00
|
|
|
if (gather_nvlist(hdl, destsnap, NULL, NULL, B_FALSE, B_TRUE,
|
2021-04-11 22:05:35 +03:00
|
|
|
B_FALSE, B_FALSE, B_FALSE, B_FALSE, B_FALSE, B_FALSE,
|
|
|
|
B_TRUE, &local_nv, &local_avl) == 0) {
|
2008-11-20 23:01:55 +03:00
|
|
|
*cp = '@';
|
|
|
|
fs = fsavl_find(local_avl, drrb->drr_toguid, NULL);
|
|
|
|
fsavl_destroy(local_avl);
|
2021-01-14 20:53:09 +03:00
|
|
|
fnvlist_free(local_nv);
|
2008-11-20 23:01:55 +03:00
|
|
|
|
|
|
|
if (fs != NULL) {
|
2011-11-17 22:14:36 +04:00
|
|
|
if (flags->verbose) {
|
2008-11-20 23:01:55 +03:00
|
|
|
(void) printf("snap %s already exists; "
|
2016-06-10 03:04:12 +03:00
|
|
|
"ignoring\n", destsnap);
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
2010-05-29 00:45:14 +04:00
|
|
|
err = ioctl_err = recv_skip(hdl, infd,
|
2011-11-17 22:14:36 +04:00
|
|
|
flags->byteswap);
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
*cp = '@';
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ioctl_err != 0) {
|
|
|
|
switch (ioctl_errno) {
|
|
|
|
case ENODEV:
|
2016-06-10 03:04:12 +03:00
|
|
|
cp = strchr(destsnap, '@');
|
2008-11-20 23:01:55 +03:00
|
|
|
*cp = '\0';
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"most recent snapshot of %s does not\n"
|
2016-06-10 03:04:12 +03:00
|
|
|
"match incremental source"), destsnap);
|
2008-11-20 23:01:55 +03:00
|
|
|
(void) zfs_error(hdl, EZFS_BADRESTORE, errbuf);
|
|
|
|
*cp = '@';
|
|
|
|
break;
|
|
|
|
case ETXTBSY:
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"destination %s has been modified\n"
|
2016-06-10 03:04:12 +03:00
|
|
|
"since most recent snapshot"), name);
|
2008-11-20 23:01:55 +03:00
|
|
|
(void) zfs_error(hdl, EZFS_BADRESTORE, errbuf);
|
|
|
|
break;
|
2017-08-14 20:36:48 +03:00
|
|
|
case EACCES:
|
|
|
|
if (raw && stream_wantsnewfs) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"failed to create encryption key"));
|
|
|
|
} else if (raw && !stream_wantsnewfs) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"encryption key does not match "
|
|
|
|
"existing key"));
|
|
|
|
} else {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"inherited key must be loaded"));
|
|
|
|
}
|
|
|
|
(void) zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf);
|
|
|
|
break;
|
2008-11-20 23:01:55 +03:00
|
|
|
case EEXIST:
|
2016-06-10 03:04:12 +03:00
|
|
|
cp = strchr(destsnap, '@');
|
2008-11-20 23:01:55 +03:00
|
|
|
if (newfs) {
|
|
|
|
/* it's the containing fs that exists */
|
|
|
|
*cp = '\0';
|
|
|
|
}
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"destination already exists"));
|
|
|
|
(void) zfs_error_fmt(hdl, EZFS_EXISTS,
|
|
|
|
dgettext(TEXT_DOMAIN, "cannot restore to %s"),
|
2016-06-10 03:04:12 +03:00
|
|
|
destsnap);
|
2008-11-20 23:01:55 +03:00
|
|
|
*cp = '@';
|
|
|
|
break;
|
|
|
|
case EINVAL:
|
2019-07-03 03:30:00 +03:00
|
|
|
if (flags->resumable) {
|
2016-06-10 03:04:12 +03:00
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"kernel modules must be upgraded to "
|
|
|
|
"receive this stream."));
|
2019-07-03 03:30:00 +03:00
|
|
|
} else if (embedded && !raw) {
|
2017-08-24 02:54:24 +03:00
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"incompatible embedded data stream "
|
|
|
|
"feature with encrypted receive."));
|
2019-07-03 03:30:00 +03:00
|
|
|
}
|
2008-11-20 23:01:55 +03:00
|
|
|
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
|
|
|
|
break;
|
|
|
|
case ECKSUM:
|
2020-03-17 20:30:33 +03:00
|
|
|
case ZFS_ERR_STREAM_TRUNCATED:
|
|
|
|
recv_ecksum_set_aux(hdl, destsnap, flags->resumable,
|
|
|
|
ioctl_err == ECKSUM);
|
2008-11-20 23:01:55 +03:00
|
|
|
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
|
|
|
|
break;
|
File incorrectly zeroed when receiving incremental stream that toggles -L
Background:
By increasing the recordsize property above the default of 128KB, a
filesystem may have "large" blocks. By default, a send stream of such a
filesystem does not contain large WRITE records, instead it decreases
objects' block sizes to 128KB and splits the large blocks into 128KB
blocks, allowing the large-block filesystem to be received by a system
that does not support the `large_blocks` feature. A send stream
generated by `zfs send -L` (or `--large-block`) preserves the large
block size on the receiving system, by using large WRITE records.
When receiving an incremental send stream for a filesystem with large
blocks, if the send stream's -L flag was toggled, a bug is encountered
in which the file's contents are incorrectly zeroed out. The contents
of any blocks that were not modified by this send stream will be lost.
"Toggled" means that the previous send used `-L`, but this incremental
does not use `-L` (-L to no-L); or that the previous send did not use
`-L`, but this incremental does use `-L` (no-L to -L).
Changes:
This commit addresses the problem with several changes to the semantics
of zfs send/receive:
1. "-L to no-L" incrementals are rejected. If the previous send used
`-L`, but this incremental does not use `-L`, the `zfs receive` will
fail with this error message:
incremental send stream requires -L (--large-block), to match
previous receive.
2. "no-L to -L" incrementals are handled correctly, preserving the
smaller (128KB) block size of any already-received files that used large
blocks on the sending system but were split by `zfs send` without the
`-L` flag.
3. A new send stream format flag is added, `SWITCH_TO_LARGE_BLOCKS`.
This feature indicates that we can correctly handle "no-L to -L"
incrementals. This flag is currently not set on any send streams. In
the future, we intend for incremental send streams of snapshots that
have large blocks to use `-L` by default, and these streams will also
have the `SWITCH_TO_LARGE_BLOCKS` feature set. This ensures that streams
from the default use of `zfs send` won't encounter the bug mentioned
above, because they can't be received by software with the bug.
Implementation notes:
To facilitate accessing the ZPL's generation number,
`zfs_space_delta_cb()` has been renamed to `zpl_get_file_info()` and
restructured to fill in a struct with ZPL-specific info including owner
and generation.
In the "no-L to -L" case, if this is a compressed send stream (from
`zfs send -cL`), large WRITE records that are being written to small
(128KB) blocksize files need to be decompressed so that they can be
written split up into multiple blocks. The zio pipeline will recompress
each smaller block individually.
A new test case, `send-L_toggle`, is added, which tests the "no-L to -L"
case and verifies that we get an error for the "-L to no-L" case.
Reviewed-by: Paul Dagnelie <pcd@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
Closes #6224
Closes #10383
2020-06-09 20:41:01 +03:00
|
|
|
case ZFS_ERR_STREAM_LARGE_BLOCK_MISMATCH:
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"incremental send stream requires -L "
|
|
|
|
"(--large-block), to match previous receive."));
|
|
|
|
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
|
|
|
|
break;
|
2010-05-29 00:45:14 +04:00
|
|
|
case ENOTSUP:
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"pool must be upgraded to receive this stream."));
|
|
|
|
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
|
|
|
|
break;
|
|
|
|
case EDQUOT:
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
2018-02-12 23:28:59 +03:00
|
|
|
"destination %s space quota exceeded."), name);
|
2011-11-17 22:14:36 +04:00
|
|
|
(void) zfs_error(hdl, EZFS_NOSPC, errbuf);
|
2010-05-29 00:45:14 +04:00
|
|
|
break;
|
2019-02-04 22:24:55 +03:00
|
|
|
case ZFS_ERR_FROM_IVSET_GUID_MISSING:
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
2019-03-19 20:22:39 +03:00
|
|
|
"IV set guid missing. See errata %u at "
|
2020-08-27 07:43:06 +03:00
|
|
|
"https://openzfs.github.io/openzfs-docs/msg/"
|
|
|
|
"ZFS-8000-ER."),
|
2019-02-04 22:24:55 +03:00
|
|
|
ZPOOL_ERRATA_ZOL_8308_ENCRYPTION);
|
|
|
|
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
|
|
|
|
break;
|
|
|
|
case ZFS_ERR_FROM_IVSET_GUID_MISMATCH:
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"IV set guid mismatch. See the 'zfs receive' "
|
|
|
|
"man page section\n discussing the limitations "
|
|
|
|
"of raw encrypted send streams."));
|
|
|
|
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
|
|
|
|
break;
|
2019-05-08 01:18:44 +03:00
|
|
|
case ZFS_ERR_SPILL_BLOCK_FLAG_MISSING:
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"Spill block flag missing for raw send.\n"
|
|
|
|
"The zfs software on the sending system must "
|
|
|
|
"be updated."));
|
|
|
|
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
|
|
|
|
break;
|
2018-02-12 23:28:59 +03:00
|
|
|
case EBUSY:
|
|
|
|
if (hastoken) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"destination %s contains "
|
|
|
|
"partially-complete state from "
|
|
|
|
"\"zfs receive -s\"."), name);
|
|
|
|
(void) zfs_error(hdl, EZFS_BUSY, errbuf);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
/* fallthru */
|
2008-11-20 23:01:55 +03:00
|
|
|
default:
|
|
|
|
(void) zfs_standard_error(hdl, ioctl_errno, errbuf);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2010-05-29 00:45:14 +04:00
|
|
|
* Mount the target filesystem (if created). Also mount any
|
|
|
|
* children of the target filesystem if we did a replication
|
|
|
|
* receive (indicated by stream_avl being non-NULL).
|
2008-11-20 23:01:55 +03:00
|
|
|
*/
|
|
|
|
if (clp) {
|
2017-01-26 23:42:48 +03:00
|
|
|
if (!flags->nomount)
|
|
|
|
err |= changelist_postfix(clp);
|
2008-11-20 23:01:55 +03:00
|
|
|
changelist_free(clp);
|
|
|
|
}
|
|
|
|
|
2019-09-26 03:02:33 +03:00
|
|
|
if ((newfs || stream_avl) && type == ZFS_TYPE_FILESYSTEM && !redacted)
|
|
|
|
flags->domount = B_TRUE;
|
|
|
|
|
2010-05-29 00:45:14 +04:00
|
|
|
if (prop_errflags & ZPROP_ERR_NOCLEAR) {
|
|
|
|
(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Warning: "
|
2016-06-10 03:04:12 +03:00
|
|
|
"failed to clear unreceived properties on %s"), name);
|
2010-05-29 00:45:14 +04:00
|
|
|
(void) fprintf(stderr, "\n");
|
|
|
|
}
|
|
|
|
if (prop_errflags & ZPROP_ERR_NORESTORE) {
|
|
|
|
(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Warning: "
|
2016-06-10 03:04:12 +03:00
|
|
|
"failed to restore original properties on %s"), name);
|
2010-05-29 00:45:14 +04:00
|
|
|
(void) fprintf(stderr, "\n");
|
|
|
|
}
|
|
|
|
|
2016-06-10 03:04:12 +03:00
|
|
|
if (err || ioctl_err) {
|
|
|
|
err = -1;
|
|
|
|
goto out;
|
|
|
|
}
|
2010-08-27 01:24:34 +04:00
|
|
|
|
2011-11-17 22:14:36 +04:00
|
|
|
if (flags->verbose) {
|
2008-11-20 23:01:55 +03:00
|
|
|
char buf1[64];
|
|
|
|
char buf2[64];
|
2016-06-10 03:04:12 +03:00
|
|
|
uint64_t bytes = read_bytes;
|
2008-11-20 23:01:55 +03:00
|
|
|
time_t delta = time(NULL) - begin_time;
|
|
|
|
if (delta == 0)
|
|
|
|
delta = 1;
|
2017-05-02 23:43:53 +03:00
|
|
|
zfs_nicebytes(bytes, buf1, sizeof (buf1));
|
|
|
|
zfs_nicebytes(bytes/delta, buf2, sizeof (buf1));
|
2008-11-20 23:01:55 +03:00
|
|
|
|
2020-05-21 22:53:13 +03:00
|
|
|
(void) printf("received %s stream in %lld seconds (%s/sec)\n",
|
|
|
|
buf1, (longlong_t)delta, buf2);
|
2008-11-20 23:01:55 +03:00
|
|
|
}
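/*
 * The verbose summary above divides bytes received by elapsed wall-clock
 * seconds (clamped to at least one second to avoid dividing by zero) and
 * prints both values through zfs_nicebytes().  A rough standalone
 * equivalent of that human-readable formatting, for illustration only:
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static void
ex_nicebytes(uint64_t n, char *buf, size_t len)
{
	static const char *units[] = { "B", "K", "M", "G", "T", "P" };
	int i = 0;
	double v = (double)n;

	while (v >= 1024.0 && i < 5) {
		v /= 1024.0;
		i++;
	}
	(void) snprintf(buf, len, "%.3g%s", v, units[i]);
}

int
main(void)
{
	uint64_t bytes = 123456789ULL;		/* e.g. read_bytes */
	time_t begin = time(NULL) - 42;		/* pretend the recv took 42s */
	time_t delta = time(NULL) - begin;
	char buf1[64], buf2[64];

	if (delta == 0)
		delta = 1;			/* avoid division by zero */
	ex_nicebytes(bytes, buf1, sizeof (buf1));
	ex_nicebytes(bytes / (uint64_t)delta, buf2, sizeof (buf2));
	(void) printf("received %s stream in %lld seconds (%s/sec)\n",
	    buf1, (long long)delta, buf2);
	return (0);
}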
|
|
|
|
|
2016-06-10 03:04:12 +03:00
|
|
|
err = 0;
|
|
|
|
out:
|
|
|
|
if (prop_errors != NULL)
|
2021-01-14 20:53:09 +03:00
|
|
|
fnvlist_free(prop_errors);
|
2016-06-10 03:04:12 +03:00
|
|
|
|
2017-08-14 20:36:48 +03:00
|
|
|
if (tmp_keylocation[0] != '\0') {
|
2021-01-14 20:53:09 +03:00
|
|
|
fnvlist_add_string(rcvprops,
|
|
|
|
zfs_prop_to_name(ZFS_PROP_KEYLOCATION), tmp_keylocation);
|
2017-08-14 20:36:48 +03:00
|
|
|
}
|
|
|
|
|
2016-06-10 03:04:12 +03:00
|
|
|
if (newprops)
|
2021-01-14 20:53:09 +03:00
|
|
|
fnvlist_free(rcvprops);
|
2017-05-10 02:21:09 +03:00
|
|
|
|
2021-01-14 20:53:09 +03:00
|
|
|
fnvlist_free(oxprops);
|
|
|
|
fnvlist_free(origprops);
|
2016-06-10 03:04:12 +03:00
|
|
|
|
|
|
|
return (err);
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
|
|
|
|
2017-05-10 02:21:09 +03:00
|
|
|
/*
|
|
|
|
* Check properties we were asked to override (both -o|-x)
|
|
|
|
*/
|
|
|
|
static boolean_t
|
|
|
|
zfs_receive_checkprops(libzfs_handle_t *hdl, nvlist_t *props,
|
|
|
|
const char *errbuf)
|
|
|
|
{
|
|
|
|
nvpair_t *nvp;
|
|
|
|
zfs_prop_t prop;
|
|
|
|
const char *name;
|
|
|
|
|
|
|
|
nvp = NULL;
|
|
|
|
while ((nvp = nvlist_next_nvpair(props, nvp)) != NULL) {
|
|
|
|
name = nvpair_name(nvp);
|
|
|
|
prop = zfs_name_to_prop(name);
|
|
|
|
|
|
|
|
if (prop == ZPROP_INVAL) {
|
|
|
|
if (!zfs_prop_user(name)) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"invalid property '%s'"), name);
|
|
|
|
return (B_FALSE);
|
|
|
|
}
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* "origin" is readonly but is used to receive datasets as
|
|
|
|
* clones so we don't raise an error here
|
|
|
|
*/
|
|
|
|
if (prop == ZFS_PROP_ORIGIN)
|
|
|
|
continue;
|
|
|
|
|
2017-10-13 20:09:04 +03:00
|
|
|
/* encryption params have their own verification later */
|
|
|
|
if (prop == ZFS_PROP_ENCRYPTION ||
|
|
|
|
zfs_prop_encryption_key_param(prop))
|
|
|
|
continue;
|
|
|
|
|
2017-05-10 02:21:09 +03:00
|
|
|
/*
|
|
|
|
* cannot override readonly, set-once and other specific
|
|
|
|
* settable properties
|
|
|
|
*/
|
|
|
|
if (zfs_prop_readonly(prop) || prop == ZFS_PROP_VERSION ||
|
|
|
|
prop == ZFS_PROP_VOLSIZE) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"invalid property '%s'"), name);
|
|
|
|
return (B_FALSE);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return (B_TRUE);
|
|
|
|
}
|
|
|
|
|
2008-12-03 23:09:06 +03:00
|
|
|
static int
|
2015-12-22 04:31:57 +03:00
|
|
|
zfs_receive_impl(libzfs_handle_t *hdl, const char *tosnap,
|
|
|
|
const char *originsnap, recvflags_t *flags, int infd, const char *sendfs,
|
2020-04-23 20:06:57 +03:00
|
|
|
nvlist_t *stream_nv, avl_tree_t *stream_avl, char **top_zfs,
|
|
|
|
const char *finalsnap, nvlist_t *cmdprops)
|
2008-11-20 23:01:55 +03:00
|
|
|
{
|
|
|
|
int err;
|
|
|
|
dmu_replay_record_t drr, drr_noswap;
|
|
|
|
struct drr_begin *drrb = &drr.drr_u.drr_begin;
|
|
|
|
char errbuf[1024];
|
2010-08-26 20:52:41 +04:00
|
|
|
zio_cksum_t zcksum = { { 0 } };
|
2010-05-29 00:45:14 +04:00
|
|
|
uint64_t featureflags;
|
|
|
|
int hdrtype;
|
2008-11-20 23:01:55 +03:00
|
|
|
|
|
|
|
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
|
|
|
|
"cannot receive"));
|
|
|
|
|
2017-05-10 02:21:09 +03:00
|
|
|
/* check cmdline props, raise an error if they cannot be received */
|
|
|
|
if (!zfs_receive_checkprops(hdl, cmdprops, errbuf)) {
|
|
|
|
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
|
|
|
|
}
|
|
|
|
|
2011-11-17 22:14:36 +04:00
|
|
|
if (flags->isprefix &&
|
2008-11-20 23:01:55 +03:00
|
|
|
!zfs_dataset_exists(hdl, tosnap, ZFS_TYPE_DATASET)) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "specified fs "
|
|
|
|
"(%s) does not exist"), tosnap);
|
|
|
|
return (zfs_error(hdl, EZFS_NOENT, errbuf));
|
|
|
|
}
|
2015-12-22 04:31:57 +03:00
|
|
|
if (originsnap &&
|
|
|
|
!zfs_dataset_exists(hdl, originsnap, ZFS_TYPE_DATASET)) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "specified origin fs "
|
|
|
|
"(%s) does not exist"), originsnap);
|
|
|
|
return (zfs_error(hdl, EZFS_NOENT, errbuf));
|
|
|
|
}
|
2008-11-20 23:01:55 +03:00
|
|
|
|
|
|
|
/* read in the BEGIN record */
|
|
|
|
if (0 != (err = recv_read(hdl, infd, &drr, sizeof (drr), B_FALSE,
|
|
|
|
&zcksum)))
|
|
|
|
return (err);
|
|
|
|
|
|
|
|
if (drr.drr_type == DRR_END || drr.drr_type == BSWAP_32(DRR_END)) {
|
|
|
|
/* It's the double end record at the end of a package */
|
|
|
|
return (ENODATA);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* the kernel needs the non-byteswapped begin record */
|
|
|
|
drr_noswap = drr;
|
|
|
|
|
2011-11-17 22:14:36 +04:00
|
|
|
flags->byteswap = B_FALSE;
|
2008-11-20 23:01:55 +03:00
|
|
|
if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
|
|
|
|
/*
|
|
|
|
* We computed the checksum in the wrong byteorder in
|
|
|
|
* recv_read() above; do it again correctly.
|
|
|
|
*/
|
|
|
|
bzero(&zcksum, sizeof (zio_cksum_t));
|
|
|
|
fletcher_4_incremental_byteswap(&drr, sizeof (drr), &zcksum);
|
2011-11-17 22:14:36 +04:00
|
|
|
flags->byteswap = B_TRUE;
|
2008-11-20 23:01:55 +03:00
|
|
|
|
|
|
|
drr.drr_type = BSWAP_32(drr.drr_type);
|
|
|
|
drr.drr_payloadlen = BSWAP_32(drr.drr_payloadlen);
|
|
|
|
drrb->drr_magic = BSWAP_64(drrb->drr_magic);
|
2010-05-29 00:45:14 +04:00
|
|
|
drrb->drr_versioninfo = BSWAP_64(drrb->drr_versioninfo);
|
2008-11-20 23:01:55 +03:00
|
|
|
drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time);
|
|
|
|
drrb->drr_type = BSWAP_32(drrb->drr_type);
|
|
|
|
drrb->drr_flags = BSWAP_32(drrb->drr_flags);
|
|
|
|
drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
|
|
|
|
drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid);
|
|
|
|
}
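/*
 * A compact, self-contained sketch of the byte-order handling above: the
 * DRR_BEGIN magic is compared against its byte-swapped form, and when the
 * stream was written on an opposite-endian host every multi-byte field is
 * swapped before use.  The magic value and record layout below are
 * placeholders, not the real DMU_BACKUP_MAGIC or dmu_replay_record_t.
 */
#include <stdint.h>
#include <stdio.h>

#define	EX_MAGIC	0xf00dfeedbeefULL	/* placeholder magic */

static uint64_t
ex_bswap64(uint64_t v)
{
	return (((v & 0x00000000000000ffULL) << 56) |
	    ((v & 0x000000000000ff00ULL) << 40) |
	    ((v & 0x0000000000ff0000ULL) << 24) |
	    ((v & 0x00000000ff000000ULL) << 8) |
	    ((v & 0x000000ff00000000ULL) >> 8) |
	    ((v & 0x0000ff0000000000ULL) >> 24) |
	    ((v & 0x00ff000000000000ULL) >> 40) |
	    ((v & 0xff00000000000000ULL) >> 56));
}

int
main(void)
{
	struct { uint64_t magic; uint64_t toguid; } rec;

	/* pretend this record arrived from an opposite-endian sender */
	rec.magic = ex_bswap64(EX_MAGIC);
	rec.toguid = ex_bswap64(0x1234ULL);

	int byteswap = (rec.magic == ex_bswap64(EX_MAGIC));
	if (byteswap) {
		rec.magic = ex_bswap64(rec.magic);
		rec.toguid = ex_bswap64(rec.toguid);
	}
	(void) printf("byteswap=%d magic=%llx toguid=%llx\n", byteswap,
	    (unsigned long long)rec.magic, (unsigned long long)rec.toguid);
	return (0);
}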
|
|
|
|
|
|
|
|
if (drrb->drr_magic != DMU_BACKUP_MAGIC || drr.drr_type != DRR_BEGIN) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
|
|
|
|
"stream (bad magic number)"));
|
|
|
|
return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
|
|
|
|
}
|
|
|
|
|
2010-05-29 00:45:14 +04:00
|
|
|
featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
|
|
|
|
hdrtype = DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo);
|
|
|
|
|
|
|
|
if (!DMU_STREAM_SUPPORTED(featureflags) ||
|
|
|
|
(hdrtype != DMU_SUBSTREAM && hdrtype != DMU_COMPOUNDSTREAM)) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"stream has unsupported feature, feature flags = %lx"),
|
|
|
|
featureflags);
|
|
|
|
return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
|
|
|
|
}
|
|
|
|
|
2019-02-15 23:41:38 +03:00
|
|
|
/* Holds feature is set once in the compound stream header. */
|
2020-04-10 20:39:55 +03:00
	if (featureflags & DMU_BACKUP_FEATURE_HOLDS)
		flags->holds = B_TRUE;

	if (strchr(drrb->drr_toname, '@') == NULL) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
		    "stream (bad snapshot name)"));
		return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
	}

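	/*
	 * A plain substream is handed straight to zfs_receive_one().  A
	 * compound stream (the output of 'zfs send -R') is unpacked by
	 * zfs_receive_package(), which calls back into this function for
	 * each substream it contains.
	 */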
	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) == DMU_SUBSTREAM) {
		char nonpackage_sendfs[ZFS_MAX_DATASET_NAME_LEN];
		if (sendfs == NULL) {
			/*
			 * We were not called from zfs_receive_package(). Get
			 * the fs specified by 'zfs send'.
			 */
			char *cp;
			(void) strlcpy(nonpackage_sendfs,
			    drr.drr_u.drr_begin.drr_toname,
			    sizeof (nonpackage_sendfs));
			if ((cp = strchr(nonpackage_sendfs, '@')) != NULL)
				*cp = '\0';
			sendfs = nonpackage_sendfs;
			VERIFY(finalsnap == NULL);
		}
		return (zfs_receive_one(hdl, infd, tosnap, originsnap, flags,
		    &drr, &drr_noswap, sendfs, stream_nv, stream_avl, top_zfs,
		    finalsnap, cmdprops));
	} else {
		assert(DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
		    DMU_COMPOUNDSTREAM);
		return (zfs_receive_package(hdl, infd, tosnap, flags, &drr,
		    &zcksum, top_zfs, cmdprops));
	}
}

/*
 * Restores a backup of tosnap from the file descriptor specified by infd.
 * Returns 0 on total success, -2 if some things couldn't be
 * destroyed/renamed/promoted, and -1 if some things couldn't be received
 * (-1 overrides -2; if -1 is returned and the resumable flag was specified,
 * the transfer can be resumed if the sending side supports it).
 */
int
zfs_receive(libzfs_handle_t *hdl, const char *tosnap, nvlist_t *props,
    recvflags_t *flags, int infd, avl_tree_t *stream_avl)
{
	char *top_zfs = NULL;
	int err;
	struct stat sb;
	char *originsnap = NULL;

	/*
	 * The only way fstat can fail is if we do not have a valid file
	 * descriptor.
	 */
	if (fstat(infd, &sb) == -1) {
		perror("fstat");
		return (-2);
	}

	/*
	 * It is not uncommon for gigabytes to be processed in zfs receive.
	 * Speculatively increase the buffer size if supported by the platform.
	 */
	if (S_ISFIFO(sb.st_mode))
		libzfs_set_pipe_max(infd);

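	/* The caller passes "zfs recv -o origin=<snapshot>" down via props. */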
	if (props) {
		err = nvlist_lookup_string(props, "origin", &originsnap);
		if (err && err != ENOENT)
			return (err);
	}

	err = zfs_receive_impl(hdl, tosnap, originsnap, flags, infd, NULL, NULL,
	    stream_avl, &top_zfs, NULL, props);

	if (err == 0 && !flags->nomount && flags->domount && top_zfs) {
		zfs_handle_t *zhp = NULL;
		prop_changelist_t *clp = NULL;

		zhp = zfs_open(hdl, top_zfs,
		    ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
		if (zhp == NULL) {
			err = -1;
			goto out;
		} else {
			if (zhp->zfs_type == ZFS_TYPE_VOLUME) {
				zfs_close(zhp);
				goto out;
			}

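			/*
			 * Gather every dataset whose mountpoint property is
			 * affected by the receive so it can be remounted and
			 * reshared below.
			 */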
			clp = changelist_gather(zhp, ZFS_PROP_MOUNTPOINT,
			    CL_GATHER_MOUNT_ALWAYS,
			    flags->forceunmount ? MS_FORCE : 0);
			zfs_close(zhp);
			if (clp == NULL) {
				err = -1;
				goto out;
			}

			/* mount and share received datasets */
			err = changelist_postfix(clp);
			changelist_free(clp);
			if (err != 0)
				err = -1;
		}
	}

out:
	if (top_zfs)
		free(top_zfs);

	return (err);
}
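
/*
 * Illustrative sketch (not part of libzfs): one way a caller might drive
 * zfs_receive() with a stream arriving on stdin.  The dataset name, the
 * zero-initialized recvflags_t, and the error handling below are
 * assumptions made for the example, not a definitive recipe.
 *
 *	libzfs_handle_t *hdl = libzfs_init();
 *	recvflags_t rflags = { 0 };	// take the library defaults
 *	int err;
 *
 *	if (hdl == NULL)
 *		return (1);
 *	// 0 == total success, -1 == receive failure (possibly resumable),
 *	// -2 == cleanup (destroy/rename/promote) failure; see above.
 *	err = zfs_receive(hdl, "tank/backups", NULL, &rflags,
 *	    STDIN_FILENO, NULL);
 *	libzfs_fini(hdl);
 *	return (err == 0 ? 0 : 1);
 */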