/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018 by Delphix. All rights reserved.
 * Copyright (c) 2023, Klara Inc.
 */
|
|
|
|
|
|
|
|
#include <sys/zfs_context.h>
|
|
|
|
#include <sys/spa.h>
|
|
|
|
#include <sys/zio.h>
|
|
|
|
#include <sys/ddt.h>
|
2023-06-30 06:35:18 +03:00
|
|
|
#include <sys/ddt_impl.h>
|
2010-05-29 00:45:14 +04:00
|
|
|
#include <sys/zap.h>
|
|
|
|
#include <sys/dmu_tx.h>
|
2023-06-30 05:48:45 +03:00
|
|
|
#include <sys/zio_compress.h>
|
2010-05-29 00:45:14 +04:00
|
|
|
|
2023-06-30 19:42:02 +03:00
|
|
|
/*
 * Default leaf (bs) and indirect (ibs) block shifts, in log2 bytes, used
 * when creating a new DDT ZAP object.  Exposed as module parameters at
 * the bottom of this file.
 */
static unsigned int ddt_zap_default_bs = 15;
static unsigned int ddt_zap_default_ibs = 15;

/*
 * Each stored value is prefixed with a one-byte "version": the low seven
 * bits hold the zio compression function used, and the high bit records
 * whether the entry was written in the host's native byte order.
 */
#define	DDT_ZAP_COMPRESS_BYTEORDER_MASK	0x80
#define	DDT_ZAP_COMPRESS_FUNCTION_MASK	0x7f

/* A DDT key viewed as an array of uint64_t words, for the uint64 ZAP API. */
#define	DDT_KEY_WORDS	(sizeof (ddt_key_t) / sizeof (uint64_t))
|
|
|
|
|
|
|
|
static size_t
|
2023-07-03 16:28:46 +03:00
|
|
|
ddt_zap_compress(const void *src, uchar_t *dst, size_t s_len, size_t d_len)
|
2023-06-30 05:48:45 +03:00
|
|
|
{
|
|
|
|
uchar_t *version = dst++;
|
|
|
|
int cpfunc = ZIO_COMPRESS_ZLE;
|
|
|
|
zio_compress_info_t *ci = &zio_compress_table[cpfunc];
|
|
|
|
size_t c_len;
|
|
|
|
|
|
|
|
ASSERT3U(d_len, >=, s_len + 1); /* no compression plus version byte */
|
|
|
|
|
2023-07-03 16:28:46 +03:00
|
|
|
c_len = ci->ci_compress((void *)src, dst, s_len, d_len - 1,
|
|
|
|
ci->ci_level);
|
2023-06-30 05:48:45 +03:00
|
|
|
|
|
|
|
if (c_len == s_len) {
|
|
|
|
cpfunc = ZIO_COMPRESS_OFF;
|
|
|
|
memcpy(dst, src, s_len);
|
|
|
|
}
|
|
|
|
|
|
|
|
*version = cpfunc;
|
|
|
|
if (ZFS_HOST_BYTEORDER)
|
|
|
|
*version |= DDT_ZAP_COMPRESS_BYTEORDER_MASK;
|
|
|
|
|
|
|
|
return (c_len + 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
ddt_zap_decompress(uchar_t *src, void *dst, size_t s_len, size_t d_len)
|
|
|
|
{
|
|
|
|
uchar_t version = *src++;
|
|
|
|
int cpfunc = version & DDT_ZAP_COMPRESS_FUNCTION_MASK;
|
|
|
|
zio_compress_info_t *ci = &zio_compress_table[cpfunc];
|
|
|
|
|
|
|
|
if (ci->ci_decompress != NULL)
|
|
|
|
(void) ci->ci_decompress(src, dst, s_len, d_len, ci->ci_level);
|
|
|
|
else
|
|
|
|
memcpy(dst, src, d_len);
|
|
|
|
|
|
|
|
if (((version & DDT_ZAP_COMPRESS_BYTEORDER_MASK) != 0) !=
|
|
|
|
(ZFS_HOST_BYTEORDER != 0))
|
|
|
|
byteswap_uint64_array(dst, d_len);
|
|
|
|
}
|
|
|
|
|
2010-05-29 00:45:14 +04:00
|
|
|
static int
|
|
|
|
ddt_zap_create(objset_t *os, uint64_t *objectp, dmu_tx_t *tx, boolean_t prehash)
|
|
|
|
{
|
|
|
|
zap_flags_t flags = ZAP_FLAG_HASH64 | ZAP_FLAG_UINT64_KEY;
|
|
|
|
|
|
|
|
if (prehash)
|
|
|
|
flags |= ZAP_FLAG_PRE_HASHED_KEY;
|
|
|
|
|
|
|
|
*objectp = zap_create_flags(os, 0, flags, DMU_OT_DDT_ZAP,
|
2023-06-30 19:42:02 +03:00
|
|
|
ddt_zap_default_bs, ddt_zap_default_ibs,
|
2010-05-29 00:45:14 +04:00
|
|
|
DMU_OT_NONE, 0, tx);
|
2023-07-03 16:28:46 +03:00
|
|
|
if (*objectp == 0)
|
|
|
|
return (SET_ERROR(ENOTSUP));
|
2010-05-29 00:45:14 +04:00
|
|
|
|
2023-07-03 16:28:46 +03:00
|
|
|
return (0);
|
2010-05-29 00:45:14 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
ddt_zap_destroy(objset_t *os, uint64_t object, dmu_tx_t *tx)
|
|
|
|
{
|
|
|
|
return (zap_destroy(os, object, tx));
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
2023-07-03 16:28:46 +03:00
|
|
|
ddt_zap_lookup(objset_t *os, uint64_t object,
|
ddt: add "flat phys" feature
Traditional dedup keeps a separate ddt_phys_t "type" for each possible
count of DVAs (that is, copies=) parameter. Each of these are tracked
independently of each other, and have their own set of DVAs. This leads
to an (admittedly rare) situation where you can create as many as six
copies of the data, by changing the copies= parameter between copying.
This is both a waste of storage on disk, but also a waste of space in
the stored DDT entries, since there never needs to be more than three
DVAs to handle all possible values of copies=.
This commit adds a new FDT feature, DDT_FLAG_FLAT. When active, only the
first ddt_phys_t is used. Each time a block is written with the dedup
bit set, this single phys is checked to see if it has enough DVAs to
fulfill the request. If it does, the block is filled with the saved DVAs
as normal. If not, an adjusted write is issued to create as many extra
copies as are needed to fulfill the request, which are then saved into
the entry too.
Because a single phys is no longer an all-or-nothing, but can be
transitioning from fewer to more DVAs, the write path now has to keep a
copy of the previous "known good" DVA set so we can revert to it in case
an error occurs. zio_ddt_write() has been restructured and heavily
commented to make it much easier to see what's happening.
Backwards compatibility is maintained simply by allocating four
ddt_phys_t when the DDT_FLAG_FLAT flag is not set, and updating the phys
selection macros to check the flag. In the old arrangement, each number
of copies gets a whole phys, so it will always have either zero or all
necessary DVAs filled, with no in-between, so the old behaviour
naturally falls out of the new code.
Reviewed-by: Alexander Motin <mav@FreeBSD.org>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Rob Norris <rob.norris@klarasystems.com>
Co-authored-by: Don Brady <don.brady@klarasystems.com>
Sponsored-by: Klara, Inc.
Sponsored-by: iXsystems, Inc.
Closes #15893
2023-06-20 04:09:48 +03:00
|
|
|
const ddt_key_t *ddk, void *phys, size_t psize)
|
2010-05-29 00:45:14 +04:00
|
|
|
{
|
2011-05-26 01:13:18 +04:00
|
|
|
uchar_t *cbuf;
|
2010-05-29 00:45:14 +04:00
|
|
|
uint64_t one, csize;
|
|
|
|
int error;
|
|
|
|
|
2023-07-03 16:28:46 +03:00
|
|
|
error = zap_length_uint64(os, object, (uint64_t *)ddk,
|
2010-05-29 00:45:14 +04:00
|
|
|
DDT_KEY_WORDS, &one, &csize);
|
|
|
|
if (error)
|
2024-01-18 01:51:41 +03:00
|
|
|
return (error);
|
2010-05-29 00:45:14 +04:00
|
|
|
|
2024-02-15 11:37:38 +03:00
|
|
|
ASSERT3U(one, ==, 1);
|
2023-07-03 16:28:46 +03:00
|
|
|
ASSERT3U(csize, <=, psize + 1);
|
2010-05-29 00:45:14 +04:00
|
|
|
|
2024-01-18 01:51:41 +03:00
|
|
|
cbuf = kmem_alloc(csize, KM_SLEEP);
|
|
|
|
|
2023-07-03 16:28:46 +03:00
|
|
|
error = zap_lookup_uint64(os, object, (uint64_t *)ddk,
|
2010-05-29 00:45:14 +04:00
|
|
|
DDT_KEY_WORDS, 1, csize, cbuf);
|
2024-01-18 01:51:41 +03:00
|
|
|
if (error == 0)
|
2023-07-03 16:28:46 +03:00
|
|
|
ddt_zap_decompress(cbuf, phys, csize, psize);
|
2010-05-29 00:45:14 +04:00
|
|
|
|
2024-01-18 01:51:41 +03:00
|
|
|
kmem_free(cbuf, csize);
|
2010-05-29 00:45:14 +04:00
|
|
|
|
2011-05-26 01:13:18 +04:00
|
|
|
return (error);
|
2010-05-29 00:45:14 +04:00
|
|
|
}
|
|
|
|
|
2023-07-03 16:28:46 +03:00
|
|
|
static int
|
|
|
|
ddt_zap_contains(objset_t *os, uint64_t object, const ddt_key_t *ddk)
|
|
|
|
{
|
|
|
|
return (zap_length_uint64(os, object, (uint64_t *)ddk, DDT_KEY_WORDS,
|
|
|
|
NULL, NULL));
|
|
|
|
}
|
|
|
|
|
2010-05-29 00:45:14 +04:00
|
|
|
static void
|
2023-07-03 16:28:46 +03:00
|
|
|
ddt_zap_prefetch(objset_t *os, uint64_t object, const ddt_key_t *ddk)
|
2010-05-29 00:45:14 +04:00
|
|
|
{
|
2023-07-03 16:28:46 +03:00
|
|
|
(void) zap_prefetch_uint64(os, object, (uint64_t *)ddk, DDT_KEY_WORDS);
|
2010-05-29 00:45:14 +04:00
|
|
|
}
|
|
|
|
|
2024-07-26 19:16:18 +03:00
|
|
|
static void
|
|
|
|
ddt_zap_prefetch_all(objset_t *os, uint64_t object)
|
|
|
|
{
|
|
|
|
(void) zap_prefetch_object(os, object);
|
|
|
|
}
|
|
|
|
|
2010-05-29 00:45:14 +04:00
|
|
|
static int
|
2023-07-03 16:28:46 +03:00
|
|
|
ddt_zap_update(objset_t *os, uint64_t object, const ddt_key_t *ddk,
|
ddt: add "flat phys" feature
Traditional dedup keeps a separate ddt_phys_t "type" for each possible
count of DVAs (that is, copies=) parameter. Each of these are tracked
independently of each other, and have their own set of DVAs. This leads
to an (admittedly rare) situation where you can create as many as six
copies of the data, by changing the copies= parameter between copying.
This is both a waste of storage on disk, but also a waste of space in
the stored DDT entries, since there never needs to be more than three
DVAs to handle all possible values of copies=.
This commit adds a new FDT feature, DDT_FLAG_FLAT. When active, only the
first ddt_phys_t is used. Each time a block is written with the dedup
bit set, this single phys is checked to see if it has enough DVAs to
fulfill the request. If it does, the block is filled with the saved DVAs
as normal. If not, an adjusted write is issued to create as many extra
copies as are needed to fulfill the request, which are then saved into
the entry too.
Because a single phys is no longer an all-or-nothing, but can be
transitioning from fewer to more DVAs, the write path now has to keep a
copy of the previous "known good" DVA set so we can revert to it in case
an error occurs. zio_ddt_write() has been restructured and heavily
commented to make it much easier to see what's happening.
Backwards compatibility is maintained simply by allocating four
ddt_phys_t when the DDT_FLAG_FLAT flag is not set, and updating the phys
selection macros to check the flag. In the old arrangement, each number
of copies gets a whole phys, so it will always have either zero or all
necessary DVAs filled, with no in-between, so the old behaviour
naturally falls out of the new code.
Reviewed-by: Alexander Motin <mav@FreeBSD.org>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Rob Norris <rob.norris@klarasystems.com>
Co-authored-by: Don Brady <don.brady@klarasystems.com>
Sponsored-by: Klara, Inc.
Sponsored-by: iXsystems, Inc.
Closes #15893
2023-06-20 04:09:48 +03:00
|
|
|
const void *phys, size_t psize, dmu_tx_t *tx)
|
2010-05-29 00:45:14 +04:00
|
|
|
{
|
2023-07-03 16:28:46 +03:00
|
|
|
const size_t cbuf_size = psize + 1;
|
2024-01-18 01:51:41 +03:00
|
|
|
|
|
|
|
uchar_t *cbuf = kmem_alloc(cbuf_size, KM_SLEEP);
|
|
|
|
|
2023-07-03 16:28:46 +03:00
|
|
|
uint64_t csize = ddt_zap_compress(phys, cbuf, psize, cbuf_size);
|
2024-01-18 01:51:41 +03:00
|
|
|
|
2023-07-03 16:28:46 +03:00
|
|
|
int error = zap_update_uint64(os, object, (uint64_t *)ddk,
|
2024-01-18 01:51:41 +03:00
|
|
|
DDT_KEY_WORDS, 1, csize, cbuf, tx);
|
2010-05-29 00:45:14 +04:00
|
|
|
|
2024-01-18 01:51:41 +03:00
|
|
|
kmem_free(cbuf, cbuf_size);
|
2010-05-29 00:45:14 +04:00
|
|
|
|
2024-01-18 01:51:41 +03:00
|
|
|
return (error);
|
2010-05-29 00:45:14 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
2023-07-03 16:28:46 +03:00
|
|
|
ddt_zap_remove(objset_t *os, uint64_t object, const ddt_key_t *ddk,
|
|
|
|
dmu_tx_t *tx)
|
2010-05-29 00:45:14 +04:00
|
|
|
{
|
2023-07-03 16:28:46 +03:00
|
|
|
return (zap_remove_uint64(os, object, (uint64_t *)ddk,
|
2010-05-29 00:45:14 +04:00
|
|
|
DDT_KEY_WORDS, tx));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Iterate over the DDT ZAP.  *walk is an opaque cursor position: pass 0
 * to start from the beginning, then pass back the value stored here to
 * resume.  On success the entry's key is copied into *ddk and its value
 * is decompressed into phys (psize bytes).  Returns 0 on success, or the
 * cursor's error (ENOENT once iteration is exhausted).
 */
static int
ddt_zap_walk(objset_t *os, uint64_t object, uint64_t *walk, ddt_key_t *ddk,
    void *phys, size_t psize)
{
	zap_cursor_t zc;
	zap_attribute_t za;
	int error;

	if (*walk == 0) {
		/*
		 * We don't want to prefetch the entire ZAP object, because
		 * it can be enormous.  Also the primary use of DDT iteration
		 * is for scrubbing, in which case we will be issuing many
		 * scrub I/Os for each ZAP block that we read in, so
		 * reading the ZAP is unlikely to be the bottleneck.
		 */
		zap_cursor_init_noprefetch(&zc, os, object);
	} else {
		/* Resume from the position saved on the previous call. */
		zap_cursor_init_serialized(&zc, os, object, *walk);
	}
	if ((error = zap_cursor_retrieve(&zc, &za)) == 0) {
		/* Each value is stored as za_num_integers one-byte integers. */
		uint64_t csize = za.za_num_integers;

		ASSERT3U(za.za_integer_length, ==, 1);
		ASSERT3U(csize, <=, psize + 1);

		uchar_t *cbuf = kmem_alloc(csize, KM_SLEEP);

		/* The DDT key is returned as the ZAP attribute name. */
		error = zap_lookup_uint64(os, object, (uint64_t *)za.za_name,
		    DDT_KEY_WORDS, 1, csize, cbuf);
		ASSERT0(error);
		if (error == 0) {
			ddt_zap_decompress(cbuf, phys, csize, psize);
			*ddk = *(ddt_key_t *)za.za_name;
		}

		kmem_free(cbuf, csize);

		/* Save the position to resume from next time. */
		zap_cursor_advance(&zc);
		*walk = zap_cursor_serialize(&zc);
	}
	zap_cursor_fini(&zc);
	return (error);
}
|
|
|
|
|
2012-10-26 21:01:49 +04:00
|
|
|
static int
|
|
|
|
ddt_zap_count(objset_t *os, uint64_t object, uint64_t *count)
|
2010-05-29 00:45:14 +04:00
|
|
|
{
|
2013-11-01 23:26:11 +04:00
|
|
|
return (zap_count(os, object, count));
|
2010-05-29 00:45:14 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * DDT storage backend operations for ZAP-backed (on-disk) dedup tables.
 * The entries are positional and must match the ddt_ops_t definition.
 */
const ddt_ops_t ddt_zap_ops = {
	"zap",
	ddt_zap_create,
	ddt_zap_destroy,
	ddt_zap_lookup,
	ddt_zap_contains,
	ddt_zap_prefetch,
	ddt_zap_prefetch_all,
	ddt_zap_update,
	ddt_zap_remove,
	ddt_zap_walk,
	ddt_zap_count,
};
|
2023-06-30 19:42:02 +03:00
|
|
|
|
|
|
|
/* Expose the default DDT ZAP block shifts as runtime-writable tunables. */
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_dedup, , ddt_zap_default_bs, UINT, ZMOD_RW,
	"DDT ZAP leaf blockshift");
ZFS_MODULE_PARAM(zfs_dedup, , ddt_zap_default_ibs, UINT, ZMOD_RW,
	"DDT ZAP indirect blockshift");
/* END CSTYLED */
|