From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Tom Caputi <tcaputi@datto.com>
Date: Thu, 28 Jun 2018 17:55:11 -0400
Subject: [PATCH] Fix 'zfs recv' of non large_dnode send streams

Currently, there is a bug where older send streams without the
DMU_BACKUP_FEATURE_LARGE_DNODE flag are not handled correctly.
The code in receive_object() fails to handle cases where
drro->drr_dn_slots is set to 0, which is always the case when the
sending code does not support this feature flag. This patch fixes
the issue by ensuring that a value of 0 is treated as
DNODE_MIN_SLOTS.

Tested-by: DHE <git@dehacked.net>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Tom Caputi <tcaputi@datto.com>
Closes #7617
Closes #7662

Signed-off-by: Stoiko Ivanov <s.ivanov@proxmox.com>
---
 module/zfs/dmu_object.c |  3 +++
 module/zfs/dmu_send.c   | 33 +++++++++++++++++++++++++++------
 2 files changed, 30 insertions(+), 6 deletions(-)

diff --git a/module/zfs/dmu_object.c b/module/zfs/dmu_object.c
index 1fc71d10..40c25362 100644
--- a/module/zfs/dmu_object.c
+++ b/module/zfs/dmu_object.c
@@ -261,6 +261,9 @@ dmu_object_reclaim_dnsize(objset_t *os, uint64_t object, dmu_object_type_t ot,
 	int dn_slots = dnodesize >> DNODE_SHIFT;
 	int err;
 
+	if (dn_slots == 0)
+		dn_slots = DNODE_MIN_SLOTS;
+
 	if (object == DMU_META_DNODE_OBJECT)
 		return (SET_ERROR(EBADF));
 
diff --git a/module/zfs/dmu_send.c b/module/zfs/dmu_send.c
index 1de0f316..13aae960 100644
--- a/module/zfs/dmu_send.c
+++ b/module/zfs/dmu_send.c
@@ -2139,6 +2139,8 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
 	dmu_tx_t *tx;
 	uint64_t object;
 	int err;
+	uint8_t dn_slots = drro->drr_dn_slots != 0 ?
+	    drro->drr_dn_slots : DNODE_MIN_SLOTS;
 
 	if (drro->drr_type == DMU_OT_NONE ||
 	    !DMU_OT_IS_VALID(drro->drr_type) ||
@@ -2150,7 +2152,7 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
 	    drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) ||
 	    drro->drr_bonuslen >
 	    DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(rwa->os))) ||
-	    drro->drr_dn_slots >
+	    dn_slots >
 	    (spa_maxdnodesize(dmu_objset_spa(rwa->os)) >> DNODE_SHIFT)) {
 		return (SET_ERROR(EINVAL));
 	}
@@ -2177,12 +2179,31 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
 
 		if (drro->drr_blksz != doi.doi_data_block_size ||
 		    nblkptr < doi.doi_nblkptr ||
-		    drro->drr_dn_slots != doi.doi_dnodesize >> DNODE_SHIFT) {
+		    dn_slots != doi.doi_dnodesize >> DNODE_SHIFT) {
 			err = dmu_free_long_range(rwa->os, drro->drr_object,
 			    0, DMU_OBJECT_END);
 			if (err != 0)
 				return (SET_ERROR(EINVAL));
 		}
+
+		/*
+		 * The dmu does not currently support decreasing nlevels
+		 * on an object. For non-raw sends, this does not matter
+		 * and the new object can just use the previous one's nlevels.
+		 * For raw sends, however, the structure of the received dnode
+		 * (including nlevels) must match that of the send side.
+		 * Therefore, instead of using dmu_object_reclaim(), we must
+		 * free the object completely and call dmu_object_claim_dnsize()
+		 * instead.
+		 */
+		if (dn_slots != doi.doi_dnodesize >> DNODE_SHIFT) {
+			err = dmu_free_long_object(rwa->os, drro->drr_object);
+			if (err != 0)
+				return (SET_ERROR(EINVAL));
+
+			txg_wait_synced(dmu_objset_pool(rwa->os), 0);
+			object = DMU_NEW_OBJECT;
+		}
 	} else if (err == EEXIST) {
 		/*
 		 * The object requested is currently an interior slot of a
@@ -2204,9 +2225,9 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
 	 * another object from the previous snapshot. We must free
 	 * these objects before we attempt to allocate the new dnode.
 	 */
-	if (drro->drr_dn_slots > 1) {
+	if (dn_slots > 1) {
 		for (uint64_t slot = drro->drr_object + 1;
-		    slot < drro->drr_object + drro->drr_dn_slots;
+		    slot < drro->drr_object + dn_slots;
 		    slot++) {
 			dmu_object_info_t slot_doi;
 
@@ -2238,7 +2259,7 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
 		err = dmu_object_claim_dnsize(rwa->os, drro->drr_object,
 		    drro->drr_type, drro->drr_blksz,
 		    drro->drr_bonustype, drro->drr_bonuslen,
-		    drro->drr_dn_slots << DNODE_SHIFT, tx);
+		    dn_slots << DNODE_SHIFT, tx);
 	} else if (drro->drr_type != doi.doi_type ||
 	    drro->drr_blksz != doi.doi_data_block_size ||
 	    drro->drr_bonustype != doi.doi_bonus_type ||
@@ -2247,7 +2268,7 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
 		err = dmu_object_reclaim_dnsize(rwa->os, drro->drr_object,
 		    drro->drr_type, drro->drr_blksz,
 		    drro->drr_bonustype, drro->drr_bonuslen,
-		    drro->drr_dn_slots << DNODE_SHIFT, tx);
+		    dn_slots << DNODE_SHIFT, tx);
 	}
 	if (err != 0) {
 		dmu_tx_commit(tx);