mirror of https://git.proxmox.com/git/mirror_zfs.git
synced 2024-11-17 10:01:01 +03:00
Illumos #3122 zfs destroy filesystem should prefetch blocks

3122 zfs destroy filesystem should prefetch blocks
Reviewed by: Christopher Siden <chris.siden@delphix.com>
Reviewed by: George Wilson <george.wilson@delphix.com>
Reviewed by: Adam Leventhal <ahl@delphix.com>
Approved by: Garrett D'Amore <garrett@damore.org>

References:
  illumos/illumos-gate@b4709335aa
  https://www.illumos.org/issues/3122

Ported-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #1565
commit 96b89346c0
parent 3db3ff4a78
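The core of the change is a two-pass walk in dmu_traverse.c: before recursing into the children of an indirect block (or into the dnodes packed into a dnode block), the traversal now issues non-blocking prefetch reads for all of them, so the synchronous reads in the recursive pass mostly hit the ARC instead of waiting on disk one block at a time. Below is a minimal, self-contained sketch of that prefetch-then-visit pattern; it is an illustration only, not ZFS code, and the toy names (toy_blkptr_t, prefetch_child(), visit_child()) are invented for this sketch.

/*
 * Illustrative only: a two-pass walk over the children of one block,
 * mirroring the pattern the diff below adds to traverse_visitbp().
 * None of these types or functions are real ZFS symbols.
 */
#include <stdio.h>

#define	NCHILDREN	4

typedef struct toy_blkptr {
	int tb_id;		/* stand-in for an on-disk block pointer */
} toy_blkptr_t;

/* Stand-in for an async, non-blocking prefetch read (ARC_NOWAIT | ARC_PREFETCH). */
static void
prefetch_child(const toy_blkptr_t *bp)
{
	printf("prefetch block %d (async, does not block)\n", bp->tb_id);
}

/* Stand-in for the recursive, synchronous visit of one child. */
static int
visit_child(const toy_blkptr_t *bp)
{
	printf("visit block %d (read should now hit the cache)\n", bp->tb_id);
	return (0);
}

int
main(void)
{
	toy_blkptr_t cbp[NCHILDREN] = { {0}, {1}, {2}, {3} };
	int i, err = 0;

	/* Pass 1: issue prefetches for every child before visiting any. */
	for (i = 0; i < NCHILDREN; i++)
		prefetch_child(&cbp[i]);

	/* Pass 2: the depth-first visit proper, as traverse_visitbp() now does. */
	for (i = 0; i < NCHILDREN; i++) {
		err = visit_child(&cbp[i]);
		if (err != 0)
			break;
	}
	return (err);
}

The real prefetch path in the diff (traverse_prefetch_metadata()) additionally skips holes, blocks born at or before the traversal's minimum txg, resumed traversals, and level-0 blocks that are not dnode blocks, since only metadata needs to be read to destroy a filesystem.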
include/sys/dnode.h
@@ -20,6 +20,7 @@
  */
 /*
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012 by Delphix. All rights reserved.
  */
 
 #ifndef _SYS_DNODE_H
@@ -276,7 +277,6 @@ void dnode_byteswap(dnode_phys_t *dnp);
 void dnode_buf_byteswap(void *buf, size_t size);
 void dnode_verify(dnode_t *dn);
 int dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx);
-uint64_t dnode_current_max_length(dnode_t *dn);
 void dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx);
 void dnode_clear_range(dnode_t *dn, uint64_t blkid,
 	uint64_t nblks, dmu_tx_t *tx);
module/zfs/bptree.c
@@ -189,7 +189,8 @@ bptree_iterate(objset_t *os, uint64_t obj, boolean_t free, bptree_itor_t func,
 			break;
 
 		err = traverse_dataset_destroyed(os->os_spa, &bte.be_bp,
-		    bte.be_birth_txg, &bte.be_zb, TRAVERSE_POST,
+		    bte.be_birth_txg, &bte.be_zb,
+		    TRAVERSE_PREFETCH_METADATA | TRAVERSE_POST,
 		    bptree_visit_cb, &ba);
 		if (free) {
 			ASSERT(err == 0 || err == ERESTART);
module/zfs/dmu_traverse.c
@@ -63,6 +63,8 @@ typedef struct traverse_data {
 
 static int traverse_dnode(traverse_data_t *td, const dnode_phys_t *dnp,
     arc_buf_t *buf, uint64_t objset, uint64_t object);
+static void prefetch_dnode_metadata(traverse_data_t *td, const dnode_phys_t *,
+    arc_buf_t *buf, uint64_t objset, uint64_t object);
 
 static int
 traverse_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
@@ -178,9 +180,34 @@ traverse_pause(traverse_data_t *td, const zbookmark_t *zb)
 	bcopy(zb, td->td_resume, sizeof (*td->td_resume));
 }
 
+static void
+traverse_prefetch_metadata(traverse_data_t *td,
+    arc_buf_t *pbuf, const blkptr_t *bp, const zbookmark_t *zb)
+{
+	uint32_t flags = ARC_NOWAIT | ARC_PREFETCH;
+
+	if (!(td->td_flags & TRAVERSE_PREFETCH_METADATA))
+		return;
+	/*
+	 * If we are in the process of resuming, don't prefetch, because
+	 * some children will not be needed (and in fact may have already
+	 * been freed).
+	 */
+	if (td->td_resume != NULL && !ZB_IS_ZERO(td->td_resume))
+		return;
+	if (BP_IS_HOLE(bp) || bp->blk_birth <= td->td_min_txg)
+		return;
+	if (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE)
+		return;
+
+	(void) arc_read(NULL, td->td_spa, bp,
+	    pbuf, NULL, NULL, ZIO_PRIORITY_ASYNC_READ,
+	    ZIO_FLAG_CANFAIL, &flags, zb);
+}
+
 static int
 traverse_visitbp(traverse_data_t *td, const dnode_phys_t *dnp,
-    arc_buf_t *pbuf, blkptr_t *bp, const zbookmark_t *zb)
+    arc_buf_t *pbuf, const blkptr_t *bp, const zbookmark_t *zb)
 {
 	zbookmark_t czb;
 	int err = 0, lasterr = 0;
@@ -243,14 +270,21 @@ traverse_visitbp(traverse_data_t *td, const dnode_phys_t *dnp,
 		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
 		if (err)
 			return (err);
 
-		/* recursively visitbp() blocks below this */
 		cbp = buf->b_data;
-		for (i = 0; i < epb; i++, cbp++) {
+		for (i = 0; i < epb; i++) {
 			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
 			    zb->zb_level - 1,
 			    zb->zb_blkid * epb + i);
-			err = traverse_visitbp(td, dnp, buf, cbp, &czb);
+			traverse_prefetch_metadata(td, buf, &cbp[i], &czb);
+		}
+
+		/* recursively visitbp() blocks below this */
+		for (i = 0; i < epb; i++) {
+			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
+			    zb->zb_level - 1,
+			    zb->zb_blkid * epb + i);
+			err = traverse_visitbp(td, dnp, buf, &cbp[i], &czb);
 			if (err) {
 				if (!hard)
 					break;
@@ -267,11 +301,16 @@ traverse_visitbp(traverse_data_t *td, const dnode_phys_t *dnp,
 		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
 		if (err)
 			return (err);
+		dnp = buf->b_data;
 
+		for (i = 0; i < epb; i++) {
+			prefetch_dnode_metadata(td, &dnp[i], buf, zb->zb_objset,
+			    zb->zb_blkid * epb + i);
+		}
+
 		/* recursively visitbp() blocks below this */
-		dnp = buf->b_data;
-		for (i = 0; i < epb; i++, dnp++) {
-			err = traverse_dnode(td, dnp, buf, zb->zb_objset,
+		for (i = 0; i < epb; i++) {
+			err = traverse_dnode(td, &dnp[i], buf, zb->zb_objset,
 			    zb->zb_blkid * epb + i);
 			if (err) {
 				if (!hard)
@@ -292,6 +331,15 @@ traverse_visitbp(traverse_data_t *td, const dnode_phys_t *dnp,
 
 		osp = buf->b_data;
 		dnp = &osp->os_meta_dnode;
+		prefetch_dnode_metadata(td, dnp, buf, zb->zb_objset,
+		    DMU_META_DNODE_OBJECT);
+		if (arc_buf_size(buf) >= sizeof (objset_phys_t)) {
+			prefetch_dnode_metadata(td, &osp->os_userused_dnode,
+			    buf, zb->zb_objset, DMU_USERUSED_OBJECT);
+			prefetch_dnode_metadata(td, &osp->os_groupused_dnode,
+			    buf, zb->zb_objset, DMU_USERUSED_OBJECT);
+		}
+
 		err = traverse_dnode(td, dnp, buf, zb->zb_objset,
 		    DMU_META_DNODE_OBJECT);
 		if (err && hard) {
@@ -334,6 +382,24 @@ post:
 	return (err != 0 ? err : lasterr);
 }
 
+static void
+prefetch_dnode_metadata(traverse_data_t *td, const dnode_phys_t *dnp,
+    arc_buf_t *buf, uint64_t objset, uint64_t object)
+{
+	int j;
+	zbookmark_t czb;
+
+	for (j = 0; j < dnp->dn_nblkptr; j++) {
+		SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
+		traverse_prefetch_metadata(td, buf, &dnp->dn_blkptr[j], &czb);
+	}
+
+	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
+		SET_BOOKMARK(&czb, objset, object, 0, DMU_SPILL_BLKID);
+		traverse_prefetch_metadata(td, buf, &dnp->dn_spill, &czb);
+	}
+}
+
 static int
 traverse_dnode(traverse_data_t *td, const dnode_phys_t *dnp,
     arc_buf_t *buf, uint64_t objset, uint64_t object)
@@ -344,8 +410,7 @@ traverse_dnode(traverse_data_t *td, const dnode_phys_t *dnp,
 
 	for (j = 0; j < dnp->dn_nblkptr; j++) {
 		SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
-		err = traverse_visitbp(td, dnp, buf,
-		    (blkptr_t *)&dnp->dn_blkptr[j], &czb);
+		err = traverse_visitbp(td, dnp, buf, &dnp->dn_blkptr[j], &czb);
 		if (err) {
 			if (!hard)
 				break;
@@ -354,10 +419,8 @@ traverse_dnode(traverse_data_t *td, const dnode_phys_t *dnp,
 	}
 
 	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
-		SET_BOOKMARK(&czb, objset,
-		    object, 0, DMU_SPILL_BLKID);
-		err = traverse_visitbp(td, dnp, buf,
-		    (blkptr_t *)&dnp->dn_spill, &czb);
+		SET_BOOKMARK(&czb, objset, object, 0, DMU_SPILL_BLKID);
+		err = traverse_visitbp(td, dnp, buf, &dnp->dn_spill, &czb);
 		if (err) {
 			if (!hard)
 				return (err);
@@ -438,6 +501,12 @@ traverse_impl(spa_t *spa, dsl_dataset_t *ds, uint64_t objset, blkptr_t *rootbp,
 	ASSERT(ds == NULL || objset == ds->ds_object);
 	ASSERT(!(flags & TRAVERSE_PRE) || !(flags & TRAVERSE_POST));
 
+	/*
+	 * The data prefetching mechanism (the prefetch thread) is incompatible
+	 * with resuming from a bookmark.
+	 */
+	ASSERT(resume == NULL || !(flags & TRAVERSE_PREFETCH_DATA));
+
 	td = kmem_alloc(sizeof(traverse_data_t), KM_PUSHPAGE);
 	pd = kmem_zalloc(sizeof(prefetch_data_t), KM_PUSHPAGE);
 	czb = kmem_alloc(sizeof(zbookmark_t), KM_PUSHPAGE);
@@ -468,7 +537,7 @@ traverse_impl(spa_t *spa, dsl_dataset_t *ds, uint64_t objset, blkptr_t *rootbp,
 		traverse_zil(td, &os->os_zil_header);
 	}
 
-	if (!(flags & TRAVERSE_PREFETCH) ||
+	if (!(flags & TRAVERSE_PREFETCH_DATA) ||
 	    0 == taskq_dispatch(system_taskq, traverse_prefetch_thread,
 	    td, TQ_NOQUEUE))
 		pd->pd_exited = B_TRUE;