Mirror of https://git.proxmox.com/git/mirror_zfs.git (synced 2026-01-14 17:22:05 +03:00)
Pass flags to more DMU write/hold functions
Over time, many DMU functions have gained a flags argument to control prefetch, caching, etc. A few functions, however, were left without one, even though a closer look shows that many of their call sites do not need prefetch because of their access patterns. This patch adds a flags argument to dmu_write(), dmu_buf_hold_array() and dmu_buf_hold_array_by_bonus(), passing DMU_READ_NO_PREFETCH where applicable. I am also going to pass DMU_UNCACHEDIO to some of them later.

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Rob Norris <robn@despairlabs.com>
Signed-off-by: Alexander Motin <alexander.motin@TrueNAS.com>
Closes #17872
commit 5847626175
parent 9a9e06e5dd
cmd/ztest.c | 15
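With the new prototypes, every caller of dmu_write(), dmu_buf_hold_array() and dmu_buf_hold_array_by_bonus() must now pass a dmu_flags_t. The sketch below shows how a hypothetical out-of-tree caller would adapt; the function names, arguments, and the choice between DMU_READ_PREFETCH and DMU_READ_NO_PREFETCH are illustrative assumptions, not code from this commit (FTAG and TRUE come from the usual ZFS headers).

/*
 * Illustrative sketch only: a hypothetical caller updated for the new
 * prototypes.  Names and flag choices are assumptions for the example.
 */
#include <sys/zfs_context.h>
#include <sys/dmu.h>

static void
example_overwrite(objset_t *os, uint64_t object, uint64_t off,
    uint64_t size, const void *buf, dmu_tx_t *tx)
{
	/*
	 * Before this commit: dmu_write(os, object, off, size, buf, tx);
	 * After it, an explicit dmu_flags_t is required.  A full overwrite
	 * of the range gains nothing from prefetching the old data.
	 */
	dmu_write(os, object, off, size, buf, tx, DMU_READ_NO_PREFETCH);
}

static int
example_hold(objset_t *os, uint64_t object, uint64_t off, uint64_t size)
{
	dmu_buf_t **dbp;
	int numbufs, err;

	/* dmu_buf_hold_array() likewise gained a trailing flags argument. */
	err = dmu_buf_hold_array(os, object, off, size, TRUE, FTAG,
	    &numbufs, &dbp, DMU_READ_PREFETCH);
	if (err != 0)
		return (err);
	/* ... inspect dbp[0..numbufs-1]->db_data here ... */
	dmu_buf_rele_array(dbp, numbufs, FTAG);
	return (0);
}

The same split is visible in the hunks that follow: bulk or sequential paths (ztest, zvol replay, page writeback) keep DMU_READ_PREFETCH, while small metadata-style writers switch to DMU_READ_NO_PREFETCH.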
@@ -2306,7 +2306,8 @@ ztest_replay_write(void *arg1, void *arg2, boolean_t byteswap)
 	}
 
 	if (abuf == NULL) {
-		dmu_write(os, lr->lr_foid, offset, length, data, tx);
+		dmu_write(os, lr->lr_foid, offset, length, data, tx,
+		    DMU_READ_PREFETCH);
 	} else {
 		memcpy(abuf->b_data, data, length);
 		VERIFY0(dmu_assign_arcbuf_by_dbuf(db, offset, abuf, tx, 0));
@@ -5243,7 +5244,8 @@ ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id)
 	 * We've verified all the old bufwads, and made new ones.
 	 * Now write them out.
 	 */
-	dmu_write(os, packobj, packoff, packsize, packbuf, tx);
+	dmu_write(os, packobj, packoff, packsize, packbuf, tx,
+	    DMU_READ_PREFETCH);
 
 	if (freeit) {
 		if (ztest_opts.zo_verbose >= 7) {
@@ -5258,7 +5260,8 @@ ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id)
 			    " txg %"PRIx64"\n",
 			    bigoff, bigsize, txg);
 		}
-		dmu_write(os, bigobj, bigoff, bigsize, bigbuf, tx);
+		dmu_write(os, bigobj, bigoff, bigsize, bigbuf, tx,
+		    DMU_READ_PREFETCH);
 	}
 
 	dmu_tx_commit(tx);
@@ -5513,7 +5516,8 @@ ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id)
 		 * We've verified all the old bufwads, and made new ones.
 		 * Now write them out.
 		 */
-		dmu_write(os, packobj, packoff, packsize, packbuf, tx);
+		dmu_write(os, packobj, packoff, packsize, packbuf, tx,
+		    DMU_READ_PREFETCH);
 		if (ztest_opts.zo_verbose >= 7) {
 			(void) printf("writing offset %"PRIx64" size %"PRIx64""
 			    " txg %"PRIx64"\n",
@@ -6119,7 +6123,8 @@ ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id)
 		    "future leak: got %"PRIu64", open txg is %"PRIu64"",
 		    old_txg, txg);
 
-	dmu_write(os, od->od_object, 0, sizeof (uint64_t), &txg, tx);
+	dmu_write(os, od->od_object, 0, sizeof (uint64_t), &txg, tx,
+	    DMU_READ_PREFETCH);
 
 	(void) mutex_enter(&zcl.zcl_callbacks_lock);
@@ -625,7 +625,7 @@ int dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
     const void *tag, dmu_buf_t **, dmu_flags_t flags);
 int dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
     uint64_t length, int read, const void *tag, int *numbufsp,
-    dmu_buf_t ***dbpp);
+    dmu_buf_t ***dbpp, dmu_flags_t flags);
 int dmu_buf_hold_noread(objset_t *os, uint64_t object, uint64_t offset,
     const void *tag, dmu_buf_t **dbp);
 int dmu_buf_hold_by_dnode(dnode_t *dn, uint64_t offset,
@@ -668,7 +668,7 @@ uint64_t dmu_buf_user_refcount(dmu_buf_t *db);
  */
 int dmu_buf_hold_array_by_bonus(dmu_buf_t *db, uint64_t offset,
     uint64_t length, boolean_t read, const void *tag,
-    int *numbufsp, dmu_buf_t ***dbpp);
+    int *numbufsp, dmu_buf_t ***dbpp, dmu_flags_t flags);
 void dmu_buf_rele_array(dmu_buf_t **, int numbufs, const void *tag);
 
 typedef void dmu_buf_evict_func_t(void *user_ptr);
@@ -924,7 +924,7 @@ int dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
 int dmu_read_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size, void *buf,
     dmu_flags_t flags);
 void dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
-    const void *buf, dmu_tx_t *tx);
+    const void *buf, dmu_tx_t *tx, dmu_flags_t flags);
 int dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size,
     const void *buf, dmu_tx_t *tx, dmu_flags_t flags);
 void dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
@@ -76,7 +76,7 @@ dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
 		return (0);
 
 	err = dmu_buf_hold_array(os, object, offset, size,
-	    FALSE, FTAG, &numbufs, &dbp);
+	    FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH);
 	if (err)
 		return (err);
 
@@ -147,7 +147,8 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
 	ASSERT3S(last_size, <=, PAGE_SIZE);
 
 	err = dmu_buf_hold_array(os, object, IDX_TO_OFF(ma[0]->pindex),
-	    IDX_TO_OFF(count - 1) + last_size, TRUE, FTAG, &numbufs, &dbp);
+	    IDX_TO_OFF(count - 1) + last_size, TRUE, FTAG, &numbufs, &dbp,
+	    DMU_READ_PREFETCH);
 	if (err != 0)
 		return (err);
 
@@ -1262,7 +1262,8 @@ zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
 			if (aclnode->z_ace_count == 0)
 				continue;
 			dmu_write(zfsvfs->z_os, aoid, off,
-			    aclnode->z_size, aclnode->z_acldata, tx);
+			    aclnode->z_size, aclnode->z_acldata, tx,
+			    DMU_READ_NO_PREFETCH);
 			off += aclnode->z_size;
 		}
 	} else {
@@ -4481,7 +4481,8 @@ zfs_putpages(struct vnode *vp, vm_page_t *ma, size_t len, int flags,
 		for (i = 0; wlen > 0; woff += tocopy, wlen -= tocopy, i++) {
 			tocopy = MIN(PAGE_SIZE, wlen);
 			va = zfs_map_page(ma[i], &sf);
-			dmu_write(zfsvfs->z_os, zp->z_id, woff, tocopy, va, tx);
+			dmu_write(zfsvfs->z_os, zp->z_id, woff, tocopy, va, tx,
+			    DMU_READ_PREFETCH);
 			zfs_unmap_page(sf);
 		}
 	} else {
@@ -1447,7 +1447,8 @@ zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
 			if (aclnode->z_ace_count == 0)
 				continue;
 			dmu_write(zfsvfs->z_os, aoid, off,
-			    aclnode->z_size, aclnode->z_acldata, tx);
+			    aclnode->z_size, aclnode->z_acldata, tx,
+			    DMU_READ_NO_PREFETCH);
 			off += aclnode->z_size;
 		}
 	} else {
@@ -3892,7 +3892,8 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc,
 
 	va = kmap(pp);
 	ASSERT3U(pglen, <=, PAGE_SIZE);
-	dmu_write(zfsvfs->z_os, zp->z_id, pgoff, pglen, va, tx);
+	dmu_write(zfsvfs->z_os, zp->z_id, pgoff, pglen, va, tx,
+	    DMU_READ_PREFETCH);
 	kunmap(pp);
 
 	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
@@ -752,7 +752,8 @@ bpobj_enqueue_subobj(bpobj_t *bpo, uint64_t subobj, dmu_tx_t *tx)
 		}
 		dmu_write(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs,
 		    bpo->bpo_phys->bpo_num_subobjs * sizeof (subobj),
-		    numsubsub * sizeof (subobj), subdb->db_data, tx);
+		    numsubsub * sizeof (subobj), subdb->db_data, tx,
+		    DMU_READ_NO_PREFETCH);
 		dmu_buf_rele(subdb, FTAG);
 		bpo->bpo_phys->bpo_num_subobjs += numsubsub;
 
@@ -777,7 +778,7 @@ bpobj_enqueue_subobj(bpobj_t *bpo, uint64_t subobj, dmu_tx_t *tx)
 		dmu_write(bpo->bpo_os, bpo->bpo_object,
 		    bpo->bpo_phys->bpo_num_blkptrs * sizeof (blkptr_t),
 		    numbps * sizeof (blkptr_t),
-		    bps->db_data, tx);
+		    bps->db_data, tx, DMU_READ_NO_PREFETCH);
 		dmu_buf_rele(bps, FTAG);
 		bpo->bpo_phys->bpo_num_blkptrs += numbps;
 
@@ -794,7 +795,7 @@ bpobj_enqueue_subobj(bpobj_t *bpo, uint64_t subobj, dmu_tx_t *tx)
 
 		dmu_write(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs,
 		    bpo->bpo_phys->bpo_num_subobjs * sizeof (subobj),
-		    sizeof (subobj), &subobj, tx);
+		    sizeof (subobj), &subobj, tx, DMU_READ_NO_PREFETCH);
 		bpo->bpo_phys->bpo_num_subobjs++;
 	}
 
@@ -137,7 +137,8 @@ bptree_add(objset_t *os, uint64_t obj, blkptr_t *bp, uint64_t birth_txg,
 	bte = kmem_zalloc(sizeof (*bte), KM_SLEEP);
 	bte->be_birth_txg = birth_txg;
 	bte->be_bp = *bp;
-	dmu_write(os, obj, bt->bt_end * sizeof (*bte), sizeof (*bte), bte, tx);
+	dmu_write(os, obj, bt->bt_end * sizeof (*bte), sizeof (*bte), bte, tx,
+	    DMU_READ_NO_PREFETCH);
 	kmem_free(bte, sizeof (*bte));
 
 	dmu_buf_will_dirty(db, tx);
@@ -247,7 +248,8 @@ bptree_iterate(objset_t *os, uint64_t obj, boolean_t free, bptree_itor_t func,
 			    ZB_DESTROYED_OBJSET);
 			ASSERT0(bte.be_zb.zb_level);
 			dmu_write(os, obj, i * sizeof (bte),
-			    sizeof (bte), &bte, tx);
+			    sizeof (bte), &bte, tx,
+			    DMU_READ_NO_PREFETCH);
 			if (err == EIO || err == ECKSUM ||
 			    err == ENXIO) {
 				/*
@@ -269,7 +271,8 @@ bptree_iterate(objset_t *os, uint64_t obj, boolean_t free, bptree_itor_t func,
 				 */
 				bte.be_birth_txg = UINT64_MAX;
 				dmu_write(os, obj, i * sizeof (bte),
-				    sizeof (bte), &bte, tx);
+				    sizeof (bte), &bte, tx,
+				    DMU_READ_NO_PREFETCH);
 			}
 
 			if (!ioerr) {
@@ -809,7 +809,7 @@ brt_vdev_sync(spa_t *spa, brt_vdev_t *brtvd, dmu_tx_t *tx)
 		 */
 		dmu_write(spa->spa_meta_objset, brtvd->bv_mos_brtvdev, 0,
 		    brtvd->bv_size * sizeof (brtvd->bv_entcount[0]),
-		    brtvd->bv_entcount, tx);
+		    brtvd->bv_entcount, tx, DMU_READ_NO_PREFETCH);
 		uint64_t nblocks = BRT_RANGESIZE_TO_NBLOCKS(brtvd->bv_size);
 		memset(brtvd->bv_bitmap, 0, BT_SIZEOFMAP(nblocks));
 		brtvd->bv_entcount_dirty = FALSE;
@@ -635,7 +635,7 @@ dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
 int
 dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
     uint64_t length, int read, const void *tag, int *numbufsp,
-    dmu_buf_t ***dbpp)
+    dmu_buf_t ***dbpp, dmu_flags_t flags)
 {
 	dnode_t *dn;
 	int err;
@@ -645,7 +645,7 @@ dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
 		return (err);
 
 	err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
-	    numbufsp, dbpp, DMU_READ_PREFETCH);
+	    numbufsp, dbpp, flags);
 
 	dnode_rele(dn, FTAG);
 
@@ -655,14 +655,14 @@ dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
 int
 dmu_buf_hold_array_by_bonus(dmu_buf_t *db_fake, uint64_t offset,
     uint64_t length, boolean_t read, const void *tag, int *numbufsp,
-    dmu_buf_t ***dbpp)
+    dmu_buf_t ***dbpp, dmu_flags_t flags)
 {
 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
 	int err;
 
 	DB_DNODE_ENTER(db);
 	err = dmu_buf_hold_array_by_dnode(DB_DNODE(db), offset, length, read,
-	    tag, numbufsp, dbpp, DMU_READ_PREFETCH);
+	    tag, numbufsp, dbpp, flags);
 	DB_DNODE_EXIT(db);
 
 	return (err);
@@ -1293,7 +1293,7 @@ dmu_write_impl(dmu_buf_t **dbp, int numbufs, uint64_t offset, uint64_t size,
 
 void
 dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
-    const void *buf, dmu_tx_t *tx)
+    const void *buf, dmu_tx_t *tx, dmu_flags_t flags)
 {
 	dmu_buf_t **dbp;
 	int numbufs;
@@ -1302,8 +1302,8 @@ dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
 		return;
 
 	VERIFY0(dmu_buf_hold_array(os, object, offset, size,
-	    FALSE, FTAG, &numbufs, &dbp));
-	dmu_write_impl(dbp, numbufs, offset, size, buf, tx, DMU_READ_PREFETCH);
+	    FALSE, FTAG, &numbufs, &dbp, flags));
+	dmu_write_impl(dbp, numbufs, offset, size, buf, tx, flags);
 	dmu_buf_rele_array(dbp, numbufs, FTAG);
 }
 
@@ -1346,7 +1346,7 @@ dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
 		return;
 
 	VERIFY0(dmu_buf_hold_array(os, object, offset, size,
-	    FALSE, FTAG, &numbufs, &dbp));
+	    FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH));
 
 	for (i = 0; i < numbufs; i++) {
 		dmu_buf_t *db = dbp[i];
@@ -1383,7 +1383,7 @@ dmu_redact(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
 	dmu_buf_t **dbp;
 
 	VERIFY0(dmu_buf_hold_array(os, object, offset, size, FALSE, FTAG,
-	    &numbufs, &dbp));
+	    &numbufs, &dbp, DMU_READ_PREFETCH));
 	for (i = 0; i < numbufs; i++)
 		dmu_buf_redact(dbp[i], tx);
 	dmu_buf_rele_array(dbp, numbufs, FTAG);
@@ -2592,7 +2592,7 @@ dmu_read_l0_bps(objset_t *os, uint64_t object, uint64_t offset, uint64_t length,
 	int error, numbufs;
 
 	error = dmu_buf_hold_array(os, object, offset, length, FALSE, FTAG,
-	    &numbufs, &dbp);
+	    &numbufs, &dbp, DMU_READ_PREFETCH);
 	if (error != 0) {
 		if (error == ESRCH) {
 			error = SET_ERROR(ENXIO);
@@ -2693,7 +2693,7 @@ dmu_brt_clone(objset_t *os, uint64_t object, uint64_t offset, uint64_t length,
 	spa = os->os_spa;
 
 	VERIFY0(dmu_buf_hold_array(os, object, offset, length, FALSE, FTAG,
-	    &numbufs, &dbp));
+	    &numbufs, &dbp, DMU_READ_PREFETCH));
 	ASSERT3U(nbps, ==, numbufs);
 
 	/*
@@ -544,7 +544,8 @@ redaction_list_update_sync(void *arg, dmu_tx_t *tx)
 		if (index == bufsize) {
 			dmu_write(mos, rl->rl_object,
 			    rl->rl_phys->rlp_num_entries * sizeof (*buf),
-			    bufsize * sizeof (*buf), buf, tx);
+			    bufsize * sizeof (*buf), buf, tx,
+			    DMU_READ_NO_PREFETCH);
 			rl->rl_phys->rlp_num_entries += bufsize;
 			index = 0;
 		}
@@ -552,7 +553,8 @@ redaction_list_update_sync(void *arg, dmu_tx_t *tx)
 	}
 	if (index > 0) {
 		dmu_write(mos, rl->rl_object, rl->rl_phys->rlp_num_entries *
-		    sizeof (*buf), index * sizeof (*buf), buf, tx);
+		    sizeof (*buf), index * sizeof (*buf), buf, tx,
+		    DMU_READ_NO_PREFETCH);
 		rl->rl_phys->rlp_num_entries += index;
 	}
 	kmem_free(buf, bufsize * sizeof (*buf));
@@ -3966,7 +3966,8 @@ metaslab_condense(metaslab_t *msp, dmu_tx_t *tx)
 		object = space_map_object(msp->ms_sm);
 		dmu_write(spa->spa_meta_objset,
 		    msp->ms_group->mg_vd->vdev_ms_array, sizeof (uint64_t) *
-		    msp->ms_id, sizeof (uint64_t), &object, tx);
+		    msp->ms_id, sizeof (uint64_t), &object, tx,
+		    DMU_READ_NO_PREFETCH);
 	}
 
 	/*
@@ -4292,7 +4293,8 @@ metaslab_sync(metaslab_t *msp, uint64_t txg)
 		VERIFY3U(new_object, !=, 0);
 
 		dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
-		    msp->ms_id, sizeof (uint64_t), &new_object, tx);
+		    msp->ms_id, sizeof (uint64_t), &new_object, tx,
+		    DMU_READ_NO_PREFETCH);
 
 		VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
 		    msp->ms_start, msp->ms_size, vd->vdev_ashift));
@@ -6328,7 +6330,7 @@ metaslab_update_ondisk_flush_data(metaslab_t *ms, dmu_tx_t *tx)
 	}
 
 	dmu_write(spa_meta_objset(spa), object, entry_offset, entry_size,
-	    &entry, tx);
+	    &entry, tx, DMU_READ_NO_PREFETCH);
 }
 
 void
@@ -9601,7 +9601,8 @@ spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
 	    KM_SLEEP));
 	memset(packed + nvsize, 0, bufsize - nvsize);
 
-	dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
+	dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx,
+	    DMU_READ_NO_PREFETCH);
 
 	vmem_free(packed, bufsize);
@@ -427,7 +427,7 @@ spa_checkpoint_discard_thread(void *arg, zthr_t *zthr)
 		 */
 		int error = dmu_buf_hold_array_by_bonus(
 		    checkpoint_sm->sm_dbuf, offset, size,
-		    B_TRUE, FTAG, &numbufs, &dbp);
+		    B_TRUE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH);
 		if (error != 0) {
 			zfs_panic_recover("zfs: error %d was returned "
 			    "while prefetching checkpoint space map "
@@ -169,13 +169,14 @@ spa_history_write(spa_t *spa, void *buf, uint64_t len, spa_history_phys_t *shpp,
 	phys_eof = spa_history_log_to_phys(shpp->sh_eof, shpp);
 	firstwrite = MIN(len, shpp->sh_phys_max_off - phys_eof);
 	shpp->sh_eof += len;
-	dmu_write(mos, spa->spa_history, phys_eof, firstwrite, buf, tx);
+	dmu_write(mos, spa->spa_history, phys_eof, firstwrite, buf, tx,
+	    DMU_READ_NO_PREFETCH);
 
 	len -= firstwrite;
 	if (len > 0) {
 		/* write out the rest at the beginning of physical file */
 		dmu_write(mos, spa->spa_history, shpp->sh_pool_create_len,
-		    len, (char *)buf + firstwrite, tx);
+		    len, (char *)buf + firstwrite, tx, DMU_READ_NO_PREFETCH);
 	}
 
 	return (0);
@@ -537,7 +537,7 @@ space_map_write_intro_debug(space_map_t *sm, maptype_t maptype, dmu_tx_t *tx)
 	    SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));
 
 	dmu_write(sm->sm_os, space_map_object(sm), sm->sm_phys->smp_length,
-	    sizeof (dentry), &dentry, tx);
+	    sizeof (dentry), &dentry, tx, DMU_READ_NO_PREFETCH);
 
 	sm->sm_phys->smp_length += sizeof (dentry);
 }
@@ -147,7 +147,7 @@ vdev_indirect_births_add_entry(vdev_indirect_births_t *vib,
 
 	old_size = vdev_indirect_births_size_impl(vib);
 	dmu_write(vib->vib_objset, vib->vib_object, old_size, sizeof (vibe),
-	    &vibe, tx);
+	    &vibe, tx, DMU_READ_NO_PREFETCH);
 	vib->vib_phys->vib_count++;
 	new_size = vdev_indirect_births_size_impl(vib);
 
@@ -459,13 +459,14 @@ vdev_indirect_mapping_add_entries(vdev_indirect_mapping_t *vim,
 		dmu_write(vim->vim_objset, vim->vim_object,
 		    vim->vim_phys->vimp_num_entries * sizeof (*mapbuf),
 		    i * sizeof (*mapbuf),
-		    mapbuf, tx);
+		    mapbuf, tx, DMU_READ_NO_PREFETCH);
 		if (vim->vim_havecounts) {
 			dmu_write(vim->vim_objset,
 			    vim->vim_phys->vimp_counts_object,
 			    vim->vim_phys->vimp_num_entries *
 			    sizeof (*countbuf),
-			    i * sizeof (*countbuf), countbuf, tx);
+			    i * sizeof (*countbuf), countbuf, tx,
+			    DMU_READ_NO_PREFETCH);
 		}
 		vim->vim_phys->vimp_num_entries += i;
 	}
@@ -268,7 +268,7 @@ zfs_fuid_sync(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
 	nvlist_free(nvp);
 	zfsvfs->z_fuid_size = nvsize;
 	dmu_write(zfsvfs->z_os, zfsvfs->z_fuid_obj, 0,
-	    zfsvfs->z_fuid_size, packed, tx);
+	    zfsvfs->z_fuid_size, packed, tx, DMU_READ_NO_PREFETCH);
 	kmem_free(packed, zfsvfs->z_fuid_size);
 	VERIFY0(dmu_bonus_hold(zfsvfs->z_os, zfsvfs->z_fuid_obj, FTAG, &db));
 	dmu_buf_will_dirty(db, tx);
@@ -547,7 +547,8 @@ zvol_replay_write(void *arg1, void *arg2, boolean_t byteswap)
 	if (error) {
 		dmu_tx_abort(tx);
 	} else {
-		dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
+		dmu_write(os, ZVOL_OBJ, offset, length, data, tx,
+		    DMU_READ_PREFETCH);
 		(void) zil_replaying(zv->zv_zilog, tx);
 		dmu_tx_commit(tx);
 	}