Use new FreeBSD API to largely eliminate object locking
Propagate changes in HEAD that mostly eliminate object locking.

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Alexander Motin <mav@FreeBSD.org>
Signed-off-by: Matt Macy <mmacy@FreeBSD.org>
Closes #10205
commit c614fd6e12
parent 9249f1272e
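For orientation before the diff: a condensed sketch of the pattern this change introduces. The `*_12` macro names, the `__FreeBSD_version >= 1300081` cutoff, and the grab flags are taken from the hunks below; the caller function and its `pindex` parameter are hypothetical, and the snippet assumes the FreeBSD kernel VM headers rather than being a standalone program.

/*
 * Version-gated shims (as added to the FreeBSD SPL vm.h below): on
 * FreeBSD 13 (__FreeBSD_version >= 1300081) the *_12 lock macros expand
 * to nothing and callers rely on the unlocked page-grab primitives; on
 * FreeBSD 12 they fall back to the object write lock and the locked
 * vm_page_grab().
 */
#if __FreeBSD_version >= 1300081
#define	zfs_vmobject_wlock_12(x)
#define	zfs_vmobject_wunlock_12(x)
#else
#define	zfs_vmobject_wlock_12(x)	zfs_vmobject_wlock(x)
#define	zfs_vmobject_wunlock_12(x)	zfs_vmobject_wunlock(x)
#define	vm_page_grab_unlocked(obj, idx, flags) \
	vm_page_grab((obj), (idx), (flags))
#endif

/*
 * Hypothetical caller sketch showing the call-site pattern used in
 * dmu_read_pages() below: the object lock is taken only on FreeBSD 12,
 * while 13 depends on the busy state returned by the unlocked grab.
 */
static vm_page_t
grab_one_page(vm_object_t obj, vm_pindex_t pindex)
{
	vm_page_t m;

	zfs_vmobject_wlock_12(obj);	/* no-op on FreeBSD 13+ */
	m = vm_page_grab_unlocked(obj, pindex,
	    VM_ALLOC_NORMAL | VM_ALLOC_NOWAIT | VM_ALLOC_BUSY_FLAGS);
	zfs_vmobject_wunlock_12(obj);	/* no-op on FreeBSD 13+ */
	return (m);
}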
@@ -41,6 +41,22 @@ void zfs_vmobject_assert_wlocked(vm_object_t object);
 void zfs_vmobject_wlock(vm_object_t object);
 void zfs_vmobject_wunlock(vm_object_t object);
 
+#if __FreeBSD_version >= 1300081
+#define	zfs_vmobject_assert_wlocked_12(x)
+#define	zfs_vmobject_wlock_12(x)
+#define	zfs_vmobject_wunlock_12(x)
+#else
+#define	zfs_vmobject_assert_wlocked_12(x) \
+	zfs_vmobject_assert_wlocked((x))
+#define	zfs_vmobject_wlock_12(x) \
+	zfs_vmobject_wlock(x)
+#define	zfs_vmobject_wunlock_12(x) \
+	zfs_vmobject_wunlock(x)
+#define	vm_page_grab_unlocked(obj, idx, flags) \
+	vm_page_grab((obj), (idx), (flags))
+#define	vm_page_grab_valid_unlocked(m, obj, idx, flags) \
+	vm_page_grab_valid((m), (obj), (idx), (flags))
+#endif
 static inline caddr_t
 zfs_map_page(vm_page_t pp, struct sf_buf **sfp)
 {
@@ -186,11 +186,11 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
 #endif
 
 	vmobj = ma[0]->object;
-	zfs_vmobject_wlock(vmobj);
+	zfs_vmobject_wlock_12(vmobj);
 
 	db = dbp[0];
 	for (i = 0; i < *rbehind; i++) {
-		m = vm_page_grab(vmobj, ma[0]->pindex - 1 - i,
+		m = vm_page_grab_unlocked(vmobj, ma[0]->pindex - 1 - i,
 		    VM_ALLOC_NORMAL | VM_ALLOC_NOWAIT | VM_ALLOC_BUSY_FLAGS);
 		if (m == NULL)
 			break;
@@ -200,7 +200,7 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
 			break;
 		}
 		ASSERT(m->dirty == 0);
-		ASSERT(!pmap_page_is_mapped(m));
+		ASSERT(!pmap_page_is_write_mapped(m));
 
 		ASSERT(db->db_size > PAGE_SIZE);
 		bufoff = IDX_TO_OFF(m->pindex) % db->db_size;
@@ -227,7 +227,7 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
 				vm_page_assert_xbusied(m);
 				ASSERT(vm_page_none_valid(m));
 				ASSERT(m->dirty == 0);
-				ASSERT(!pmap_page_is_mapped(m));
+				ASSERT(!pmap_page_is_write_mapped(m));
 				va = zfs_map_page(m, &sf);
 			}
 		}
@@ -306,7 +306,7 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
 	}
 
 	for (i = 0; i < *rahead; i++) {
-		m = vm_page_grab(vmobj, ma[count - 1]->pindex + 1 + i,
+		m = vm_page_grab_unlocked(vmobj, ma[count - 1]->pindex + 1 + i,
 		    VM_ALLOC_NORMAL | VM_ALLOC_NOWAIT | VM_ALLOC_BUSY_FLAGS);
 		if (m == NULL)
 			break;
@@ -339,7 +339,7 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
 		vm_page_do_sunbusy(m);
 	}
 	*rahead = i;
-	zfs_vmobject_wunlock(vmobj);
+	zfs_vmobject_wunlock_12(vmobj);
 
 	dmu_buf_rele_array(dbp, numbufs, FTAG);
 	return (0);
@@ -394,7 +394,7 @@ page_busy(vnode_t *vp, int64_t start, int64_t off, int64_t nbytes)
 		nbytes = end - off;
 
 	obj = vp->v_object;
-	zfs_vmobject_assert_wlocked(obj);
+	zfs_vmobject_assert_wlocked_12(obj);
 #if __FreeBSD_version < 1300050
 	for (;;) {
 		if ((pp = vm_page_lookup(obj, OFF_TO_IDX(start))) != NULL &&
@@ -427,8 +427,9 @@ page_busy(vnode_t *vp, int64_t start, int64_t off, int64_t nbytes)
 			break;
 	}
 #else
-	vm_page_grab_valid(&pp, obj, OFF_TO_IDX(start), VM_ALLOC_NOCREAT |
-	    VM_ALLOC_SBUSY | VM_ALLOC_NORMAL | VM_ALLOC_IGN_SBUSY);
+	vm_page_grab_valid_unlocked(&pp, obj, OFF_TO_IDX(start),
+	    VM_ALLOC_NOCREAT | VM_ALLOC_SBUSY | VM_ALLOC_NORMAL |
+	    VM_ALLOC_IGN_SBUSY);
 	if (pp != NULL) {
 		ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
 		vm_object_pip_add(obj, 1);
@@ -460,10 +461,9 @@ page_hold(vnode_t *vp, int64_t start)
 	vm_page_t m;
 
 	obj = vp->v_object;
-	zfs_vmobject_assert_wlocked(obj);
-
-	vm_page_grab_valid(&m, obj, OFF_TO_IDX(start), VM_ALLOC_NOCREAT |
-	    VM_ALLOC_WIRED | VM_ALLOC_IGN_SBUSY | VM_ALLOC_NOBUSY);
+	vm_page_grab_valid_unlocked(&m, obj, OFF_TO_IDX(start),
+	    VM_ALLOC_NOCREAT | VM_ALLOC_WIRED | VM_ALLOC_IGN_SBUSY |
+	    VM_ALLOC_NOBUSY);
 	return (m);
 }
 #else
@@ -541,7 +541,7 @@ update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid,
 	ASSERT(obj != NULL);
 
 	off = start & PAGEOFFSET;
-	zfs_vmobject_wlock(obj);
+	zfs_vmobject_wlock_12(obj);
 #if __FreeBSD_version >= 1300041
 	vm_object_pip_add(obj, 1);
 #endif
@@ -550,14 +550,14 @@ update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid,
 		int nbytes = imin(PAGESIZE - off, len);
 
 		if ((pp = page_busy(vp, start, off, nbytes)) != NULL) {
-			zfs_vmobject_wunlock(obj);
+			zfs_vmobject_wunlock_12(obj);
 
 			va = zfs_map_page(pp, &sf);
 			(void) dmu_read(os, oid, start+off, nbytes,
 			    va+off, DMU_READ_PREFETCH);
 			zfs_unmap_page(sf);
 
-			zfs_vmobject_wlock(obj);
+			zfs_vmobject_wlock_12(obj);
 			page_unbusy(pp);
 		}
 		len -= nbytes;
@@ -568,7 +568,7 @@ update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid,
 #else
 	vm_object_pip_wakeupn(obj, 0);
 #endif
-	zfs_vmobject_wunlock(obj);
+	zfs_vmobject_wunlock_12(obj);
 }
 
 /*
@@ -599,36 +599,37 @@ mappedread_sf(vnode_t *vp, int nbytes, uio_t *uio)
 	ASSERT(obj != NULL);
 	ASSERT((uio->uio_loffset & PAGEOFFSET) == 0);
 
-	zfs_vmobject_wlock(obj);
+	zfs_vmobject_wlock_12(obj);
 	for (start = uio->uio_loffset; len > 0; start += PAGESIZE) {
 		int bytes = MIN(PAGESIZE, len);
 
-		pp = vm_page_grab(obj, OFF_TO_IDX(start), VM_ALLOC_SBUSY |
-		    VM_ALLOC_NORMAL | VM_ALLOC_IGN_SBUSY);
+		pp = vm_page_grab_unlocked(obj, OFF_TO_IDX(start),
+		    VM_ALLOC_SBUSY | VM_ALLOC_NORMAL | VM_ALLOC_IGN_SBUSY);
 		if (vm_page_none_valid(pp)) {
-			zfs_vmobject_wunlock(obj);
+			zfs_vmobject_wunlock_12(obj);
 			va = zfs_map_page(pp, &sf);
 			error = dmu_read(os, zp->z_id, start, bytes, va,
 			    DMU_READ_PREFETCH);
 			if (bytes != PAGESIZE && error == 0)
 				bzero(va + bytes, PAGESIZE - bytes);
 			zfs_unmap_page(sf);
-			zfs_vmobject_wlock(obj);
-			vm_page_do_sunbusy(pp);
-#if __FreeBSD_version >= 1300047 && __FreeBSD_version < 1300051
-#error "unsupported version window"
-#elif __FreeBSD_version >= 1300051
+			zfs_vmobject_wlock_12(obj);
+#if __FreeBSD_version >= 1300081
 			if (error == 0) {
 				vm_page_valid(pp);
-				vm_page_lock(pp);
 				vm_page_activate(pp);
-				vm_page_unlock(pp);
+				vm_page_do_sunbusy(pp);
+			} else {
+				zfs_vmobject_wlock(obj);
+				if (!vm_page_wired(pp) && pp->valid == 0 &&
+				    vm_page_busy_tryupgrade(pp))
+					vm_page_free(pp);
+				else
+					vm_page_sunbusy(pp);
+				zfs_vmobject_wunlock(obj);
 			}
-			vm_page_do_sunbusy(pp);
-			if (error != 0 && !vm_page_wired(pp) == 0 &&
-			    pp->valid == 0 && vm_page_tryxbusy(pp))
-				vm_page_free(pp);
 #else
+			vm_page_do_sunbusy(pp);
 			vm_page_lock(pp);
 			if (error) {
 				if (pp->wire_count == 0 && pp->valid == 0 &&
@@ -650,7 +651,7 @@ mappedread_sf(vnode_t *vp, int nbytes, uio_t *uio)
 		uio->uio_offset += bytes;
 		len -= bytes;
 	}
-	zfs_vmobject_wunlock(obj);
+	zfs_vmobject_wunlock_12(obj);
 	return (error);
 }
 
@@ -680,7 +681,7 @@ mappedread(vnode_t *vp, int nbytes, uio_t *uio)
 
 	start = uio->uio_loffset;
 	off = start & PAGEOFFSET;
-	zfs_vmobject_wlock(obj);
+	zfs_vmobject_wlock_12(obj);
 	for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
 		vm_page_t pp;
 		uint64_t bytes = MIN(PAGESIZE - off, len);
@@ -689,24 +690,24 @@ mappedread(vnode_t *vp, int nbytes, uio_t *uio)
 			struct sf_buf *sf;
 			caddr_t va;
 
-			zfs_vmobject_wunlock(obj);
+			zfs_vmobject_wunlock_12(obj);
 			va = zfs_map_page(pp, &sf);
 			error = vn_io_fault_uiomove(va + off, bytes, uio);
 			zfs_unmap_page(sf);
-			zfs_vmobject_wlock(obj);
+			zfs_vmobject_wlock_12(obj);
 			page_unhold(pp);
 		} else {
-			zfs_vmobject_wunlock(obj);
+			zfs_vmobject_wunlock_12(obj);
 			error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
 			    uio, bytes);
-			zfs_vmobject_wlock(obj);
+			zfs_vmobject_wlock_12(obj);
 		}
 		len -= bytes;
 		off = 0;
 		if (error)
 			break;
 	}
-	zfs_vmobject_wunlock(obj);
+	zfs_vmobject_wunlock_12(obj);
 	return (error);
 }
 