Mirror of https://git.proxmox.com/git/mirror_zfs.git
Implement fallocate FALLOC_FL_PUNCH_HOLE
Add support for the FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE mode of
fallocate(2). Mimic the behavior of other native file systems such as
ext4 in cases where the file might be extended. If the offset is beyond
the end of the file, return success without changing the file. If the
extent of the punched hole would extend the file, only the existing
tail of the file is punched.

Add the zfs_zero_partial_page() function, modeled after update_pages(),
to handle zeroing partial pages in a hole-punching operation. It must be
used under a range lock for the requested region so that the ARC and
page cache stay in sync.

Move the existing page cache truncation via truncate_setsize() into
zfs_freesp() for better source structure compatibility with upstream
code, and add page cache truncation to zfs_freesp() and zfs_free_range()
to handle hole punching.

Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Tim Chase <tim@chase2k.com>
Closes #2619
commit 223df0161f
parent 4f68d7878f
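For context, the user-visible operation this commit enables looks like the minimal user-space sketch below (the path /tank/testfile and the offset and length are illustrative assumptions, not taken from the commit):

/*
 * Minimal sketch: punch a hole in an existing file with fallocate(2).
 * KEEP_SIZE is mandatory with PUNCH_HOLE; the file length is unchanged
 * and reads from the punched region return zeros.
 */
#define _GNU_SOURCE
#include <fcntl.h>		/* fallocate() */
#include <linux/falloc.h>	/* FALLOC_FL_PUNCH_HOLE, FALLOC_FL_KEEP_SIZE */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
	/* assumed pre-existing file on a ZFS dataset */
	int fd = open("/tank/testfile", O_RDWR);
	if (fd == -1) {
		perror("open");
		return (EXIT_FAILURE);
	}

	/* deallocate 1 MiB starting at offset 4096 */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
	    4096, 1024 * 1024) == -1)
		perror("fallocate");

	close(fd);
	return (0);
}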
--- a/include/sys/zpl.h
+++ b/include/sys/zpl.h
@@ -52,8 +52,10 @@ extern ssize_t zpl_read_common(struct inode *ip, const char *buf,
 extern ssize_t zpl_write_common(struct inode *ip, const char *buf,
     size_t len, loff_t *ppos, uio_seg_t segment, int flags,
     cred_t *cr);
+#if defined(HAVE_FILE_FALLOCATE) || defined(HAVE_INODE_FALLOCATE)
 extern long zpl_fallocate_common(struct inode *ip, int mode,
     loff_t offset, loff_t len);
+#endif /* defined(HAVE_FILE_FALLOCATE) || defined(HAVE_INODE_FALLOCATE) */
 
 extern const struct address_space_operations zpl_address_space_operations;
 extern const struct file_operations zpl_file_operations;
--- a/module/zfs/zfs_vnops.c
+++ b/module/zfs/zfs_vnops.c
@@ -2565,8 +2565,6 @@ top:
 		if (err)
 			goto out3;
 
-		truncate_setsize(ip, vap->va_size);
-
 		/*
 		 * XXX - Note, we are not providing any open
 		 * mode flags here (like FNDELAY), so we may
--- a/module/zfs/zfs_znode.c
+++ b/module/zfs/zfs_znode.c
@@ -1344,6 +1344,50 @@ zfs_extend(znode_t *zp, uint64_t end)
 	return (0);
 }
 
+/*
+ * zfs_zero_partial_page - Modeled after update_pages() but
+ * with different arguments and semantics for use by zfs_freesp().
+ *
+ * Zeroes a piece of a single page cache entry for zp at offset
+ * start and length len.
+ *
+ * Caller must acquire a range lock on the file for the region
+ * being zeroed in order that the ARC and page cache stay in sync.
+ */
+static void
+zfs_zero_partial_page(znode_t *zp, uint64_t start, uint64_t len)
+{
+	struct address_space *mp = ZTOI(zp)->i_mapping;
+	struct page *pp;
+	int64_t off;
+	void *pb;
+
+	ASSERT((start & PAGE_CACHE_MASK) ==
+	    ((start + len - 1) & PAGE_CACHE_MASK));
+
+	off = start & (PAGE_CACHE_SIZE - 1);
+	start &= PAGE_CACHE_MASK;
+
+	pp = find_lock_page(mp, start >> PAGE_CACHE_SHIFT);
+	if (pp) {
+		if (mapping_writably_mapped(mp))
+			flush_dcache_page(pp);
+
+		pb = kmap(pp);
+		bzero(pb + off, len);
+		kunmap(pp);
+
+		if (mapping_writably_mapped(mp))
+			flush_dcache_page(pp);
+
+		mark_page_accessed(pp);
+		SetPageUptodate(pp);
+		ClearPageError(pp);
+		unlock_page(pp);
+		page_cache_release(pp);
+	}
+}
+
 /*
  * Free space in a file.
  *
@@ -1378,6 +1422,40 @@ zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
 
 	error = dmu_free_long_range(zsb->z_os, zp->z_id, off, len);
 
+	/*
+	 * Zero partial page cache entries. This must be done under a
+	 * range lock in order to keep the ARC and page cache in sync.
+	 */
+	if (zp->z_is_mapped) {
+		loff_t first_page, last_page, page_len;
+		loff_t first_page_offset, last_page_offset;
+
+		/* first possible full page in hole */
+		first_page = (off + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+		/* last page of hole */
+		last_page = (off + len) >> PAGE_CACHE_SHIFT;
+
+		/* offset of first_page */
+		first_page_offset = first_page << PAGE_CACHE_SHIFT;
+		/* offset of last_page */
+		last_page_offset = last_page << PAGE_CACHE_SHIFT;
+
+		if (first_page > last_page) {
+			/* entire punched area within a single page */
+			zfs_zero_partial_page(zp, off, len);
+		} else {
+			/* beginning of punched area at the end of a page */
+			page_len = first_page_offset - off;
+			if (page_len > 0)
+				zfs_zero_partial_page(zp, off, page_len);
+
+			/* end of punched area at the beginning of a page */
+			page_len = off + len - last_page_offset;
+			if (page_len > 0)
+				zfs_zero_partial_page(zp, last_page_offset,
+				    page_len);
+		}
+	}
 	zfs_range_unlock(rl);
 
 	return (error);
@@ -1479,8 +1557,7 @@ zfs_freesp(znode_t *zp, uint64_t off, uint64_t len, int flag, boolean_t log)
 		error = zfs_extend(zp, off+len);
 		if (error == 0 && log)
 			goto log;
-		else
-			return (error);
+		goto out;
 	}
 
 	/*
@@ -1500,7 +1577,7 @@ zfs_freesp(znode_t *zp, uint64_t off, uint64_t len, int flag, boolean_t log)
 		error = zfs_extend(zp, off+len);
 	}
 	if (error || !log)
-		return (error);
+		goto out;
 log:
 	tx = dmu_tx_create(zsb->z_os);
 	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
@@ -1508,7 +1585,7 @@ log:
 	error = dmu_tx_assign(tx, TXG_WAIT);
 	if (error) {
 		dmu_tx_abort(tx);
-		return (error);
+		goto out;
 	}
 
 	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, mtime, 16);
@@ -1522,8 +1599,40 @@ log:
 	zfs_log_truncate(zilog, tx, TX_TRUNCATE, zp, off, len);
 
 	dmu_tx_commit(tx);
 
 	zfs_inode_update(zp);
-	return (0);
+	error = 0;
+
+out:
+	/*
+	 * Truncate the page cache - for file truncate operations, use
+	 * the purpose-built API for truncations. For punching operations,
+	 * truncate only whole pages within the region; partial pages are
+	 * zeroed under a range lock in zfs_free_range().
+	 */
+	if (len == 0)
+		truncate_setsize(ZTOI(zp), off);
+	else if (zp->z_is_mapped) {
+		loff_t first_page, last_page;
+		loff_t first_page_offset, last_page_offset;
+
+		/* first possible full page in hole */
+		first_page = (off + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+		/* last page of hole */
+		last_page = (off + len) >> PAGE_CACHE_SHIFT;
+
+		/* offset of first_page */
+		first_page_offset = first_page << PAGE_CACHE_SHIFT;
+		/* offset of last_page */
+		last_page_offset = last_page << PAGE_CACHE_SHIFT;
+
+		/* truncate whole pages */
+		if (last_page_offset > first_page_offset) {
+			truncate_inode_pages_range(ZTOI(zp)->i_mapping,
+			    first_page_offset, last_page_offset - 1);
+		}
+	}
+	return (error);
 }
 
 void
--- a/module/zfs/zpl_file.c
+++ b/module/zfs/zpl_file.c
@@ -558,38 +558,53 @@ zpl_writepage(struct page *pp, struct writeback_control *wbc)
 
 /*
  * The only flag combination which matches the behavior of zfs_space()
- * is FALLOC_FL_PUNCH_HOLE. This flag was introduced in the 2.6.38 kernel.
+ * is FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE. The FALLOC_FL_PUNCH_HOLE
+ * flag was introduced in the 2.6.38 kernel.
  */
+#if defined(HAVE_FILE_FALLOCATE) || defined(HAVE_INODE_FALLOCATE)
 long
 zpl_fallocate_common(struct inode *ip, int mode, loff_t offset, loff_t len)
 {
-	cred_t *cr = CRED();
 	int error = -EOPNOTSUPP;
 
-	if (mode & FALLOC_FL_KEEP_SIZE)
-		return (-EOPNOTSUPP);
+#if defined(FALLOC_FL_PUNCH_HOLE) && defined(FALLOC_FL_KEEP_SIZE)
+	cred_t *cr = CRED();
+	flock64_t bf;
+	loff_t olen;
+
+	if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
+		return (error);
 
 	crhold(cr);
 
-#ifdef FALLOC_FL_PUNCH_HOLE
-	if (mode & FALLOC_FL_PUNCH_HOLE) {
-		flock64_t bf;
+	if (offset < 0 || len <= 0)
+		return (-EINVAL);
 
-		bf.l_type = F_WRLCK;
-		bf.l_whence = 0;
-		bf.l_start = offset;
-		bf.l_len = len;
-		bf.l_pid = 0;
+	spl_inode_lock(ip);
+	olen = i_size_read(ip);
 
-		error = -zfs_space(ip, F_FREESP, &bf, FWRITE, offset, cr);
+	if (offset > olen) {
+		spl_inode_unlock(ip);
+		return (0);
 	}
-#endif /* FALLOC_FL_PUNCH_HOLE */
+	if (offset + len > olen)
+		len = olen - offset;
+	bf.l_type = F_WRLCK;
+	bf.l_whence = 0;
+	bf.l_start = offset;
+	bf.l_len = len;
+	bf.l_pid = 0;
+
+	error = -zfs_space(ip, F_FREESP, &bf, FWRITE, offset, cr);
+	spl_inode_unlock(ip);
 
 	crfree(cr);
+#endif /* defined(FALLOC_FL_PUNCH_HOLE) && defined(FALLOC_FL_KEEP_SIZE) */
 
 	ASSERT3S(error, <=, 0);
 	return (error);
 }
+#endif /* defined(HAVE_FILE_FALLOCATE) || defined(HAVE_INODE_FALLOCATE) */
 
 #ifdef HAVE_FILE_FALLOCATE
 static long
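As a quick sanity check of the page-boundary arithmetic used in zfs_free_range() and zfs_freesp() above, here is a small stand-alone sketch assuming 4 KiB pages; the PAGE_CACHE_* constants and the example offset/length are illustrative and not taken from the commit:

/*
 * Worked example of the hole-punch boundary math, assuming 4 KiB pages.
 * Punching off=3000, len=10000: the head partial page [3000,4096) and the
 * tail partial page [12288,13000) are zeroed in place, while the whole
 * pages covering [4096,12288) are simply dropped from the page cache.
 */
#include <stdio.h>

#define PAGE_CACHE_SIZE		4096ULL
#define PAGE_CACHE_SHIFT	12

int
main(void)
{
	unsigned long long off = 3000, len = 10000;

	/* first possible full page in hole */
	unsigned long long first_page =
	    (off + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;	/* 1 */
	/* last page of hole */
	unsigned long long last_page =
	    (off + len) >> PAGE_CACHE_SHIFT;			/* 3 */
	unsigned long long first_page_offset =
	    first_page << PAGE_CACHE_SHIFT;			/* 4096 */
	unsigned long long last_page_offset =
	    last_page << PAGE_CACHE_SHIFT;			/* 12288 */

	printf("zero head partial page:  [%llu, %llu)\n",
	    off, first_page_offset);
	printf("zero tail partial page:  [%llu, %llu)\n",
	    last_page_offset, off + len);
	printf("truncate whole pages:    [%llu, %llu)\n",
	    first_page_offset, last_page_offset);
	return (0);
}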