ZIL: Reduce maximum size of WR_COPIED to 7.5K
Benchmarks show that at certain write sizes range lock/unlock takes less
time than the extra memory copy. The exact threshold is not obvious due to
other overheads, but it is definitely lower than the ~63 KB used before.
Make the limit configurable, defaulting to 7.5 KB, which is the nearest
malloc() size of 8 KB minus the itx and lr structs.

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Alexander Motin <mav@FreeBSD.org>
Sponsored by: iXsystems, Inc.
Closes #15353
parent 3755cde22a
commit 9be8ddfb3c
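To see where the 7680-byte (7.5 KiB) default comes from, here is a small
stand-alone sketch of the sizing arithmetic described in the commit message.
The 512-byte allowance for the combined itx and lr headers is simply
8192 - 7680 and is used here for illustration only; the real struct sizes
depend on platform and build.

/* sizing_sketch.c -- illustrative only, not part of the commit. */
#include <stdio.h>

#define MALLOC_BUCKET	8192	/* nearest malloc()/kmem allocation size */
#define ITX_LR_OVERHEAD	512	/* assumed itx_t + lr_write_t header space */

int
main(void)
{
	unsigned int zil_maxcopied = MALLOC_BUCKET - ITX_LR_OVERHEAD;

	/*
	 * A WR_COPIED itx holds its headers plus the copied write data,
	 * so capping the data at 7680 bytes keeps the whole allocation
	 * within the 8 KiB bucket.
	 */
	printf("zil_maxcopied default = %u bytes (%.1f KiB)\n",
	    zil_maxcopied, zil_maxcopied / 1024.0);
	return (0);
}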
@@ -2150,6 +2150,11 @@ On very fragmented pools, lowering this
 .Pq typically to Sy 36 KiB
 can improve performance.
 .
+.It Sy zil_maxcopied Ns = Ns Sy 7680 Ns B Po 7.5 KiB Pc Pq uint
+This sets the maximum number of write bytes logged via WR_COPIED.
+It tunes a tradeoff between additional memory copy and possibly worse log
+space efficiency vs additional range lock/unlock.
+.
 .It Sy zil_min_commit_timeout Ns = Ns Sy 5000 Pq u64
 This sets the minimum delay in nanoseconds ZIL care to delay block commit,
 waiting for more records.
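The "possibly worse log space efficiency" part of the tradeoff documented in
the zil_maxcopied entry above can be illustrated with a deliberately
simplified packing model. The sketch below ignores the ZIL's dynamic log
block sizing and assumes a fixed 128 KiB payload per block and a 200-byte
per-record header (both assumptions for illustration); it only shows that
records larger than half a block leave almost half of it unused, while
7.5 KiB records pack with little leftover.

/* packing_sketch.c -- simplified model, not actual ZIL block sizing. */
#include <stdio.h>

#define LOG_BLOCK_PAYLOAD	131072	/* assumed usable bytes per max block */
#define RECORD_HDR		200	/* assumed per-record header size */

static void
show_packing(unsigned int data_bytes)
{
	unsigned int rec = data_bytes + RECORD_HDR;
	unsigned int fit = LOG_BLOCK_PAYLOAD / rec;
	unsigned int waste = LOG_BLOCK_PAYLOAD - fit * rec;

	printf("%6u-byte records: %2u per block, %6u bytes left over\n",
	    data_bytes, fit, waste);
}

int
main(void)
{
	show_packing(7680);	/* new default cap: many records, little waste */
	show_packing(60000);	/* near the old ~63 KB cap: two per block */
	show_packing(70000);	/* over half a block: only one fits */
	return (0);
}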
@@ -1958,26 +1958,28 @@ zil_max_log_data(zilog_t *zilog, size_t hdrsize)
 
 /*
  * Maximum amount of log space we agree to waste to reduce number of
- * WR_NEED_COPY chunks to reduce zl_get_data() overhead (~12%).
+ * WR_NEED_COPY chunks to reduce zl_get_data() overhead (~6%).
  */
 static inline uint64_t
 zil_max_waste_space(zilog_t *zilog)
 {
-	return (zil_max_log_data(zilog, sizeof (lr_write_t)) / 8);
+	return (zil_max_log_data(zilog, sizeof (lr_write_t)) / 16);
 }
 
 /*
  * Maximum amount of write data for WR_COPIED. For correctness, consumers
  * must fall back to WR_NEED_COPY if we can't fit the entire record into one
  * maximum sized log block, because each WR_COPIED record must fit in a
- * single log block. For space efficiency, we want to fit two records into a
- * max-sized log block.
+ * single log block. Below that it is a tradeoff of additional memory copy
+ * and possibly worse log space efficiency vs additional range lock/unlock.
  */
+static uint_t zil_maxcopied = 7680;
+
 uint64_t
 zil_max_copied_data(zilog_t *zilog)
 {
-	return ((zilog->zl_max_block_size - sizeof (zil_chain_t)) / 2 -
-	    sizeof (lr_write_t));
+	uint64_t max_data = zil_max_log_data(zilog, sizeof (lr_write_t));
+	return (MIN(max_data, zil_maxcopied));
 }
 
 /*
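For context on the tradeoff named in the new comment: WR_COPIED copies the
write data into the itx up front (an extra memory copy, but no zl_get_data()
callback and no second range lock at commit), while WR_NEED_COPY defers the
copy and has to take the range lock again when the log record is filled in.
The following stand-alone sketch mimics the new capping logic and the
fall-back rule; the type names, header sizes, and choose_wr_state() are
simplified stand-ins under stated assumptions, not the actual OpenZFS code.

/* wr_state_sketch.c -- illustrative only, not part of the commit. */
#include <stdint.h>
#include <stdio.h>

typedef enum { WR_COPIED, WR_NEED_COPY } wr_state_t;

/* Assumed header sizes, for illustration only. */
#define ZIL_CHAIN_HDR	192	/* stand-in for sizeof (zil_chain_t) */
#define LR_WRITE_HDR	192	/* stand-in for sizeof (lr_write_t) */

static uint64_t zil_max_block_size = 131072;	/* zil_maxblocksize default */
static uint64_t zil_maxcopied = 7680;		/* new tunable default */

/* Usable record payload in one maximum-sized log block. */
static uint64_t
max_log_data(uint64_t hdrsize)
{
	return (zil_max_block_size - ZIL_CHAIN_HDR - hdrsize);
}

/* Mirrors the new zil_max_copied_data(): the tunable, capped by what fits. */
static uint64_t
max_copied_data(void)
{
	uint64_t max_data = max_log_data(LR_WRITE_HDR);

	return (max_data < zil_maxcopied ? max_data : zil_maxcopied);
}

/* Simplified decision: copy now if small enough, otherwise defer. */
static wr_state_t
choose_wr_state(uint64_t write_size)
{
	return (write_size <= max_copied_data() ? WR_COPIED : WR_NEED_COPY);
}

int
main(void)
{
	printf("4 KiB write  -> %s\n",
	    choose_wr_state(4096) == WR_COPIED ? "WR_COPIED" : "WR_NEED_COPY");
	printf("32 KiB write -> %s\n",
	    choose_wr_state(32768) == WR_COPIED ? "WR_COPIED" : "WR_NEED_COPY");
	return (0);
}

Before this change the cap was roughly half of a maximum-sized log block
minus the record header, about 63 KB with the default 128 KiB
zil_maxblocksize, so nearly every synchronous write qualified for WR_COPIED;
with the 7.5 KB default, larger writes fall back to WR_NEED_COPY and pay the
extra range lock/unlock instead of the memory copy.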
@@ -4226,3 +4228,6 @@ ZFS_MODULE_PARAM(zfs_zil, zil_, slog_bulk, U64, ZMOD_RW,
 
 ZFS_MODULE_PARAM(zfs_zil, zil_, maxblocksize, UINT, ZMOD_RW,
 	"Limit in bytes of ZIL log block size");
+
+ZFS_MODULE_PARAM(zfs_zil, zil_, maxcopied, UINT, ZMOD_RW,
+	"Limit in bytes WR_COPIED size");