Retire zio_bulk_flags

The zio_bulk_flags module parameter was introduced long ago to
facilitate debugging and profiling of the zio_buf_caches.  Today
this code works well and there is no compelling reason to keep the
parameter.  Removing it is also preferable because it makes the
code more consistent with other ZFS implementations.

Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Ned Bass <bass6@llnl.gov>
Issue #3063
Brian Behlendorf 2015-02-06 13:37:02 -08:00
parent 534759fad3
commit 6442f3cfe3
3 changed files with 4 additions and 48 deletions
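
For context, the retired tunable was an ordinary integer module option, so
before this change it could be supplied when loading the module.  A
hypothetical invocation of the old behavior (flag values taken from the
comment deleted from the zpios survey script below):

    # Old behavior, no longer possible after this commit: pass extra
    # slab flags (0x100 = KMC_KMEM, 0x200 = KMC_VMEM) to every
    # zio_buf/zio_data_buf kmem cache at module load time.
    sudo modprobe zfs zio_bulk_flags=0x100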

@@ -1379,17 +1379,6 @@ Max commit bytes to separate log device
 Default value: \fB1,048,576\fR.
 .RE
 
-.sp
-.ne 2
-.na
-\fBzio_bulk_flags\fR (int)
-.ad
-.RS 12n
-Additional flags to pass to bulk buffers
-.sp
-Default value: \fB0\fR.
-.RE
-
 .sp
 .ne 2
 .na

@@ -57,7 +57,6 @@ kmem_cache_t *zio_cache;
 kmem_cache_t *zio_link_cache;
 kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
 kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
-int zio_bulk_flags = 0;
 int zio_delay_max = ZIO_DELAY_MAX;
 
 /*
@@ -142,6 +141,7 @@ zio_init(void)
 		size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
 		size_t p2 = size;
 		size_t align = 0;
+		size_t cflags = (size > zio_buf_debug_limit) ? KMC_NODEBUG : 0;
 
 		while (p2 & (p2 - 1))
 			p2 &= p2 - 1;
@@ -166,16 +166,14 @@ zio_init(void)
 
 		if (align != 0) {
 			char name[36];
-			int flags = zio_bulk_flags;
-
 			(void) sprintf(name, "zio_buf_%lu", (ulong_t)size);
 			zio_buf_cache[c] = kmem_cache_create(name, size,
-			    align, NULL, NULL, NULL, NULL, NULL, flags);
+			    align, NULL, NULL, NULL, NULL, NULL, cflags);
 
 			(void) sprintf(name, "zio_data_buf_%lu", (ulong_t)size);
 			zio_data_buf_cache[c] = kmem_cache_create(name, size,
 			    align, NULL, NULL, NULL, NULL,
-			    data_alloc_arena, flags);
+			    data_alloc_arena, cflags);
 		}
 	}
 
@@ -3398,9 +3396,6 @@ EXPORT_SYMBOL(zio_data_buf_alloc);
 EXPORT_SYMBOL(zio_buf_free);
 EXPORT_SYMBOL(zio_data_buf_free);
 
-module_param(zio_bulk_flags, int, 0644);
-MODULE_PARM_DESC(zio_bulk_flags, "Additional flags to pass to bulk buffers");
-
 module_param(zio_delay_max, int, 0644);
 MODULE_PARM_DESC(zio_delay_max, "Max zio millisec delay before posting event");
 
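
Because the module_param()/MODULE_PARM_DESC() registration goes away with the
variable, the knob also disappears from sysfs once a rebuilt module is loaded.
A quick sanity check (illustrative; assumes the standard Linux module
parameter layout under /sys/module):

    # zio_bulk_flags should no longer be listed; zio_delay_max remains.
    ls /sys/module/zfs/parameters/ | grep '^zio_'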

@@ -120,40 +120,13 @@ zpios_survey_pending() {
 		tee -a ${ZPIOS_SURVEY_LOG}
 }
 
-# To avoid memory fragmentation issues our slab implementation can be
-# based on a virtual address space. Interestingly, we take a pretty
-# substantial performance penalty for this somewhere in the low level
-# IO drivers. If we back the slab with kmem pages we see far better
-# read performance numbers at the cost of memory fragmention and general
-# system instability due to large allocations. This may be because of
-# an optimization in the low level drivers due to the contigeous kmem
-# based memory. This needs to be explained. The good news here is that
-# with zerocopy interfaces added at the DMU layer we could gaurentee
-# kmem based memory for a pool of pages.
-#
-# 0x100 = KMC_KMEM - Force kmem_* based slab
-# 0x200 = KMC_VMEM - Force vmem_* based slab
-zpios_survey_kmem() {
-	TEST_NAME="${ZPOOL_CONFIG}+${ZPIOS_TEST}+kmem"
-	print_header ${TEST_NAME}
-
-	${ZFS_SH} ${VERBOSE_FLAG} \
-		zfs="zio_bulk_flags=0x100" | \
-		tee -a ${ZPIOS_SURVEY_LOG}
-	${ZPIOS_SH} ${VERBOSE_FLAG} -c ${ZPOOL_CONFIG} -t ${ZPIOS_TEST} | \
-		tee -a ${ZPIOS_SURVEY_LOG}
-	${ZFS_SH} -u ${VERBOSE_FLAG} | \
-		tee -a ${ZPIOS_SURVEY_LOG}
-}
-
 # Apply all possible turning concurrently to get a best case number
 zpios_survey_all() {
 	TEST_NAME="${ZPOOL_CONFIG}+${ZPIOS_TEST}+all"
 	print_header ${TEST_NAME}
 
 	${ZFS_SH} ${VERBOSE_FLAG} \
-		zfs="zfs_vdev_max_pending=1024" \
-		zfs="zio_bulk_flags=0x100" | \
+		zfs="zfs_vdev_max_pending=1024" | \
 		tee -a ${ZPIOS_SURVEY_LOG}
 	${ZPIOS_SH} ${VERBOSE_FLAG} -c ${ZPOOL_CONFIG} -t ${ZPIOS_TEST} \
 		-o "--noprefetch --zerocopy" \
@@ -209,7 +182,6 @@ zpios_survey_prefetch
 zpios_survey_zerocopy
 zpios_survey_checksum
 zpios_survey_pending
-zpios_survey_kmem
 zpios_survey_all
 
 exit 0