# This file exports variables for each tunable used in the test suite.
#
# Different platforms use different names for most tunables. To avoid littering
# the tests with conditional logic for deciding how to set each tunable, the
# logic is instead consolidated to this one file.
#
# Any use of tunables in tests must use a name defined here. New entries
# should be added to the table as needed. Please keep the table sorted
# alphabetically for ease of maintenance.
#
# Platform-specific tunables should still use a NAME from this table for
# consistency. Enter UNSUPPORTED in the column for platforms on which the
# tunable is not implemented.
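#
# For illustration only, a hypothetical test-side snippet (assuming the
# suite's get_tunable/set_tunable64 helpers are what resolve the
# platform-specific name exported below) might look like:
#
#   saved=$(get_tunable TXG_TIMEOUT)
#   set_tunable64 TXG_TIMEOUT 5
#   ... exercise the behavior under test ...
#   set_tunable64 TXG_TIMEOUT $saved
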
UNAME=$(uname)
# NAME FreeBSD tunable Linux tunable
cat <<%%%% |
ADMIN_SNAPSHOT UNSUPPORTED zfs_admin_snapshot
ALLOW_REDACTED_DATASET_MOUNT allow_redacted_dataset_mount zfs_allow_redacted_dataset_mount
ARC_MAX arc.max zfs_arc_max
ARC_MIN arc.min zfs_arc_min
ASYNC_BLOCK_MAX_BLOCKS async_block_max_blocks zfs_async_block_max_blocks
CHECKSUM_EVENTS_PER_SECOND checksum_events_per_second zfs_checksum_events_per_second
COMMIT_TIMEOUT_PCT commit_timeout_pct zfs_commit_timeout_pct
COMPRESSED_ARC_ENABLED compressed_arc_enabled zfs_compressed_arc_enabled
CONDENSE_INDIRECT_COMMIT_ENTRY_DELAY_MS condense.indirect_commit_entry_delay_ms zfs_condense_indirect_commit_entry_delay_ms
CONDENSE_INDIRECT_OBSOLETE_PCT condense.indirect_obsolete_pct zfs_condense_indirect_obsolete_pct
CONDENSE_MIN_MAPPING_BYTES condense.min_mapping_bytes zfs_condense_min_mapping_bytes
DBUF_CACHE_MAX_BYTES dbuf_cache.max_bytes dbuf_cache_max_bytes
DEADMAN_CHECKTIME_MS deadman.checktime_ms zfs_deadman_checktime_ms
DEADMAN_FAILMODE deadman.failmode zfs_deadman_failmode
DEADMAN_SYNCTIME_MS deadman.synctime_ms zfs_deadman_synctime_ms
DEADMAN_ZIOTIME_MS deadman.ziotime_ms zfs_deadman_ziotime_ms
DISABLE_IVSET_GUID_CHECK disable_ivset_guid_check zfs_disable_ivset_guid_check
INITIALIZE_CHUNK_SIZE initialize_chunk_size zfs_initialize_chunk_size
INITIALIZE_VALUE initialize_value zfs_initialize_value
KEEP_LOG_SPACEMAPS_AT_EXPORT keep_log_spacemaps_at_export zfs_keep_log_spacemaps_at_export
LUA_MAX_MEMLIMIT lua.max_memlimit zfs_lua_max_memlimit
L2ARC_MFUONLY l2arc.mfuonly l2arc_mfuonly
L2ARC_NOPREFETCH l2arc.noprefetch l2arc_noprefetch
L2ARC_REBUILD_BLOCKS_MIN_L2SIZE l2arc.rebuild_blocks_min_l2size l2arc_rebuild_blocks_min_l2size
L2ARC_REBUILD_ENABLED l2arc.rebuild_enabled l2arc_rebuild_enabled
L2ARC_TRIM_AHEAD l2arc.trim_ahead l2arc_trim_ahead
L2ARC_WRITE_BOOST l2arc.write_boost l2arc_write_boost
L2ARC_WRITE_MAX l2arc.write_max l2arc_write_max
LIVELIST_CONDENSE_NEW_ALLOC livelist.condense.new_alloc zfs_livelist_condense_new_alloc
LIVELIST_CONDENSE_SYNC_CANCEL livelist.condense.sync_cancel zfs_livelist_condense_sync_cancel
LIVELIST_CONDENSE_SYNC_PAUSE livelist.condense.sync_pause zfs_livelist_condense_sync_pause
LIVELIST_CONDENSE_ZTHR_CANCEL livelist.condense.zthr_cancel zfs_livelist_condense_zthr_cancel
LIVELIST_CONDENSE_ZTHR_PAUSE livelist.condense.zthr_pause zfs_livelist_condense_zthr_pause
LIVELIST_MAX_ENTRIES livelist.max_entries zfs_livelist_max_entries
LIVELIST_MIN_PERCENT_SHARED livelist.min_percent_shared zfs_livelist_min_percent_shared
MAX_DATASET_NESTING max_dataset_nesting zfs_max_dataset_nesting
MAX_MISSING_TVDS max_missing_tvds zfs_max_missing_tvds
METASLAB_DEBUG_LOAD metaslab.debug_load metaslab_debug_load
METASLAB_FORCE_GANGING metaslab.force_ganging metaslab_force_ganging
MULTIHOST_FAIL_INTERVALS multihost.fail_intervals zfs_multihost_fail_intervals
MULTIHOST_HISTORY multihost.history zfs_multihost_history
MULTIHOST_IMPORT_INTERVALS multihost.import_intervals zfs_multihost_import_intervals
MULTIHOST_INTERVAL multihost.interval zfs_multihost_interval
OVERRIDE_ESTIMATE_RECORDSIZE send.override_estimate_recordsize zfs_override_estimate_recordsize
PREFETCH_DISABLE prefetch.disable zfs_prefetch_disable
REBUILD_SCRUB_ENABLED rebuild_scrub_enabled zfs_rebuild_scrub_enabled
REMOVAL_SUSPEND_PROGRESS removal_suspend_progress zfs_removal_suspend_progress
REMOVE_MAX_SEGMENT remove_max_segment zfs_remove_max_segment
RESILVER_MIN_TIME_MS resilver_min_time_ms zfs_resilver_min_time_ms
SCAN_LEGACY scan_legacy zfs_scan_legacy
SCAN_SUSPEND_PROGRESS scan_suspend_progress zfs_scan_suspend_progress
SCAN_VDEV_LIMIT scan_vdev_limit zfs_scan_vdev_limit
SEND_HOLES_WITHOUT_BIRTH_TIME send_holes_without_birth_time send_holes_without_birth_time
SLOW_IO_EVENTS_PER_SECOND slow_io_events_per_second zfs_slow_io_events_per_second
SPA_ASIZE_INFLATION spa.asize_inflation spa_asize_inflation
SPA_DISCARD_MEMORY_LIMIT spa.discard_memory_limit zfs_spa_discard_memory_limit
SPA_LOAD_VERIFY_DATA spa.load_verify_data spa_load_verify_data
SPA_LOAD_VERIFY_METADATA spa.load_verify_metadata spa_load_verify_metadata
TRIM_EXTENT_BYTES_MIN trim.extent_bytes_min zfs_trim_extent_bytes_min
TRIM_METASLAB_SKIP trim.metaslab_skip zfs_trim_metaslab_skip
TRIM_TXG_BATCH trim.txg_batch zfs_trim_txg_batch
TXG_HISTORY txg.history zfs_txg_history
TXG_TIMEOUT txg.timeout zfs_txg_timeout
UNLINK_SUSPEND_PROGRESS UNSUPPORTED zfs_unlink_suspend_progress
VDEV_FILE_PHYSICAL_ASHIFT vdev.file.physical_ashift vdev_file_physical_ashift
VDEV_MIN_MS_COUNT vdev.min_ms_count zfs_vdev_min_ms_count
VDEV_VALIDATE_SKIP vdev.validate_skip vdev_validate_skip
VOL_INHIBIT_DEV UNSUPPORTED zvol_inhibit_dev
VOL_MODE vol.mode zvol_volmode
VOL_RECURSIVE vol.recursive UNSUPPORTED
ZEVENT_LEN_MAX zevent.len_max zfs_zevent_len_max
ZEVENT_RETAIN_MAX zevent.retain_max zfs_zevent_retain_max
ZIO_SLOW_IO_MS zio.slow_io_ms zio_slow_io_ms
%%%%
while read name FreeBSD Linux; do
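	# Indirect expansion: for each table row the eval below becomes, e.g.,
	# "export ARC_MAX=$Linux" or "export ARC_MAX=$FreeBSD", selecting the
	# column that matches this platform's uname.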
eval "export ${name}=\$${UNAME}"
done
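
# For example, on a Linux host (UNAME=Linux) the ARC_MAX row above ends up as
# "export ARC_MAX=zfs_arc_max", while on FreeBSD it becomes
# "export ARC_MAX=arc.max". Rows marked UNSUPPORTED export that literal
# string, which consumers can check to skip the tunable on that platform.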