mirror of
https://git.proxmox.com/git/mirror_zfs.git
synced 2024-12-25 02:49:32 +03:00
Cleanup: Specify unsignedness on things that should not be signed
In #13871, zfs_vdev_aggregation_limit_non_rotating and zfs_vdev_aggregation_limit being signed was pointed out as a possible reason not to eliminate an unnecessary MAX(unsigned, 0) since the unsigned value was assigned from them. There is no reason for these module parameters to be signed and upon inspection, it was found that there are a number of other module parameters that are signed, but should not be, so we make them unsigned. Making them unsigned made it clear that some other variables in the code should also be unsigned, so we also make those unsigned. This prevents users from setting negative values that could potentially cause bad behaviors. It also makes the code slightly easier to understand. Mostly module parameters that deal with timeouts, limits, bitshifts and percentages are made unsigned by this. Any that are boolean are left signed, since whether booleans should be considered signed or unsigned does not matter. Making zfs_arc_lotsfree_percent unsigned caused a `zfs_arc_lotsfree_percent >= 0` check to become redundant, so it was removed. Removing the check was also necessary to prevent a compiler error from -Werror=type-limits. Several end of line comments had to be moved to their own lines because replacing int with uint_t caused us to exceed the 80 character limit enforced by cstyle.pl. The following were kept signed because they are passed to taskq_create(), which expects signed values and modifying the OpenSolaris/Illumos DDI is out of scope of this patch: * metaslab_load_pct * zfs_sync_taskq_batch_pct * zfs_zil_clean_taskq_nthr_pct * zfs_zil_clean_taskq_minalloc * zfs_zil_clean_taskq_maxalloc * zfs_arc_prune_task_threads Also, negative values in those parameters was found to be harmless. 
The following were left signed because either negative values make sense, or more analysis was needed to determine whether negative values should be disallowed: * zfs_metaslab_switch_threshold * zfs_pd_bytes_max * zfs_livelist_min_percent_shared zfs_multihost_history was made static to be consistent with other parameters. A number of module parameters were marked as signed, but in reality referenced unsigned variables. upgrade_errlog_limit is one of the numerous examples. In the case of zfs_vdev_async_read_max_active, it was already uint32_t, but zdb had an extern int declaration for it. Interestingly, the documentation in zfs.4 was right for upgrade_errlog_limit despite the module parameter being wrongly marked, while the documentation for zfs_vdev_async_read_max_active (and friends) was wrong. It was also wrong for zstd_abort_size, which was unsigned, but was documented as signed. Also, the documentation in zfs.4 incorrectly described the following parameters as ulong when they were int: * zfs_arc_meta_adjust_restarts * zfs_override_estimate_recordsize They are now uint_t as of this patch and thus the man page has been updated to describe them as uint. dbuf_state_index was left alone since it does nothing and perhaps should be removed in another patch. If any module parameters were missed, they were not found by `grep -r 'ZFS_MODULE_PARAM' | grep ', INT'`. I did find a few that grep missed, but only because they were in files that had hits. This patch intentionally did not attempt to address whether some of these module parameters should be elevated to 64-bit parameters, because the length of a long on 32-bit is 32-bit. Lastly, it was pointed out during review that uint_t is a better match for these variables than uint32_t because FreeBSD kernel parameter definitions are designed for uint_t, whose bit width can change in future memory models. As a result, we change the existing parameters that are uint32_t to use uint_t. 
Reviewed-by: Alexander Motin <mav@FreeBSD.org> Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov> Reviewed-by: Neal Gompa <ngompa@datto.com> Signed-off-by: Richard Yao <richard.yao@alumni.stonybrook.edu> Closes #13875
This commit is contained in:
parent
7584fbe846
commit
fdc2d30371
@ -117,10 +117,10 @@ zdb_ot_name(dmu_object_type_t type)
|
|||||||
extern int reference_tracking_enable;
|
extern int reference_tracking_enable;
|
||||||
extern int zfs_recover;
|
extern int zfs_recover;
|
||||||
extern unsigned long zfs_arc_meta_min, zfs_arc_meta_limit;
|
extern unsigned long zfs_arc_meta_min, zfs_arc_meta_limit;
|
||||||
extern int zfs_vdev_async_read_max_active;
|
extern uint_t zfs_vdev_async_read_max_active;
|
||||||
extern boolean_t spa_load_verify_dryrun;
|
extern boolean_t spa_load_verify_dryrun;
|
||||||
extern boolean_t spa_mode_readable_spacemaps;
|
extern boolean_t spa_mode_readable_spacemaps;
|
||||||
extern int zfs_reconstruct_indirect_combinations_max;
|
extern uint_t zfs_reconstruct_indirect_combinations_max;
|
||||||
extern uint_t zfs_btree_verify_intensity;
|
extern uint_t zfs_btree_verify_intensity;
|
||||||
|
|
||||||
static const char cmdname[] = "zdb";
|
static const char cmdname[] = "zdb";
|
||||||
|
@ -253,10 +253,10 @@ static const ztest_shared_opts_t ztest_opts_defaults = {
|
|||||||
extern uint64_t metaslab_force_ganging;
|
extern uint64_t metaslab_force_ganging;
|
||||||
extern uint64_t metaslab_df_alloc_threshold;
|
extern uint64_t metaslab_df_alloc_threshold;
|
||||||
extern unsigned long zfs_deadman_synctime_ms;
|
extern unsigned long zfs_deadman_synctime_ms;
|
||||||
extern int metaslab_preload_limit;
|
extern uint_t metaslab_preload_limit;
|
||||||
extern int zfs_compressed_arc_enabled;
|
extern int zfs_compressed_arc_enabled;
|
||||||
extern int zfs_abd_scatter_enabled;
|
extern int zfs_abd_scatter_enabled;
|
||||||
extern int dmu_object_alloc_chunk_shift;
|
extern uint_t dmu_object_alloc_chunk_shift;
|
||||||
extern boolean_t zfs_force_some_double_word_sm_entries;
|
extern boolean_t zfs_force_some_double_word_sm_entries;
|
||||||
extern unsigned long zio_decompress_fail_fraction;
|
extern unsigned long zio_decompress_fail_fraction;
|
||||||
extern unsigned long zfs_reconstruct_indirect_damage_fraction;
|
extern unsigned long zfs_reconstruct_indirect_damage_fraction;
|
||||||
@ -4639,7 +4639,7 @@ ztest_dmu_object_next_chunk(ztest_ds_t *zd, uint64_t id)
|
|||||||
{
|
{
|
||||||
(void) id;
|
(void) id;
|
||||||
objset_t *os = zd->zd_os;
|
objset_t *os = zd->zd_os;
|
||||||
int dnodes_per_chunk = 1 << dmu_object_alloc_chunk_shift;
|
uint_t dnodes_per_chunk = 1 << dmu_object_alloc_chunk_shift;
|
||||||
uint64_t object;
|
uint64_t object;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -84,7 +84,7 @@ typedef void arc_write_done_func_t(zio_t *zio, arc_buf_t *buf, void *priv);
|
|||||||
typedef void arc_prune_func_t(int64_t bytes, void *priv);
|
typedef void arc_prune_func_t(int64_t bytes, void *priv);
|
||||||
|
|
||||||
/* Shared module parameters */
|
/* Shared module parameters */
|
||||||
extern int zfs_arc_average_blocksize;
|
extern uint_t zfs_arc_average_blocksize;
|
||||||
extern int l2arc_exclude_special;
|
extern int l2arc_exclude_special;
|
||||||
|
|
||||||
/* generic arc_done_func_t's which you can use */
|
/* generic arc_done_func_t's which you can use */
|
||||||
|
@ -976,15 +976,15 @@ extern arc_stats_t arc_stats;
|
|||||||
extern arc_sums_t arc_sums;
|
extern arc_sums_t arc_sums;
|
||||||
extern hrtime_t arc_growtime;
|
extern hrtime_t arc_growtime;
|
||||||
extern boolean_t arc_warm;
|
extern boolean_t arc_warm;
|
||||||
extern int arc_grow_retry;
|
extern uint_t arc_grow_retry;
|
||||||
extern int arc_no_grow_shift;
|
extern uint_t arc_no_grow_shift;
|
||||||
extern int arc_shrink_shift;
|
extern uint_t arc_shrink_shift;
|
||||||
extern kmutex_t arc_prune_mtx;
|
extern kmutex_t arc_prune_mtx;
|
||||||
extern list_t arc_prune_list;
|
extern list_t arc_prune_list;
|
||||||
extern arc_state_t ARC_mfu;
|
extern arc_state_t ARC_mfu;
|
||||||
extern arc_state_t ARC_mru;
|
extern arc_state_t ARC_mru;
|
||||||
extern uint_t zfs_arc_pc_percent;
|
extern uint_t zfs_arc_pc_percent;
|
||||||
extern int arc_lotsfree_percent;
|
extern uint_t arc_lotsfree_percent;
|
||||||
extern unsigned long zfs_arc_min;
|
extern unsigned long zfs_arc_min;
|
||||||
extern unsigned long zfs_arc_max;
|
extern unsigned long zfs_arc_max;
|
||||||
|
|
||||||
@ -995,7 +995,7 @@ extern void arc_wait_for_eviction(uint64_t, boolean_t);
|
|||||||
|
|
||||||
extern void arc_lowmem_init(void);
|
extern void arc_lowmem_init(void);
|
||||||
extern void arc_lowmem_fini(void);
|
extern void arc_lowmem_fini(void);
|
||||||
extern void arc_prune_async(int64_t);
|
extern void arc_prune_async(uint64_t);
|
||||||
extern int arc_memory_throttle(spa_t *spa, uint64_t reserve, uint64_t txg);
|
extern int arc_memory_throttle(spa_t *spa, uint64_t reserve, uint64_t txg);
|
||||||
extern uint64_t arc_free_memory(void);
|
extern uint64_t arc_free_memory(void);
|
||||||
extern int64_t arc_available_memory(void);
|
extern int64_t arc_available_memory(void);
|
||||||
|
@ -865,7 +865,7 @@ int dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset,
|
|||||||
int dmu_assign_arcbuf_by_dbuf(dmu_buf_t *handle, uint64_t offset,
|
int dmu_assign_arcbuf_by_dbuf(dmu_buf_t *handle, uint64_t offset,
|
||||||
struct arc_buf *buf, dmu_tx_t *tx);
|
struct arc_buf *buf, dmu_tx_t *tx);
|
||||||
#define dmu_assign_arcbuf dmu_assign_arcbuf_by_dbuf
|
#define dmu_assign_arcbuf dmu_assign_arcbuf_by_dbuf
|
||||||
extern int zfs_max_recordsize;
|
extern uint_t zfs_max_recordsize;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Asynchronously try to read in the data.
|
* Asynchronously try to read in the data.
|
||||||
@ -1070,7 +1070,7 @@ int dmu_diff(const char *tosnap_name, const char *fromsnap_name,
|
|||||||
#define ZFS_CRC64_POLY 0xC96C5795D7870F42ULL /* ECMA-182, reflected form */
|
#define ZFS_CRC64_POLY 0xC96C5795D7870F42ULL /* ECMA-182, reflected form */
|
||||||
extern uint64_t zfs_crc64_table[256];
|
extern uint64_t zfs_crc64_table[256];
|
||||||
|
|
||||||
extern int dmu_prefetch_max;
|
extern uint_t dmu_prefetch_max;
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
}
|
}
|
||||||
|
@ -60,9 +60,9 @@ struct dsl_deadlist;
|
|||||||
extern unsigned long zfs_dirty_data_max;
|
extern unsigned long zfs_dirty_data_max;
|
||||||
extern unsigned long zfs_dirty_data_max_max;
|
extern unsigned long zfs_dirty_data_max_max;
|
||||||
extern unsigned long zfs_wrlog_data_max;
|
extern unsigned long zfs_wrlog_data_max;
|
||||||
extern int zfs_dirty_data_max_percent;
|
extern uint_t zfs_dirty_data_max_percent;
|
||||||
extern int zfs_dirty_data_max_max_percent;
|
extern uint_t zfs_dirty_data_max_max_percent;
|
||||||
extern int zfs_delay_min_dirty_percent;
|
extern uint_t zfs_delay_min_dirty_percent;
|
||||||
extern unsigned long zfs_delay_scale;
|
extern unsigned long zfs_delay_scale;
|
||||||
|
|
||||||
/* These macros are for indexing into the zfs_all_blkstats_t. */
|
/* These macros are for indexing into the zfs_all_blkstats_t. */
|
||||||
|
@ -95,7 +95,7 @@ extern void fm_init(void);
|
|||||||
extern void fm_fini(void);
|
extern void fm_fini(void);
|
||||||
extern void zfs_zevent_post_cb(nvlist_t *nvl, nvlist_t *detector);
|
extern void zfs_zevent_post_cb(nvlist_t *nvl, nvlist_t *detector);
|
||||||
extern int zfs_zevent_post(nvlist_t *, nvlist_t *, zevent_cb_t *);
|
extern int zfs_zevent_post(nvlist_t *, nvlist_t *, zevent_cb_t *);
|
||||||
extern void zfs_zevent_drain_all(int *);
|
extern void zfs_zevent_drain_all(uint_t *);
|
||||||
extern zfs_file_t *zfs_zevent_fd_hold(int, minor_t *, zfs_zevent_t **);
|
extern zfs_file_t *zfs_zevent_fd_hold(int, minor_t *, zfs_zevent_t **);
|
||||||
extern void zfs_zevent_fd_rele(zfs_file_t *);
|
extern void zfs_zevent_fd_rele(zfs_file_t *);
|
||||||
extern int zfs_zevent_next(zfs_zevent_t *, nvlist_t **, uint64_t *, uint64_t *);
|
extern int zfs_zevent_next(zfs_zevent_t *, nvlist_t **, uint64_t *, uint64_t *);
|
||||||
|
@ -826,7 +826,7 @@ extern int spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t flag);
|
|||||||
extern void spa_sync(spa_t *spa, uint64_t txg); /* only for DMU use */
|
extern void spa_sync(spa_t *spa, uint64_t txg); /* only for DMU use */
|
||||||
extern void spa_sync_allpools(void);
|
extern void spa_sync_allpools(void);
|
||||||
|
|
||||||
extern int zfs_sync_pass_deferred_free;
|
extern uint_t zfs_sync_pass_deferred_free;
|
||||||
|
|
||||||
/* spa namespace global mutex */
|
/* spa namespace global mutex */
|
||||||
extern kmutex_t spa_namespace_lock;
|
extern kmutex_t spa_namespace_lock;
|
||||||
@ -1013,7 +1013,7 @@ extern boolean_t spa_indirect_vdevs_loaded(spa_t *spa);
|
|||||||
extern blkptr_t *spa_get_rootblkptr(spa_t *spa);
|
extern blkptr_t *spa_get_rootblkptr(spa_t *spa);
|
||||||
extern void spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp);
|
extern void spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp);
|
||||||
extern void spa_altroot(spa_t *, char *, size_t);
|
extern void spa_altroot(spa_t *, char *, size_t);
|
||||||
extern int spa_sync_pass(spa_t *spa);
|
extern uint32_t spa_sync_pass(spa_t *spa);
|
||||||
extern char *spa_name(spa_t *spa);
|
extern char *spa_name(spa_t *spa);
|
||||||
extern uint64_t spa_guid(spa_t *spa);
|
extern uint64_t spa_guid(spa_t *spa);
|
||||||
extern uint64_t spa_load_guid(spa_t *spa);
|
extern uint64_t spa_load_guid(spa_t *spa);
|
||||||
|
@ -215,7 +215,7 @@ struct spa {
|
|||||||
nvlist_t *spa_config_splitting; /* config for splitting */
|
nvlist_t *spa_config_splitting; /* config for splitting */
|
||||||
nvlist_t *spa_load_info; /* info and errors from load */
|
nvlist_t *spa_load_info; /* info and errors from load */
|
||||||
uint64_t spa_config_txg; /* txg of last config change */
|
uint64_t spa_config_txg; /* txg of last config change */
|
||||||
int spa_sync_pass; /* iterate-to-convergence */
|
uint32_t spa_sync_pass; /* iterate-to-convergence */
|
||||||
pool_state_t spa_state; /* pool state */
|
pool_state_t spa_state; /* pool state */
|
||||||
int spa_inject_ref; /* injection references */
|
int spa_inject_ref; /* injection references */
|
||||||
uint8_t spa_sync_on; /* sync threads are running */
|
uint8_t spa_sync_on; /* sync threads are running */
|
||||||
@ -446,7 +446,7 @@ struct spa {
|
|||||||
|
|
||||||
extern char *spa_config_path;
|
extern char *spa_config_path;
|
||||||
extern const char *zfs_deadman_failmode;
|
extern const char *zfs_deadman_failmode;
|
||||||
extern int spa_slop_shift;
|
extern uint_t spa_slop_shift;
|
||||||
extern void spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
|
extern void spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
|
||||||
task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent);
|
task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent);
|
||||||
extern void spa_taskq_dispatch_sync(spa_t *, zio_type_t t, zio_taskq_type_t q,
|
extern void spa_taskq_dispatch_sync(spa_t *, zio_type_t t, zio_taskq_type_t q,
|
||||||
|
@ -138,7 +138,7 @@ extern void *txg_list_head(txg_list_t *tl, uint64_t txg);
|
|||||||
extern void *txg_list_next(txg_list_t *tl, void *p, uint64_t txg);
|
extern void *txg_list_next(txg_list_t *tl, void *p, uint64_t txg);
|
||||||
|
|
||||||
/* Global tuning */
|
/* Global tuning */
|
||||||
extern int zfs_txg_timeout;
|
extern uint_t zfs_txg_timeout;
|
||||||
|
|
||||||
|
|
||||||
#ifdef ZFS_DEBUG
|
#ifdef ZFS_DEBUG
|
||||||
|
@ -61,9 +61,9 @@ typedef struct vdev_cache vdev_cache_t;
|
|||||||
typedef struct vdev_cache_entry vdev_cache_entry_t;
|
typedef struct vdev_cache_entry vdev_cache_entry_t;
|
||||||
struct abd;
|
struct abd;
|
||||||
|
|
||||||
extern int zfs_vdev_queue_depth_pct;
|
extern uint_t zfs_vdev_queue_depth_pct;
|
||||||
extern int zfs_vdev_def_queue_depth;
|
extern uint_t zfs_vdev_def_queue_depth;
|
||||||
extern uint32_t zfs_vdev_async_write_max_active;
|
extern uint_t zfs_vdev_async_write_max_active;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Virtual device operations
|
* Virtual device operations
|
||||||
|
@ -87,7 +87,7 @@ extern int spa_vdev_remove_cancel(spa_t *);
|
|||||||
extern void spa_vdev_removal_destroy(spa_vdev_removal_t *);
|
extern void spa_vdev_removal_destroy(spa_vdev_removal_t *);
|
||||||
extern uint64_t spa_remove_max_segment(spa_t *);
|
extern uint64_t spa_remove_max_segment(spa_t *);
|
||||||
|
|
||||||
extern int vdev_removal_max_span;
|
extern uint_t vdev_removal_max_span;
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
}
|
}
|
||||||
|
210
man/man4/zfs.4
210
man/man4/zfs.4
@ -56,12 +56,12 @@ The percentage below
|
|||||||
.Sy dbuf_cache_max_bytes
|
.Sy dbuf_cache_max_bytes
|
||||||
when the evict thread stops evicting dbufs.
|
when the evict thread stops evicting dbufs.
|
||||||
.
|
.
|
||||||
.It Sy dbuf_cache_shift Ns = Ns Sy 5 Pq int
|
.It Sy dbuf_cache_shift Ns = Ns Sy 5 Pq uint
|
||||||
Set the size of the dbuf cache
|
Set the size of the dbuf cache
|
||||||
.Pq Sy dbuf_cache_max_bytes
|
.Pq Sy dbuf_cache_max_bytes
|
||||||
to a log2 fraction of the target ARC size.
|
to a log2 fraction of the target ARC size.
|
||||||
.
|
.
|
||||||
.It Sy dbuf_metadata_cache_shift Ns = Ns Sy 6 Pq int
|
.It Sy dbuf_metadata_cache_shift Ns = Ns Sy 6 Pq uint
|
||||||
Set the size of the dbuf metadata cache
|
Set the size of the dbuf metadata cache
|
||||||
.Pq Sy dbuf_metadata_cache_max_bytes
|
.Pq Sy dbuf_metadata_cache_max_bytes
|
||||||
to a log2 fraction of the target ARC size.
|
to a log2 fraction of the target ARC size.
|
||||||
@ -72,11 +72,11 @@ When set to
|
|||||||
.Sy 0
|
.Sy 0
|
||||||
the array is dynamically sized based on total system memory.
|
the array is dynamically sized based on total system memory.
|
||||||
.
|
.
|
||||||
.It Sy dmu_object_alloc_chunk_shift Ns = Ns Sy 7 Po 128 Pc Pq int
|
.It Sy dmu_object_alloc_chunk_shift Ns = Ns Sy 7 Po 128 Pc Pq uint
|
||||||
dnode slots allocated in a single operation as a power of 2.
|
dnode slots allocated in a single operation as a power of 2.
|
||||||
The default value minimizes lock contention for the bulk operation performed.
|
The default value minimizes lock contention for the bulk operation performed.
|
||||||
.
|
.
|
||||||
.It Sy dmu_prefetch_max Ns = Ns Sy 134217728 Ns B Po 128 MiB Pc Pq int
|
.It Sy dmu_prefetch_max Ns = Ns Sy 134217728 Ns B Po 128 MiB Pc Pq uint
|
||||||
Limit the amount we can prefetch with one call to this amount in bytes.
|
Limit the amount we can prefetch with one call to this amount in bytes.
|
||||||
This helps to limit the amount of memory that can be used by prefetching.
|
This helps to limit the amount of memory that can be used by prefetching.
|
||||||
.
|
.
|
||||||
@ -155,7 +155,7 @@ provided by the
|
|||||||
arcstats can be used to decide if toggling this option is appropriate
|
arcstats can be used to decide if toggling this option is appropriate
|
||||||
for the current workload.
|
for the current workload.
|
||||||
.
|
.
|
||||||
.It Sy l2arc_meta_percent Ns = Ns Sy 33 Ns % Pq int
|
.It Sy l2arc_meta_percent Ns = Ns Sy 33 Ns % Pq uint
|
||||||
Percent of ARC size allowed for L2ARC-only headers.
|
Percent of ARC size allowed for L2ARC-only headers.
|
||||||
Since L2ARC buffers are not evicted on memory pressure,
|
Since L2ARC buffers are not evicted on memory pressure,
|
||||||
too many headers on a system with an irrationally large L2ARC
|
too many headers on a system with an irrationally large L2ARC
|
||||||
@ -267,7 +267,7 @@ Prevent metaslabs from being unloaded.
|
|||||||
.It Sy metaslab_fragmentation_factor_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
|
.It Sy metaslab_fragmentation_factor_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
|
||||||
Enable use of the fragmentation metric in computing metaslab weights.
|
Enable use of the fragmentation metric in computing metaslab weights.
|
||||||
.
|
.
|
||||||
.It Sy metaslab_df_max_search Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq int
|
.It Sy metaslab_df_max_search Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq uint
|
||||||
Maximum distance to search forward from the last offset.
|
Maximum distance to search forward from the last offset.
|
||||||
Without this limit, fragmented pools can see
|
Without this limit, fragmented pools can see
|
||||||
.Em >100`000
|
.Em >100`000
|
||||||
@ -309,7 +309,7 @@ After a number of seconds controlled by this tunable,
|
|||||||
we stop considering the cached max size and start
|
we stop considering the cached max size and start
|
||||||
considering only the histogram instead.
|
considering only the histogram instead.
|
||||||
.
|
.
|
||||||
.It Sy zfs_metaslab_mem_limit Ns = Ns Sy 25 Ns % Pq int
|
.It Sy zfs_metaslab_mem_limit Ns = Ns Sy 25 Ns % Pq uint
|
||||||
When we are loading a new metaslab, we check the amount of memory being used
|
When we are loading a new metaslab, we check the amount of memory being used
|
||||||
to store metaslab range trees.
|
to store metaslab range trees.
|
||||||
If it is over a threshold, we attempt to unload the least recently used metaslab
|
If it is over a threshold, we attempt to unload the least recently used metaslab
|
||||||
@ -341,16 +341,16 @@ If that fails we will do a "try hard" gang allocation.
|
|||||||
If that fails then we will have a multi-layer gang block.
|
If that fails then we will have a multi-layer gang block.
|
||||||
.El
|
.El
|
||||||
.
|
.
|
||||||
.It Sy zfs_metaslab_find_max_tries Ns = Ns Sy 100 Pq int
|
.It Sy zfs_metaslab_find_max_tries Ns = Ns Sy 100 Pq uint
|
||||||
When not trying hard, we only consider this number of the best metaslabs.
|
When not trying hard, we only consider this number of the best metaslabs.
|
||||||
This improves performance, especially when there are many metaslabs per vdev
|
This improves performance, especially when there are many metaslabs per vdev
|
||||||
and the allocation can't actually be satisfied
|
and the allocation can't actually be satisfied
|
||||||
(so we would otherwise iterate all metaslabs).
|
(so we would otherwise iterate all metaslabs).
|
||||||
.
|
.
|
||||||
.It Sy zfs_vdev_default_ms_count Ns = Ns Sy 200 Pq int
|
.It Sy zfs_vdev_default_ms_count Ns = Ns Sy 200 Pq uint
|
||||||
When a vdev is added, target this number of metaslabs per top-level vdev.
|
When a vdev is added, target this number of metaslabs per top-level vdev.
|
||||||
.
|
.
|
||||||
.It Sy zfs_vdev_default_ms_shift Ns = Ns Sy 29 Po 512 MiB Pc Pq int
|
.It Sy zfs_vdev_default_ms_shift Ns = Ns Sy 29 Po 512 MiB Pc Pq uint
|
||||||
Default limit for metaslab size.
|
Default limit for metaslab size.
|
||||||
.
|
.
|
||||||
.It Sy zfs_vdev_max_auto_ashift Ns = Ns Sy 14 Pq ulong
|
.It Sy zfs_vdev_max_auto_ashift Ns = Ns Sy 14 Pq ulong
|
||||||
@ -363,7 +363,7 @@ but this may negatively impact pool space efficiency.
|
|||||||
.It Sy zfs_vdev_min_auto_ashift Ns = Ns Sy ASHIFT_MIN Po 9 Pc Pq ulong
|
.It Sy zfs_vdev_min_auto_ashift Ns = Ns Sy ASHIFT_MIN Po 9 Pc Pq ulong
|
||||||
Minimum ashift used when creating new top-level vdevs.
|
Minimum ashift used when creating new top-level vdevs.
|
||||||
.
|
.
|
||||||
.It Sy zfs_vdev_min_ms_count Ns = Ns Sy 16 Pq int
|
.It Sy zfs_vdev_min_ms_count Ns = Ns Sy 16 Pq uint
|
||||||
Minimum number of metaslabs to create in a top-level vdev.
|
Minimum number of metaslabs to create in a top-level vdev.
|
||||||
.
|
.
|
||||||
.It Sy vdev_validate_skip Ns = Ns Sy 0 Ns | Ns 1 Pq int
|
.It Sy vdev_validate_skip Ns = Ns Sy 0 Ns | Ns 1 Pq int
|
||||||
@ -371,7 +371,7 @@ Skip label validation steps during pool import.
|
|||||||
Changing is not recommended unless you know what you're doing
|
Changing is not recommended unless you know what you're doing
|
||||||
and are recovering a damaged label.
|
and are recovering a damaged label.
|
||||||
.
|
.
|
||||||
.It Sy zfs_vdev_ms_count_limit Ns = Ns Sy 131072 Po 128k Pc Pq int
|
.It Sy zfs_vdev_ms_count_limit Ns = Ns Sy 131072 Po 128k Pc Pq uint
|
||||||
Practical upper limit of total metaslabs per top-level vdev.
|
Practical upper limit of total metaslabs per top-level vdev.
|
||||||
.
|
.
|
||||||
.It Sy metaslab_preload_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
|
.It Sy metaslab_preload_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
|
||||||
@ -382,21 +382,21 @@ Give more weight to metaslabs with lower LBAs,
|
|||||||
assuming they have greater bandwidth,
|
assuming they have greater bandwidth,
|
||||||
as is typically the case on a modern constant angular velocity disk drive.
|
as is typically the case on a modern constant angular velocity disk drive.
|
||||||
.
|
.
|
||||||
.It Sy metaslab_unload_delay Ns = Ns Sy 32 Pq int
|
.It Sy metaslab_unload_delay Ns = Ns Sy 32 Pq uint
|
||||||
After a metaslab is used, we keep it loaded for this many TXGs, to attempt to
|
After a metaslab is used, we keep it loaded for this many TXGs, to attempt to
|
||||||
reduce unnecessary reloading.
|
reduce unnecessary reloading.
|
||||||
Note that both this many TXGs and
|
Note that both this many TXGs and
|
||||||
.Sy metaslab_unload_delay_ms
|
.Sy metaslab_unload_delay_ms
|
||||||
milliseconds must pass before unloading will occur.
|
milliseconds must pass before unloading will occur.
|
||||||
.
|
.
|
||||||
.It Sy metaslab_unload_delay_ms Ns = Ns Sy 600000 Ns ms Po 10 min Pc Pq int
|
.It Sy metaslab_unload_delay_ms Ns = Ns Sy 600000 Ns ms Po 10 min Pc Pq uint
|
||||||
After a metaslab is used, we keep it loaded for this many milliseconds,
|
After a metaslab is used, we keep it loaded for this many milliseconds,
|
||||||
to attempt to reduce unnecessary reloading.
|
to attempt to reduce unnecessary reloading.
|
||||||
Note, that both this many milliseconds and
|
Note, that both this many milliseconds and
|
||||||
.Sy metaslab_unload_delay
|
.Sy metaslab_unload_delay
|
||||||
TXGs must pass before unloading will occur.
|
TXGs must pass before unloading will occur.
|
||||||
.
|
.
|
||||||
.It Sy reference_history Ns = Ns Sy 3 Pq int
|
.It Sy reference_history Ns = Ns Sy 3 Pq uint
|
||||||
Maximum reference holders being tracked when reference_tracking_enable is active.
|
Maximum reference holders being tracked when reference_tracking_enable is active.
|
||||||
.
|
.
|
||||||
.It Sy reference_tracking_enable Ns = Ns Sy 0 Ns | Ns 1 Pq int
|
.It Sy reference_tracking_enable Ns = Ns Sy 0 Ns | Ns 1 Pq int
|
||||||
@ -415,7 +415,7 @@ This is useful if you suspect your datasets are affected by a bug in
|
|||||||
.It Sy spa_config_path Ns = Ns Pa /etc/zfs/zpool.cache Pq charp
|
.It Sy spa_config_path Ns = Ns Pa /etc/zfs/zpool.cache Pq charp
|
||||||
SPA config file.
|
SPA config file.
|
||||||
.
|
.
|
||||||
.It Sy spa_asize_inflation Ns = Ns Sy 24 Pq int
|
.It Sy spa_asize_inflation Ns = Ns Sy 24 Pq uint
|
||||||
Multiplication factor used to estimate actual disk consumption from the
|
Multiplication factor used to estimate actual disk consumption from the
|
||||||
size of data being written.
|
size of data being written.
|
||||||
The default value is a worst case estimate,
|
The default value is a worst case estimate,
|
||||||
@ -448,7 +448,7 @@ blocks in the pool for verification.
|
|||||||
If this parameter is unset, the traversal is not performed.
|
If this parameter is unset, the traversal is not performed.
|
||||||
It can be toggled once the import has started to stop or start the traversal.
|
It can be toggled once the import has started to stop or start the traversal.
|
||||||
.
|
.
|
||||||
.It Sy spa_load_verify_shift Ns = Ns Sy 4 Po 1/16th Pc Pq int
|
.It Sy spa_load_verify_shift Ns = Ns Sy 4 Po 1/16th Pc Pq uint
|
||||||
Sets the maximum number of bytes to consume during pool import to the log2
|
Sets the maximum number of bytes to consume during pool import to the log2
|
||||||
fraction of the target ARC size.
|
fraction of the target ARC size.
|
||||||
.
|
.
|
||||||
@ -470,7 +470,7 @@ new format when enabling the
|
|||||||
feature.
|
feature.
|
||||||
The default is to convert all log entries.
|
The default is to convert all log entries.
|
||||||
.
|
.
|
||||||
.It Sy vdev_removal_max_span Ns = Ns Sy 32768 Ns B Po 32 KiB Pc Pq int
|
.It Sy vdev_removal_max_span Ns = Ns Sy 32768 Ns B Po 32 KiB Pc Pq uint
|
||||||
During top-level vdev removal, chunks of data are copied from the vdev
|
During top-level vdev removal, chunks of data are copied from the vdev
|
||||||
which may include free space in order to trade bandwidth for IOPS.
|
which may include free space in order to trade bandwidth for IOPS.
|
||||||
This parameter determines the maximum span of free space, in bytes,
|
This parameter determines the maximum span of free space, in bytes,
|
||||||
@ -565,7 +565,7 @@ Percentage of ARC dnodes to try to scan in response to demand for non-metadata
|
|||||||
when the number of bytes consumed by dnodes exceeds
|
when the number of bytes consumed by dnodes exceeds
|
||||||
.Sy zfs_arc_dnode_limit .
|
.Sy zfs_arc_dnode_limit .
|
||||||
.
|
.
|
||||||
.It Sy zfs_arc_average_blocksize Ns = Ns Sy 8192 Ns B Po 8 KiB Pc Pq int
|
.It Sy zfs_arc_average_blocksize Ns = Ns Sy 8192 Ns B Po 8 KiB Pc Pq uint
|
||||||
The ARC's buffer hash table is sized based on the assumption of an average
|
The ARC's buffer hash table is sized based on the assumption of an average
|
||||||
block size of this value.
|
block size of this value.
|
||||||
This works out to roughly 1 MiB of hash table per 1 GiB of physical memory
|
This works out to roughly 1 MiB of hash table per 1 GiB of physical memory
|
||||||
@ -573,7 +573,7 @@ with 8-byte pointers.
|
|||||||
For configurations with a known larger average block size,
|
For configurations with a known larger average block size,
|
||||||
this value can be increased to reduce the memory footprint.
|
this value can be increased to reduce the memory footprint.
|
||||||
.
|
.
|
||||||
.It Sy zfs_arc_eviction_pct Ns = Ns Sy 200 Ns % Pq int
|
.It Sy zfs_arc_eviction_pct Ns = Ns Sy 200 Ns % Pq uint
|
||||||
When
|
When
|
||||||
.Fn arc_is_overflowing ,
|
.Fn arc_is_overflowing ,
|
||||||
.Fn arc_get_data_impl
|
.Fn arc_get_data_impl
|
||||||
@ -591,12 +591,12 @@ Since this is finite, it ensures that allocations can still happen,
|
|||||||
even during the potentially long time that
|
even during the potentially long time that
|
||||||
.Sy arc_size No is more than Sy arc_c .
|
.Sy arc_size No is more than Sy arc_c .
|
||||||
.
|
.
|
||||||
.It Sy zfs_arc_evict_batch_limit Ns = Ns Sy 10 Pq int
|
.It Sy zfs_arc_evict_batch_limit Ns = Ns Sy 10 Pq uint
|
||||||
Number ARC headers to evict per sub-list before proceeding to another sub-list.
|
Number ARC headers to evict per sub-list before proceeding to another sub-list.
|
||||||
This batch-style operation prevents entire sub-lists from being evicted at once
|
This batch-style operation prevents entire sub-lists from being evicted at once
|
||||||
but comes at a cost of additional unlocking and locking.
|
but comes at a cost of additional unlocking and locking.
|
||||||
.
|
.
|
||||||
.It Sy zfs_arc_grow_retry Ns = Ns Sy 0 Ns s Pq int
|
.It Sy zfs_arc_grow_retry Ns = Ns Sy 0 Ns s Pq uint
|
||||||
If set to a non zero value, it will replace the
|
If set to a non zero value, it will replace the
|
||||||
.Sy arc_grow_retry
|
.Sy arc_grow_retry
|
||||||
value with this value.
|
value with this value.
|
||||||
@ -635,7 +635,7 @@ It cannot be set back to
|
|||||||
while running, and reducing it below the current ARC size will not cause
|
while running, and reducing it below the current ARC size will not cause
|
||||||
the ARC to shrink without memory pressure to induce shrinking.
|
the ARC to shrink without memory pressure to induce shrinking.
|
||||||
.
|
.
|
||||||
.It Sy zfs_arc_meta_adjust_restarts Ns = Ns Sy 4096 Pq ulong
|
.It Sy zfs_arc_meta_adjust_restarts Ns = Ns Sy 4096 Pq uint
|
||||||
The number of restart passes to make while scanning the ARC attempting
|
The number of restart passes to make while scanning the ARC attempting
|
||||||
the free buffers in order to stay below the
|
the free buffers in order to stay below the
|
||||||
.Sy fs_arc_meta_limit .
|
.Sy fs_arc_meta_limit .
|
||||||
@ -681,7 +681,7 @@ Setting this value to
|
|||||||
.Sy 0
|
.Sy 0
|
||||||
will disable pruning the inode and dentry caches.
|
will disable pruning the inode and dentry caches.
|
||||||
.
|
.
|
||||||
.It Sy zfs_arc_meta_strategy Ns = Ns Sy 1 Ns | Ns 0 Pq int
|
.It Sy zfs_arc_meta_strategy Ns = Ns Sy 1 Ns | Ns 0 Pq uint
|
||||||
Define the strategy for ARC metadata buffer eviction (meta reclaim strategy):
|
Define the strategy for ARC metadata buffer eviction (meta reclaim strategy):
|
||||||
.Bl -tag -compact -offset 4n -width "0 (META_ONLY)"
|
.Bl -tag -compact -offset 4n -width "0 (META_ONLY)"
|
||||||
.It Sy 0 Pq META_ONLY
|
.It Sy 0 Pq META_ONLY
|
||||||
@ -699,10 +699,10 @@ will default to consuming the larger of
|
|||||||
and
|
and
|
||||||
.Sy all_system_memory No / Sy 32 .
|
.Sy all_system_memory No / Sy 32 .
|
||||||
.
|
.
|
||||||
.It Sy zfs_arc_min_prefetch_ms Ns = Ns Sy 0 Ns ms Ns Po Ns ≡ Ns 1s Pc Pq int
|
.It Sy zfs_arc_min_prefetch_ms Ns = Ns Sy 0 Ns ms Ns Po Ns ≡ Ns 1s Pc Pq uint
|
||||||
Minimum time prefetched blocks are locked in the ARC.
|
Minimum time prefetched blocks are locked in the ARC.
|
||||||
.
|
.
|
||||||
.It Sy zfs_arc_min_prescient_prefetch_ms Ns = Ns Sy 0 Ns ms Ns Po Ns ≡ Ns 6s Pc Pq int
|
.It Sy zfs_arc_min_prescient_prefetch_ms Ns = Ns Sy 0 Ns ms Ns Po Ns ≡ Ns 6s Pc Pq uint
|
||||||
Minimum time "prescient prefetched" blocks are locked in the ARC.
|
Minimum time "prescient prefetched" blocks are locked in the ARC.
|
||||||
These blocks are meant to be prefetched fairly aggressively ahead of
|
These blocks are meant to be prefetched fairly aggressively ahead of
|
||||||
the code that may use them.
|
the code that may use them.
|
||||||
@ -739,7 +739,7 @@ and to
|
|||||||
.Sy 134217728 Ns B Pq 128 MiB
|
.Sy 134217728 Ns B Pq 128 MiB
|
||||||
under Linux.
|
under Linux.
|
||||||
.
|
.
|
||||||
.It Sy zfs_multilist_num_sublists Ns = Ns Sy 0 Pq int
|
.It Sy zfs_multilist_num_sublists Ns = Ns Sy 0 Pq uint
|
||||||
To allow more fine-grained locking, each ARC state contains a series
|
To allow more fine-grained locking, each ARC state contains a series
|
||||||
of lists for both data and metadata objects.
|
of lists for both data and metadata objects.
|
||||||
Locking is performed at the level of these "sub-lists".
|
Locking is performed at the level of these "sub-lists".
|
||||||
@ -772,7 +772,7 @@ causes the ARC to start reclamation if it exceeds the target size by
|
|||||||
of the target size, and block allocations by
|
of the target size, and block allocations by
|
||||||
.Em 0.6% .
|
.Em 0.6% .
|
||||||
.
|
.
|
||||||
.It Sy zfs_arc_p_min_shift Ns = Ns Sy 0 Pq int
|
.It Sy zfs_arc_p_min_shift Ns = Ns Sy 0 Pq uint
|
||||||
If nonzero, this will update
|
If nonzero, this will update
|
||||||
.Sy arc_p_min_shift Pq default Sy 4
|
.Sy arc_p_min_shift Pq default Sy 4
|
||||||
with the new value.
|
with the new value.
|
||||||
@ -786,7 +786,7 @@ Disable
|
|||||||
adapt dampener, which reduces the maximum single adjustment to
|
adapt dampener, which reduces the maximum single adjustment to
|
||||||
.Sy arc_p .
|
.Sy arc_p .
|
||||||
.
|
.
|
||||||
.It Sy zfs_arc_shrink_shift Ns = Ns Sy 0 Pq int
|
.It Sy zfs_arc_shrink_shift Ns = Ns Sy 0 Pq uint
|
||||||
If nonzero, this will update
|
If nonzero, this will update
|
||||||
.Sy arc_shrink_shift Pq default Sy 7
|
.Sy arc_shrink_shift Pq default Sy 7
|
||||||
with the new value.
|
with the new value.
|
||||||
@ -837,7 +837,7 @@ Note that this should not be set below the ZED thresholds
|
|||||||
(currently 10 checksums over 10 seconds)
|
(currently 10 checksums over 10 seconds)
|
||||||
or else the daemon may not trigger any action.
|
or else the daemon may not trigger any action.
|
||||||
.
|
.
|
||||||
.It Sy zfs_commit_timeout_pct Ns = Ns Sy 5 Ns % Pq int
|
.It Sy zfs_commit_timeout_pct Ns = Ns Sy 5 Ns % Pq uint
|
||||||
This controls the amount of time that a ZIL block (lwb) will remain "open"
|
This controls the amount of time that a ZIL block (lwb) will remain "open"
|
||||||
when it isn't "full", and it has a thread waiting for it to be committed to
|
when it isn't "full", and it has a thread waiting for it to be committed to
|
||||||
stable storage.
|
stable storage.
|
||||||
@ -850,7 +850,7 @@ Vdev indirection layer (used for device removal) sleeps for this many
|
|||||||
milliseconds during mapping generation.
|
milliseconds during mapping generation.
|
||||||
Intended for use with the test suite to throttle vdev removal speed.
|
Intended for use with the test suite to throttle vdev removal speed.
|
||||||
.
|
.
|
||||||
.It Sy zfs_condense_indirect_obsolete_pct Ns = Ns Sy 25 Ns % Pq int
|
.It Sy zfs_condense_indirect_obsolete_pct Ns = Ns Sy 25 Ns % Pq uint
|
||||||
Minimum percent of obsolete bytes in vdev mapping required to attempt to condense
|
Minimum percent of obsolete bytes in vdev mapping required to attempt to condense
|
||||||
.Pq see Sy zfs_condense_indirect_vdevs_enable .
|
.Pq see Sy zfs_condense_indirect_vdevs_enable .
|
||||||
Intended for use with the test suite
|
Intended for use with the test suite
|
||||||
@ -887,7 +887,7 @@ to the file clears the log.
|
|||||||
This setting does not influence debug prints due to
|
This setting does not influence debug prints due to
|
||||||
.Sy zfs_flags .
|
.Sy zfs_flags .
|
||||||
.
|
.
|
||||||
.It Sy zfs_dbgmsg_maxsize Ns = Ns Sy 4194304 Ns B Po 4 MiB Pc Pq int
|
.It Sy zfs_dbgmsg_maxsize Ns = Ns Sy 4194304 Ns B Po 4 MiB Pc Pq uint
|
||||||
Maximum size of the internal ZFS debug log.
|
Maximum size of the internal ZFS debug log.
|
||||||
.
|
.
|
||||||
.It Sy zfs_dbuf_state_index Ns = Ns Sy 0 Pq int
|
.It Sy zfs_dbuf_state_index Ns = Ns Sy 0 Pq int
|
||||||
@ -952,7 +952,7 @@ milliseconds until the operation completes.
|
|||||||
.It Sy zfs_dedup_prefetch Ns = Ns Sy 0 Ns | Ns 1 Pq int
|
.It Sy zfs_dedup_prefetch Ns = Ns Sy 0 Ns | Ns 1 Pq int
|
||||||
Enable prefetching dedup-ed blocks which are going to be freed.
|
Enable prefetching dedup-ed blocks which are going to be freed.
|
||||||
.
|
.
|
||||||
.It Sy zfs_delay_min_dirty_percent Ns = Ns Sy 60 Ns % Pq int
|
.It Sy zfs_delay_min_dirty_percent Ns = Ns Sy 60 Ns % Pq uint
|
||||||
Start to delay each transaction once there is this amount of dirty data,
|
Start to delay each transaction once there is this amount of dirty data,
|
||||||
expressed as a percentage of
|
expressed as a percentage of
|
||||||
.Sy zfs_dirty_data_max .
|
.Sy zfs_dirty_data_max .
|
||||||
@ -1087,7 +1087,7 @@ This parameter takes precedence over
|
|||||||
Defaults to
|
Defaults to
|
||||||
.Sy physical_ram/4 ,
|
.Sy physical_ram/4 ,
|
||||||
.
|
.
|
||||||
.It Sy zfs_dirty_data_max_max_percent Ns = Ns Sy 25 Ns % Pq int
|
.It Sy zfs_dirty_data_max_max_percent Ns = Ns Sy 25 Ns % Pq uint
|
||||||
Maximum allowable value of
|
Maximum allowable value of
|
||||||
.Sy zfs_dirty_data_max ,
|
.Sy zfs_dirty_data_max ,
|
||||||
expressed as a percentage of physical RAM.
|
expressed as a percentage of physical RAM.
|
||||||
@ -1099,7 +1099,7 @@ The parameter
|
|||||||
takes precedence over this one.
|
takes precedence over this one.
|
||||||
.No See Sx ZFS TRANSACTION DELAY .
|
.No See Sx ZFS TRANSACTION DELAY .
|
||||||
.
|
.
|
||||||
.It Sy zfs_dirty_data_max_percent Ns = Ns Sy 10 Ns % Pq int
|
.It Sy zfs_dirty_data_max_percent Ns = Ns Sy 10 Ns % Pq uint
|
||||||
Determines the dirty space limit, expressed as a percentage of all memory.
|
Determines the dirty space limit, expressed as a percentage of all memory.
|
||||||
Once this limit is exceeded, new writes are halted until space frees up.
|
Once this limit is exceeded, new writes are halted until space frees up.
|
||||||
The parameter
|
The parameter
|
||||||
@ -1110,7 +1110,7 @@ takes precedence over this one.
|
|||||||
Subject to
|
Subject to
|
||||||
.Sy zfs_dirty_data_max_max .
|
.Sy zfs_dirty_data_max_max .
|
||||||
.
|
.
|
||||||
.It Sy zfs_dirty_data_sync_percent Ns = Ns Sy 20 Ns % Pq int
|
.It Sy zfs_dirty_data_sync_percent Ns = Ns Sy 20 Ns % Pq uint
|
||||||
Start syncing out a transaction group if there's at least this much dirty data
|
Start syncing out a transaction group if there's at least this much dirty data
|
||||||
.Pq as a percentage of Sy zfs_dirty_data_max .
|
.Pq as a percentage of Sy zfs_dirty_data_max .
|
||||||
This should be less than
|
This should be less than
|
||||||
@ -1191,15 +1191,15 @@ Maximum number of blocks freed in a single TXG.
|
|||||||
.It Sy zfs_max_async_dedup_frees Ns = Ns Sy 100000 Po 10^5 Pc Pq ulong
|
.It Sy zfs_max_async_dedup_frees Ns = Ns Sy 100000 Po 10^5 Pc Pq ulong
|
||||||
Maximum number of dedup blocks freed in a single TXG.
|
Maximum number of dedup blocks freed in a single TXG.
|
||||||
.
|
.
|
||||||
.It Sy zfs_vdev_async_read_max_active Ns = Ns Sy 3 Pq int
|
.It Sy zfs_vdev_async_read_max_active Ns = Ns Sy 3 Pq uint
|
||||||
Maximum asynchronous read I/O operations active to each device.
|
Maximum asynchronous read I/O operations active to each device.
|
||||||
.No See Sx ZFS I/O SCHEDULER .
|
.No See Sx ZFS I/O SCHEDULER .
|
||||||
.
|
.
|
||||||
.It Sy zfs_vdev_async_read_min_active Ns = Ns Sy 1 Pq int
|
.It Sy zfs_vdev_async_read_min_active Ns = Ns Sy 1 Pq uint
|
||||||
Minimum asynchronous read I/O operation active to each device.
|
Minimum asynchronous read I/O operation active to each device.
|
||||||
.No See Sx ZFS I/O SCHEDULER .
|
.No See Sx ZFS I/O SCHEDULER .
|
||||||
.
|
.
|
||||||
.It Sy zfs_vdev_async_write_active_max_dirty_percent Ns = Ns Sy 60 Ns % Pq int
|
.It Sy zfs_vdev_async_write_active_max_dirty_percent Ns = Ns Sy 60 Ns % Pq uint
|
||||||
When the pool has more than this much dirty data, use
|
When the pool has more than this much dirty data, use
|
||||||
.Sy zfs_vdev_async_write_max_active
|
.Sy zfs_vdev_async_write_max_active
|
||||||
to limit active async writes.
|
to limit active async writes.
|
||||||
@ -1207,7 +1207,7 @@ If the dirty data is between the minimum and maximum,
|
|||||||
the active I/O limit is linearly interpolated.
|
the active I/O limit is linearly interpolated.
|
||||||
.No See Sx ZFS I/O SCHEDULER .
|
.No See Sx ZFS I/O SCHEDULER .
|
||||||
.
|
.
|
||||||
.It Sy zfs_vdev_async_write_active_min_dirty_percent Ns = Ns Sy 30 Ns % Pq int
|
.It Sy zfs_vdev_async_write_active_min_dirty_percent Ns = Ns Sy 30 Ns % Pq uint
|
||||||
When the pool has less than this much dirty data, use
|
When the pool has less than this much dirty data, use
|
||||||
.Sy zfs_vdev_async_write_min_active
|
.Sy zfs_vdev_async_write_min_active
|
||||||
to limit active async writes.
|
to limit active async writes.
|
||||||
@ -1216,11 +1216,11 @@ the active I/O limit is linearly
|
|||||||
interpolated.
|
interpolated.
|
||||||
.No See Sx ZFS I/O SCHEDULER .
|
.No See Sx ZFS I/O SCHEDULER .
|
||||||
.
|
.
|
||||||
.It Sy zfs_vdev_async_write_max_active Ns = Ns Sy 30 Pq int
|
.It Sy zfs_vdev_async_write_max_active Ns = Ns Sy 30 Pq uint
|
||||||
Maximum asynchronous write I/O operations active to each device.
|
Maximum asynchronous write I/O operations active to each device.
|
||||||
.No See Sx ZFS I/O SCHEDULER .
|
.No See Sx ZFS I/O SCHEDULER .
|
||||||
.
|
.
|
||||||
.It Sy zfs_vdev_async_write_min_active Ns = Ns Sy 2 Pq int
|
.It Sy zfs_vdev_async_write_min_active Ns = Ns Sy 2 Pq uint
|
||||||
Minimum asynchronous write I/O operations active to each device.
|
Minimum asynchronous write I/O operations active to each device.
|
||||||
.No See Sx ZFS I/O SCHEDULER .
|
.No See Sx ZFS I/O SCHEDULER .
|
||||||
.Pp
|
.Pp
|
||||||
@ -1234,69 +1234,69 @@ A value of
|
|||||||
has been shown to improve resilver performance further at a cost of
|
has been shown to improve resilver performance further at a cost of
|
||||||
further increasing latency.
|
further increasing latency.
|
||||||
.
|
.
|
||||||
.It Sy zfs_vdev_initializing_max_active Ns = Ns Sy 1 Pq int
|
.It Sy zfs_vdev_initializing_max_active Ns = Ns Sy 1 Pq uint
|
||||||
Maximum initializing I/O operations active to each device.
|
Maximum initializing I/O operations active to each device.
|
||||||
.No See Sx ZFS I/O SCHEDULER .
|
.No See Sx ZFS I/O SCHEDULER .
|
||||||
.
|
.
|
||||||
.It Sy zfs_vdev_initializing_min_active Ns = Ns Sy 1 Pq int
|
.It Sy zfs_vdev_initializing_min_active Ns = Ns Sy 1 Pq uint
|
||||||
Minimum initializing I/O operations active to each device.
|
Minimum initializing I/O operations active to each device.
|
||||||
.No See Sx ZFS I/O SCHEDULER .
|
.No See Sx ZFS I/O SCHEDULER .
|
||||||
.
|
.
|
||||||
.It Sy zfs_vdev_max_active Ns = Ns Sy 1000 Pq int
|
.It Sy zfs_vdev_max_active Ns = Ns Sy 1000 Pq uint
|
||||||
The maximum number of I/O operations active to each device.
|
The maximum number of I/O operations active to each device.
|
||||||
Ideally, this will be at least the sum of each queue's
|
Ideally, this will be at least the sum of each queue's
|
||||||
.Sy max_active .
|
.Sy max_active .
|
||||||
.No See Sx ZFS I/O SCHEDULER .
|
.No See Sx ZFS I/O SCHEDULER .
|
||||||
.
|
.
|
||||||
.It Sy zfs_vdev_rebuild_max_active Ns = Ns Sy 3 Pq int
|
.It Sy zfs_vdev_rebuild_max_active Ns = Ns Sy 3 Pq uint
|
||||||
Maximum sequential resilver I/O operations active to each device.
|
Maximum sequential resilver I/O operations active to each device.
|
||||||
.No See Sx ZFS I/O SCHEDULER .
|
.No See Sx ZFS I/O SCHEDULER .
|
||||||
.
|
.
|
||||||
.It Sy zfs_vdev_rebuild_min_active Ns = Ns Sy 1 Pq int
|
.It Sy zfs_vdev_rebuild_min_active Ns = Ns Sy 1 Pq uint
|
||||||
Minimum sequential resilver I/O operations active to each device.
|
Minimum sequential resilver I/O operations active to each device.
|
||||||
.No See Sx ZFS I/O SCHEDULER .
|
.No See Sx ZFS I/O SCHEDULER .
|
||||||
.
|
.
|
||||||
.It Sy zfs_vdev_removal_max_active Ns = Ns Sy 2 Pq int
|
.It Sy zfs_vdev_removal_max_active Ns = Ns Sy 2 Pq uint
|
||||||
Maximum removal I/O operations active to each device.
|
Maximum removal I/O operations active to each device.
|
||||||
.No See Sx ZFS I/O SCHEDULER .
|
.No See Sx ZFS I/O SCHEDULER .
|
||||||
.
|
.
|
||||||
.It Sy zfs_vdev_removal_min_active Ns = Ns Sy 1 Pq int
|
.It Sy zfs_vdev_removal_min_active Ns = Ns Sy 1 Pq uint
|
||||||
Minimum removal I/O operations active to each device.
|
Minimum removal I/O operations active to each device.
|
||||||
.No See Sx ZFS I/O SCHEDULER .
|
.No See Sx ZFS I/O SCHEDULER .
|
||||||
.
|
.
|
||||||
.It Sy zfs_vdev_scrub_max_active Ns = Ns Sy 2 Pq int
|
.It Sy zfs_vdev_scrub_max_active Ns = Ns Sy 2 Pq uint
|
||||||
Maximum scrub I/O operations active to each device.
|
Maximum scrub I/O operations active to each device.
|
||||||
.No See Sx ZFS I/O SCHEDULER .
|
.No See Sx ZFS I/O SCHEDULER .
|
||||||
.
|
.
|
||||||
.It Sy zfs_vdev_scrub_min_active Ns = Ns Sy 1 Pq int
|
.It Sy zfs_vdev_scrub_min_active Ns = Ns Sy 1 Pq uint
|
||||||
Minimum scrub I/O operations active to each device.
|
Minimum scrub I/O operations active to each device.
|
||||||
.No See Sx ZFS I/O SCHEDULER .
|
.No See Sx ZFS I/O SCHEDULER .
|
||||||
.
|
.
|
||||||
.It Sy zfs_vdev_sync_read_max_active Ns = Ns Sy 10 Pq int
|
.It Sy zfs_vdev_sync_read_max_active Ns = Ns Sy 10 Pq uint
|
||||||
Maximum synchronous read I/O operations active to each device.
|
Maximum synchronous read I/O operations active to each device.
|
||||||
.No See Sx ZFS I/O SCHEDULER .
|
.No See Sx ZFS I/O SCHEDULER .
|
||||||
.
|
.
|
||||||
.It Sy zfs_vdev_sync_read_min_active Ns = Ns Sy 10 Pq int
|
.It Sy zfs_vdev_sync_read_min_active Ns = Ns Sy 10 Pq uint
|
||||||
Minimum synchronous read I/O operations active to each device.
|
Minimum synchronous read I/O operations active to each device.
|
||||||
.No See Sx ZFS I/O SCHEDULER .
|
.No See Sx ZFS I/O SCHEDULER .
|
||||||
.
|
.
|
||||||
.It Sy zfs_vdev_sync_write_max_active Ns = Ns Sy 10 Pq int
|
.It Sy zfs_vdev_sync_write_max_active Ns = Ns Sy 10 Pq uint
|
||||||
Maximum synchronous write I/O operations active to each device.
|
Maximum synchronous write I/O operations active to each device.
|
||||||
.No See Sx ZFS I/O SCHEDULER .
|
.No See Sx ZFS I/O SCHEDULER .
|
||||||
.
|
.
|
||||||
.It Sy zfs_vdev_sync_write_min_active Ns = Ns Sy 10 Pq int
|
.It Sy zfs_vdev_sync_write_min_active Ns = Ns Sy 10 Pq uint
|
||||||
Minimum synchronous write I/O operations active to each device.
|
Minimum synchronous write I/O operations active to each device.
|
||||||
.No See Sx ZFS I/O SCHEDULER .
|
.No See Sx ZFS I/O SCHEDULER .
|
||||||
.
|
.
|
||||||
.It Sy zfs_vdev_trim_max_active Ns = Ns Sy 2 Pq int
|
.It Sy zfs_vdev_trim_max_active Ns = Ns Sy 2 Pq uint
|
||||||
Maximum trim/discard I/O operations active to each device.
|
Maximum trim/discard I/O operations active to each device.
|
||||||
.No See Sx ZFS I/O SCHEDULER .
|
.No See Sx ZFS I/O SCHEDULER .
|
||||||
.
|
.
|
||||||
.It Sy zfs_vdev_trim_min_active Ns = Ns Sy 1 Pq int
|
.It Sy zfs_vdev_trim_min_active Ns = Ns Sy 1 Pq uint
|
||||||
Minimum trim/discard I/O operations active to each device.
|
Minimum trim/discard I/O operations active to each device.
|
||||||
.No See Sx ZFS I/O SCHEDULER .
|
.No See Sx ZFS I/O SCHEDULER .
|
||||||
.
|
.
|
||||||
.It Sy zfs_vdev_nia_delay Ns = Ns Sy 5 Pq int
|
.It Sy zfs_vdev_nia_delay Ns = Ns Sy 5 Pq uint
|
||||||
For non-interactive I/O (scrub, resilver, removal, initialize and rebuild),
|
For non-interactive I/O (scrub, resilver, removal, initialize and rebuild),
|
||||||
the number of concurrently-active I/O operations is limited to
|
the number of concurrently-active I/O operations is limited to
|
||||||
.Sy zfs_*_min_active ,
|
.Sy zfs_*_min_active ,
|
||||||
@ -1310,7 +1310,7 @@ and the number of concurrently-active non-interactive operations is increased to
|
|||||||
.Sy zfs_*_max_active .
|
.Sy zfs_*_max_active .
|
||||||
.No See Sx ZFS I/O SCHEDULER .
|
.No See Sx ZFS I/O SCHEDULER .
|
||||||
.
|
.
|
||||||
.It Sy zfs_vdev_nia_credit Ns = Ns Sy 5 Pq int
|
.It Sy zfs_vdev_nia_credit Ns = Ns Sy 5 Pq uint
|
||||||
Some HDDs tend to prioritize sequential I/O so strongly, that concurrent
|
Some HDDs tend to prioritize sequential I/O so strongly, that concurrent
|
||||||
random I/O latency reaches several seconds.
|
random I/O latency reaches several seconds.
|
||||||
On some HDDs this happens even if sequential I/O operations
|
On some HDDs this happens even if sequential I/O operations
|
||||||
@ -1325,7 +1325,7 @@ This enforced wait ensures the HDD services the interactive I/O
|
|||||||
within a reasonable amount of time.
|
within a reasonable amount of time.
|
||||||
.No See Sx ZFS I/O SCHEDULER .
|
.No See Sx ZFS I/O SCHEDULER .
|
||||||
.
|
.
|
||||||
.It Sy zfs_vdev_queue_depth_pct Ns = Ns Sy 1000 Ns % Pq int
|
.It Sy zfs_vdev_queue_depth_pct Ns = Ns Sy 1000 Ns % Pq uint
|
||||||
Maximum number of queued allocations per top-level vdev expressed as
|
Maximum number of queued allocations per top-level vdev expressed as
|
||||||
a percentage of
|
a percentage of
|
||||||
.Sy zfs_vdev_async_write_max_active ,
|
.Sy zfs_vdev_async_write_max_active ,
|
||||||
@ -1431,7 +1431,7 @@ but we chose the more conservative approach of not setting it,
|
|||||||
so that there is no possibility of
|
so that there is no possibility of
|
||||||
leaking space in the "partial temporary" failure case.
|
leaking space in the "partial temporary" failure case.
|
||||||
.
|
.
|
||||||
.It Sy zfs_free_min_time_ms Ns = Ns Sy 1000 Ns ms Po 1s Pc Pq int
|
.It Sy zfs_free_min_time_ms Ns = Ns Sy 1000 Ns ms Po 1s Pc Pq uint
|
||||||
During a
|
During a
|
||||||
.Nm zfs Cm destroy
|
.Nm zfs Cm destroy
|
||||||
operation using the
|
operation using the
|
||||||
@ -1439,7 +1439,7 @@ operation using the
|
|||||||
feature,
|
feature,
|
||||||
a minimum of this much time will be spent working on freeing blocks per TXG.
|
a minimum of this much time will be spent working on freeing blocks per TXG.
|
||||||
.
|
.
|
||||||
.It Sy zfs_obsolete_min_time_ms Ns = Ns Sy 500 Ns ms Pq int
|
.It Sy zfs_obsolete_min_time_ms Ns = Ns Sy 500 Ns ms Pq uint
|
||||||
Similar to
|
Similar to
|
||||||
.Sy zfs_free_min_time_ms ,
|
.Sy zfs_free_min_time_ms ,
|
||||||
but for cleanup of old indirection records for removed vdevs.
|
but for cleanup of old indirection records for removed vdevs.
|
||||||
@ -1518,7 +1518,7 @@ feature uses to estimate incoming log blocks.
|
|||||||
.It Sy zfs_max_logsm_summary_length Ns = Ns Sy 10 Pq ulong
|
.It Sy zfs_max_logsm_summary_length Ns = Ns Sy 10 Pq ulong
|
||||||
Maximum number of rows allowed in the summary of the spacemap log.
|
Maximum number of rows allowed in the summary of the spacemap log.
|
||||||
.
|
.
|
||||||
.It Sy zfs_max_recordsize Ns = Ns Sy 16777216 Po 16 MiB Pc Pq int
|
.It Sy zfs_max_recordsize Ns = Ns Sy 16777216 Po 16 MiB Pc Pq uint
|
||||||
We currently support block sizes from
|
We currently support block sizes from
|
||||||
.Em 512 Po 512 B Pc No to Em 16777216 Po 16 MiB Pc .
|
.Em 512 Po 512 B Pc No to Em 16777216 Po 16 MiB Pc .
|
||||||
The benefits of larger blocks, and thus larger I/O,
|
The benefits of larger blocks, and thus larger I/O,
|
||||||
@ -1537,13 +1537,13 @@ Normally disabled because these datasets may be missing key data.
|
|||||||
.It Sy zfs_min_metaslabs_to_flush Ns = Ns Sy 1 Pq ulong
|
.It Sy zfs_min_metaslabs_to_flush Ns = Ns Sy 1 Pq ulong
|
||||||
Minimum number of metaslabs to flush per dirty TXG.
|
Minimum number of metaslabs to flush per dirty TXG.
|
||||||
.
|
.
|
||||||
.It Sy zfs_metaslab_fragmentation_threshold Ns = Ns Sy 70 Ns % Pq int
|
.It Sy zfs_metaslab_fragmentation_threshold Ns = Ns Sy 70 Ns % Pq uint
|
||||||
Allow metaslabs to keep their active state as long as their fragmentation
|
Allow metaslabs to keep their active state as long as their fragmentation
|
||||||
percentage is no more than this value.
|
percentage is no more than this value.
|
||||||
An active metaslab that exceeds this threshold
|
An active metaslab that exceeds this threshold
|
||||||
will no longer keep its active status allowing better metaslabs to be selected.
|
will no longer keep its active status allowing better metaslabs to be selected.
|
||||||
.
|
.
|
||||||
.It Sy zfs_mg_fragmentation_threshold Ns = Ns Sy 95 Ns % Pq int
|
.It Sy zfs_mg_fragmentation_threshold Ns = Ns Sy 95 Ns % Pq uint
|
||||||
Metaslab groups are considered eligible for allocations if their
|
Metaslab groups are considered eligible for allocations if their
|
||||||
fragmentation metric (measured as a percentage) is less than or equal to
|
fragmentation metric (measured as a percentage) is less than or equal to
|
||||||
this value.
|
this value.
|
||||||
@ -1551,7 +1551,7 @@ If a metaslab group exceeds this threshold then it will be
|
|||||||
skipped unless all metaslab groups within the metaslab class have also
|
skipped unless all metaslab groups within the metaslab class have also
|
||||||
crossed this threshold.
|
crossed this threshold.
|
||||||
.
|
.
|
||||||
.It Sy zfs_mg_noalloc_threshold Ns = Ns Sy 0 Ns % Pq int
|
.It Sy zfs_mg_noalloc_threshold Ns = Ns Sy 0 Ns % Pq uint
|
||||||
Defines a threshold at which metaslab groups should be eligible for allocations.
|
Defines a threshold at which metaslab groups should be eligible for allocations.
|
||||||
The value is expressed as a percentage of free space
|
The value is expressed as a percentage of free space
|
||||||
beyond which a metaslab group is always eligible for allocations.
|
beyond which a metaslab group is always eligible for allocations.
|
||||||
@ -1580,7 +1580,7 @@ If enabled, ZFS will place DDT data into the special allocation class.
|
|||||||
If enabled, ZFS will place user data indirect blocks
|
If enabled, ZFS will place user data indirect blocks
|
||||||
into the special allocation class.
|
into the special allocation class.
|
||||||
.
|
.
|
||||||
.It Sy zfs_multihost_history Ns = Ns Sy 0 Pq int
|
.It Sy zfs_multihost_history Ns = Ns Sy 0 Pq uint
|
||||||
Historical statistics for this many latest multihost updates will be available in
|
Historical statistics for this many latest multihost updates will be available in
|
||||||
.Pa /proc/spl/kstat/zfs/ Ns Ao Ar pool Ac Ns Pa /multihost .
|
.Pa /proc/spl/kstat/zfs/ Ns Ao Ar pool Ac Ns Pa /multihost .
|
||||||
.
|
.
|
||||||
@ -1671,7 +1671,7 @@ The number of bytes which should be prefetched during a pool traversal, like
|
|||||||
.Nm zfs Cm send
|
.Nm zfs Cm send
|
||||||
or other data crawling operations.
|
or other data crawling operations.
|
||||||
.
|
.
|
||||||
.It Sy zfs_traverse_indirect_prefetch_limit Ns = Ns Sy 32 Pq int
|
.It Sy zfs_traverse_indirect_prefetch_limit Ns = Ns Sy 32 Pq uint
|
||||||
The number of blocks pointed by indirect (non-L0) block which should be
|
The number of blocks pointed by indirect (non-L0) block which should be
|
||||||
prefetched during a pool traversal, like
|
prefetched during a pool traversal, like
|
||||||
.Nm zfs Cm send
|
.Nm zfs Cm send
|
||||||
@ -1708,7 +1708,7 @@ hardware as long as support is compiled in and the QAT driver is present.
|
|||||||
.It Sy zfs_vnops_read_chunk_size Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq long
|
.It Sy zfs_vnops_read_chunk_size Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq long
|
||||||
Bytes to read per chunk.
|
Bytes to read per chunk.
|
||||||
.
|
.
|
||||||
.It Sy zfs_read_history Ns = Ns Sy 0 Pq int
|
.It Sy zfs_read_history Ns = Ns Sy 0 Pq uint
|
||||||
Historical statistics for this many latest reads will be available in
|
Historical statistics for this many latest reads will be available in
|
||||||
.Pa /proc/spl/kstat/zfs/ Ns Ao Ar pool Ac Ns Pa /reads .
|
.Pa /proc/spl/kstat/zfs/ Ns Ao Ar pool Ac Ns Pa /reads .
|
||||||
.
|
.
|
||||||
@ -1753,11 +1753,11 @@ and is hence not recommended.
|
|||||||
This should only be used as a last resort when the
|
This should only be used as a last resort when the
|
||||||
pool cannot be returned to a healthy state prior to removing the device.
|
pool cannot be returned to a healthy state prior to removing the device.
|
||||||
.
|
.
|
||||||
.It Sy zfs_removal_suspend_progress Ns = Ns Sy 0 Ns | Ns 1 Pq int
|
.It Sy zfs_removal_suspend_progress Ns = Ns Sy 0 Ns | Ns 1 Pq uint
|
||||||
This is used by the test suite so that it can ensure that certain actions
|
This is used by the test suite so that it can ensure that certain actions
|
||||||
happen while in the middle of a removal.
|
happen while in the middle of a removal.
|
||||||
.
|
.
|
||||||
.It Sy zfs_remove_max_segment Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq int
|
.It Sy zfs_remove_max_segment Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq uint
|
||||||
The largest contiguous segment that we will attempt to allocate when removing
|
The largest contiguous segment that we will attempt to allocate when removing
|
||||||
a device.
|
a device.
|
||||||
If there is a performance problem with attempting to allocate large blocks,
|
If there is a performance problem with attempting to allocate large blocks,
|
||||||
@ -1770,7 +1770,7 @@ Ignore the
|
|||||||
feature, causing an operation that would start a resilver to
|
feature, causing an operation that would start a resilver to
|
||||||
immediately restart the one in progress.
|
immediately restart the one in progress.
|
||||||
.
|
.
|
||||||
.It Sy zfs_resilver_min_time_ms Ns = Ns Sy 3000 Ns ms Po 3 s Pc Pq int
|
.It Sy zfs_resilver_min_time_ms Ns = Ns Sy 3000 Ns ms Po 3 s Pc Pq uint
|
||||||
Resilvers are processed by the sync thread.
|
Resilvers are processed by the sync thread.
|
||||||
While resilvering, it will spend at least this much time
|
While resilvering, it will spend at least this much time
|
||||||
working on a resilver between TXG flushes.
|
working on a resilver between TXG flushes.
|
||||||
@ -1781,17 +1781,17 @@ even if there were unrepairable errors.
|
|||||||
Intended to be used during pool repair or recovery to
|
Intended to be used during pool repair or recovery to
|
||||||
stop resilvering when the pool is next imported.
|
stop resilvering when the pool is next imported.
|
||||||
.
|
.
|
||||||
.It Sy zfs_scrub_min_time_ms Ns = Ns Sy 1000 Ns ms Po 1 s Pc Pq int
|
.It Sy zfs_scrub_min_time_ms Ns = Ns Sy 1000 Ns ms Po 1 s Pc Pq uint
|
||||||
Scrubs are processed by the sync thread.
|
Scrubs are processed by the sync thread.
|
||||||
While scrubbing, it will spend at least this much time
|
While scrubbing, it will spend at least this much time
|
||||||
working on a scrub between TXG flushes.
|
working on a scrub between TXG flushes.
|
||||||
.
|
.
|
||||||
.It Sy zfs_scan_checkpoint_intval Ns = Ns Sy 7200 Ns s Po 2 hour Pc Pq int
|
.It Sy zfs_scan_checkpoint_intval Ns = Ns Sy 7200 Ns s Po 2 hour Pc Pq uint
|
||||||
To preserve progress across reboots, the sequential scan algorithm periodically
|
To preserve progress across reboots, the sequential scan algorithm periodically
|
||||||
needs to stop metadata scanning and issue all the verification I/O to disk.
|
needs to stop metadata scanning and issue all the verification I/O to disk.
|
||||||
The frequency of this flushing is determined by this tunable.
|
The frequency of this flushing is determined by this tunable.
|
||||||
.
|
.
|
||||||
.It Sy zfs_scan_fill_weight Ns = Ns Sy 3 Pq int
|
.It Sy zfs_scan_fill_weight Ns = Ns Sy 3 Pq uint
|
||||||
This tunable affects how scrub and resilver I/O segments are ordered.
|
This tunable affects how scrub and resilver I/O segments are ordered.
|
||||||
A higher number indicates that we care more about how filled in a segment is,
|
A higher number indicates that we care more about how filled in a segment is,
|
||||||
while a lower number indicates we care more about the size of the extent without
|
while a lower number indicates we care more about the size of the extent without
|
||||||
@ -1799,7 +1799,7 @@ considering the gaps within a segment.
|
|||||||
This value is only tunable upon module insertion.
|
This value is only tunable upon module insertion.
|
||||||
Changing the value afterwards will have no effect on scrub or resilver performance.
|
Changing the value afterwards will have no effect on scrub or resilver performance.
|
||||||
.
|
.
|
||||||
.It Sy zfs_scan_issue_strategy Ns = Ns Sy 0 Pq int
|
.It Sy zfs_scan_issue_strategy Ns = Ns Sy 0 Pq uint
|
||||||
Determines the order that data will be verified while scrubbing or resilvering:
|
Determines the order that data will be verified while scrubbing or resilvering:
|
||||||
.Bl -tag -compact -offset 4n -width "a"
|
.Bl -tag -compact -offset 4n -width "a"
|
||||||
.It Sy 1
|
.It Sy 1
|
||||||
@ -1829,14 +1829,14 @@ that will still be considered sequential for sorting purposes.
|
|||||||
Changing this value will not
|
Changing this value will not
|
||||||
affect scrubs or resilvers that are already in progress.
|
affect scrubs or resilvers that are already in progress.
|
||||||
.
|
.
|
||||||
.It Sy zfs_scan_mem_lim_fact Ns = Ns Sy 20 Ns ^-1 Pq int
|
.It Sy zfs_scan_mem_lim_fact Ns = Ns Sy 20 Ns ^-1 Pq uint
|
||||||
Maximum fraction of RAM used for I/O sorting by sequential scan algorithm.
|
Maximum fraction of RAM used for I/O sorting by sequential scan algorithm.
|
||||||
This tunable determines the hard limit for I/O sorting memory usage.
|
This tunable determines the hard limit for I/O sorting memory usage.
|
||||||
When the hard limit is reached we stop scanning metadata and start issuing
|
When the hard limit is reached we stop scanning metadata and start issuing
|
||||||
data verification I/O.
|
data verification I/O.
|
||||||
This is done until we get below the soft limit.
|
This is done until we get below the soft limit.
|
||||||
.
|
.
|
||||||
.It Sy zfs_scan_mem_lim_soft_fact Ns = Ns Sy 20 Ns ^-1 Pq int
|
.It Sy zfs_scan_mem_lim_soft_fact Ns = Ns Sy 20 Ns ^-1 Pq uint
|
||||||
The fraction of the hard limit used to determine the soft limit for I/O sorting
|
The fraction of the hard limit used to determine the soft limit for I/O sorting
|
||||||
by the sequential scan algorithm.
|
by the sequential scan algorithm.
|
||||||
When we cross this limit from below no action is taken.
|
When we cross this limit from below no action is taken.
|
||||||
@ -1866,41 +1866,41 @@ remove the spill block from an existing object.
|
|||||||
Including unmodified copies of the spill blocks creates a backwards-compatible
|
Including unmodified copies of the spill blocks creates a backwards-compatible
|
||||||
stream which will recreate a spill block if it was incorrectly removed.
|
stream which will recreate a spill block if it was incorrectly removed.
|
||||||
.
|
.
|
||||||
.It Sy zfs_send_no_prefetch_queue_ff Ns = Ns Sy 20 Ns ^\-1 Pq int
|
.It Sy zfs_send_no_prefetch_queue_ff Ns = Ns Sy 20 Ns ^\-1 Pq uint
|
||||||
The fill fraction of the
|
The fill fraction of the
|
||||||
.Nm zfs Cm send
|
.Nm zfs Cm send
|
||||||
internal queues.
|
internal queues.
|
||||||
The fill fraction controls the timing with which internal threads are woken up.
|
The fill fraction controls the timing with which internal threads are woken up.
|
||||||
.
|
.
|
||||||
.It Sy zfs_send_no_prefetch_queue_length Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq int
|
.It Sy zfs_send_no_prefetch_queue_length Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq uint
|
||||||
The maximum number of bytes allowed in
|
The maximum number of bytes allowed in
|
||||||
.Nm zfs Cm send Ns 's
|
.Nm zfs Cm send Ns 's
|
||||||
internal queues.
|
internal queues.
|
||||||
.
|
.
|
||||||
.It Sy zfs_send_queue_ff Ns = Ns Sy 20 Ns ^\-1 Pq int
|
.It Sy zfs_send_queue_ff Ns = Ns Sy 20 Ns ^\-1 Pq uint
|
||||||
The fill fraction of the
|
The fill fraction of the
|
||||||
.Nm zfs Cm send
|
.Nm zfs Cm send
|
||||||
prefetch queue.
|
prefetch queue.
|
||||||
The fill fraction controls the timing with which internal threads are woken up.
|
The fill fraction controls the timing with which internal threads are woken up.
|
||||||
.
|
.
|
||||||
.It Sy zfs_send_queue_length Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq int
|
.It Sy zfs_send_queue_length Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq uint
|
||||||
The maximum number of bytes allowed that will be prefetched by
|
The maximum number of bytes allowed that will be prefetched by
|
||||||
.Nm zfs Cm send .
|
.Nm zfs Cm send .
|
||||||
This value must be at least twice the maximum block size in use.
|
This value must be at least twice the maximum block size in use.
|
||||||
.
|
.
|
||||||
.It Sy zfs_recv_queue_ff Ns = Ns Sy 20 Ns ^\-1 Pq int
|
.It Sy zfs_recv_queue_ff Ns = Ns Sy 20 Ns ^\-1 Pq uint
|
||||||
The fill fraction of the
|
The fill fraction of the
|
||||||
.Nm zfs Cm receive
|
.Nm zfs Cm receive
|
||||||
queue.
|
queue.
|
||||||
The fill fraction controls the timing with which internal threads are woken up.
|
The fill fraction controls the timing with which internal threads are woken up.
|
||||||
.
|
.
|
||||||
.It Sy zfs_recv_queue_length Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq int
|
.It Sy zfs_recv_queue_length Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq uint
|
||||||
The maximum number of bytes allowed in the
|
The maximum number of bytes allowed in the
|
||||||
.Nm zfs Cm receive
|
.Nm zfs Cm receive
|
||||||
queue.
|
queue.
|
||||||
This value must be at least twice the maximum block size in use.
|
This value must be at least twice the maximum block size in use.
|
||||||
.
|
.
|
||||||
.It Sy zfs_recv_write_batch_size Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq int
|
.It Sy zfs_recv_write_batch_size Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq uint
|
||||||
The maximum amount of data, in bytes, that
|
The maximum amount of data, in bytes, that
|
||||||
.Nm zfs Cm receive
|
.Nm zfs Cm receive
|
||||||
will write in one DMU transaction.
|
will write in one DMU transaction.
|
||||||
@ -1920,7 +1920,7 @@ If there is an error during healing, the healing receive is not
|
|||||||
terminated instead it moves on to the next record.
|
terminated instead it moves on to the next record.
|
||||||
.El
|
.El
|
||||||
.
|
.
|
||||||
.It Sy zfs_override_estimate_recordsize Ns = Ns Sy 0 Ns | Ns 1 Pq ulong
|
.It Sy zfs_override_estimate_recordsize Ns = Ns Sy 0 Ns | Ns 1 Pq uint
|
||||||
Setting this variable overrides the default logic for estimating block
|
Setting this variable overrides the default logic for estimating block
|
||||||
sizes when doing a
|
sizes when doing a
|
||||||
.Nm zfs Cm send .
|
.Nm zfs Cm send .
|
||||||
@ -1929,7 +1929,7 @@ will be the current recordsize.
|
|||||||
Override this value if most data in your dataset is not of that size
|
Override this value if most data in your dataset is not of that size
|
||||||
and you require accurate zfs send size estimates.
|
and you require accurate zfs send size estimates.
|
||||||
.
|
.
|
||||||
.It Sy zfs_sync_pass_deferred_free Ns = Ns Sy 2 Pq int
|
.It Sy zfs_sync_pass_deferred_free Ns = Ns Sy 2 Pq uint
|
||||||
Flushing of data to disk is done in passes.
|
Flushing of data to disk is done in passes.
|
||||||
Defer frees starting in this pass.
|
Defer frees starting in this pass.
|
||||||
.
|
.
|
||||||
@ -1937,13 +1937,13 @@ Defer frees starting in this pass.
|
|||||||
Maximum memory used for prefetching a checkpoint's space map on each
|
Maximum memory used for prefetching a checkpoint's space map on each
|
||||||
vdev while discarding the checkpoint.
|
vdev while discarding the checkpoint.
|
||||||
.
|
.
|
||||||
.It Sy zfs_special_class_metadata_reserve_pct Ns = Ns Sy 25 Ns % Pq int
|
.It Sy zfs_special_class_metadata_reserve_pct Ns = Ns Sy 25 Ns % Pq uint
|
||||||
Only allow small data blocks to be allocated on the special and dedup vdev
|
Only allow small data blocks to be allocated on the special and dedup vdev
|
||||||
types when the available free space percentage on these vdevs exceeds this value.
|
types when the available free space percentage on these vdevs exceeds this value.
|
||||||
This ensures reserved space is available for pool metadata as the
|
This ensures reserved space is available for pool metadata as the
|
||||||
special vdevs approach capacity.
|
special vdevs approach capacity.
|
||||||
.
|
.
|
||||||
.It Sy zfs_sync_pass_dont_compress Ns = Ns Sy 8 Pq int
|
.It Sy zfs_sync_pass_dont_compress Ns = Ns Sy 8 Pq uint
|
||||||
Starting in this sync pass, disable compression (including of metadata).
|
Starting in this sync pass, disable compression (including of metadata).
|
||||||
With the default setting, in practice, we don't have this many sync passes,
|
With the default setting, in practice, we don't have this many sync passes,
|
||||||
so this has no effect.
|
so this has no effect.
|
||||||
@ -1964,7 +1964,7 @@ allocations are especially detrimental to performance
|
|||||||
on highly fragmented systems, which may have very few free segments of this size,
|
on highly fragmented systems, which may have very few free segments of this size,
|
||||||
and may need to load new metaslabs to satisfy these allocations.
|
and may need to load new metaslabs to satisfy these allocations.
|
||||||
.
|
.
|
||||||
.It Sy zfs_sync_pass_rewrite Ns = Ns Sy 2 Pq int
|
.It Sy zfs_sync_pass_rewrite Ns = Ns Sy 2 Pq uint
|
||||||
Rewrite new block pointers starting in this pass.
|
Rewrite new block pointers starting in this pass.
|
||||||
.
|
.
|
||||||
.It Sy zfs_sync_taskq_batch_pct Ns = Ns Sy 75 Ns % Pq int
|
.It Sy zfs_sync_taskq_batch_pct Ns = Ns Sy 75 Ns % Pq int
|
||||||
@ -2013,35 +2013,35 @@ The default of
|
|||||||
.Sy 32
|
.Sy 32
|
||||||
was determined to be a reasonable compromise.
|
was determined to be a reasonable compromise.
|
||||||
.
|
.
|
||||||
.It Sy zfs_txg_history Ns = Ns Sy 0 Pq int
|
.It Sy zfs_txg_history Ns = Ns Sy 0 Pq uint
|
||||||
Historical statistics for this many latest TXGs will be available in
|
Historical statistics for this many latest TXGs will be available in
|
||||||
.Pa /proc/spl/kstat/zfs/ Ns Ao Ar pool Ac Ns Pa /TXGs .
|
.Pa /proc/spl/kstat/zfs/ Ns Ao Ar pool Ac Ns Pa /TXGs .
|
||||||
.
|
.
|
||||||
.It Sy zfs_txg_timeout Ns = Ns Sy 5 Ns s Pq int
|
.It Sy zfs_txg_timeout Ns = Ns Sy 5 Ns s Pq uint
|
||||||
Flush dirty data to disk at least every this many seconds (maximum TXG duration).
|
Flush dirty data to disk at least every this many seconds (maximum TXG duration).
|
||||||
.
|
.
|
||||||
.It Sy zfs_vdev_aggregate_trim Ns = Ns Sy 0 Ns | Ns 1 Pq int
|
.It Sy zfs_vdev_aggregate_trim Ns = Ns Sy 0 Ns | Ns 1 Pq uint
|
||||||
Allow TRIM I/O operations to be aggregated.
|
Allow TRIM I/O operations to be aggregated.
|
||||||
This is normally not helpful because the extents to be trimmed
|
This is normally not helpful because the extents to be trimmed
|
||||||
will have been already been aggregated by the metaslab.
|
will have been already been aggregated by the metaslab.
|
||||||
This option is provided for debugging and performance analysis.
|
This option is provided for debugging and performance analysis.
|
||||||
.
|
.
|
||||||
.It Sy zfs_vdev_aggregation_limit Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq int
|
.It Sy zfs_vdev_aggregation_limit Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq uint
|
||||||
Max vdev I/O aggregation size.
|
Max vdev I/O aggregation size.
|
||||||
.
|
.
|
||||||
.It Sy zfs_vdev_aggregation_limit_non_rotating Ns = Ns Sy 131072 Ns B Po 128 KiB Pc Pq int
|
.It Sy zfs_vdev_aggregation_limit_non_rotating Ns = Ns Sy 131072 Ns B Po 128 KiB Pc Pq uint
|
||||||
Max vdev I/O aggregation size for non-rotating media.
|
Max vdev I/O aggregation size for non-rotating media.
|
||||||
.
|
.
|
||||||
.It Sy zfs_vdev_cache_bshift Ns = Ns Sy 16 Po 64 KiB Pc Pq int
|
.It Sy zfs_vdev_cache_bshift Ns = Ns Sy 16 Po 64 KiB Pc Pq uint
|
||||||
Shift size to inflate reads to.
|
Shift size to inflate reads to.
|
||||||
.
|
.
|
||||||
.It Sy zfs_vdev_cache_max Ns = Ns Sy 16384 Ns B Po 16 KiB Pc Pq int
|
.It Sy zfs_vdev_cache_max Ns = Ns Sy 16384 Ns B Po 16 KiB Pc Pq uint
|
||||||
Inflate reads smaller than this value to meet the
|
Inflate reads smaller than this value to meet the
|
||||||
.Sy zfs_vdev_cache_bshift
|
.Sy zfs_vdev_cache_bshift
|
||||||
size
|
size
|
||||||
.Pq default Sy 64 KiB .
|
.Pq default Sy 64 KiB .
|
||||||
.
|
.
|
||||||
.It Sy zfs_vdev_cache_size Ns = Ns Sy 0 Pq int
|
.It Sy zfs_vdev_cache_size Ns = Ns Sy 0 Pq uint
|
||||||
Total size of the per-disk cache in bytes.
|
Total size of the per-disk cache in bytes.
|
||||||
.Pp
|
.Pp
|
||||||
Currently this feature is disabled, as it has been found to not be helpful
|
Currently this feature is disabled, as it has been found to not be helpful
|
||||||
@ -2079,11 +2079,11 @@ locality as defined by the
|
|||||||
Operations within this that are not immediately following the previous operation
|
Operations within this that are not immediately following the previous operation
|
||||||
are incremented by half.
|
are incremented by half.
|
||||||
.
|
.
|
||||||
.It Sy zfs_vdev_read_gap_limit Ns = Ns Sy 32768 Ns B Po 32 KiB Pc Pq int
|
.It Sy zfs_vdev_read_gap_limit Ns = Ns Sy 32768 Ns B Po 32 KiB Pc Pq uint
|
||||||
Aggregate read I/O operations if the on-disk gap between them is within this
|
Aggregate read I/O operations if the on-disk gap between them is within this
|
||||||
threshold.
|
threshold.
|
||||||
.
|
.
|
||||||
.It Sy zfs_vdev_write_gap_limit Ns = Ns Sy 4096 Ns B Po 4 KiB Pc Pq int
|
.It Sy zfs_vdev_write_gap_limit Ns = Ns Sy 4096 Ns B Po 4 KiB Pc Pq uint
|
||||||
Aggregate write I/O operations if the on-disk gap between them is within this
|
Aggregate write I/O operations if the on-disk gap between them is within this
|
||||||
threshold.
|
threshold.
|
||||||
.
|
.
|
||||||
@ -2120,7 +2120,7 @@ powerpc_altivec Altivec PowerPC
|
|||||||
.Sy DEPRECATED .
|
.Sy DEPRECATED .
|
||||||
Prints warning to kernel log for compatibility.
|
Prints warning to kernel log for compatibility.
|
||||||
.
|
.
|
||||||
.It Sy zfs_zevent_len_max Ns = Ns Sy 512 Pq int
|
.It Sy zfs_zevent_len_max Ns = Ns Sy 512 Pq uint
|
||||||
Max event queue length.
|
Max event queue length.
|
||||||
Events in the queue can be viewed with
|
Events in the queue can be viewed with
|
||||||
.Xr zpool-events 8 .
|
.Xr zpool-events 8 .
|
||||||
@ -2150,7 +2150,7 @@ The default value of
|
|||||||
.Sy 100%
|
.Sy 100%
|
||||||
will create a maximum of one thread per cpu.
|
will create a maximum of one thread per cpu.
|
||||||
.
|
.
|
||||||
.It Sy zil_maxblocksize Ns = Ns Sy 131072 Ns B Po 128 KiB Pc Pq int
|
.It Sy zil_maxblocksize Ns = Ns Sy 131072 Ns B Po 128 KiB Pc Pq uint
|
||||||
This sets the maximum block size used by the ZIL.
|
This sets the maximum block size used by the ZIL.
|
||||||
On very fragmented pools, lowering this
|
On very fragmented pools, lowering this
|
||||||
.Pq typically to Sy 36 KiB
|
.Pq typically to Sy 36 KiB
|
||||||
@ -2181,7 +2181,7 @@ This would only be necessary to work around bugs in the ZIL logging or replay
|
|||||||
code for this record type.
|
code for this record type.
|
||||||
The tunable has no effect if the feature is disabled.
|
The tunable has no effect if the feature is disabled.
|
||||||
.
|
.
|
||||||
.It Sy zfs_embedded_slog_min_ms Ns = Ns Sy 64 Pq int
|
.It Sy zfs_embedded_slog_min_ms Ns = Ns Sy 64 Pq uint
|
||||||
Usually, one metaslab from each normal-class vdev is dedicated for use by
|
Usually, one metaslab from each normal-class vdev is dedicated for use by
|
||||||
the ZIL to log synchronous writes.
|
the ZIL to log synchronous writes.
|
||||||
However, if there are fewer than
|
However, if there are fewer than
|
||||||
@ -2189,11 +2189,11 @@ However, if there are fewer than
|
|||||||
metaslabs in the vdev, this functionality is disabled.
|
metaslabs in the vdev, this functionality is disabled.
|
||||||
This ensures that we don't set aside an unreasonable amount of space for the ZIL.
|
This ensures that we don't set aside an unreasonable amount of space for the ZIL.
|
||||||
.
|
.
|
||||||
.It Sy zstd_earlyabort_pass Ns = Ns Sy 1 Pq int
|
.It Sy zstd_earlyabort_pass Ns = Ns Sy 1 Pq uint
|
||||||
Whether heuristic for detection of incompressible data with zstd levels >= 3
|
Whether heuristic for detection of incompressible data with zstd levels >= 3
|
||||||
using LZ4 and zstd-1 passes is enabled.
|
using LZ4 and zstd-1 passes is enabled.
|
||||||
.
|
.
|
||||||
.It Sy zstd_abort_size Ns = Ns Sy 131072 Pq int
|
.It Sy zstd_abort_size Ns = Ns Sy 131072 Pq uint
|
||||||
Minimal uncompressed size (inclusive) of a record before the early abort
|
Minimal uncompressed size (inclusive) of a record before the early abort
|
||||||
heuristic will be attempted.
|
heuristic will be attempted.
|
||||||
.
|
.
|
||||||
|
@ -138,7 +138,7 @@ arc_default_max(uint64_t min, uint64_t allmem)
|
|||||||
static void
|
static void
|
||||||
arc_prune_task(void *arg)
|
arc_prune_task(void *arg)
|
||||||
{
|
{
|
||||||
int64_t nr_scan = (intptr_t)arg;
|
uint64_t nr_scan = (uintptr_t)arg;
|
||||||
|
|
||||||
arc_reduce_target_size(ptob(nr_scan));
|
arc_reduce_target_size(ptob(nr_scan));
|
||||||
|
|
||||||
@ -168,12 +168,12 @@ arc_prune_task(void *arg)
|
|||||||
* for releasing it once the registered arc_prune_func_t has completed.
|
* for releasing it once the registered arc_prune_func_t has completed.
|
||||||
*/
|
*/
|
||||||
void
|
void
|
||||||
arc_prune_async(int64_t adjust)
|
arc_prune_async(uint64_t adjust)
|
||||||
{
|
{
|
||||||
|
|
||||||
#ifndef __LP64__
|
#ifndef __LP64__
|
||||||
if (adjust > INTPTR_MAX)
|
if (adjust > UINTPTR_MAX)
|
||||||
adjust = INTPTR_MAX;
|
adjust = UINTPTR_MAX;
|
||||||
#endif
|
#endif
|
||||||
taskq_dispatch(arc_prune_taskq, arc_prune_task,
|
taskq_dispatch(arc_prune_taskq, arc_prune_task,
|
||||||
(void *)(intptr_t)adjust, TQ_SLEEP);
|
(void *)(intptr_t)adjust, TQ_SLEEP);
|
||||||
|
@ -514,19 +514,19 @@ SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, sm_blksz_with_log,
|
|||||||
* space map representation must be before we compact it on-disk.
|
* space map representation must be before we compact it on-disk.
|
||||||
* Values should be greater than or equal to 100.
|
* Values should be greater than or equal to 100.
|
||||||
*/
|
*/
|
||||||
extern int zfs_condense_pct;
|
extern uint_t zfs_condense_pct;
|
||||||
|
|
||||||
/* BEGIN CSTYLED */
|
/* BEGIN CSTYLED */
|
||||||
SYSCTL_INT(_vfs_zfs, OID_AUTO, condense_pct,
|
SYSCTL_UINT(_vfs_zfs, OID_AUTO, condense_pct,
|
||||||
CTLFLAG_RWTUN, &zfs_condense_pct, 0,
|
CTLFLAG_RWTUN, &zfs_condense_pct, 0,
|
||||||
"Condense on-disk spacemap when it is more than this many percents"
|
"Condense on-disk spacemap when it is more than this many percents"
|
||||||
" of in-memory counterpart");
|
" of in-memory counterpart");
|
||||||
/* END CSTYLED */
|
/* END CSTYLED */
|
||||||
|
|
||||||
extern int zfs_remove_max_segment;
|
extern uint_t zfs_remove_max_segment;
|
||||||
|
|
||||||
/* BEGIN CSTYLED */
|
/* BEGIN CSTYLED */
|
||||||
SYSCTL_INT(_vfs_zfs, OID_AUTO, remove_max_segment,
|
SYSCTL_UINT(_vfs_zfs, OID_AUTO, remove_max_segment,
|
||||||
CTLFLAG_RWTUN, &zfs_remove_max_segment, 0,
|
CTLFLAG_RWTUN, &zfs_remove_max_segment, 0,
|
||||||
"Largest contiguous segment ZFS will attempt to allocate when removing"
|
"Largest contiguous segment ZFS will attempt to allocate when removing"
|
||||||
" a device");
|
" a device");
|
||||||
@ -561,10 +561,10 @@ SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, df_alloc_threshold,
|
|||||||
* Once the space map's free space drops below this level we dynamically
|
* Once the space map's free space drops below this level we dynamically
|
||||||
* switch to using best-fit allocations.
|
* switch to using best-fit allocations.
|
||||||
*/
|
*/
|
||||||
extern int metaslab_df_free_pct;
|
extern uint_t metaslab_df_free_pct;
|
||||||
|
|
||||||
/* BEGIN CSTYLED */
|
/* BEGIN CSTYLED */
|
||||||
SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, df_free_pct,
|
SYSCTL_UINT(_vfs_zfs_metaslab, OID_AUTO, df_free_pct,
|
||||||
CTLFLAG_RWTUN, &metaslab_df_free_pct, 0,
|
CTLFLAG_RWTUN, &metaslab_df_free_pct, 0,
|
||||||
"The minimum free space, in percent, which must be available in a"
|
"The minimum free space, in percent, which must be available in a"
|
||||||
" space map to continue allocations in a first-fit fashion");
|
" space map to continue allocations in a first-fit fashion");
|
||||||
@ -584,10 +584,10 @@ SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, load_pct,
|
|||||||
/*
|
/*
|
||||||
* Max number of metaslabs per group to preload.
|
* Max number of metaslabs per group to preload.
|
||||||
*/
|
*/
|
||||||
extern int metaslab_preload_limit;
|
extern uint_t metaslab_preload_limit;
|
||||||
|
|
||||||
/* BEGIN CSTYLED */
|
/* BEGIN CSTYLED */
|
||||||
SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, preload_limit,
|
SYSCTL_UINT(_vfs_zfs_metaslab, OID_AUTO, preload_limit,
|
||||||
CTLFLAG_RWTUN, &metaslab_preload_limit, 0,
|
CTLFLAG_RWTUN, &metaslab_preload_limit, 0,
|
||||||
"Max number of metaslabs per group to preload");
|
"Max number of metaslabs per group to preload");
|
||||||
/* END CSTYLED */
|
/* END CSTYLED */
|
||||||
@ -852,7 +852,7 @@ SYSCTL_INT(_vfs_zfs, OID_AUTO, validate_skip,
|
|||||||
|
|
||||||
/* vdev_queue.c */
|
/* vdev_queue.c */
|
||||||
|
|
||||||
extern uint32_t zfs_vdev_max_active;
|
extern uint_t zfs_vdev_max_active;
|
||||||
|
|
||||||
/* BEGIN CSTYLED */
|
/* BEGIN CSTYLED */
|
||||||
SYSCTL_UINT(_vfs_zfs, OID_AUTO, top_maxinflight,
|
SYSCTL_UINT(_vfs_zfs, OID_AUTO, top_maxinflight,
|
||||||
@ -861,10 +861,10 @@ SYSCTL_UINT(_vfs_zfs, OID_AUTO, top_maxinflight,
|
|||||||
" (LEGACY)");
|
" (LEGACY)");
|
||||||
/* END CSTYLED */
|
/* END CSTYLED */
|
||||||
|
|
||||||
extern int zfs_vdev_def_queue_depth;
|
extern uint_t zfs_vdev_def_queue_depth;
|
||||||
|
|
||||||
/* BEGIN CSTYLED */
|
/* BEGIN CSTYLED */
|
||||||
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, def_queue_depth,
|
SYSCTL_UINT(_vfs_zfs_vdev, OID_AUTO, def_queue_depth,
|
||||||
CTLFLAG_RWTUN, &zfs_vdev_def_queue_depth, 0,
|
CTLFLAG_RWTUN, &zfs_vdev_def_queue_depth, 0,
|
||||||
"Default queue depth for each allocator");
|
"Default queue depth for each allocator");
|
||||||
/* END CSTYLED */
|
/* END CSTYLED */
|
||||||
|
@ -29,14 +29,14 @@
|
|||||||
typedef struct zfs_dbgmsg {
|
typedef struct zfs_dbgmsg {
|
||||||
list_node_t zdm_node;
|
list_node_t zdm_node;
|
||||||
time_t zdm_timestamp;
|
time_t zdm_timestamp;
|
||||||
int zdm_size;
|
uint_t zdm_size;
|
||||||
char zdm_msg[1]; /* variable length allocation */
|
char zdm_msg[1]; /* variable length allocation */
|
||||||
} zfs_dbgmsg_t;
|
} zfs_dbgmsg_t;
|
||||||
|
|
||||||
static list_t zfs_dbgmsgs;
|
static list_t zfs_dbgmsgs;
|
||||||
static int zfs_dbgmsg_size = 0;
|
static uint_t zfs_dbgmsg_size = 0;
|
||||||
static kmutex_t zfs_dbgmsgs_lock;
|
static kmutex_t zfs_dbgmsgs_lock;
|
||||||
int zfs_dbgmsg_maxsize = 4<<20; /* 4MB */
|
uint_t zfs_dbgmsg_maxsize = 4<<20; /* 4MB */
|
||||||
static kstat_t *zfs_dbgmsg_kstat;
|
static kstat_t *zfs_dbgmsg_kstat;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -88,10 +88,10 @@ zfs_dbgmsg_addr(kstat_t *ksp, loff_t n)
|
|||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
zfs_dbgmsg_purge(int max_size)
|
zfs_dbgmsg_purge(uint_t max_size)
|
||||||
{
|
{
|
||||||
zfs_dbgmsg_t *zdm;
|
zfs_dbgmsg_t *zdm;
|
||||||
int size;
|
uint_t size;
|
||||||
|
|
||||||
ASSERT(MUTEX_HELD(&zfs_dbgmsgs_lock));
|
ASSERT(MUTEX_HELD(&zfs_dbgmsgs_lock));
|
||||||
|
|
||||||
@ -155,7 +155,7 @@ void
|
|||||||
__zfs_dbgmsg(char *buf)
|
__zfs_dbgmsg(char *buf)
|
||||||
{
|
{
|
||||||
zfs_dbgmsg_t *zdm;
|
zfs_dbgmsg_t *zdm;
|
||||||
int size;
|
uint_t size;
|
||||||
|
|
||||||
DTRACE_PROBE1(zfs__dbgmsg, char *, buf);
|
DTRACE_PROBE1(zfs__dbgmsg, char *, buf);
|
||||||
|
|
||||||
@ -168,7 +168,7 @@ __zfs_dbgmsg(char *buf)
|
|||||||
mutex_enter(&zfs_dbgmsgs_lock);
|
mutex_enter(&zfs_dbgmsgs_lock);
|
||||||
list_insert_tail(&zfs_dbgmsgs, zdm);
|
list_insert_tail(&zfs_dbgmsgs, zdm);
|
||||||
zfs_dbgmsg_size += size;
|
zfs_dbgmsg_size += size;
|
||||||
zfs_dbgmsg_purge(MAX(zfs_dbgmsg_maxsize, 0));
|
zfs_dbgmsg_purge(zfs_dbgmsg_maxsize);
|
||||||
mutex_exit(&zfs_dbgmsgs_lock);
|
mutex_exit(&zfs_dbgmsgs_lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -248,5 +248,5 @@ zfs_dbgmsg_print(const char *tag)
|
|||||||
ZFS_MODULE_PARAM(zfs, zfs_, dbgmsg_enable, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, dbgmsg_enable, INT, ZMOD_RW,
|
||||||
"Enable ZFS debug message log");
|
"Enable ZFS debug message log");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs, zfs_, dbgmsg_maxsize, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, dbgmsg_maxsize, UINT, ZMOD_RW,
|
||||||
"Maximum ZFS debug log size");
|
"Maximum ZFS debug log size");
|
||||||
|
@ -46,8 +46,10 @@ module_param(spl_taskq_thread_priority, int, 0644);
|
|||||||
MODULE_PARM_DESC(spl_taskq_thread_priority,
|
MODULE_PARM_DESC(spl_taskq_thread_priority,
|
||||||
"Allow non-default priority for taskq threads");
|
"Allow non-default priority for taskq threads");
|
||||||
|
|
||||||
static int spl_taskq_thread_sequential = 4;
|
static uint_t spl_taskq_thread_sequential = 4;
|
||||||
module_param(spl_taskq_thread_sequential, int, 0644);
|
/* BEGIN CSTYLED */
|
||||||
|
module_param(spl_taskq_thread_sequential, uint, 0644);
|
||||||
|
/* END CSTYLED */
|
||||||
MODULE_PARM_DESC(spl_taskq_thread_sequential,
|
MODULE_PARM_DESC(spl_taskq_thread_sequential,
|
||||||
"Create new taskq threads after N sequential tasks");
|
"Create new taskq threads after N sequential tasks");
|
||||||
|
|
||||||
|
@ -513,7 +513,7 @@ arc_prune_task(void *ptr)
|
|||||||
* for releasing it once the registered arc_prune_func_t has completed.
|
* for releasing it once the registered arc_prune_func_t has completed.
|
||||||
*/
|
*/
|
||||||
void
|
void
|
||||||
arc_prune_async(int64_t adjust)
|
arc_prune_async(uint64_t adjust)
|
||||||
{
|
{
|
||||||
arc_prune_t *ap;
|
arc_prune_t *ap;
|
||||||
|
|
||||||
|
@ -29,13 +29,13 @@
|
|||||||
typedef struct zfs_dbgmsg {
|
typedef struct zfs_dbgmsg {
|
||||||
procfs_list_node_t zdm_node;
|
procfs_list_node_t zdm_node;
|
||||||
uint64_t zdm_timestamp;
|
uint64_t zdm_timestamp;
|
||||||
int zdm_size;
|
uint_t zdm_size;
|
||||||
char zdm_msg[1]; /* variable length allocation */
|
char zdm_msg[1]; /* variable length allocation */
|
||||||
} zfs_dbgmsg_t;
|
} zfs_dbgmsg_t;
|
||||||
|
|
||||||
static procfs_list_t zfs_dbgmsgs;
|
static procfs_list_t zfs_dbgmsgs;
|
||||||
static int zfs_dbgmsg_size = 0;
|
static uint_t zfs_dbgmsg_size = 0;
|
||||||
int zfs_dbgmsg_maxsize = 4<<20; /* 4MB */
|
uint_t zfs_dbgmsg_maxsize = 4<<20; /* 4MB */
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Internal ZFS debug messages are enabled by default.
|
* Internal ZFS debug messages are enabled by default.
|
||||||
@ -68,14 +68,14 @@ zfs_dbgmsg_show(struct seq_file *f, void *p)
|
|||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
zfs_dbgmsg_purge(int max_size)
|
zfs_dbgmsg_purge(uint_t max_size)
|
||||||
{
|
{
|
||||||
while (zfs_dbgmsg_size > max_size) {
|
while (zfs_dbgmsg_size > max_size) {
|
||||||
zfs_dbgmsg_t *zdm = list_remove_head(&zfs_dbgmsgs.pl_list);
|
zfs_dbgmsg_t *zdm = list_remove_head(&zfs_dbgmsgs.pl_list);
|
||||||
if (zdm == NULL)
|
if (zdm == NULL)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
int size = zdm->zdm_size;
|
uint_t size = zdm->zdm_size;
|
||||||
kmem_free(zdm, size);
|
kmem_free(zdm, size);
|
||||||
zfs_dbgmsg_size -= size;
|
zfs_dbgmsg_size -= size;
|
||||||
}
|
}
|
||||||
@ -135,7 +135,7 @@ __set_error(const char *file, const char *func, int line, int err)
|
|||||||
void
|
void
|
||||||
__zfs_dbgmsg(char *buf)
|
__zfs_dbgmsg(char *buf)
|
||||||
{
|
{
|
||||||
int size = sizeof (zfs_dbgmsg_t) + strlen(buf);
|
uint_t size = sizeof (zfs_dbgmsg_t) + strlen(buf);
|
||||||
zfs_dbgmsg_t *zdm = kmem_zalloc(size, KM_SLEEP);
|
zfs_dbgmsg_t *zdm = kmem_zalloc(size, KM_SLEEP);
|
||||||
zdm->zdm_size = size;
|
zdm->zdm_size = size;
|
||||||
zdm->zdm_timestamp = gethrestime_sec();
|
zdm->zdm_timestamp = gethrestime_sec();
|
||||||
@ -144,7 +144,7 @@ __zfs_dbgmsg(char *buf)
|
|||||||
mutex_enter(&zfs_dbgmsgs.pl_lock);
|
mutex_enter(&zfs_dbgmsgs.pl_lock);
|
||||||
procfs_list_add(&zfs_dbgmsgs, zdm);
|
procfs_list_add(&zfs_dbgmsgs, zdm);
|
||||||
zfs_dbgmsg_size += size;
|
zfs_dbgmsg_size += size;
|
||||||
zfs_dbgmsg_purge(MAX(zfs_dbgmsg_maxsize, 0));
|
zfs_dbgmsg_purge(zfs_dbgmsg_maxsize);
|
||||||
mutex_exit(&zfs_dbgmsgs.pl_lock);
|
mutex_exit(&zfs_dbgmsgs.pl_lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -252,6 +252,8 @@ zfs_dbgmsg_print(const char *tag)
|
|||||||
module_param(zfs_dbgmsg_enable, int, 0644);
|
module_param(zfs_dbgmsg_enable, int, 0644);
|
||||||
MODULE_PARM_DESC(zfs_dbgmsg_enable, "Enable ZFS debug message log");
|
MODULE_PARM_DESC(zfs_dbgmsg_enable, "Enable ZFS debug message log");
|
||||||
|
|
||||||
module_param(zfs_dbgmsg_maxsize, int, 0644);
|
/* BEGIN CSTYLED */
|
||||||
|
module_param(zfs_dbgmsg_maxsize, uint, 0644);
|
||||||
|
/* END CSTYLED */
|
||||||
MODULE_PARM_DESC(zfs_dbgmsg_maxsize, "Maximum ZFS debug log size");
|
MODULE_PARM_DESC(zfs_dbgmsg_maxsize, "Maximum ZFS debug log size");
|
||||||
#endif
|
#endif
|
||||||
|
@ -354,7 +354,7 @@ static list_t arc_evict_waiters;
|
|||||||
* can still happen, even during the potentially long time that arc_size is
|
* can still happen, even during the potentially long time that arc_size is
|
||||||
* more than arc_c.
|
* more than arc_c.
|
||||||
*/
|
*/
|
||||||
static int zfs_arc_eviction_pct = 200;
|
static uint_t zfs_arc_eviction_pct = 200;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* The number of headers to evict in arc_evict_state_impl() before
|
* The number of headers to evict in arc_evict_state_impl() before
|
||||||
@ -363,10 +363,10 @@ static int zfs_arc_eviction_pct = 200;
|
|||||||
* oldest header in the arc state), but comes with higher overhead
|
* oldest header in the arc state), but comes with higher overhead
|
||||||
* (i.e. more invocations of arc_evict_state_impl()).
|
* (i.e. more invocations of arc_evict_state_impl()).
|
||||||
*/
|
*/
|
||||||
static int zfs_arc_evict_batch_limit = 10;
|
static uint_t zfs_arc_evict_batch_limit = 10;
|
||||||
|
|
||||||
/* number of seconds before growing cache again */
|
/* number of seconds before growing cache again */
|
||||||
int arc_grow_retry = 5;
|
uint_t arc_grow_retry = 5;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Minimum time between calls to arc_kmem_reap_soon().
|
* Minimum time between calls to arc_kmem_reap_soon().
|
||||||
@ -377,10 +377,10 @@ static const int arc_kmem_cache_reap_retry_ms = 1000;
|
|||||||
static int zfs_arc_overflow_shift = 8;
|
static int zfs_arc_overflow_shift = 8;
|
||||||
|
|
||||||
/* shift of arc_c for calculating both min and max arc_p */
|
/* shift of arc_c for calculating both min and max arc_p */
|
||||||
static int arc_p_min_shift = 4;
|
static uint_t arc_p_min_shift = 4;
|
||||||
|
|
||||||
/* log2(fraction of arc to reclaim) */
|
/* log2(fraction of arc to reclaim) */
|
||||||
int arc_shrink_shift = 7;
|
uint_t arc_shrink_shift = 7;
|
||||||
|
|
||||||
/* percent of pagecache to reclaim arc to */
|
/* percent of pagecache to reclaim arc to */
|
||||||
#ifdef _KERNEL
|
#ifdef _KERNEL
|
||||||
@ -396,20 +396,20 @@ uint_t zfs_arc_pc_percent = 0;
|
|||||||
* This must be less than arc_shrink_shift, so that when we shrink the ARC,
|
* This must be less than arc_shrink_shift, so that when we shrink the ARC,
|
||||||
* we will still not allow it to grow.
|
* we will still not allow it to grow.
|
||||||
*/
|
*/
|
||||||
int arc_no_grow_shift = 5;
|
uint_t arc_no_grow_shift = 5;
|
||||||
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* minimum lifespan of a prefetch block in clock ticks
|
* minimum lifespan of a prefetch block in clock ticks
|
||||||
* (initialized in arc_init())
|
* (initialized in arc_init())
|
||||||
*/
|
*/
|
||||||
static int arc_min_prefetch_ms;
|
static uint_t arc_min_prefetch_ms;
|
||||||
static int arc_min_prescient_prefetch_ms;
|
static uint_t arc_min_prescient_prefetch_ms;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* If this percent of memory is free, don't throttle.
|
* If this percent of memory is free, don't throttle.
|
||||||
*/
|
*/
|
||||||
int arc_lotsfree_percent = 10;
|
uint_t arc_lotsfree_percent = 10;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* The arc has filled available memory and has now warmed up.
|
* The arc has filled available memory and has now warmed up.
|
||||||
@ -425,10 +425,10 @@ unsigned long zfs_arc_meta_limit = 0;
|
|||||||
unsigned long zfs_arc_meta_min = 0;
|
unsigned long zfs_arc_meta_min = 0;
|
||||||
static unsigned long zfs_arc_dnode_limit = 0;
|
static unsigned long zfs_arc_dnode_limit = 0;
|
||||||
static unsigned long zfs_arc_dnode_reduce_percent = 10;
|
static unsigned long zfs_arc_dnode_reduce_percent = 10;
|
||||||
static int zfs_arc_grow_retry = 0;
|
static uint_t zfs_arc_grow_retry = 0;
|
||||||
static int zfs_arc_shrink_shift = 0;
|
static uint_t zfs_arc_shrink_shift = 0;
|
||||||
static int zfs_arc_p_min_shift = 0;
|
static uint_t zfs_arc_p_min_shift = 0;
|
||||||
int zfs_arc_average_blocksize = 8 * 1024; /* 8KB */
|
uint_t zfs_arc_average_blocksize = 8 * 1024; /* 8KB */
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* ARC dirty data constraints for arc_tempreserve_space() throttle:
|
* ARC dirty data constraints for arc_tempreserve_space() throttle:
|
||||||
@ -460,13 +460,13 @@ static unsigned long zfs_arc_dnode_limit_percent = 10;
|
|||||||
* These tunables are Linux-specific
|
* These tunables are Linux-specific
|
||||||
*/
|
*/
|
||||||
static unsigned long zfs_arc_sys_free = 0;
|
static unsigned long zfs_arc_sys_free = 0;
|
||||||
static int zfs_arc_min_prefetch_ms = 0;
|
static uint_t zfs_arc_min_prefetch_ms = 0;
|
||||||
static int zfs_arc_min_prescient_prefetch_ms = 0;
|
static uint_t zfs_arc_min_prescient_prefetch_ms = 0;
|
||||||
static int zfs_arc_p_dampener_disable = 1;
|
static int zfs_arc_p_dampener_disable = 1;
|
||||||
static int zfs_arc_meta_prune = 10000;
|
static uint_t zfs_arc_meta_prune = 10000;
|
||||||
static int zfs_arc_meta_strategy = ARC_STRATEGY_META_BALANCED;
|
static uint_t zfs_arc_meta_strategy = ARC_STRATEGY_META_BALANCED;
|
||||||
static int zfs_arc_meta_adjust_restarts = 4096;
|
static uint_t zfs_arc_meta_adjust_restarts = 4096;
|
||||||
static int zfs_arc_lotsfree_percent = 10;
|
static uint_t zfs_arc_lotsfree_percent = 10;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Number of arc_prune threads
|
* Number of arc_prune threads
|
||||||
@ -790,7 +790,7 @@ unsigned long l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval msecs */
|
|||||||
int l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */
|
int l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */
|
||||||
int l2arc_feed_again = B_TRUE; /* turbo warmup */
|
int l2arc_feed_again = B_TRUE; /* turbo warmup */
|
||||||
int l2arc_norw = B_FALSE; /* no reads during writes */
|
int l2arc_norw = B_FALSE; /* no reads during writes */
|
||||||
static int l2arc_meta_percent = 33; /* limit on headers size */
|
static uint_t l2arc_meta_percent = 33; /* limit on headers size */
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* L2ARC Internals
|
* L2ARC Internals
|
||||||
@ -3898,7 +3898,7 @@ arc_evict_hdr(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, uint64_t *real_evicted)
|
|||||||
{
|
{
|
||||||
arc_state_t *evicted_state, *state;
|
arc_state_t *evicted_state, *state;
|
||||||
int64_t bytes_evicted = 0;
|
int64_t bytes_evicted = 0;
|
||||||
int min_lifetime = HDR_PRESCIENT_PREFETCH(hdr) ?
|
uint_t min_lifetime = HDR_PRESCIENT_PREFETCH(hdr) ?
|
||||||
arc_min_prescient_prefetch_ms : arc_min_prefetch_ms;
|
arc_min_prescient_prefetch_ms : arc_min_prefetch_ms;
|
||||||
|
|
||||||
ASSERT(MUTEX_HELD(hash_lock));
|
ASSERT(MUTEX_HELD(hash_lock));
|
||||||
@ -4053,7 +4053,7 @@ arc_evict_state_impl(multilist_t *ml, int idx, arc_buf_hdr_t *marker,
|
|||||||
uint64_t bytes_evicted = 0, real_evicted = 0;
|
uint64_t bytes_evicted = 0, real_evicted = 0;
|
||||||
arc_buf_hdr_t *hdr;
|
arc_buf_hdr_t *hdr;
|
||||||
kmutex_t *hash_lock;
|
kmutex_t *hash_lock;
|
||||||
int evict_count = zfs_arc_evict_batch_limit;
|
uint_t evict_count = zfs_arc_evict_batch_limit;
|
||||||
|
|
||||||
ASSERT3P(marker, !=, NULL);
|
ASSERT3P(marker, !=, NULL);
|
||||||
|
|
||||||
@ -4061,7 +4061,7 @@ arc_evict_state_impl(multilist_t *ml, int idx, arc_buf_hdr_t *marker,
|
|||||||
|
|
||||||
for (hdr = multilist_sublist_prev(mls, marker); likely(hdr != NULL);
|
for (hdr = multilist_sublist_prev(mls, marker); likely(hdr != NULL);
|
||||||
hdr = multilist_sublist_prev(mls, marker)) {
|
hdr = multilist_sublist_prev(mls, marker)) {
|
||||||
if ((evict_count <= 0) || (bytes_evicted >= bytes))
|
if ((evict_count == 0) || (bytes_evicted >= bytes))
|
||||||
break;
|
break;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -4404,10 +4404,10 @@ arc_evict_impl(arc_state_t *state, uint64_t spa, int64_t bytes,
|
|||||||
static uint64_t
|
static uint64_t
|
||||||
arc_evict_meta_balanced(uint64_t meta_used)
|
arc_evict_meta_balanced(uint64_t meta_used)
|
||||||
{
|
{
|
||||||
int64_t delta, prune = 0, adjustmnt;
|
int64_t delta, adjustmnt;
|
||||||
uint64_t total_evicted = 0;
|
uint64_t total_evicted = 0, prune = 0;
|
||||||
arc_buf_contents_t type = ARC_BUFC_DATA;
|
arc_buf_contents_t type = ARC_BUFC_DATA;
|
||||||
int restarts = MAX(zfs_arc_meta_adjust_restarts, 0);
|
uint_t restarts = zfs_arc_meta_adjust_restarts;
|
||||||
|
|
||||||
restart:
|
restart:
|
||||||
/*
|
/*
|
||||||
@ -7656,8 +7656,7 @@ arc_tuning_update(boolean_t verbose)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* Valid range: 0 - 100 */
|
/* Valid range: 0 - 100 */
|
||||||
if ((zfs_arc_lotsfree_percent >= 0) &&
|
if (zfs_arc_lotsfree_percent <= 100)
|
||||||
(zfs_arc_lotsfree_percent <= 100))
|
|
||||||
arc_lotsfree_percent = zfs_arc_lotsfree_percent;
|
arc_lotsfree_percent = zfs_arc_lotsfree_percent;
|
||||||
WARN_IF_TUNING_IGNORED(zfs_arc_lotsfree_percent, arc_lotsfree_percent,
|
WARN_IF_TUNING_IGNORED(zfs_arc_lotsfree_percent, arc_lotsfree_percent,
|
||||||
verbose);
|
verbose);
|
||||||
@ -11077,56 +11076,56 @@ EXPORT_SYMBOL(arc_add_prune_callback);
|
|||||||
EXPORT_SYMBOL(arc_remove_prune_callback);
|
EXPORT_SYMBOL(arc_remove_prune_callback);
|
||||||
|
|
||||||
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, min, param_set_arc_min,
|
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, min, param_set_arc_min,
|
||||||
param_get_long, ZMOD_RW, "Minimum ARC size in bytes");
|
param_get_ulong, ZMOD_RW, "Minimum ARC size in bytes");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, max, param_set_arc_max,
|
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, max, param_set_arc_max,
|
||||||
param_get_long, ZMOD_RW, "Maximum ARC size in bytes");
|
param_get_ulong, ZMOD_RW, "Maximum ARC size in bytes");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, meta_limit, param_set_arc_long,
|
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, meta_limit, param_set_arc_long,
|
||||||
param_get_long, ZMOD_RW, "Metadata limit for ARC size in bytes");
|
param_get_ulong, ZMOD_RW, "Metadata limit for ARC size in bytes");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, meta_limit_percent,
|
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, meta_limit_percent,
|
||||||
param_set_arc_long, param_get_long, ZMOD_RW,
|
param_set_arc_long, param_get_ulong, ZMOD_RW,
|
||||||
"Percent of ARC size for ARC meta limit");
|
"Percent of ARC size for ARC meta limit");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, meta_min, param_set_arc_long,
|
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, meta_min, param_set_arc_long,
|
||||||
param_get_long, ZMOD_RW, "Minimum ARC metadata size in bytes");
|
param_get_ulong, ZMOD_RW, "Minimum ARC metadata size in bytes");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, meta_prune, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, meta_prune, INT, ZMOD_RW,
|
||||||
"Meta objects to scan for prune");
|
"Meta objects to scan for prune");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, meta_adjust_restarts, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, meta_adjust_restarts, UINT, ZMOD_RW,
|
||||||
"Limit number of restarts in arc_evict_meta");
|
"Limit number of restarts in arc_evict_meta");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, meta_strategy, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, meta_strategy, UINT, ZMOD_RW,
|
||||||
"Meta reclaim strategy");
|
"Meta reclaim strategy");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, grow_retry, param_set_arc_int,
|
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, grow_retry, param_set_arc_int,
|
||||||
param_get_int, ZMOD_RW, "Seconds before growing ARC size");
|
param_get_uint, ZMOD_RW, "Seconds before growing ARC size");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, p_dampener_disable, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, p_dampener_disable, INT, ZMOD_RW,
|
||||||
"Disable arc_p adapt dampener");
|
"Disable arc_p adapt dampener");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, shrink_shift, param_set_arc_int,
|
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, shrink_shift, param_set_arc_int,
|
||||||
param_get_int, ZMOD_RW, "log2(fraction of ARC to reclaim)");
|
param_get_uint, ZMOD_RW, "log2(fraction of ARC to reclaim)");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, pc_percent, UINT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, pc_percent, UINT, ZMOD_RW,
|
||||||
"Percent of pagecache to reclaim ARC to");
|
"Percent of pagecache to reclaim ARC to");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, p_min_shift, param_set_arc_int,
|
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, p_min_shift, param_set_arc_int,
|
||||||
param_get_int, ZMOD_RW, "arc_c shift to calc min/max arc_p");
|
param_get_uint, ZMOD_RW, "arc_c shift to calc min/max arc_p");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, average_blocksize, INT, ZMOD_RD,
|
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, average_blocksize, UINT, ZMOD_RD,
|
||||||
"Target average block size");
|
"Target average block size");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs, zfs_, compressed_arc_enabled, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, compressed_arc_enabled, INT, ZMOD_RW,
|
||||||
"Disable compressed ARC buffers");
|
"Disable compressed ARC buffers");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, min_prefetch_ms, param_set_arc_int,
|
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, min_prefetch_ms, param_set_arc_int,
|
||||||
param_get_int, ZMOD_RW, "Min life of prefetch block in ms");
|
param_get_uint, ZMOD_RW, "Min life of prefetch block in ms");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, min_prescient_prefetch_ms,
|
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, min_prescient_prefetch_ms,
|
||||||
param_set_arc_int, param_get_int, ZMOD_RW,
|
param_set_arc_int, param_get_uint, ZMOD_RW,
|
||||||
"Min life of prescient prefetched block in ms");
|
"Min life of prescient prefetched block in ms");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, write_max, ULONG, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, write_max, ULONG, ZMOD_RW,
|
||||||
@ -11159,7 +11158,7 @@ ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, feed_again, INT, ZMOD_RW,
|
|||||||
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, norw, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, norw, INT, ZMOD_RW,
|
||||||
"No reads during writes");
|
"No reads during writes");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, meta_percent, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, meta_percent, UINT, ZMOD_RW,
|
||||||
"Percent of ARC size allowed for L2ARC-only headers");
|
"Percent of ARC size allowed for L2ARC-only headers");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, rebuild_enabled, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, rebuild_enabled, INT, ZMOD_RW,
|
||||||
@ -11175,25 +11174,25 @@ ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, exclude_special, INT, ZMOD_RW,
|
|||||||
"Exclude dbufs on special vdevs from being cached to L2ARC if set.");
|
"Exclude dbufs on special vdevs from being cached to L2ARC if set.");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, lotsfree_percent, param_set_arc_int,
|
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, lotsfree_percent, param_set_arc_int,
|
||||||
param_get_int, ZMOD_RW, "System free memory I/O throttle in bytes");
|
param_get_uint, ZMOD_RW, "System free memory I/O throttle in bytes");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, sys_free, param_set_arc_long,
|
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, sys_free, param_set_arc_long,
|
||||||
param_get_long, ZMOD_RW, "System free memory target size in bytes");
|
param_get_ulong, ZMOD_RW, "System free memory target size in bytes");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, dnode_limit, param_set_arc_long,
|
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, dnode_limit, param_set_arc_long,
|
||||||
param_get_long, ZMOD_RW, "Minimum bytes of dnodes in ARC");
|
param_get_ulong, ZMOD_RW, "Minimum bytes of dnodes in ARC");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, dnode_limit_percent,
|
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, dnode_limit_percent,
|
||||||
param_set_arc_long, param_get_long, ZMOD_RW,
|
param_set_arc_long, param_get_ulong, ZMOD_RW,
|
||||||
"Percent of ARC meta buffers for dnodes");
|
"Percent of ARC meta buffers for dnodes");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, dnode_reduce_percent, ULONG, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, dnode_reduce_percent, ULONG, ZMOD_RW,
|
||||||
"Percentage of excess dnodes to try to unpin");
|
"Percentage of excess dnodes to try to unpin");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, eviction_pct, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, eviction_pct, UINT, ZMOD_RW,
|
||||||
"When full, ARC allocation waits for eviction of this % of alloc size");
|
"When full, ARC allocation waits for eviction of this % of alloc size");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, evict_batch_limit, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, evict_batch_limit, UINT, ZMOD_RW,
|
||||||
"The number of headers to evict per sublist before moving to the next");
|
"The number of headers to evict per sublist before moving to the next");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, prune_task_threads, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, prune_task_threads, INT, ZMOD_RW,
|
||||||
|
@ -231,11 +231,11 @@ static unsigned long dbuf_cache_max_bytes = ULONG_MAX;
|
|||||||
static unsigned long dbuf_metadata_cache_max_bytes = ULONG_MAX;
|
static unsigned long dbuf_metadata_cache_max_bytes = ULONG_MAX;
|
||||||
|
|
||||||
/* Set the default sizes of the caches to log2 fraction of arc size */
|
/* Set the default sizes of the caches to log2 fraction of arc size */
|
||||||
static int dbuf_cache_shift = 5;
|
static uint_t dbuf_cache_shift = 5;
|
||||||
static int dbuf_metadata_cache_shift = 6;
|
static uint_t dbuf_metadata_cache_shift = 6;
|
||||||
|
|
||||||
/* Set the dbuf hash mutex count as log2 shift (dynamic by default) */
|
/* Set the dbuf hash mutex count as log2 shift (dynamic by default) */
|
||||||
static uint32_t dbuf_mutex_cache_shift = 0;
|
static uint_t dbuf_mutex_cache_shift = 0;
|
||||||
|
|
||||||
static unsigned long dbuf_cache_target_bytes(void);
|
static unsigned long dbuf_cache_target_bytes(void);
|
||||||
static unsigned long dbuf_metadata_cache_target_bytes(void);
|
static unsigned long dbuf_metadata_cache_target_bytes(void);
|
||||||
@ -5132,10 +5132,10 @@ ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, lowater_pct, UINT, ZMOD_RW,
|
|||||||
ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_max_bytes, ULONG, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_max_bytes, ULONG, ZMOD_RW,
|
||||||
"Maximum size in bytes of dbuf metadata cache.");
|
"Maximum size in bytes of dbuf metadata cache.");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, cache_shift, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, cache_shift, UINT, ZMOD_RW,
|
||||||
"Set size of dbuf cache to log2 fraction of arc size.");
|
"Set size of dbuf cache to log2 fraction of arc size.");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_shift, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_shift, UINT, ZMOD_RW,
|
||||||
"Set size of dbuf metadata cache to log2 fraction of arc size.");
|
"Set size of dbuf metadata cache to log2 fraction of arc size.");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, mutex_cache_shift, UINT, ZMOD_RD,
|
ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, mutex_cache_shift, UINT, ZMOD_RD,
|
||||||
|
@ -86,7 +86,7 @@ static int zfs_dmu_offset_next_sync = 1;
|
|||||||
* helps to limit the amount of memory that can be used by prefetching.
|
* helps to limit the amount of memory that can be used by prefetching.
|
||||||
* Larger objects should be prefetched a bit at a time.
|
* Larger objects should be prefetched a bit at a time.
|
||||||
*/
|
*/
|
||||||
int dmu_prefetch_max = 8 * SPA_MAXBLOCKSIZE;
|
uint_t dmu_prefetch_max = 8 * SPA_MAXBLOCKSIZE;
|
||||||
|
|
||||||
const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
|
const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
|
||||||
{DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "unallocated" },
|
{DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "unallocated" },
|
||||||
@ -2362,5 +2362,5 @@ ZFS_MODULE_PARAM(zfs, zfs_, dmu_offset_next_sync, INT, ZMOD_RW,
|
|||||||
"Enable forcing txg sync to find holes");
|
"Enable forcing txg sync to find holes");
|
||||||
|
|
||||||
/* CSTYLED */
|
/* CSTYLED */
|
||||||
ZFS_MODULE_PARAM(zfs, , dmu_prefetch_max, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, , dmu_prefetch_max, UINT, ZMOD_RW,
|
||||||
"Limit one prefetch call to this size");
|
"Limit one prefetch call to this size");
|
||||||
|
@ -41,7 +41,7 @@
|
|||||||
* determined to be the lowest value that eliminates the measurable effect
|
* determined to be the lowest value that eliminates the measurable effect
|
||||||
* of lock contention from this code path.
|
* of lock contention from this code path.
|
||||||
*/
|
*/
|
||||||
int dmu_object_alloc_chunk_shift = 7;
|
uint_t dmu_object_alloc_chunk_shift = 7;
|
||||||
|
|
||||||
static uint64_t
|
static uint64_t
|
||||||
dmu_object_alloc_impl(objset_t *os, dmu_object_type_t ot, int blocksize,
|
dmu_object_alloc_impl(objset_t *os, dmu_object_type_t ot, int blocksize,
|
||||||
@ -55,7 +55,7 @@ dmu_object_alloc_impl(objset_t *os, dmu_object_type_t ot, int blocksize,
|
|||||||
int dn_slots = dnodesize >> DNODE_SHIFT;
|
int dn_slots = dnodesize >> DNODE_SHIFT;
|
||||||
boolean_t restarted = B_FALSE;
|
boolean_t restarted = B_FALSE;
|
||||||
uint64_t *cpuobj = NULL;
|
uint64_t *cpuobj = NULL;
|
||||||
int dnodes_per_chunk = 1 << dmu_object_alloc_chunk_shift;
|
uint_t dnodes_per_chunk = 1 << dmu_object_alloc_chunk_shift;
|
||||||
int error;
|
int error;
|
||||||
|
|
||||||
cpuobj = &os->os_obj_next_percpu[CPU_SEQID_UNSTABLE %
|
cpuobj = &os->os_obj_next_percpu[CPU_SEQID_UNSTABLE %
|
||||||
@ -518,6 +518,6 @@ EXPORT_SYMBOL(dmu_object_zapify);
|
|||||||
EXPORT_SYMBOL(dmu_object_free_zapified);
|
EXPORT_SYMBOL(dmu_object_free_zapified);
|
||||||
|
|
||||||
/* BEGIN CSTYLED */
|
/* BEGIN CSTYLED */
|
||||||
ZFS_MODULE_PARAM(zfs, , dmu_object_alloc_chunk_shift, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, , dmu_object_alloc_chunk_shift, UINT, ZMOD_RW,
|
||||||
"CPU-specific allocator grabs 2^N objects at once");
|
"CPU-specific allocator grabs 2^N objects at once");
|
||||||
/* END CSTYLED */
|
/* END CSTYLED */
|
||||||
|
@ -67,9 +67,9 @@
|
|||||||
#endif
|
#endif
|
||||||
#include <sys/zfs_file.h>
|
#include <sys/zfs_file.h>
|
||||||
|
|
||||||
static int zfs_recv_queue_length = SPA_MAXBLOCKSIZE;
|
static uint_t zfs_recv_queue_length = SPA_MAXBLOCKSIZE;
|
||||||
static int zfs_recv_queue_ff = 20;
|
static uint_t zfs_recv_queue_ff = 20;
|
||||||
static int zfs_recv_write_batch_size = 1024 * 1024;
|
static uint_t zfs_recv_write_batch_size = 1024 * 1024;
|
||||||
static int zfs_recv_best_effort_corrective = 0;
|
static int zfs_recv_best_effort_corrective = 0;
|
||||||
|
|
||||||
static const void *const dmu_recv_tag = "dmu_recv_tag";
|
static const void *const dmu_recv_tag = "dmu_recv_tag";
|
||||||
@ -3729,13 +3729,13 @@ dmu_objset_is_receiving(objset_t *os)
|
|||||||
os->os_dsl_dataset->ds_owner == dmu_recv_tag);
|
os->os_dsl_dataset->ds_owner == dmu_recv_tag);
|
||||||
}
|
}
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, queue_length, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, queue_length, UINT, ZMOD_RW,
|
||||||
"Maximum receive queue length");
|
"Maximum receive queue length");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, queue_ff, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, queue_ff, UINT, ZMOD_RW,
|
||||||
"Receive queue fill fraction");
|
"Receive queue fill fraction");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, write_batch_size, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, write_batch_size, UINT, ZMOD_RW,
|
||||||
"Maximum amount of writes to batch into one transaction");
|
"Maximum amount of writes to batch into one transaction");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, best_effort_corrective, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, best_effort_corrective, INT, ZMOD_RW,
|
||||||
|
@ -75,7 +75,7 @@ static int zfs_send_corrupt_data = B_FALSE;
|
|||||||
* thread is issuing new reads because the prefetches have fallen out of the
|
* thread is issuing new reads because the prefetches have fallen out of the
|
||||||
* cache, this may need to be decreased.
|
* cache, this may need to be decreased.
|
||||||
*/
|
*/
|
||||||
static int zfs_send_queue_length = SPA_MAXBLOCKSIZE;
|
static uint_t zfs_send_queue_length = SPA_MAXBLOCKSIZE;
|
||||||
/*
|
/*
|
||||||
* This tunable controls the length of the queues that zfs send worker threads
|
* This tunable controls the length of the queues that zfs send worker threads
|
||||||
* use to communicate. If the send_main_thread is blocking on these queues,
|
* use to communicate. If the send_main_thread is blocking on these queues,
|
||||||
@ -83,7 +83,7 @@ static int zfs_send_queue_length = SPA_MAXBLOCKSIZE;
|
|||||||
* at the start of a send as these threads consume all the available IO
|
* at the start of a send as these threads consume all the available IO
|
||||||
* resources, this variable may need to be decreased.
|
* resources, this variable may need to be decreased.
|
||||||
*/
|
*/
|
||||||
static int zfs_send_no_prefetch_queue_length = 1024 * 1024;
|
static uint_t zfs_send_no_prefetch_queue_length = 1024 * 1024;
|
||||||
/*
|
/*
|
||||||
* These tunables control the fill fraction of the queues by zfs send. The fill
|
* These tunables control the fill fraction of the queues by zfs send. The fill
|
||||||
* fraction controls the frequency with which threads have to be cv_signaled.
|
* fraction controls the frequency with which threads have to be cv_signaled.
|
||||||
@ -91,13 +91,13 @@ static int zfs_send_no_prefetch_queue_length = 1024 * 1024;
|
|||||||
* down. If the queues empty before the signalled thread can catch up, then
|
* down. If the queues empty before the signalled thread can catch up, then
|
||||||
* these should be tuned up.
|
* these should be tuned up.
|
||||||
*/
|
*/
|
||||||
static int zfs_send_queue_ff = 20;
|
static uint_t zfs_send_queue_ff = 20;
|
||||||
static int zfs_send_no_prefetch_queue_ff = 20;
|
static uint_t zfs_send_no_prefetch_queue_ff = 20;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Use this to override the recordsize calculation for fast zfs send estimates.
|
* Use this to override the recordsize calculation for fast zfs send estimates.
|
||||||
*/
|
*/
|
||||||
static int zfs_override_estimate_recordsize = 0;
|
static uint_t zfs_override_estimate_recordsize = 0;
|
||||||
|
|
||||||
/* Set this tunable to FALSE to disable setting of DRR_FLAG_FREERECORDS */
|
/* Set this tunable to FALSE to disable setting of DRR_FLAG_FREERECORDS */
|
||||||
static const boolean_t zfs_send_set_freerecords_bit = B_TRUE;
|
static const boolean_t zfs_send_set_freerecords_bit = B_TRUE;
|
||||||
@ -3089,20 +3089,20 @@ out:
|
|||||||
ZFS_MODULE_PARAM(zfs_send, zfs_send_, corrupt_data, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_send, zfs_send_, corrupt_data, INT, ZMOD_RW,
|
||||||
"Allow sending corrupt data");
|
"Allow sending corrupt data");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_send, zfs_send_, queue_length, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_send, zfs_send_, queue_length, UINT, ZMOD_RW,
|
||||||
"Maximum send queue length");
|
"Maximum send queue length");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_send, zfs_send_, unmodified_spill_blocks, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_send, zfs_send_, unmodified_spill_blocks, INT, ZMOD_RW,
|
||||||
"Send unmodified spill blocks");
|
"Send unmodified spill blocks");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_send, zfs_send_, no_prefetch_queue_length, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_send, zfs_send_, no_prefetch_queue_length, UINT, ZMOD_RW,
|
||||||
"Maximum send queue length for non-prefetch queues");
|
"Maximum send queue length for non-prefetch queues");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_send, zfs_send_, queue_ff, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_send, zfs_send_, queue_ff, UINT, ZMOD_RW,
|
||||||
"Send queue fill fraction");
|
"Send queue fill fraction");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_send, zfs_send_, no_prefetch_queue_ff, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_send, zfs_send_, no_prefetch_queue_ff, UINT, ZMOD_RW,
|
||||||
"Send queue fill fraction for non-prefetch queues");
|
"Send queue fill fraction for non-prefetch queues");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_send, zfs_, override_estimate_recordsize, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_send, zfs_, override_estimate_recordsize, UINT, ZMOD_RW,
|
||||||
"Override block size estimate with fixed size");
|
"Override block size estimate with fixed size");
|
||||||
|
@ -41,7 +41,7 @@
|
|||||||
|
|
||||||
static int32_t zfs_pd_bytes_max = 50 * 1024 * 1024; /* 50MB */
|
static int32_t zfs_pd_bytes_max = 50 * 1024 * 1024; /* 50MB */
|
||||||
static int32_t send_holes_without_birth_time = 1;
|
static int32_t send_holes_without_birth_time = 1;
|
||||||
static int32_t zfs_traverse_indirect_prefetch_limit = 32;
|
static uint_t zfs_traverse_indirect_prefetch_limit = 32;
|
||||||
|
|
||||||
typedef struct prefetch_data {
|
typedef struct prefetch_data {
|
||||||
kmutex_t pd_mtx;
|
kmutex_t pd_mtx;
|
||||||
@ -812,7 +812,7 @@ EXPORT_SYMBOL(traverse_pool);
|
|||||||
ZFS_MODULE_PARAM(zfs, zfs_, pd_bytes_max, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, pd_bytes_max, INT, ZMOD_RW,
|
||||||
"Max number of bytes to prefetch");
|
"Max number of bytes to prefetch");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs, zfs_, traverse_indirect_prefetch_limit, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, traverse_indirect_prefetch_limit, UINT, ZMOD_RW,
|
||||||
"Traverse prefetch number of blocks pointed by indirect block");
|
"Traverse prefetch number of blocks pointed by indirect block");
|
||||||
|
|
||||||
#if defined(_KERNEL)
|
#if defined(_KERNEL)
|
||||||
|
@ -82,9 +82,9 @@
|
|||||||
* 3/1 memory split doesn't leave much room for 16M chunks.
|
* 3/1 memory split doesn't leave much room for 16M chunks.
|
||||||
*/
|
*/
|
||||||
#ifdef _ILP32
|
#ifdef _ILP32
|
||||||
int zfs_max_recordsize = 1 * 1024 * 1024;
|
uint_t zfs_max_recordsize = 1 * 1024 * 1024;
|
||||||
#else
|
#else
|
||||||
int zfs_max_recordsize = 16 * 1024 * 1024;
|
uint_t zfs_max_recordsize = 16 * 1024 * 1024;
|
||||||
#endif
|
#endif
|
||||||
static int zfs_allow_redacted_dataset_mount = 0;
|
static int zfs_allow_redacted_dataset_mount = 0;
|
||||||
|
|
||||||
@ -106,7 +106,7 @@ static void dsl_dataset_unset_remap_deadlist_object(dsl_dataset_t *ds,
|
|||||||
|
|
||||||
static void unload_zfeature(dsl_dataset_t *ds, spa_feature_t f);
|
static void unload_zfeature(dsl_dataset_t *ds, spa_feature_t f);
|
||||||
|
|
||||||
extern int spa_asize_inflation;
|
extern uint_t spa_asize_inflation;
|
||||||
|
|
||||||
static zil_header_t zero_zil;
|
static zil_header_t zero_zil;
|
||||||
|
|
||||||
@ -4971,7 +4971,7 @@ dsl_dataset_oldest_snapshot(spa_t *spa, uint64_t head_ds, uint64_t min_txg,
|
|||||||
return (0);
|
return (0);
|
||||||
}
|
}
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs, zfs_, max_recordsize, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, max_recordsize, UINT, ZMOD_RW,
|
||||||
"Max allowed record size");
|
"Max allowed record size");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs, zfs_, allow_redacted_dataset_mount, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, allow_redacted_dataset_mount, INT, ZMOD_RW,
|
||||||
|
@ -101,8 +101,8 @@
|
|||||||
*/
|
*/
|
||||||
unsigned long zfs_dirty_data_max = 0;
|
unsigned long zfs_dirty_data_max = 0;
|
||||||
unsigned long zfs_dirty_data_max_max = 0;
|
unsigned long zfs_dirty_data_max_max = 0;
|
||||||
int zfs_dirty_data_max_percent = 10;
|
uint_t zfs_dirty_data_max_percent = 10;
|
||||||
int zfs_dirty_data_max_max_percent = 25;
|
uint_t zfs_dirty_data_max_max_percent = 25;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* The upper limit of TX_WRITE log data. Write operations are throttled
|
* The upper limit of TX_WRITE log data. Write operations are throttled
|
||||||
@ -116,14 +116,14 @@ unsigned long zfs_wrlog_data_max = 0;
|
|||||||
* zfs_dirty_data_max), push out a txg. This should be less than
|
* zfs_dirty_data_max), push out a txg. This should be less than
|
||||||
* zfs_vdev_async_write_active_min_dirty_percent.
|
* zfs_vdev_async_write_active_min_dirty_percent.
|
||||||
*/
|
*/
|
||||||
static int zfs_dirty_data_sync_percent = 20;
|
static uint_t zfs_dirty_data_sync_percent = 20;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Once there is this amount of dirty data, the dmu_tx_delay() will kick in
|
* Once there is this amount of dirty data, the dmu_tx_delay() will kick in
|
||||||
* and delay each transaction.
|
* and delay each transaction.
|
||||||
* This value should be >= zfs_vdev_async_write_active_max_dirty_percent.
|
* This value should be >= zfs_vdev_async_write_active_max_dirty_percent.
|
||||||
*/
|
*/
|
||||||
int zfs_delay_min_dirty_percent = 60;
|
uint_t zfs_delay_min_dirty_percent = 60;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* This controls how quickly the delay approaches infinity.
|
* This controls how quickly the delay approaches infinity.
|
||||||
@ -1455,14 +1455,14 @@ EXPORT_SYMBOL(dsl_pool_config_enter);
|
|||||||
EXPORT_SYMBOL(dsl_pool_config_exit);
|
EXPORT_SYMBOL(dsl_pool_config_exit);
|
||||||
|
|
||||||
/* zfs_dirty_data_max_percent only applied at module load in arc_init(). */
|
/* zfs_dirty_data_max_percent only applied at module load in arc_init(). */
|
||||||
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max_percent, INT, ZMOD_RD,
|
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max_percent, UINT, ZMOD_RD,
|
||||||
"Max percent of RAM allowed to be dirty");
|
"Max percent of RAM allowed to be dirty");
|
||||||
|
|
||||||
/* zfs_dirty_data_max_max_percent only applied at module load in arc_init(). */
|
/* zfs_dirty_data_max_max_percent only applied at module load in arc_init(). */
|
||||||
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max_max_percent, INT, ZMOD_RD,
|
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max_max_percent, UINT, ZMOD_RD,
|
||||||
"zfs_dirty_data_max upper bound as % of RAM");
|
"zfs_dirty_data_max upper bound as % of RAM");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs, zfs_, delay_min_dirty_percent, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, delay_min_dirty_percent, UINT, ZMOD_RW,
|
||||||
"Transaction delay threshold");
|
"Transaction delay threshold");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max, ULONG, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max, ULONG, ZMOD_RW,
|
||||||
@ -1475,7 +1475,7 @@ ZFS_MODULE_PARAM(zfs, zfs_, wrlog_data_max, ULONG, ZMOD_RW,
|
|||||||
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max_max, ULONG, ZMOD_RD,
|
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max_max, ULONG, ZMOD_RD,
|
||||||
"zfs_dirty_data_max upper bound in bytes");
|
"zfs_dirty_data_max upper bound in bytes");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_sync_percent, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_sync_percent, UINT, ZMOD_RW,
|
||||||
"Dirty data txg sync threshold as a percentage of zfs_dirty_data_max");
|
"Dirty data txg sync threshold as a percentage of zfs_dirty_data_max");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs, zfs_, delay_scale, ULONG, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, delay_scale, ULONG, ZMOD_RW,
|
||||||
|
@ -128,7 +128,7 @@ static void scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj);
|
|||||||
static void scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx);
|
static void scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx);
|
||||||
static uint64_t dsl_scan_count_data_disks(vdev_t *vd);
|
static uint64_t dsl_scan_count_data_disks(vdev_t *vd);
|
||||||
|
|
||||||
extern int zfs_vdev_async_write_active_min_dirty_percent;
|
extern uint_t zfs_vdev_async_write_active_min_dirty_percent;
|
||||||
static int zfs_scan_blkstats = 0;
|
static int zfs_scan_blkstats = 0;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -149,8 +149,10 @@ static int zfs_scan_strict_mem_lim = B_FALSE;
|
|||||||
*/
|
*/
|
||||||
static unsigned long zfs_scan_vdev_limit = 4 << 20;
|
static unsigned long zfs_scan_vdev_limit = 4 << 20;
|
||||||
|
|
||||||
static int zfs_scan_issue_strategy = 0;
|
static uint_t zfs_scan_issue_strategy = 0;
|
||||||
static int zfs_scan_legacy = B_FALSE; /* don't queue & sort zios, go direct */
|
|
||||||
|
/* don't queue & sort zios, go direct */
|
||||||
|
static int zfs_scan_legacy = B_FALSE;
|
||||||
static unsigned long zfs_scan_max_ext_gap = 2 << 20; /* in bytes */
|
static unsigned long zfs_scan_max_ext_gap = 2 << 20; /* in bytes */
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -158,20 +160,33 @@ static unsigned long zfs_scan_max_ext_gap = 2 << 20; /* in bytes */
|
|||||||
* zfs_scan_fill_weight. Runtime adjustments to zfs_scan_fill_weight would
|
* zfs_scan_fill_weight. Runtime adjustments to zfs_scan_fill_weight would
|
||||||
* break queue sorting.
|
* break queue sorting.
|
||||||
*/
|
*/
|
||||||
static int zfs_scan_fill_weight = 3;
|
static uint_t zfs_scan_fill_weight = 3;
|
||||||
static uint64_t fill_weight;
|
static uint64_t fill_weight;
|
||||||
|
|
||||||
/* See dsl_scan_should_clear() for details on the memory limit tunables */
|
/* See dsl_scan_should_clear() for details on the memory limit tunables */
|
||||||
static const uint64_t zfs_scan_mem_lim_min = 16 << 20; /* bytes */
|
static const uint64_t zfs_scan_mem_lim_min = 16 << 20; /* bytes */
|
||||||
static const uint64_t zfs_scan_mem_lim_soft_max = 128 << 20; /* bytes */
|
static const uint64_t zfs_scan_mem_lim_soft_max = 128 << 20; /* bytes */
|
||||||
static int zfs_scan_mem_lim_fact = 20; /* fraction of physmem */
|
|
||||||
static int zfs_scan_mem_lim_soft_fact = 20; /* fraction of mem lim above */
|
|
||||||
|
|
||||||
static int zfs_scrub_min_time_ms = 1000; /* min millis to scrub per txg */
|
|
||||||
static int zfs_obsolete_min_time_ms = 500; /* min millis to obsolete per txg */
|
/* fraction of physmem */
|
||||||
static int zfs_free_min_time_ms = 1000; /* min millis to free per txg */
|
static uint_t zfs_scan_mem_lim_fact = 20;
|
||||||
static int zfs_resilver_min_time_ms = 3000; /* min millis to resilver per txg */
|
|
||||||
static int zfs_scan_checkpoint_intval = 7200; /* in seconds */
|
/* fraction of mem lim above */
|
||||||
|
static uint_t zfs_scan_mem_lim_soft_fact = 20;
|
||||||
|
|
||||||
|
/* minimum milliseconds to scrub per txg */
|
||||||
|
static uint_t zfs_scrub_min_time_ms = 1000;
|
||||||
|
|
||||||
|
/* minimum milliseconds to obsolete per txg */
|
||||||
|
static uint_t zfs_obsolete_min_time_ms = 500;
|
||||||
|
|
||||||
|
/* minimum milliseconds to free per txg */
|
||||||
|
static uint_t zfs_free_min_time_ms = 1000;
|
||||||
|
|
||||||
|
/* minimum milliseconds to resilver per txg */
|
||||||
|
static uint_t zfs_resilver_min_time_ms = 3000;
|
||||||
|
|
||||||
|
static uint_t zfs_scan_checkpoint_intval = 7200; /* in seconds */
|
||||||
int zfs_scan_suspend_progress = 0; /* set to prevent scans from progressing */
|
int zfs_scan_suspend_progress = 0; /* set to prevent scans from progressing */
|
||||||
static int zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */
|
static int zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */
|
||||||
static int zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetch */
|
static int zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetch */
|
||||||
@ -1350,7 +1365,7 @@ dsl_scan_check_suspend(dsl_scan_t *scn, const zbookmark_phys_t *zb)
|
|||||||
scn->scn_dp->dp_spa->spa_sync_starttime;
|
scn->scn_dp->dp_spa->spa_sync_starttime;
|
||||||
uint64_t dirty_min_bytes = zfs_dirty_data_max *
|
uint64_t dirty_min_bytes = zfs_dirty_data_max *
|
||||||
zfs_vdev_async_write_active_min_dirty_percent / 100;
|
zfs_vdev_async_write_active_min_dirty_percent / 100;
|
||||||
int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
|
uint_t mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
|
||||||
zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;
|
zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;
|
||||||
|
|
||||||
if ((NSEC2MSEC(scan_time_ns) > mintime &&
|
if ((NSEC2MSEC(scan_time_ns) > mintime &&
|
||||||
@ -2840,7 +2855,7 @@ scan_io_queue_check_suspend(dsl_scan_t *scn)
|
|||||||
scn->scn_dp->dp_spa->spa_sync_starttime;
|
scn->scn_dp->dp_spa->spa_sync_starttime;
|
||||||
uint64_t dirty_min_bytes = zfs_dirty_data_max *
|
uint64_t dirty_min_bytes = zfs_dirty_data_max *
|
||||||
zfs_vdev_async_write_active_min_dirty_percent / 100;
|
zfs_vdev_async_write_active_min_dirty_percent / 100;
|
||||||
int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
|
uint_t mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
|
||||||
zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;
|
zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;
|
||||||
|
|
||||||
return ((NSEC2MSEC(scan_time_ns) > mintime &&
|
return ((NSEC2MSEC(scan_time_ns) > mintime &&
|
||||||
@ -3622,8 +3637,9 @@ dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx)
|
|||||||
*/
|
*/
|
||||||
if (zfs_scan_suspend_progress) {
|
if (zfs_scan_suspend_progress) {
|
||||||
uint64_t scan_time_ns = gethrtime() - scn->scn_sync_start_time;
|
uint64_t scan_time_ns = gethrtime() - scn->scn_sync_start_time;
|
||||||
int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
|
uint_t mintime = (scn->scn_phys.scn_func ==
|
||||||
zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;
|
POOL_SCAN_RESILVER) ? zfs_resilver_min_time_ms :
|
||||||
|
zfs_scrub_min_time_ms;
|
||||||
|
|
||||||
while (zfs_scan_suspend_progress &&
|
while (zfs_scan_suspend_progress &&
|
||||||
!txg_sync_waiting(scn->scn_dp) &&
|
!txg_sync_waiting(scn->scn_dp) &&
|
||||||
@ -4433,16 +4449,16 @@ dsl_scan_assess_vdev(dsl_pool_t *dp, vdev_t *vd)
|
|||||||
ZFS_MODULE_PARAM(zfs, zfs_, scan_vdev_limit, ULONG, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, scan_vdev_limit, ULONG, ZMOD_RW,
|
||||||
"Max bytes in flight per leaf vdev for scrubs and resilvers");
|
"Max bytes in flight per leaf vdev for scrubs and resilvers");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs, zfs_, scrub_min_time_ms, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, scrub_min_time_ms, UINT, ZMOD_RW,
|
||||||
"Min millisecs to scrub per txg");
|
"Min millisecs to scrub per txg");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs, zfs_, obsolete_min_time_ms, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, obsolete_min_time_ms, UINT, ZMOD_RW,
|
||||||
"Min millisecs to obsolete per txg");
|
"Min millisecs to obsolete per txg");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs, zfs_, free_min_time_ms, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, free_min_time_ms, UINT, ZMOD_RW,
|
||||||
"Min millisecs to free per txg");
|
"Min millisecs to free per txg");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs, zfs_, resilver_min_time_ms, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, resilver_min_time_ms, UINT, ZMOD_RW,
|
||||||
"Min millisecs to resilver per txg");
|
"Min millisecs to resilver per txg");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs, zfs_, scan_suspend_progress, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, scan_suspend_progress, INT, ZMOD_RW,
|
||||||
@ -4466,28 +4482,28 @@ ZFS_MODULE_PARAM(zfs, zfs_, free_bpobj_enabled, INT, ZMOD_RW,
|
|||||||
ZFS_MODULE_PARAM(zfs, zfs_, scan_blkstats, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, scan_blkstats, INT, ZMOD_RW,
|
||||||
"Enable block statistics calculation during scrub");
|
"Enable block statistics calculation during scrub");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_fact, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_fact, UINT, ZMOD_RW,
|
||||||
"Fraction of RAM for scan hard limit");
|
"Fraction of RAM for scan hard limit");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs, zfs_, scan_issue_strategy, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, scan_issue_strategy, UINT, ZMOD_RW,
|
||||||
"IO issuing strategy during scrubbing. 0 = default, 1 = LBA, 2 = size");
|
"IO issuing strategy during scrubbing. 0 = default, 1 = LBA, 2 = size");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs, zfs_, scan_legacy, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, scan_legacy, INT, ZMOD_RW,
|
||||||
"Scrub using legacy non-sequential method");
|
"Scrub using legacy non-sequential method");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs, zfs_, scan_checkpoint_intval, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, scan_checkpoint_intval, UINT, ZMOD_RW,
|
||||||
"Scan progress on-disk checkpointing interval");
|
"Scan progress on-disk checkpointing interval");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs, zfs_, scan_max_ext_gap, ULONG, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, scan_max_ext_gap, ULONG, ZMOD_RW,
|
||||||
"Max gap in bytes between sequential scrub / resilver I/Os");
|
"Max gap in bytes between sequential scrub / resilver I/Os");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_soft_fact, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_soft_fact, UINT, ZMOD_RW,
|
||||||
"Fraction of hard limit used as soft limit");
|
"Fraction of hard limit used as soft limit");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs, zfs_, scan_strict_mem_lim, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, scan_strict_mem_lim, INT, ZMOD_RW,
|
||||||
"Tunable to attempt to reduce lock contention");
|
"Tunable to attempt to reduce lock contention");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs, zfs_, scan_fill_weight, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, scan_fill_weight, UINT, ZMOD_RW,
|
||||||
"Tunable to adjust bias towards more filled segments during scans");
|
"Tunable to adjust bias towards more filled segments during scans");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs, zfs_, resilver_disable_defer, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, resilver_disable_defer, INT, ZMOD_RW,
|
||||||
|
@ -68,9 +68,9 @@
|
|||||||
#include <sys/condvar.h>
|
#include <sys/condvar.h>
|
||||||
#include <sys/zfs_ioctl.h>
|
#include <sys/zfs_ioctl.h>
|
||||||
|
|
||||||
static int zfs_zevent_len_max = 512;
|
static uint_t zfs_zevent_len_max = 512;
|
||||||
|
|
||||||
static int zevent_len_cur = 0;
|
static uint_t zevent_len_cur = 0;
|
||||||
static int zevent_waiters = 0;
|
static int zevent_waiters = 0;
|
||||||
static int zevent_flags = 0;
|
static int zevent_flags = 0;
|
||||||
|
|
||||||
@ -158,7 +158,7 @@ zfs_zevent_drain(zevent_t *ev)
|
|||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
zfs_zevent_drain_all(int *count)
|
zfs_zevent_drain_all(uint_t *count)
|
||||||
{
|
{
|
||||||
zevent_t *ev;
|
zevent_t *ev;
|
||||||
|
|
||||||
@ -1342,7 +1342,7 @@ fm_init(void)
|
|||||||
void
|
void
|
||||||
fm_fini(void)
|
fm_fini(void)
|
||||||
{
|
{
|
||||||
int count;
|
uint_t count;
|
||||||
|
|
||||||
zfs_ereport_fini();
|
zfs_ereport_fini();
|
||||||
|
|
||||||
@ -1370,5 +1370,5 @@ fm_fini(void)
|
|||||||
}
|
}
|
||||||
#endif /* _KERNEL */
|
#endif /* _KERNEL */
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_zevent, zfs_zevent_, len_max, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_zevent, zfs_zevent_, len_max, UINT, ZMOD_RW,
|
||||||
"Max event queue length");
|
"Max event queue length");
|
||||||
|
@ -81,7 +81,7 @@ int zfs_metaslab_sm_blksz_with_log = (1 << 17);
|
|||||||
* space map representation must be before we compact it on-disk.
|
* space map representation must be before we compact it on-disk.
|
||||||
* Values should be greater than or equal to 100.
|
* Values should be greater than or equal to 100.
|
||||||
*/
|
*/
|
||||||
int zfs_condense_pct = 200;
|
uint_t zfs_condense_pct = 200;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Condensing a metaslab is not guaranteed to actually reduce the amount of
|
* Condensing a metaslab is not guaranteed to actually reduce the amount of
|
||||||
@ -111,7 +111,7 @@ static const int zfs_metaslab_condense_block_threshold = 4;
|
|||||||
* eligible to allocate on any metaslab group. The default value of 0 means
|
* eligible to allocate on any metaslab group. The default value of 0 means
|
||||||
* no metaslab group will be excluded based on this criterion.
|
* no metaslab group will be excluded based on this criterion.
|
||||||
*/
|
*/
|
||||||
static int zfs_mg_noalloc_threshold = 0;
|
static uint_t zfs_mg_noalloc_threshold = 0;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Metaslab groups are considered eligible for allocations if their
|
* Metaslab groups are considered eligible for allocations if their
|
||||||
@ -135,7 +135,7 @@ static int zfs_mg_noalloc_threshold = 0;
|
|||||||
* enough to avoid hitting the speed bump on pools that are being pushed
|
* enough to avoid hitting the speed bump on pools that are being pushed
|
||||||
* to the edge.
|
* to the edge.
|
||||||
*/
|
*/
|
||||||
static int zfs_mg_fragmentation_threshold = 95;
|
static uint_t zfs_mg_fragmentation_threshold = 95;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Allow metaslabs to keep their active state as long as their fragmentation
|
* Allow metaslabs to keep their active state as long as their fragmentation
|
||||||
@ -143,7 +143,7 @@ static int zfs_mg_fragmentation_threshold = 95;
|
|||||||
* active metaslab that exceeds this threshold will no longer keep its active
|
* active metaslab that exceeds this threshold will no longer keep its active
|
||||||
* status allowing better metaslabs to be selected.
|
* status allowing better metaslabs to be selected.
|
||||||
*/
|
*/
|
||||||
static int zfs_metaslab_fragmentation_threshold = 70;
|
static uint_t zfs_metaslab_fragmentation_threshold = 70;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* When set will load all metaslabs when pool is first opened.
|
* When set will load all metaslabs when pool is first opened.
|
||||||
@ -169,7 +169,7 @@ uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;
|
|||||||
* Once the space map's free space drops below this level we dynamically
|
* Once the space map's free space drops below this level we dynamically
|
||||||
* switch to using best-fit allocations.
|
* switch to using best-fit allocations.
|
||||||
*/
|
*/
|
||||||
int metaslab_df_free_pct = 4;
|
uint_t metaslab_df_free_pct = 4;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Maximum distance to search forward from the last offset. Without this
|
* Maximum distance to search forward from the last offset. Without this
|
||||||
@ -184,7 +184,7 @@ int metaslab_df_free_pct = 4;
|
|||||||
* With the default setting of 16MB this is 16*1024 (with ashift=9) or
|
* With the default setting of 16MB this is 16*1024 (with ashift=9) or
|
||||||
* 2048 (with ashift=12).
|
* 2048 (with ashift=12).
|
||||||
*/
|
*/
|
||||||
static int metaslab_df_max_search = 16 * 1024 * 1024;
|
static uint_t metaslab_df_max_search = 16 * 1024 * 1024;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Forces the metaslab_block_picker function to search for at least this many
|
* Forces the metaslab_block_picker function to search for at least this many
|
||||||
@ -215,13 +215,13 @@ int metaslab_load_pct = 50;
|
|||||||
* unloaded sooner. These settings are intended to be generous -- to keep
|
* unloaded sooner. These settings are intended to be generous -- to keep
|
||||||
* metaslabs loaded for a long time, reducing the rate of metaslab loading.
|
* metaslabs loaded for a long time, reducing the rate of metaslab loading.
|
||||||
*/
|
*/
|
||||||
static int metaslab_unload_delay = 32;
|
static uint_t metaslab_unload_delay = 32;
|
||||||
static int metaslab_unload_delay_ms = 10 * 60 * 1000; /* ten minutes */
|
static uint_t metaslab_unload_delay_ms = 10 * 60 * 1000; /* ten minutes */
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Max number of metaslabs per group to preload.
|
* Max number of metaslabs per group to preload.
|
||||||
*/
|
*/
|
||||||
int metaslab_preload_limit = 10;
|
uint_t metaslab_preload_limit = 10;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Enable/disable preloading of metaslab.
|
* Enable/disable preloading of metaslab.
|
||||||
@ -293,7 +293,7 @@ static unsigned long zfs_metaslab_max_size_cache_sec = 1 * 60 * 60; /* 1 hour */
|
|||||||
* a metaslab would take it over this percentage, the oldest selected metaslab
|
* a metaslab would take it over this percentage, the oldest selected metaslab
|
||||||
* is automatically unloaded.
|
* is automatically unloaded.
|
||||||
*/
|
*/
|
||||||
static int zfs_metaslab_mem_limit = 25;
|
static uint_t zfs_metaslab_mem_limit = 25;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Force the per-metaslab range trees to use 64-bit integers to store
|
* Force the per-metaslab range trees to use 64-bit integers to store
|
||||||
@ -337,7 +337,7 @@ static int zfs_metaslab_try_hard_before_gang = B_FALSE;
|
|||||||
* subsequent metaslab has ms_max_size >60KB (but fewer segments in this
|
* subsequent metaslab has ms_max_size >60KB (but fewer segments in this
|
||||||
* bucket, and therefore a lower weight).
|
* bucket, and therefore a lower weight).
|
||||||
*/
|
*/
|
||||||
static int zfs_metaslab_find_max_tries = 100;
|
static uint_t zfs_metaslab_find_max_tries = 100;
|
||||||
|
|
||||||
static uint64_t metaslab_weight(metaslab_t *, boolean_t);
|
static uint64_t metaslab_weight(metaslab_t *, boolean_t);
|
||||||
static void metaslab_set_fragmentation(metaslab_t *, boolean_t);
|
static void metaslab_set_fragmentation(metaslab_t *, boolean_t);
|
||||||
@ -1672,7 +1672,7 @@ metaslab_df_alloc(metaslab_t *msp, uint64_t size)
|
|||||||
uint64_t align = size & -size;
|
uint64_t align = size & -size;
|
||||||
uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
|
uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
|
||||||
range_tree_t *rt = msp->ms_allocatable;
|
range_tree_t *rt = msp->ms_allocatable;
|
||||||
int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
|
uint_t free_pct = range_tree_space(rt) * 100 / msp->ms_size;
|
||||||
uint64_t offset;
|
uint64_t offset;
|
||||||
|
|
||||||
ASSERT(MUTEX_HELD(&msp->ms_lock));
|
ASSERT(MUTEX_HELD(&msp->ms_lock));
|
||||||
@ -2169,7 +2169,7 @@ metaslab_potentially_evict(metaslab_class_t *mc)
|
|||||||
uint64_t allmem = arc_all_memory();
|
uint64_t allmem = arc_all_memory();
|
||||||
uint64_t inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache);
|
uint64_t inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache);
|
||||||
uint64_t size = spl_kmem_cache_entry_size(zfs_btree_leaf_cache);
|
uint64_t size = spl_kmem_cache_entry_size(zfs_btree_leaf_cache);
|
||||||
int tries = 0;
|
uint_t tries = 0;
|
||||||
for (; allmem * zfs_metaslab_mem_limit / 100 < inuse * size &&
|
for (; allmem * zfs_metaslab_mem_limit / 100 < inuse * size &&
|
||||||
tries < multilist_get_num_sublists(&mc->mc_metaslab_txg_list) * 2;
|
tries < multilist_get_num_sublists(&mc->mc_metaslab_txg_list) * 2;
|
||||||
tries++) {
|
tries++) {
|
||||||
@ -4640,7 +4640,7 @@ find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight,
|
|||||||
if (msp == NULL)
|
if (msp == NULL)
|
||||||
msp = avl_nearest(t, idx, AVL_AFTER);
|
msp = avl_nearest(t, idx, AVL_AFTER);
|
||||||
|
|
||||||
int tries = 0;
|
uint_t tries = 0;
|
||||||
for (; msp != NULL; msp = AVL_NEXT(t, msp)) {
|
for (; msp != NULL; msp = AVL_NEXT(t, msp)) {
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
@ -6215,18 +6215,18 @@ ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_unload, INT, ZMOD_RW,
|
|||||||
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_enabled, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_enabled, INT, ZMOD_RW,
|
||||||
"Preload potential metaslabs during reassessment");
|
"Preload potential metaslabs during reassessment");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay, UINT, ZMOD_RW,
|
||||||
"Delay in txgs after metaslab was last used before unloading");
|
"Delay in txgs after metaslab was last used before unloading");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay_ms, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay_ms, UINT, ZMOD_RW,
|
||||||
"Delay in milliseconds after metaslab was last used before unloading");
|
"Delay in milliseconds after metaslab was last used before unloading");
|
||||||
|
|
||||||
/* BEGIN CSTYLED */
|
/* BEGIN CSTYLED */
|
||||||
ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, noalloc_threshold, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, noalloc_threshold, UINT, ZMOD_RW,
|
||||||
"Percentage of metaslab group size that should be free to make it "
|
"Percentage of metaslab group size that should be free to make it "
|
||||||
"eligible for allocation");
|
"eligible for allocation");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, fragmentation_threshold, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, fragmentation_threshold, UINT, ZMOD_RW,
|
||||||
"Percentage of metaslab group size that should be considered eligible "
|
"Percentage of metaslab group size that should be considered eligible "
|
||||||
"for allocations unless all metaslab groups within the metaslab class "
|
"for allocations unless all metaslab groups within the metaslab class "
|
||||||
"have also crossed this threshold");
|
"have also crossed this threshold");
|
||||||
@ -6236,7 +6236,7 @@ ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, fragmentation_factor_enabled, INT,
|
|||||||
"Use the fragmentation metric to prefer less fragmented metaslabs");
|
"Use the fragmentation metric to prefer less fragmented metaslabs");
|
||||||
/* END CSTYLED */
|
/* END CSTYLED */
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, fragmentation_threshold, INT,
|
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, fragmentation_threshold, UINT,
|
||||||
ZMOD_RW, "Fragmentation for metaslab to allow allocation");
|
ZMOD_RW, "Fragmentation for metaslab to allow allocation");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, lba_weighting_enabled, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, lba_weighting_enabled, INT, ZMOD_RW,
|
||||||
@ -6254,7 +6254,7 @@ ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, switch_threshold, INT, ZMOD_RW,
|
|||||||
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, force_ganging, ULONG, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, force_ganging, ULONG, ZMOD_RW,
|
||||||
"Blocks larger than this size are forced to be gang blocks");
|
"Blocks larger than this size are forced to be gang blocks");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_max_search, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_max_search, UINT, ZMOD_RW,
|
||||||
"Max distance (bytes) to search forward before using size tree");
|
"Max distance (bytes) to search forward before using size tree");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_use_largest_segment, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_use_largest_segment, INT, ZMOD_RW,
|
||||||
@ -6263,11 +6263,11 @@ ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_use_largest_segment, INT, ZMOD_RW,
|
|||||||
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, max_size_cache_sec, ULONG,
|
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, max_size_cache_sec, ULONG,
|
||||||
ZMOD_RW, "How long to trust the cached max chunk size of a metaslab");
|
ZMOD_RW, "How long to trust the cached max chunk size of a metaslab");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, mem_limit, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, mem_limit, UINT, ZMOD_RW,
|
||||||
"Percentage of memory that can be used to store metaslab range trees");
|
"Percentage of memory that can be used to store metaslab range trees");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, try_hard_before_gang, INT,
|
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, try_hard_before_gang, INT,
|
||||||
ZMOD_RW, "Try hard to allocate before ganging");
|
ZMOD_RW, "Try hard to allocate before ganging");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, find_max_tries, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, find_max_tries, UINT, ZMOD_RW,
|
||||||
"Normally only consider this many of the best metaslabs in each vdev");
|
"Normally only consider this many of the best metaslabs in each vdev");
|
||||||
|
@ -24,7 +24,7 @@
|
|||||||
* This overrides the number of sublists in each multilist_t, which defaults
|
* This overrides the number of sublists in each multilist_t, which defaults
|
||||||
* to the number of CPUs in the system (see multilist_create()).
|
* to the number of CPUs in the system (see multilist_create()).
|
||||||
*/
|
*/
|
||||||
int zfs_multilist_num_sublists = 0;
|
uint_t zfs_multilist_num_sublists = 0;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Given the object contained on the list, return a pointer to the
|
* Given the object contained on the list, return a pointer to the
|
||||||
@ -69,7 +69,7 @@ multilist_d2l(multilist_t *ml, void *obj)
|
|||||||
*/
|
*/
|
||||||
static void
|
static void
|
||||||
multilist_create_impl(multilist_t *ml, size_t size, size_t offset,
|
multilist_create_impl(multilist_t *ml, size_t size, size_t offset,
|
||||||
unsigned int num, multilist_sublist_index_func_t *index_func)
|
uint_t num, multilist_sublist_index_func_t *index_func)
|
||||||
{
|
{
|
||||||
ASSERT3U(size, >, 0);
|
ASSERT3U(size, >, 0);
|
||||||
ASSERT3U(size, >=, offset + sizeof (multilist_node_t));
|
ASSERT3U(size, >=, offset + sizeof (multilist_node_t));
|
||||||
@ -104,7 +104,7 @@ void
|
|||||||
multilist_create(multilist_t *ml, size_t size, size_t offset,
|
multilist_create(multilist_t *ml, size_t size, size_t offset,
|
||||||
multilist_sublist_index_func_t *index_func)
|
multilist_sublist_index_func_t *index_func)
|
||||||
{
|
{
|
||||||
int num_sublists;
|
uint_t num_sublists;
|
||||||
|
|
||||||
if (zfs_multilist_num_sublists > 0) {
|
if (zfs_multilist_num_sublists > 0) {
|
||||||
num_sublists = zfs_multilist_num_sublists;
|
num_sublists = zfs_multilist_num_sublists;
|
||||||
@ -425,5 +425,5 @@ multilist_link_active(multilist_node_t *link)
|
|||||||
return (list_link_active(link));
|
return (list_link_active(link));
|
||||||
}
|
}
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs, zfs_, multilist_num_sublists, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, multilist_num_sublists, UINT, ZMOD_RW,
|
||||||
"Number of sublists used in each multilist");
|
"Number of sublists used in each multilist");
|
||||||
|
@ -33,7 +33,7 @@
|
|||||||
* cpu time. Until its performance is improved it should be manually enabled.
|
* cpu time. Until its performance is improved it should be manually enabled.
|
||||||
*/
|
*/
|
||||||
int reference_tracking_enable = B_FALSE;
|
int reference_tracking_enable = B_FALSE;
|
||||||
static int reference_history = 3; /* tunable */
|
static uint_t reference_history = 3; /* tunable */
|
||||||
|
|
||||||
static kmem_cache_t *reference_cache;
|
static kmem_cache_t *reference_cache;
|
||||||
static kmem_cache_t *reference_history_cache;
|
static kmem_cache_t *reference_history_cache;
|
||||||
@ -329,7 +329,7 @@ EXPORT_SYMBOL(zfs_refcount_held);
|
|||||||
ZFS_MODULE_PARAM(zfs, , reference_tracking_enable, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, , reference_tracking_enable, INT, ZMOD_RW,
|
||||||
"Track reference holders to refcount_t objects");
|
"Track reference holders to refcount_t objects");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs, , reference_history, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, , reference_history, UINT, ZMOD_RW,
|
||||||
"Maximum reference holders being tracked");
|
"Maximum reference holders being tracked");
|
||||||
/* END CSTYLED */
|
/* END CSTYLED */
|
||||||
#endif /* ZFS_DEBUG */
|
#endif /* ZFS_DEBUG */
|
||||||
|
@ -2301,7 +2301,7 @@ spa_load_verify_done(zio_t *zio)
|
|||||||
* Maximum number of inflight bytes is the log2 fraction of the arc size.
|
* Maximum number of inflight bytes is the log2 fraction of the arc size.
|
||||||
* By default, we set it to 1/16th of the arc.
|
* By default, we set it to 1/16th of the arc.
|
||||||
*/
|
*/
|
||||||
static int spa_load_verify_shift = 4;
|
static uint_t spa_load_verify_shift = 4;
|
||||||
static int spa_load_verify_metadata = B_TRUE;
|
static int spa_load_verify_metadata = B_TRUE;
|
||||||
static int spa_load_verify_data = B_TRUE;
|
static int spa_load_verify_data = B_TRUE;
|
||||||
|
|
||||||
@ -9988,7 +9988,7 @@ EXPORT_SYMBOL(spa_prop_clear_bootfs);
|
|||||||
EXPORT_SYMBOL(spa_event_notify);
|
EXPORT_SYMBOL(spa_event_notify);
|
||||||
|
|
||||||
/* BEGIN CSTYLED */
|
/* BEGIN CSTYLED */
|
||||||
ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_shift, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_shift, UINT, ZMOD_RW,
|
||||||
"log2 fraction of arc that can be used by inflight I/Os when "
|
"log2 fraction of arc that can be used by inflight I/Os when "
|
||||||
"verifying pool during import");
|
"verifying pool during import");
|
||||||
/* END CSTYLED */
|
/* END CSTYLED */
|
||||||
|
@ -78,7 +78,7 @@
|
|||||||
* format when enabling head_errlog. Defaults to 0 which converts
|
* format when enabling head_errlog. Defaults to 0 which converts
|
||||||
* all log entries.
|
* all log entries.
|
||||||
*/
|
*/
|
||||||
static uint32_t spa_upgrade_errlog_limit = 0;
|
static uint_t spa_upgrade_errlog_limit = 0;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Convert a bookmark to a string.
|
* Convert a bookmark to a string.
|
||||||
@ -1367,7 +1367,7 @@ EXPORT_SYMBOL(spa_upgrade_errlog);
|
|||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* BEGIN CSTYLED */
|
/* BEGIN CSTYLED */
|
||||||
ZFS_MODULE_PARAM(zfs_spa, spa_, upgrade_errlog_limit, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_spa, spa_, upgrade_errlog_limit, UINT, ZMOD_RW,
|
||||||
"Limit the number of errors which will be upgraded to the new "
|
"Limit the number of errors which will be upgraded to the new "
|
||||||
"on-disk error log when enabling head_errlog");
|
"on-disk error log when enabling head_errlog");
|
||||||
/* END CSTYLED */
|
/* END CSTYLED */
|
||||||
|
@ -343,7 +343,7 @@ const char *zfs_deadman_failmode = "wait";
|
|||||||
* the worst case is:
|
* the worst case is:
|
||||||
* (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
|
* (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
|
||||||
*/
|
*/
|
||||||
int spa_asize_inflation = 24;
|
uint_t spa_asize_inflation = 24;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
|
* Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
|
||||||
@ -383,7 +383,7 @@ int spa_asize_inflation = 24;
|
|||||||
*
|
*
|
||||||
* See also the comments in zfs_space_check_t.
|
* See also the comments in zfs_space_check_t.
|
||||||
*/
|
*/
|
||||||
int spa_slop_shift = 5;
|
uint_t spa_slop_shift = 5;
|
||||||
static const uint64_t spa_min_slop = 128ULL * 1024 * 1024;
|
static const uint64_t spa_min_slop = 128ULL * 1024 * 1024;
|
||||||
static const uint64_t spa_max_slop = 128ULL * 1024 * 1024 * 1024;
|
static const uint64_t spa_max_slop = 128ULL * 1024 * 1024 * 1024;
|
||||||
static const int spa_allocators = 4;
|
static const int spa_allocators = 4;
|
||||||
@ -428,7 +428,7 @@ static int zfs_user_indirect_is_special = B_TRUE;
|
|||||||
* Once we allocate 100 - zfs_special_class_metadata_reserve_pct we only
|
* Once we allocate 100 - zfs_special_class_metadata_reserve_pct we only
|
||||||
* let metadata into the class.
|
* let metadata into the class.
|
||||||
*/
|
*/
|
||||||
static int zfs_special_class_metadata_reserve_pct = 25;
|
static uint_t zfs_special_class_metadata_reserve_pct = 25;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* ==========================================================================
|
* ==========================================================================
|
||||||
@ -1657,7 +1657,7 @@ spa_altroot(spa_t *spa, char *buf, size_t buflen)
|
|||||||
(void) strlcpy(buf, spa->spa_root, buflen);
|
(void) strlcpy(buf, spa->spa_root, buflen);
|
||||||
}
|
}
|
||||||
|
|
||||||
int
|
uint32_t
|
||||||
spa_sync_pass(spa_t *spa)
|
spa_sync_pass(spa_t *spa)
|
||||||
{
|
{
|
||||||
return (spa->spa_sync_pass);
|
return (spa->spa_sync_pass);
|
||||||
@ -2928,7 +2928,7 @@ ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, checktime_ms, ULONG, ZMOD_RW,
|
|||||||
ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, enabled, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, enabled, INT, ZMOD_RW,
|
||||||
"Enable deadman timer");
|
"Enable deadman timer");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_spa, spa_, asize_inflation, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_spa, spa_, asize_inflation, UINT, ZMOD_RW,
|
||||||
"SPA size estimate multiplication factor");
|
"SPA size estimate multiplication factor");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs, zfs_, ddt_data_is_special, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, ddt_data_is_special, INT, ZMOD_RW,
|
||||||
@ -2950,10 +2950,10 @@ ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, ziotime_ms,
|
|||||||
param_set_deadman_ziotime, param_get_ulong, ZMOD_RW,
|
param_set_deadman_ziotime, param_get_ulong, ZMOD_RW,
|
||||||
"IO expiration time in milliseconds");
|
"IO expiration time in milliseconds");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs, zfs_, special_class_metadata_reserve_pct, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, special_class_metadata_reserve_pct, UINT, ZMOD_RW,
|
||||||
"Small file blocks in special vdevs depends on this much "
|
"Small file blocks in special vdevs depends on this much "
|
||||||
"free space available");
|
"free space available");
|
||||||
/* END CSTYLED */
|
/* END CSTYLED */
|
||||||
|
|
||||||
ZFS_MODULE_PARAM_CALL(zfs_spa, spa_, slop_shift, param_set_slop_shift,
|
ZFS_MODULE_PARAM_CALL(zfs_spa, spa_, slop_shift, param_set_slop_shift,
|
||||||
param_get_int, ZMOD_RW, "Reserved free space in pool");
|
param_get_uint, ZMOD_RW, "Reserved free space in pool");
|
||||||
|
@ -28,7 +28,7 @@
|
|||||||
/*
|
/*
|
||||||
* Keeps stats on last N reads per spa_t, disabled by default.
|
* Keeps stats on last N reads per spa_t, disabled by default.
|
||||||
*/
|
*/
|
||||||
static int zfs_read_history = B_FALSE;
|
static uint_t zfs_read_history = B_FALSE;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Include cache hits in history, disabled by default.
|
* Include cache hits in history, disabled by default.
|
||||||
@ -38,12 +38,12 @@ static int zfs_read_history_hits = B_FALSE;
|
|||||||
/*
|
/*
|
||||||
* Keeps stats on the last 100 txgs by default.
|
* Keeps stats on the last 100 txgs by default.
|
||||||
*/
|
*/
|
||||||
static int zfs_txg_history = 100;
|
static uint_t zfs_txg_history = 100;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Keeps stats on the last N MMP updates, disabled by default.
|
* Keeps stats on the last N MMP updates, disabled by default.
|
||||||
*/
|
*/
|
||||||
int zfs_multihost_history = B_FALSE;
|
static uint_t zfs_multihost_history = B_FALSE;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* ==========================================================================
|
* ==========================================================================
|
||||||
@ -1012,14 +1012,14 @@ spa_stats_destroy(spa_t *spa)
|
|||||||
spa_guid_destroy(spa);
|
spa_guid_destroy(spa);
|
||||||
}
|
}
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs, zfs_, read_history, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, read_history, UINT, ZMOD_RW,
|
||||||
"Historical statistics for the last N reads");
|
"Historical statistics for the last N reads");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs, zfs_, read_history_hits, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, read_history_hits, INT, ZMOD_RW,
|
||||||
"Include cache hits in read history");
|
"Include cache hits in read history");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_txg, zfs_txg_, history, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_txg, zfs_txg_, history, UINT, ZMOD_RW,
|
||||||
"Historical statistics for the last N txgs");
|
"Historical statistics for the last N txgs");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_multihost, zfs_multihost_, history, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_multihost, zfs_multihost_, history, UINT, ZMOD_RW,
|
||||||
"Historical statistics for last N multihost writes");
|
"Historical statistics for last N multihost writes");
|
||||||
|
@ -111,7 +111,7 @@
|
|||||||
static __attribute__((noreturn)) void txg_sync_thread(void *arg);
|
static __attribute__((noreturn)) void txg_sync_thread(void *arg);
|
||||||
static __attribute__((noreturn)) void txg_quiesce_thread(void *arg);
|
static __attribute__((noreturn)) void txg_quiesce_thread(void *arg);
|
||||||
|
|
||||||
int zfs_txg_timeout = 5; /* max seconds worth of delta per txg */
|
uint_t zfs_txg_timeout = 5; /* max seconds worth of delta per txg */
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Prepare the txg subsystem.
|
* Prepare the txg subsystem.
|
||||||
@ -1069,5 +1069,5 @@ EXPORT_SYMBOL(txg_wait_callbacks);
|
|||||||
EXPORT_SYMBOL(txg_stalled);
|
EXPORT_SYMBOL(txg_stalled);
|
||||||
EXPORT_SYMBOL(txg_sync_waiting);
|
EXPORT_SYMBOL(txg_sync_waiting);
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_txg, zfs_txg_, timeout, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_txg, zfs_txg_, timeout, UINT, ZMOD_RW,
|
||||||
"Max seconds worth of delta per txg");
|
"Max seconds worth of delta per txg");
|
||||||
|
@ -81,22 +81,22 @@
|
|||||||
* 1 << (spa_slop_shift + 1), on small pools the usable space may be reduced
|
* 1 << (spa_slop_shift + 1), on small pools the usable space may be reduced
|
||||||
* (by more than 1<<spa_slop_shift) due to the embedded slog metaslab.
|
* (by more than 1<<spa_slop_shift) due to the embedded slog metaslab.
|
||||||
*/
|
*/
|
||||||
static int zfs_embedded_slog_min_ms = 64;
|
static uint_t zfs_embedded_slog_min_ms = 64;
|
||||||
|
|
||||||
/* default target for number of metaslabs per top-level vdev */
|
/* default target for number of metaslabs per top-level vdev */
|
||||||
static int zfs_vdev_default_ms_count = 200;
|
static uint_t zfs_vdev_default_ms_count = 200;
|
||||||
|
|
||||||
/* minimum number of metaslabs per top-level vdev */
|
/* minimum number of metaslabs per top-level vdev */
|
||||||
static int zfs_vdev_min_ms_count = 16;
|
static uint_t zfs_vdev_min_ms_count = 16;
|
||||||
|
|
||||||
/* practical upper limit of total metaslabs per top-level vdev */
|
/* practical upper limit of total metaslabs per top-level vdev */
|
||||||
static int zfs_vdev_ms_count_limit = 1ULL << 17;
|
static uint_t zfs_vdev_ms_count_limit = 1ULL << 17;
|
||||||
|
|
||||||
/* lower limit for metaslab size (512M) */
|
/* lower limit for metaslab size (512M) */
|
||||||
static int zfs_vdev_default_ms_shift = 29;
|
static uint_t zfs_vdev_default_ms_shift = 29;
|
||||||
|
|
||||||
/* upper limit for metaslab size (16G) */
|
/* upper limit for metaslab size (16G) */
|
||||||
static const int zfs_vdev_max_ms_shift = 34;
|
static const uint_t zfs_vdev_max_ms_shift = 34;
|
||||||
|
|
||||||
int vdev_validate_skip = B_FALSE;
|
int vdev_validate_skip = B_FALSE;
|
||||||
|
|
||||||
@ -6062,16 +6062,16 @@ EXPORT_SYMBOL(vdev_online);
|
|||||||
EXPORT_SYMBOL(vdev_offline);
|
EXPORT_SYMBOL(vdev_offline);
|
||||||
EXPORT_SYMBOL(vdev_clear);
|
EXPORT_SYMBOL(vdev_clear);
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, default_ms_count, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, default_ms_count, UINT, ZMOD_RW,
|
||||||
"Target number of metaslabs per top-level vdev");
|
"Target number of metaslabs per top-level vdev");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, default_ms_shift, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, default_ms_shift, UINT, ZMOD_RW,
|
||||||
"Default limit for metaslab size");
|
"Default limit for metaslab size");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, min_ms_count, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, min_ms_count, UINT, ZMOD_RW,
|
||||||
"Minimum number of metaslabs per top-level vdev");
|
"Minimum number of metaslabs per top-level vdev");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, ms_count_limit, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, ms_count_limit, UINT, ZMOD_RW,
|
||||||
"Practical upper limit of total metaslabs per top-level vdev");
|
"Practical upper limit of total metaslabs per top-level vdev");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs, zfs_, slow_io_events_per_second, UINT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, slow_io_events_per_second, UINT, ZMOD_RW,
|
||||||
@ -6092,7 +6092,7 @@ ZFS_MODULE_PARAM(zfs_vdev, vdev_, validate_skip, INT, ZMOD_RW,
|
|||||||
ZFS_MODULE_PARAM(zfs, zfs_, nocacheflush, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, nocacheflush, INT, ZMOD_RW,
|
||||||
"Disable cache flushes");
|
"Disable cache flushes");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs, zfs_, embedded_slog_min_ms, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, embedded_slog_min_ms, UINT, ZMOD_RW,
|
||||||
"Minimum number of metaslabs required to dedicate one for log blocks");
|
"Minimum number of metaslabs required to dedicate one for log blocks");
|
||||||
|
|
||||||
/* BEGIN CSTYLED */
|
/* BEGIN CSTYLED */
|
||||||
|
@ -83,9 +83,9 @@
|
|||||||
* it by setting the zfs_vdev_cache_size to zero. Note that Solaris 11
|
* it by setting the zfs_vdev_cache_size to zero. Note that Solaris 11
|
||||||
* has made these same changes.
|
* has made these same changes.
|
||||||
*/
|
*/
|
||||||
static int zfs_vdev_cache_max = 1 << 14; /* 16KB */
|
static uint_t zfs_vdev_cache_max = 1 << 14; /* 16KB */
|
||||||
static int zfs_vdev_cache_size = 0;
|
static uint_t zfs_vdev_cache_size = 0;
|
||||||
static int zfs_vdev_cache_bshift = 16;
|
static uint_t zfs_vdev_cache_bshift = 16;
|
||||||
|
|
||||||
#define VCBS (1 << zfs_vdev_cache_bshift) /* 64KB */
|
#define VCBS (1 << zfs_vdev_cache_bshift) /* 64KB */
|
||||||
|
|
||||||
@ -426,11 +426,11 @@ vdev_cache_stat_fini(void)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, cache_max, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, cache_max, UINT, ZMOD_RW,
|
||||||
"Inflate reads small than max");
|
"Inflate reads small than max");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, cache_size, INT, ZMOD_RD,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, cache_size, UINT, ZMOD_RD,
|
||||||
"Total size of the per-disk cache");
|
"Total size of the per-disk cache");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, cache_bshift, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, cache_bshift, UINT, ZMOD_RW,
|
||||||
"Shift size to inflate reads too");
|
"Shift size to inflate reads too");
|
||||||
|
@ -181,7 +181,7 @@ static int zfs_condense_indirect_vdevs_enable = B_TRUE;
|
|||||||
* condenses. Higher values will condense less often (causing less
|
* condenses. Higher values will condense less often (causing less
|
||||||
* i/o); lower values will reduce the mapping size more quickly.
|
* i/o); lower values will reduce the mapping size more quickly.
|
||||||
*/
|
*/
|
||||||
static int zfs_condense_indirect_obsolete_pct = 25;
|
static uint_t zfs_condense_indirect_obsolete_pct = 25;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Condense if the obsolete space map takes up more than this amount of
|
* Condense if the obsolete space map takes up more than this amount of
|
||||||
@ -204,7 +204,7 @@ static unsigned long zfs_condense_min_mapping_bytes = 128 * 1024;
|
|||||||
* complete too quickly). If used to reduce the performance impact of
|
* complete too quickly). If used to reduce the performance impact of
|
||||||
* condensing in production, a maximum value of 1 should be sufficient.
|
* condensing in production, a maximum value of 1 should be sufficient.
|
||||||
*/
|
*/
|
||||||
static int zfs_condense_indirect_commit_entry_delay_ms = 0;
|
static uint_t zfs_condense_indirect_commit_entry_delay_ms = 0;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* If an indirect split block contains more than this many possible unique
|
* If an indirect split block contains more than this many possible unique
|
||||||
@ -214,7 +214,7 @@ static int zfs_condense_indirect_commit_entry_delay_ms = 0;
|
|||||||
* copies to participate fairly in the reconstruction when all combinations
|
* copies to participate fairly in the reconstruction when all combinations
|
||||||
* cannot be checked and prevents repeated use of one bad copy.
|
* cannot be checked and prevents repeated use of one bad copy.
|
||||||
*/
|
*/
|
||||||
int zfs_reconstruct_indirect_combinations_max = 4096;
|
uint_t zfs_reconstruct_indirect_combinations_max = 4096;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Enable to simulate damaged segments and validate reconstruction. This
|
* Enable to simulate damaged segments and validate reconstruction. This
|
||||||
@ -1886,7 +1886,7 @@ EXPORT_SYMBOL(vdev_obsolete_sm_object);
|
|||||||
ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, indirect_vdevs_enable, INT,
|
ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, indirect_vdevs_enable, INT,
|
||||||
ZMOD_RW, "Whether to attempt condensing indirect vdev mappings");
|
ZMOD_RW, "Whether to attempt condensing indirect vdev mappings");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, indirect_obsolete_pct, INT,
|
ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, indirect_obsolete_pct, UINT,
|
||||||
ZMOD_RW,
|
ZMOD_RW,
|
||||||
"Minimum obsolete percent of bytes in the mapping "
|
"Minimum obsolete percent of bytes in the mapping "
|
||||||
"to attempt condensing");
|
"to attempt condensing");
|
||||||
@ -1900,11 +1900,11 @@ ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, max_obsolete_bytes, ULONG,
|
|||||||
"Minimum size obsolete spacemap to attempt condensing");
|
"Minimum size obsolete spacemap to attempt condensing");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, indirect_commit_entry_delay_ms,
|
ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, indirect_commit_entry_delay_ms,
|
||||||
INT, ZMOD_RW,
|
UINT, ZMOD_RW,
|
||||||
"Used by tests to ensure certain actions happen in the middle of a "
|
"Used by tests to ensure certain actions happen in the middle of a "
|
||||||
"condense. A maximum value of 1 should be sufficient.");
|
"condense. A maximum value of 1 should be sufficient.");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_reconstruct, zfs_reconstruct_, indirect_combinations_max,
|
ZFS_MODULE_PARAM(zfs_reconstruct, zfs_reconstruct_, indirect_combinations_max,
|
||||||
INT, ZMOD_RW,
|
UINT, ZMOD_RW,
|
||||||
"Maximum number of combinations when reconstructing split segments");
|
"Maximum number of combinations when reconstructing split segments");
|
||||||
/* END CSTYLED */
|
/* END CSTYLED */
|
||||||
|
@ -121,7 +121,7 @@
|
|||||||
* The maximum number of i/os active to each device. Ideally, this will be >=
|
* The maximum number of i/os active to each device. Ideally, this will be >=
|
||||||
* the sum of each queue's max_active.
|
* the sum of each queue's max_active.
|
||||||
*/
|
*/
|
||||||
uint32_t zfs_vdev_max_active = 1000;
|
uint_t zfs_vdev_max_active = 1000;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Per-queue limits on the number of i/os active to each device. If the
|
* Per-queue limits on the number of i/os active to each device. If the
|
||||||
@ -141,24 +141,24 @@ uint32_t zfs_vdev_max_active = 1000;
|
|||||||
* more quickly, but reads and writes to have higher latency and lower
|
* more quickly, but reads and writes to have higher latency and lower
|
||||||
* throughput.
|
* throughput.
|
||||||
*/
|
*/
|
||||||
static uint32_t zfs_vdev_sync_read_min_active = 10;
|
static uint_t zfs_vdev_sync_read_min_active = 10;
|
||||||
static uint32_t zfs_vdev_sync_read_max_active = 10;
|
static uint_t zfs_vdev_sync_read_max_active = 10;
|
||||||
static uint32_t zfs_vdev_sync_write_min_active = 10;
|
static uint_t zfs_vdev_sync_write_min_active = 10;
|
||||||
static uint32_t zfs_vdev_sync_write_max_active = 10;
|
static uint_t zfs_vdev_sync_write_max_active = 10;
|
||||||
static uint32_t zfs_vdev_async_read_min_active = 1;
|
static uint_t zfs_vdev_async_read_min_active = 1;
|
||||||
/* */ uint32_t zfs_vdev_async_read_max_active = 3;
|
/* */ uint_t zfs_vdev_async_read_max_active = 3;
|
||||||
static uint32_t zfs_vdev_async_write_min_active = 2;
|
static uint_t zfs_vdev_async_write_min_active = 2;
|
||||||
/* */ uint32_t zfs_vdev_async_write_max_active = 10;
|
/* */ uint_t zfs_vdev_async_write_max_active = 10;
|
||||||
static uint32_t zfs_vdev_scrub_min_active = 1;
|
static uint_t zfs_vdev_scrub_min_active = 1;
|
||||||
static uint32_t zfs_vdev_scrub_max_active = 3;
|
static uint_t zfs_vdev_scrub_max_active = 3;
|
||||||
static uint32_t zfs_vdev_removal_min_active = 1;
|
static uint_t zfs_vdev_removal_min_active = 1;
|
||||||
static uint32_t zfs_vdev_removal_max_active = 2;
|
static uint_t zfs_vdev_removal_max_active = 2;
|
||||||
static uint32_t zfs_vdev_initializing_min_active = 1;
|
static uint_t zfs_vdev_initializing_min_active = 1;
|
||||||
static uint32_t zfs_vdev_initializing_max_active = 1;
|
static uint_t zfs_vdev_initializing_max_active = 1;
|
||||||
static uint32_t zfs_vdev_trim_min_active = 1;
|
static uint_t zfs_vdev_trim_min_active = 1;
|
||||||
static uint32_t zfs_vdev_trim_max_active = 2;
|
static uint_t zfs_vdev_trim_max_active = 2;
|
||||||
static uint32_t zfs_vdev_rebuild_min_active = 1;
|
static uint_t zfs_vdev_rebuild_min_active = 1;
|
||||||
static uint32_t zfs_vdev_rebuild_max_active = 3;
|
static uint_t zfs_vdev_rebuild_max_active = 3;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* When the pool has less than zfs_vdev_async_write_active_min_dirty_percent
|
* When the pool has less than zfs_vdev_async_write_active_min_dirty_percent
|
||||||
@ -167,8 +167,8 @@ static uint32_t zfs_vdev_rebuild_max_active = 3;
|
|||||||
* zfs_vdev_async_write_max_active. The value is linearly interpolated
|
* zfs_vdev_async_write_max_active. The value is linearly interpolated
|
||||||
* between min and max.
|
* between min and max.
|
||||||
*/
|
*/
|
||||||
int zfs_vdev_async_write_active_min_dirty_percent = 30;
|
uint_t zfs_vdev_async_write_active_min_dirty_percent = 30;
|
||||||
int zfs_vdev_async_write_active_max_dirty_percent = 60;
|
uint_t zfs_vdev_async_write_active_max_dirty_percent = 60;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* For non-interactive I/O (scrub, resilver, removal, initialize and rebuild),
|
* For non-interactive I/O (scrub, resilver, removal, initialize and rebuild),
|
||||||
@ -198,10 +198,10 @@ static uint_t zfs_vdev_nia_credit = 5;
|
|||||||
* we include spans of optional I/Os to aid aggregation at the disk even when
|
* we include spans of optional I/Os to aid aggregation at the disk even when
|
||||||
* they aren't able to help us aggregate at this level.
|
* they aren't able to help us aggregate at this level.
|
||||||
*/
|
*/
|
||||||
static int zfs_vdev_aggregation_limit = 1 << 20;
|
static uint_t zfs_vdev_aggregation_limit = 1 << 20;
|
||||||
static int zfs_vdev_aggregation_limit_non_rotating = SPA_OLD_MAXBLOCKSIZE;
|
static uint_t zfs_vdev_aggregation_limit_non_rotating = SPA_OLD_MAXBLOCKSIZE;
|
||||||
static int zfs_vdev_read_gap_limit = 32 << 10;
|
static uint_t zfs_vdev_read_gap_limit = 32 << 10;
|
||||||
static int zfs_vdev_write_gap_limit = 4 << 10;
|
static uint_t zfs_vdev_write_gap_limit = 4 << 10;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Define the queue depth percentage for each top-level. This percentage is
|
* Define the queue depth percentage for each top-level. This percentage is
|
||||||
@ -214,9 +214,9 @@ static int zfs_vdev_write_gap_limit = 4 << 10;
|
|||||||
* to 30 allocations per device.
|
* to 30 allocations per device.
|
||||||
*/
|
*/
|
||||||
#ifdef _KERNEL
|
#ifdef _KERNEL
|
||||||
int zfs_vdev_queue_depth_pct = 1000;
|
uint_t zfs_vdev_queue_depth_pct = 1000;
|
||||||
#else
|
#else
|
||||||
int zfs_vdev_queue_depth_pct = 300;
|
uint_t zfs_vdev_queue_depth_pct = 300;
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -226,14 +226,14 @@ int zfs_vdev_queue_depth_pct = 300;
|
|||||||
* we assume that the average allocation size is 4k, so we need the queue depth
|
* we assume that the average allocation size is 4k, so we need the queue depth
|
||||||
* to be 32 per allocator to get good aggregation of sequential writes.
|
* to be 32 per allocator to get good aggregation of sequential writes.
|
||||||
*/
|
*/
|
||||||
int zfs_vdev_def_queue_depth = 32;
|
uint_t zfs_vdev_def_queue_depth = 32;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Allow TRIM I/Os to be aggregated. This should normally not be needed since
|
* Allow TRIM I/Os to be aggregated. This should normally not be needed since
|
||||||
* TRIM I/O for extents up to zfs_trim_extent_bytes_max (128M) can be submitted
|
* TRIM I/O for extents up to zfs_trim_extent_bytes_max (128M) can be submitted
|
||||||
* by the TRIM code in zfs_trim.c.
|
* by the TRIM code in zfs_trim.c.
|
||||||
*/
|
*/
|
||||||
static int zfs_vdev_aggregate_trim = 0;
|
static uint_t zfs_vdev_aggregate_trim = 0;
|
||||||
|
|
||||||
static int
|
static int
|
||||||
vdev_queue_offset_compare(const void *x1, const void *x2)
|
vdev_queue_offset_compare(const void *x1, const void *x2)
|
||||||
@ -281,7 +281,7 @@ vdev_queue_timestamp_compare(const void *x1, const void *x2)
|
|||||||
return (TREE_PCMP(z1, z2));
|
return (TREE_PCMP(z1, z2));
|
||||||
}
|
}
|
||||||
|
|
||||||
static int
|
static uint_t
|
||||||
vdev_queue_class_min_active(vdev_queue_t *vq, zio_priority_t p)
|
vdev_queue_class_min_active(vdev_queue_t *vq, zio_priority_t p)
|
||||||
{
|
{
|
||||||
switch (p) {
|
switch (p) {
|
||||||
@ -313,10 +313,10 @@ vdev_queue_class_min_active(vdev_queue_t *vq, zio_priority_t p)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static int
|
static uint_t
|
||||||
vdev_queue_max_async_writes(spa_t *spa)
|
vdev_queue_max_async_writes(spa_t *spa)
|
||||||
{
|
{
|
||||||
int writes;
|
uint_t writes;
|
||||||
uint64_t dirty = 0;
|
uint64_t dirty = 0;
|
||||||
dsl_pool_t *dp = spa_get_dsl(spa);
|
dsl_pool_t *dp = spa_get_dsl(spa);
|
||||||
uint64_t min_bytes = zfs_dirty_data_max *
|
uint64_t min_bytes = zfs_dirty_data_max *
|
||||||
@ -359,7 +359,7 @@ vdev_queue_max_async_writes(spa_t *spa)
|
|||||||
return (writes);
|
return (writes);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int
|
static uint_t
|
||||||
vdev_queue_class_max_active(spa_t *spa, vdev_queue_t *vq, zio_priority_t p)
|
vdev_queue_class_max_active(spa_t *spa, vdev_queue_t *vq, zio_priority_t p)
|
||||||
{
|
{
|
||||||
switch (p) {
|
switch (p) {
|
||||||
@ -1031,89 +1031,89 @@ vdev_queue_last_offset(vdev_t *vd)
|
|||||||
return (vd->vdev_queue.vq_last_offset);
|
return (vd->vdev_queue.vq_last_offset);
|
||||||
}
|
}
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, aggregation_limit, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, aggregation_limit, UINT, ZMOD_RW,
|
||||||
"Max vdev I/O aggregation size");
|
"Max vdev I/O aggregation size");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, aggregation_limit_non_rotating, INT,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, aggregation_limit_non_rotating, UINT,
|
||||||
ZMOD_RW, "Max vdev I/O aggregation size for non-rotating media");
|
ZMOD_RW, "Max vdev I/O aggregation size for non-rotating media");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, aggregate_trim, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, aggregate_trim, UINT, ZMOD_RW,
|
||||||
"Allow TRIM I/O to be aggregated");
|
"Allow TRIM I/O to be aggregated");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, read_gap_limit, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, read_gap_limit, UINT, ZMOD_RW,
|
||||||
"Aggregate read I/O over gap");
|
"Aggregate read I/O over gap");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, write_gap_limit, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, write_gap_limit, UINT, ZMOD_RW,
|
||||||
"Aggregate write I/O over gap");
|
"Aggregate write I/O over gap");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, max_active, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, max_active, UINT, ZMOD_RW,
|
||||||
"Maximum number of active I/Os per vdev");
|
"Maximum number of active I/Os per vdev");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_write_active_max_dirty_percent, INT,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_write_active_max_dirty_percent,
|
||||||
ZMOD_RW, "Async write concurrency max threshold");
|
UINT, ZMOD_RW, "Async write concurrency max threshold");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_write_active_min_dirty_percent, INT,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_write_active_min_dirty_percent,
|
||||||
ZMOD_RW, "Async write concurrency min threshold");
|
UINT, ZMOD_RW, "Async write concurrency min threshold");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_read_max_active, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_read_max_active, UINT, ZMOD_RW,
|
||||||
"Max active async read I/Os per vdev");
|
"Max active async read I/Os per vdev");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_read_min_active, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_read_min_active, UINT, ZMOD_RW,
|
||||||
"Min active async read I/Os per vdev");
|
"Min active async read I/Os per vdev");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_write_max_active, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_write_max_active, UINT, ZMOD_RW,
|
||||||
"Max active async write I/Os per vdev");
|
"Max active async write I/Os per vdev");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_write_min_active, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_write_min_active, UINT, ZMOD_RW,
|
||||||
"Min active async write I/Os per vdev");
|
"Min active async write I/Os per vdev");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, initializing_max_active, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, initializing_max_active, UINT, ZMOD_RW,
|
||||||
"Max active initializing I/Os per vdev");
|
"Max active initializing I/Os per vdev");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, initializing_min_active, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, initializing_min_active, UINT, ZMOD_RW,
|
||||||
"Min active initializing I/Os per vdev");
|
"Min active initializing I/Os per vdev");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, removal_max_active, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, removal_max_active, UINT, ZMOD_RW,
|
||||||
"Max active removal I/Os per vdev");
|
"Max active removal I/Os per vdev");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, removal_min_active, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, removal_min_active, UINT, ZMOD_RW,
|
||||||
"Min active removal I/Os per vdev");
|
"Min active removal I/Os per vdev");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, scrub_max_active, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, scrub_max_active, UINT, ZMOD_RW,
|
||||||
"Max active scrub I/Os per vdev");
|
"Max active scrub I/Os per vdev");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, scrub_min_active, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, scrub_min_active, UINT, ZMOD_RW,
|
||||||
"Min active scrub I/Os per vdev");
|
"Min active scrub I/Os per vdev");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, sync_read_max_active, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, sync_read_max_active, UINT, ZMOD_RW,
|
||||||
"Max active sync read I/Os per vdev");
|
"Max active sync read I/Os per vdev");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, sync_read_min_active, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, sync_read_min_active, UINT, ZMOD_RW,
|
||||||
"Min active sync read I/Os per vdev");
|
"Min active sync read I/Os per vdev");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, sync_write_max_active, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, sync_write_max_active, UINT, ZMOD_RW,
|
||||||
"Max active sync write I/Os per vdev");
|
"Max active sync write I/Os per vdev");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, sync_write_min_active, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, sync_write_min_active, UINT, ZMOD_RW,
|
||||||
"Min active sync write I/Os per vdev");
|
"Min active sync write I/Os per vdev");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, trim_max_active, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, trim_max_active, UINT, ZMOD_RW,
|
||||||
"Max active trim/discard I/Os per vdev");
|
"Max active trim/discard I/Os per vdev");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, trim_min_active, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, trim_min_active, UINT, ZMOD_RW,
|
||||||
"Min active trim/discard I/Os per vdev");
|
"Min active trim/discard I/Os per vdev");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, rebuild_max_active, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, rebuild_max_active, UINT, ZMOD_RW,
|
||||||
"Max active rebuild I/Os per vdev");
|
"Max active rebuild I/Os per vdev");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, rebuild_min_active, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, rebuild_min_active, UINT, ZMOD_RW,
|
||||||
"Min active rebuild I/Os per vdev");
|
"Min active rebuild I/Os per vdev");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, nia_credit, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, nia_credit, UINT, ZMOD_RW,
|
||||||
"Number of non-interactive I/Os to allow in sequence");
|
"Number of non-interactive I/Os to allow in sequence");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, nia_delay, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, nia_delay, UINT, ZMOD_RW,
|
||||||
"Number of non-interactive I/Os before _max_active");
|
"Number of non-interactive I/Os before _max_active");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, queue_depth_pct, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, queue_depth_pct, UINT, ZMOD_RW,
|
||||||
"Queue depth percentage for each top-level vdev");
|
"Queue depth percentage for each top-level vdev");
|
||||||
|
@ -94,7 +94,7 @@ typedef struct vdev_copy_arg {
|
|||||||
* doing a device removal. This determines how much i/o we can have
|
* doing a device removal. This determines how much i/o we can have
|
||||||
* in flight concurrently.
|
* in flight concurrently.
|
||||||
*/
|
*/
|
||||||
static const int zfs_remove_max_copy_bytes = 64 * 1024 * 1024;
|
static const uint_t zfs_remove_max_copy_bytes = 64 * 1024 * 1024;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* The largest contiguous segment that we will attempt to allocate when
|
* The largest contiguous segment that we will attempt to allocate when
|
||||||
@ -104,7 +104,7 @@ static const int zfs_remove_max_copy_bytes = 64 * 1024 * 1024;
|
|||||||
*
|
*
|
||||||
* See also the accessor function spa_remove_max_segment().
|
* See also the accessor function spa_remove_max_segment().
|
||||||
*/
|
*/
|
||||||
int zfs_remove_max_segment = SPA_MAXBLOCKSIZE;
|
uint_t zfs_remove_max_segment = SPA_MAXBLOCKSIZE;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Ignore hard IO errors during device removal. When set if a device
|
* Ignore hard IO errors during device removal. When set if a device
|
||||||
@ -130,7 +130,7 @@ static int zfs_removal_ignore_errors = 0;
|
|||||||
* - we'll do larger allocations, which may fail and fall back on smaller
|
* - we'll do larger allocations, which may fail and fall back on smaller
|
||||||
* allocations
|
* allocations
|
||||||
*/
|
*/
|
||||||
int vdev_removal_max_span = 32 * 1024;
|
uint_t vdev_removal_max_span = 32 * 1024;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* This is used by the test suite so that it can ensure that certain
|
* This is used by the test suite so that it can ensure that certain
|
||||||
@ -2545,14 +2545,14 @@ spa_removal_get_stats(spa_t *spa, pool_removal_stat_t *prs)
|
|||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_, removal_ignore_errors, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_, removal_ignore_errors, INT, ZMOD_RW,
|
||||||
"Ignore hard IO errors when removing device");
|
"Ignore hard IO errors when removing device");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_, remove_max_segment, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_, remove_max_segment, UINT, ZMOD_RW,
|
||||||
"Largest contiguous segment to allocate when removing device");
|
"Largest contiguous segment to allocate when removing device");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, vdev_, removal_max_span, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_vdev, vdev_, removal_max_span, UINT, ZMOD_RW,
|
||||||
"Largest span of free chunks a remap segment can span");
|
"Largest span of free chunks a remap segment can span");
|
||||||
|
|
||||||
/* BEGIN CSTYLED */
|
/* BEGIN CSTYLED */
|
||||||
ZFS_MODULE_PARAM(zfs_vdev, zfs_, removal_suspend_progress, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_vdev, zfs_, removal_suspend_progress, UINT, ZMOD_RW,
|
||||||
"Pause device removal after this many bytes are copied "
|
"Pause device removal after this many bytes are copied "
|
||||||
"(debug use only - causes removal to hang)");
|
"(debug use only - causes removal to hang)");
|
||||||
/* END CSTYLED */
|
/* END CSTYLED */
|
||||||
|
@ -6355,7 +6355,7 @@ zfs_ioc_events_next(zfs_cmd_t *zc)
|
|||||||
static int
|
static int
|
||||||
zfs_ioc_events_clear(zfs_cmd_t *zc)
|
zfs_ioc_events_clear(zfs_cmd_t *zc)
|
||||||
{
|
{
|
||||||
int count;
|
uint_t count;
|
||||||
|
|
||||||
zfs_zevent_drain_all(&count);
|
zfs_zevent_drain_all(&count);
|
||||||
zc->zc_cookie = count;
|
zc->zc_cookie = count;
|
||||||
|
@ -90,7 +90,7 @@
|
|||||||
* committed to stable storage. Please refer to the zil_commit_waiter()
|
* committed to stable storage. Please refer to the zil_commit_waiter()
|
||||||
* function (and the comments within it) for more details.
|
* function (and the comments within it) for more details.
|
||||||
*/
|
*/
|
||||||
static int zfs_commit_timeout_pct = 5;
|
static uint_t zfs_commit_timeout_pct = 5;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* See zil.h for more information about these fields.
|
* See zil.h for more information about these fields.
|
||||||
@ -1642,7 +1642,7 @@ static const struct {
|
|||||||
* initialized. Otherwise this should not be used directly; see
|
* initialized. Otherwise this should not be used directly; see
|
||||||
* zl_max_block_size instead.
|
* zl_max_block_size instead.
|
||||||
*/
|
*/
|
||||||
static int zil_maxblocksize = SPA_OLD_MAXBLOCKSIZE;
|
static uint_t zil_maxblocksize = SPA_OLD_MAXBLOCKSIZE;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Start a log block write and advance to the next log block.
|
* Start a log block write and advance to the next log block.
|
||||||
@ -3936,7 +3936,7 @@ EXPORT_SYMBOL(zil_sums_init);
|
|||||||
EXPORT_SYMBOL(zil_sums_fini);
|
EXPORT_SYMBOL(zil_sums_fini);
|
||||||
EXPORT_SYMBOL(zil_kstat_values_update);
|
EXPORT_SYMBOL(zil_kstat_values_update);
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs, zfs_, commit_timeout_pct, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, commit_timeout_pct, UINT, ZMOD_RW,
|
||||||
"ZIL block open timeout percentage");
|
"ZIL block open timeout percentage");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_zil, zil_, replay_disable, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_zil, zil_, replay_disable, INT, ZMOD_RW,
|
||||||
@ -3948,5 +3948,5 @@ ZFS_MODULE_PARAM(zfs_zil, zil_, nocacheflush, INT, ZMOD_RW,
|
|||||||
ZFS_MODULE_PARAM(zfs_zil, zil_, slog_bulk, ULONG, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_zil, zil_, slog_bulk, ULONG, ZMOD_RW,
|
||||||
"Limit in bytes slog sync writes per commit");
|
"Limit in bytes slog sync writes per commit");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_zil, zil_, maxblocksize, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_zil, zil_, maxblocksize, UINT, ZMOD_RW,
|
||||||
"Limit in bytes of ZIL log block size");
|
"Limit in bytes of ZIL log block size");
|
||||||
|
@ -83,7 +83,7 @@ static uint64_t zio_buf_cache_frees[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
|
|||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* Mark IOs as "slow" if they take longer than 30 seconds */
|
/* Mark IOs as "slow" if they take longer than 30 seconds */
|
||||||
static int zio_slow_io_ms = (30 * MILLISEC);
|
static uint_t zio_slow_io_ms = (30 * MILLISEC);
|
||||||
|
|
||||||
#define BP_SPANB(indblkshift, level) \
|
#define BP_SPANB(indblkshift, level) \
|
||||||
(((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT)))
|
(((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT)))
|
||||||
@ -114,9 +114,15 @@ static int zio_slow_io_ms = (30 * MILLISEC);
|
|||||||
* fragmented systems, which may have very few free segments of this size,
|
* fragmented systems, which may have very few free segments of this size,
|
||||||
* and may need to load new metaslabs to satisfy 128K allocations.
|
* and may need to load new metaslabs to satisfy 128K allocations.
|
||||||
*/
|
*/
|
||||||
int zfs_sync_pass_deferred_free = 2; /* defer frees starting in this pass */
|
|
||||||
static int zfs_sync_pass_dont_compress = 8; /* don't compress s. i. t. p. */
|
/* defer frees starting in this pass */
|
||||||
static int zfs_sync_pass_rewrite = 2; /* rewrite new bps s. i. t. p. */
|
uint_t zfs_sync_pass_deferred_free = 2;
|
||||||
|
|
||||||
|
/* don't compress starting in this pass */
|
||||||
|
static uint_t zfs_sync_pass_dont_compress = 8;
|
||||||
|
|
||||||
|
/* rewrite new bps starting in this pass */
|
||||||
|
static uint_t zfs_sync_pass_rewrite = 2;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* An allocating zio is one that either currently has the DVA allocate
|
* An allocating zio is one that either currently has the DVA allocate
|
||||||
@ -1640,7 +1646,7 @@ zio_write_compress(zio_t *zio)
|
|||||||
blkptr_t *bp = zio->io_bp;
|
blkptr_t *bp = zio->io_bp;
|
||||||
uint64_t lsize = zio->io_lsize;
|
uint64_t lsize = zio->io_lsize;
|
||||||
uint64_t psize = zio->io_size;
|
uint64_t psize = zio->io_size;
|
||||||
int pass = 1;
|
uint32_t pass = 1;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* If our children haven't all reached the ready stage,
|
* If our children haven't all reached the ready stage,
|
||||||
@ -5051,13 +5057,13 @@ ZFS_MODULE_PARAM(zfs_zio, zio_, slow_io_ms, INT, ZMOD_RW,
|
|||||||
ZFS_MODULE_PARAM(zfs_zio, zio_, requeue_io_start_cut_in_line, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_zio, zio_, requeue_io_start_cut_in_line, INT, ZMOD_RW,
|
||||||
"Prioritize requeued I/O");
|
"Prioritize requeued I/O");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_deferred_free, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_deferred_free, UINT, ZMOD_RW,
|
||||||
"Defer frees starting in this pass");
|
"Defer frees starting in this pass");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_dont_compress, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_dont_compress, UINT, ZMOD_RW,
|
||||||
"Don't compress starting in this pass");
|
"Don't compress starting in this pass");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_rewrite, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_rewrite, UINT, ZMOD_RW,
|
||||||
"Rewrite new bps starting in this pass");
|
"Rewrite new bps starting in this pass");
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs_zio, zio_, dva_throttle_enabled, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs_zio, zio_, dva_throttle_enabled, INT, ZMOD_RW,
|
||||||
|
@ -50,7 +50,7 @@
|
|||||||
#include "lib/zstd.h"
|
#include "lib/zstd.h"
|
||||||
#include "lib/common/zstd_errors.h"
|
#include "lib/common/zstd_errors.h"
|
||||||
|
|
||||||
static int zstd_earlyabort_pass = 1;
|
static uint_t zstd_earlyabort_pass = 1;
|
||||||
static int zstd_cutoff_level = ZIO_ZSTD_LEVEL_3;
|
static int zstd_cutoff_level = ZIO_ZSTD_LEVEL_3;
|
||||||
static unsigned int zstd_abort_size = (128 * 1024);
|
static unsigned int zstd_abort_size = (128 * 1024);
|
||||||
|
|
||||||
@ -897,7 +897,7 @@ module_init(zstd_init);
|
|||||||
module_exit(zstd_fini);
|
module_exit(zstd_fini);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
ZFS_MODULE_PARAM(zfs, zstd_, earlyabort_pass, INT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zstd_, earlyabort_pass, UINT, ZMOD_RW,
|
||||||
"Enable early abort attempts when using zstd");
|
"Enable early abort attempts when using zstd");
|
||||||
ZFS_MODULE_PARAM(zfs, zstd_, abort_size, UINT, ZMOD_RW,
|
ZFS_MODULE_PARAM(zfs, zstd_, abort_size, UINT, ZMOD_RW,
|
||||||
"Minimal size of block to attempt early abort");
|
"Minimal size of block to attempt early abort");
|
||||||
|
Loading…
Reference in New Issue
Block a user