diff --git a/cmd/zhack/zhack.c b/cmd/zhack/zhack.c index 96871d2aa..73ce888c0 100644 --- a/cmd/zhack/zhack.c +++ b/cmd/zhack/zhack.c @@ -53,7 +53,6 @@ #include #include -const char cmdname[] = "zhack"; static importargs_t g_importargs; static char *g_pool; static boolean_t g_readonly; @@ -62,9 +61,9 @@ static __attribute__((noreturn)) void usage(void) { (void) fprintf(stderr, - "Usage: %s [-c cachefile] [-d dir] ...\n" + "Usage: zhack [-c cachefile] [-d dir] ...\n" "where is one of the following:\n" - "\n", cmdname); + "\n"); (void) fprintf(stderr, " feature stat \n" @@ -99,10 +98,10 @@ fatal(spa_t *spa, void *tag, const char *fmt, ...) } va_start(ap, fmt); - (void) fprintf(stderr, "%s: ", cmdname); + (void) fputs("zhack: ", stderr); (void) vfprintf(stderr, fmt, ap); va_end(ap); - (void) fprintf(stderr, "\n"); + (void) fputc('\n', stderr); exit(1); } @@ -277,7 +276,7 @@ zhack_do_feature_enable(int argc, char **argv) spa_t *spa; objset_t *mos; zfeature_info_t feature; - spa_feature_t nodeps[] = { SPA_FEATURE_NONE }; + const spa_feature_t nodeps[] = { SPA_FEATURE_NONE }; /* * Features are not added to the pool's label until their refcounts @@ -374,7 +373,7 @@ zhack_do_feature_ref(int argc, char **argv) spa_t *spa; objset_t *mos; zfeature_info_t feature; - spa_feature_t nodeps[] = { SPA_FEATURE_NONE }; + const spa_feature_t nodeps[] = { SPA_FEATURE_NONE }; /* * fi_desc does not matter here because it was written to disk diff --git a/cmd/ztest/ztest.c b/cmd/ztest/ztest.c index a99f60964..bb2f14298 100644 --- a/cmd/ztest/ztest.c +++ b/cmd/ztest/ztest.c @@ -253,7 +253,7 @@ extern uint64_t metaslab_force_ganging; extern uint64_t metaslab_df_alloc_threshold; extern unsigned long zfs_deadman_synctime_ms; extern int metaslab_preload_limit; -extern boolean_t zfs_compressed_arc_enabled; +extern int zfs_compressed_arc_enabled; extern int zfs_abd_scatter_enabled; extern int dmu_object_alloc_chunk_shift; extern boolean_t zfs_force_some_double_word_sm_entries; diff --git a/include/libzfs.h b/include/libzfs.h index e135ae2ee..fe70a5b3a 100644 --- a/include/libzfs.h +++ b/include/libzfs.h @@ -452,7 +452,7 @@ _LIBZFS_H void zpool_print_unsup_feat(nvlist_t *config); */ struct zfs_cmd; -_LIBZFS_H const char *zfs_history_event_names[]; +_LIBZFS_H const char *const zfs_history_event_names[]; typedef enum { VDEV_NAME_PATH = 1 << 0, diff --git a/include/os/freebsd/zfs/sys/freebsd_crypto.h b/include/os/freebsd/zfs/sys/freebsd_crypto.h index e240f5b0d..a3ed41826 100644 --- a/include/os/freebsd/zfs/sys/freebsd_crypto.h +++ b/include/os/freebsd/zfs/sys/freebsd_crypto.h @@ -88,11 +88,11 @@ void crypto_mac_final(struct hmac_ctx *ctx, void *out_data, size_t out_data_size); int freebsd_crypt_newsession(freebsd_crypt_session_t *sessp, - struct zio_crypt_info *, crypto_key_t *); + const struct zio_crypt_info *, crypto_key_t *); void freebsd_crypt_freesession(freebsd_crypt_session_t *sessp); int freebsd_crypt_uio(boolean_t, freebsd_crypt_session_t *, - struct zio_crypt_info *, zfs_uio_t *, crypto_key_t *, uint8_t *, + const struct zio_crypt_info *, zfs_uio_t *, crypto_key_t *, uint8_t *, size_t, size_t); #endif /* _ZFS_FREEBSD_CRYPTO_H */ diff --git a/include/os/freebsd/zfs/sys/zfs_context_os.h b/include/os/freebsd/zfs/sys/zfs_context_os.h index a32eb52c5..867199501 100644 --- a/include/os/freebsd/zfs/sys/zfs_context_os.h +++ b/include/os/freebsd/zfs/sys/zfs_context_os.h @@ -48,7 +48,7 @@ #define cond_resched() kern_yield(PRI_USER) #define taskq_create_sysdc(a, b, d, e, p, dc, f) \ - (taskq_create(a, b, maxclsyspri, d, e, 
f)) + ((void) sizeof (dc), taskq_create(a, b, maxclsyspri, d, e, f)) #define tsd_create(keyp, destructor) do { \ *(keyp) = osd_thread_register((destructor)); \ diff --git a/include/os/freebsd/zfs/sys/zfs_znode_impl.h b/include/os/freebsd/zfs/sys/zfs_znode_impl.h index 4456046e6..096c9e16d 100644 --- a/include/os/freebsd/zfs/sys/zfs_znode_impl.h +++ b/include/os/freebsd/zfs/sys/zfs_znode_impl.h @@ -175,8 +175,7 @@ extern void zfs_tstamp_update_setup_ext(struct znode *, uint_t, uint64_t [2], uint64_t [2], boolean_t have_tx); extern void zfs_znode_free(struct znode *); -extern zil_replay_func_t *zfs_replay_vector[TX_MAX_TYPE]; -extern int zfsfstype; +extern zil_replay_func_t *const zfs_replay_vector[TX_MAX_TYPE]; extern int zfs_znode_parent_and_name(struct znode *zp, struct znode **dzpp, char *buf); diff --git a/include/os/linux/spl/rpc/xdr.h b/include/os/linux/spl/rpc/xdr.h index c62080a11..1c03a42a9 100644 --- a/include/os/linux/spl/rpc/xdr.h +++ b/include/os/linux/spl/rpc/xdr.h @@ -36,7 +36,8 @@ enum xdr_op { struct xdr_ops; typedef struct { - struct xdr_ops *x_ops; /* Let caller know xdrmem_create() succeeds */ + const struct xdr_ops *x_ops; + /* Let caller know xdrmem_create() succeeds */ caddr_t x_addr; /* Current buffer addr */ caddr_t x_addr_end; /* End of the buffer */ enum xdr_op x_op; /* Stream direction */ diff --git a/include/os/linux/spl/sys/sysmacros.h b/include/os/linux/spl/sys/sysmacros.h index 98d1ab1d7..be1f77e43 100644 --- a/include/os/linux/spl/sys/sysmacros.h +++ b/include/os/linux/spl/sys/sysmacros.h @@ -113,7 +113,6 @@ #endif /* Missing globals */ -extern char spl_gitrev[64]; extern unsigned long spl_hostid; /* Missing misc functions */ diff --git a/include/os/linux/spl/sys/taskq.h b/include/os/linux/spl/sys/taskq.h index b50175a10..2a6cd8283 100644 --- a/include/os/linux/spl/sys/taskq.h +++ b/include/os/linux/spl/sys/taskq.h @@ -160,7 +160,8 @@ extern taskq_t *taskq_of_curthread(void); #define taskq_create_proc(name, nthreads, pri, min, max, proc, flags) \ taskq_create(name, nthreads, pri, min, max, flags) #define taskq_create_sysdc(name, nthreads, min, max, proc, dc, flags) \ - taskq_create(name, nthreads, maxclsyspri, min, max, flags) + ((void) sizeof (dc), \ + taskq_create(name, nthreads, maxclsyspri, min, max, flags)) int spl_taskq_init(void); void spl_taskq_fini(void); diff --git a/include/os/linux/zfs/sys/zfs_znode_impl.h b/include/os/linux/zfs/sys/zfs_znode_impl.h index de46fc8f2..2e1bd8576 100644 --- a/include/os/linux/zfs/sys/zfs_znode_impl.h +++ b/include/os/linux/zfs/sys/zfs_znode_impl.h @@ -173,8 +173,7 @@ extern caddr_t zfs_map_page(page_t *, enum seg_rw); extern void zfs_unmap_page(page_t *, caddr_t); #endif /* HAVE_UIO_RW */ -extern zil_replay_func_t *zfs_replay_vector[TX_MAX_TYPE]; -extern int zfsfstype; +extern zil_replay_func_t *const zfs_replay_vector[TX_MAX_TYPE]; #ifdef __cplusplus } diff --git a/include/os/linux/zfs/sys/zpl.h b/include/os/linux/zfs/sys/zpl.h index ff86e027b..afb16e5c7 100644 --- a/include/os/linux/zfs/sys/zpl.h +++ b/include/os/linux/zfs/sys/zpl.h @@ -99,7 +99,6 @@ extern const struct inode_operations zpl_ops_root; extern const struct file_operations zpl_fops_snapdir; extern const struct inode_operations zpl_ops_snapdir; -extern const struct dentry_operations zpl_dops_snapdirs; extern const struct file_operations zpl_fops_shares; extern const struct inode_operations zpl_ops_shares; diff --git a/include/sys/crypto/api.h b/include/sys/crypto/api.h index 8aecfeaff..17c9a6459 100644 --- a/include/sys/crypto/api.h +++ 
b/include/sys/crypto/api.h @@ -58,7 +58,7 @@ typedef struct { */ #define CRYPTO_MECH_INVALID ((uint64_t)-1) -extern crypto_mech_type_t crypto_mech2id(char *name); +extern crypto_mech_type_t crypto_mech2id(const char *name); /* * Create and destroy context templates. diff --git a/include/sys/dmu.h b/include/sys/dmu.h index b9e6731bd..1ddff0d4e 100644 --- a/include/sys/dmu.h +++ b/include/sys/dmu.h @@ -862,7 +862,6 @@ int dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset, int dmu_assign_arcbuf_by_dbuf(dmu_buf_t *handle, uint64_t offset, struct arc_buf *buf, dmu_tx_t *tx); #define dmu_assign_arcbuf dmu_assign_arcbuf_by_dbuf -extern int zfs_prefetch_disable; extern int zfs_max_recordsize; /* diff --git a/include/sys/dmu_recv.h b/include/sys/dmu_recv.h index 7188b2a02..1fdb986e2 100644 --- a/include/sys/dmu_recv.h +++ b/include/sys/dmu_recv.h @@ -35,7 +35,7 @@ #include #include -extern const char *recv_clone_name; +extern const char *const recv_clone_name; typedef struct dmu_recv_cookie { struct dsl_dataset *drc_ds; diff --git a/include/sys/dsl_dataset.h b/include/sys/dsl_dataset.h index 3c9199b86..29bbf7e18 100644 --- a/include/sys/dsl_dataset.h +++ b/include/sys/dsl_dataset.h @@ -45,7 +45,6 @@ extern "C" { #endif -extern int zfs_allow_redacted_dataset_mount; struct dsl_dataset; struct dsl_dir; struct dsl_pool; @@ -441,8 +440,8 @@ int dsl_dataset_set_compression(const char *dsname, zprop_source_t source, boolean_t dsl_dataset_is_before(dsl_dataset_t *later, dsl_dataset_t *earlier, uint64_t earlier_txg); -void dsl_dataset_long_hold(dsl_dataset_t *ds, void *tag); -void dsl_dataset_long_rele(dsl_dataset_t *ds, void *tag); +void dsl_dataset_long_hold(dsl_dataset_t *ds, const void *tag); +void dsl_dataset_long_rele(dsl_dataset_t *ds, const void *tag); boolean_t dsl_dataset_long_held(dsl_dataset_t *ds); int dsl_dataset_clone_swap_check_impl(dsl_dataset_t *clone, diff --git a/include/sys/dsl_pool.h b/include/sys/dsl_pool.h index 44900f8ce..0283a8c58 100644 --- a/include/sys/dsl_pool.h +++ b/include/sys/dsl_pool.h @@ -60,7 +60,6 @@ struct dsl_deadlist; extern unsigned long zfs_dirty_data_max; extern unsigned long zfs_dirty_data_max_max; extern unsigned long zfs_wrlog_data_max; -extern int zfs_dirty_data_sync_percent; extern int zfs_dirty_data_max_percent; extern int zfs_dirty_data_max_max_percent; extern int zfs_delay_min_dirty_percent; diff --git a/include/sys/fs/zfs.h b/include/sys/fs/zfs.h index 287b3beae..6bc82198d 100644 --- a/include/sys/fs/zfs.h +++ b/include/sys/fs/zfs.h @@ -206,7 +206,8 @@ typedef enum { ZFS_NUM_USERQUOTA_PROPS } zfs_userquota_prop_t; -_SYS_FS_ZFS_H const char *zfs_userquota_prop_prefixes[ZFS_NUM_USERQUOTA_PROPS]; +_SYS_FS_ZFS_H const char *const zfs_userquota_prop_prefixes[ + ZFS_NUM_USERQUOTA_PROPS]; /* * Pool properties are identified by these constants and must be added to the diff --git a/include/sys/metaslab.h b/include/sys/metaslab.h index ecff65f13..129a68be4 100644 --- a/include/sys/metaslab.h +++ b/include/sys/metaslab.h @@ -43,7 +43,7 @@ typedef struct metaslab_ops { } metaslab_ops_t; -extern metaslab_ops_t *zfs_metaslab_ops; +extern const metaslab_ops_t zfs_metaslab_ops; int metaslab_init(metaslab_group_t *, uint64_t, uint64_t, uint64_t, metaslab_t **); @@ -101,7 +101,7 @@ void metaslab_stat_fini(void); void metaslab_trace_init(zio_alloc_list_t *); void metaslab_trace_fini(zio_alloc_list_t *); -metaslab_class_t *metaslab_class_create(spa_t *, metaslab_ops_t *); +metaslab_class_t *metaslab_class_create(spa_t *, const metaslab_ops_t *); void 
metaslab_class_destroy(metaslab_class_t *); int metaslab_class_validate(metaslab_class_t *); void metaslab_class_histogram_verify(metaslab_class_t *); diff --git a/include/sys/metaslab_impl.h b/include/sys/metaslab_impl.h index adf4c03a2..3dbee4c17 100644 --- a/include/sys/metaslab_impl.h +++ b/include/sys/metaslab_impl.h @@ -179,7 +179,7 @@ typedef struct metaslab_class_allocator { struct metaslab_class { kmutex_t mc_lock; spa_t *mc_spa; - metaslab_ops_t *mc_ops; + const metaslab_ops_t *mc_ops; /* * Track the number of metaslab groups that have been initialized diff --git a/include/sys/nvpair.h b/include/sys/nvpair.h index 340b0d79c..81494b62d 100644 --- a/include/sys/nvpair.h +++ b/include/sys/nvpair.h @@ -135,12 +135,12 @@ struct nv_alloc_ops { void (*nv_ao_reset)(nv_alloc_t *); }; -_SYS_NVPAIR_H const nv_alloc_ops_t *nv_fixed_ops; -_SYS_NVPAIR_H nv_alloc_t *nv_alloc_nosleep; +_SYS_NVPAIR_H const nv_alloc_ops_t *const nv_fixed_ops; +_SYS_NVPAIR_H nv_alloc_t *const nv_alloc_nosleep; #if defined(_KERNEL) -_SYS_NVPAIR_H nv_alloc_t *nv_alloc_sleep; -_SYS_NVPAIR_H nv_alloc_t *nv_alloc_pushpage; +_SYS_NVPAIR_H nv_alloc_t *const nv_alloc_sleep; +_SYS_NVPAIR_H nv_alloc_t *const nv_alloc_pushpage; #endif _SYS_NVPAIR_H int nv_alloc_init(nv_alloc_t *, const nv_alloc_ops_t *, diff --git a/include/sys/range_tree.h b/include/sys/range_tree.h index fef3d4d7b..895d80257 100644 --- a/include/sys/range_tree.h +++ b/include/sys/range_tree.h @@ -63,7 +63,7 @@ typedef struct range_tree { */ uint8_t rt_shift; uint64_t rt_start; - range_tree_ops_t *rt_ops; + const range_tree_ops_t *rt_ops; /* rt_btree_compare should only be set if rt_arg is a b-tree */ void *rt_arg; @@ -278,11 +278,11 @@ rs_set_fill(range_seg_t *rs, range_tree_t *rt, uint64_t fill) typedef void range_tree_func_t(void *arg, uint64_t start, uint64_t size); -range_tree_t *range_tree_create_impl(range_tree_ops_t *ops, +range_tree_t *range_tree_create_impl(const range_tree_ops_t *ops, range_seg_type_t type, void *arg, uint64_t start, uint64_t shift, int (*zfs_btree_compare) (const void *, const void *), uint64_t gap); -range_tree_t *range_tree_create(range_tree_ops_t *ops, range_seg_type_t type, - void *arg, uint64_t start, uint64_t shift); +range_tree_t *range_tree_create(const range_tree_ops_t *ops, + range_seg_type_t type, void *arg, uint64_t start, uint64_t shift); void range_tree_destroy(range_tree_t *rt); boolean_t range_tree_contains(range_tree_t *rt, uint64_t start, uint64_t size); range_seg_t *range_tree_find(range_tree_t *rt, uint64_t start, uint64_t size); @@ -321,7 +321,7 @@ void rt_btree_destroy(range_tree_t *rt, void *arg); void rt_btree_add(range_tree_t *rt, range_seg_t *rs, void *arg); void rt_btree_remove(range_tree_t *rt, range_seg_t *rs, void *arg); void rt_btree_vacate(range_tree_t *rt, void *arg); -extern range_tree_ops_t rt_btree_ops; +extern const range_tree_ops_t rt_btree_ops; #ifdef __cplusplus } diff --git a/include/sys/sa.h b/include/sys/sa.h index 98eb8f9cd..32f6bd0cc 100644 --- a/include/sys/sa.h +++ b/include/sys/sa.h @@ -143,7 +143,8 @@ uint64_t sa_handle_object(sa_handle_t *); boolean_t sa_attr_would_spill(sa_handle_t *, sa_attr_type_t, int size); void sa_spill_rele(sa_handle_t *); void sa_register_update_callback(objset_t *, sa_update_cb_t *); -int sa_setup(objset_t *, uint64_t, sa_attr_reg_t *, int, sa_attr_type_t **); +int sa_setup(objset_t *, uint64_t, const sa_attr_reg_t *, int, + sa_attr_type_t **); void sa_tear_down(objset_t *); int sa_replace_all_by_template(sa_handle_t *, sa_bulk_attr_t *, int, dmu_tx_t *); 
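The taskq_create_sysdc() rewrites in this patch replace the macro body with the comma expression ((void) sizeof (dc), taskq_create(...)). Since sizeof does not evaluate its operand (VLAs aside), the otherwise-discarded dc argument is still parsed and type-checked but is guaranteed never to be evaluated, and the comma operator keeps the macro usable in expression position exactly like taskq_create() itself. A minimal standalone sketch of the idiom, with obj_t/make_obj() as illustrative stand-ins rather than ZFS APIs:

#include <assert.h>
#include <stdio.h>

typedef struct { int id; } obj_t;

static obj_t
make_obj(int id)
{
	obj_t o = { id };
	return (o);
}

/* Swallow `hint` unevaluated, the way taskq_create_sysdc() swallows dc. */
#define	make_obj_sysdc(id, hint) \
	((void) sizeof (hint), make_obj(id))

int
main(void)
{
	int n = 0;
	obj_t o = make_obj_sysdc(7, n++);

	assert(n == 0);		/* n++ was type-checked but never evaluated */
	(void) printf("%d\n", o.id);
	return (0);
}

The same trick would misbehave if the consumed argument could have a variably-modified type, but for the integer duty-cycle argument here the expression compiles away entirely.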
diff --git a/include/sys/spa.h b/include/sys/spa.h index 2e365eabe..896b0f956 100644 --- a/include/sys/spa.h +++ b/include/sys/spa.h @@ -1208,6 +1208,9 @@ extern unsigned long zfs_deadman_synctime_ms; extern unsigned long zfs_deadman_ziotime_ms; extern unsigned long zfs_deadman_checktime_ms; +extern kmem_cache_t *zio_buf_cache[]; +extern kmem_cache_t *zio_data_buf_cache[]; + #ifdef __cplusplus } #endif diff --git a/include/sys/spa_impl.h b/include/sys/spa_impl.h index eee4783fe..9946c4e3c 100644 --- a/include/sys/spa_impl.h +++ b/include/sys/spa_impl.h @@ -444,7 +444,7 @@ struct spa { }; extern char *spa_config_path; -extern char *zfs_deadman_failmode; +extern const char *zfs_deadman_failmode; extern int spa_slop_shift; extern void spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q, task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent); diff --git a/include/sys/vdev_impl.h b/include/sys/vdev_impl.h index 86959725a..1567182a7 100644 --- a/include/sys/vdev_impl.h +++ b/include/sys/vdev_impl.h @@ -626,8 +626,6 @@ extern uint64_t vdev_get_ndisks(vdev_t *vd); * Global variables */ extern int zfs_vdev_standard_sm_blksz; -/* zdb uses this tunable, so it must be declared here to make lint happy. */ -extern int zfs_vdev_cache_size; /* * Functions from vdev_indirect.c diff --git a/include/sys/vdev_raidz_impl.h b/include/sys/vdev_raidz_impl.h index 908723da0..890e725e1 100644 --- a/include/sys/vdev_raidz_impl.h +++ b/include/sys/vdev_raidz_impl.h @@ -70,8 +70,8 @@ enum raidz_rec_op { RAIDZ_REC_NUM = 7 }; -extern const char *raidz_gen_name[RAIDZ_GEN_NUM]; -extern const char *raidz_rec_name[RAIDZ_REC_NUM]; +extern const char *const raidz_gen_name[RAIDZ_GEN_NUM]; +extern const char *const raidz_rec_name[RAIDZ_REC_NUM]; /* * Methods used to define raidz implementation diff --git a/include/sys/zfs_acl.h b/include/sys/zfs_acl.h index 010686a91..98387a49a 100644 --- a/include/sys/zfs_acl.h +++ b/include/sys/zfs_acl.h @@ -166,7 +166,7 @@ typedef struct zfs_acl { uint64_t z_hints; /* ACL hints (ZFS_INHERIT_ACE ...) 
*/ zfs_acl_node_t *z_curr_node; /* current node iterator is handling */ list_t z_acl; /* chunks of ACE data */ - acl_ops_t *z_ops; /* ACL operations */ + const acl_ops_t *z_ops; /* ACL operations */ } zfs_acl_t; typedef struct acl_locator_cb { diff --git a/include/sys/zfs_context.h b/include/sys/zfs_context.h index b1df9f3f3..6d1fd83df 100644 --- a/include/sys/zfs_context.h +++ b/include/sys/zfs_context.h @@ -492,7 +492,7 @@ extern taskq_t *taskq_create(const char *, int, pri_t, int, int, uint_t); #define taskq_create_proc(a, b, c, d, e, p, f) \ (taskq_create(a, b, c, d, e, f)) #define taskq_create_sysdc(a, b, d, e, p, dc, f) \ - (taskq_create(a, b, maxclsyspri, d, e, f)) + ((void) sizeof (dc), taskq_create(a, b, maxclsyspri, d, e, f)) extern taskqid_t taskq_dispatch(taskq_t *, task_func_t, void *, uint_t); extern taskqid_t taskq_dispatch_delay(taskq_t *, task_func_t, void *, uint_t, clock_t); diff --git a/include/sys/zfs_ioctl_impl.h b/include/sys/zfs_ioctl_impl.h index 3db67ae98..f9e4f6e6c 100644 --- a/include/sys/zfs_ioctl_impl.h +++ b/include/sys/zfs_ioctl_impl.h @@ -24,7 +24,6 @@ #define _ZFS_IOCTL_IMPL_H_ extern kmutex_t zfsdev_state_lock; -extern zfsdev_state_t *zfsdev_state_list; extern unsigned long zfs_max_nvlist_src_size; typedef int zfs_ioc_legacy_func_t(zfs_cmd_t *); diff --git a/include/sys/zfs_sa.h b/include/sys/zfs_sa.h index 1ca7ced33..a0c383807 100644 --- a/include/sys/zfs_sa.h +++ b/include/sys/zfs_sa.h @@ -91,8 +91,7 @@ typedef enum zpl_attr { #define SA_FLAGS_OFFSET 48 #define SA_PROJID_OFFSET 128 -extern sa_attr_reg_t zfs_attr_table[ZPL_END + 1]; -extern sa_attr_reg_t zfs_legacy_attr_table[ZPL_END + 1]; +extern const sa_attr_reg_t zfs_attr_table[ZPL_END + 1]; /* * This is a deprecated data structure that only exists for diff --git a/include/sys/zil.h b/include/sys/zil.h index cefbccb32..8e5a49da2 100644 --- a/include/sys/zil.h +++ b/include/sys/zil.h @@ -456,8 +456,6 @@ typedef struct zil_stats { kstat_named_t zil_itx_metaslab_slog_bytes; } zil_stats_t; -extern zil_stats_t zil_stats; - #define ZIL_STAT_INCR(stat, val) \ atomic_add_64(&zil_stats.stat.value.ui64, (val)); #define ZIL_STAT_BUMP(stat) \ @@ -485,7 +483,7 @@ extern zilog_t *zil_open(objset_t *os, zil_get_data_t *get_data); extern void zil_close(zilog_t *zilog); extern void zil_replay(objset_t *os, void *arg, - zil_replay_func_t *replay_func[TX_MAX_TYPE]); + zil_replay_func_t *const replay_func[TX_MAX_TYPE]); extern boolean_t zil_replaying(zilog_t *zilog, dmu_tx_t *tx); extern void zil_destroy(zilog_t *zilog, boolean_t keep_first); extern void zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx); diff --git a/include/sys/zio.h b/include/sys/zio.h index 121b58dea..07135d1e2 100644 --- a/include/sys/zio.h +++ b/include/sys/zio.h @@ -264,7 +264,7 @@ typedef void zio_done_func_t(zio_t *zio); extern int zio_exclude_metadata; extern int zio_dva_throttle_enabled; -extern const char *zio_type_name[ZIO_TYPES]; +extern const char *const zio_type_name[ZIO_TYPES]; /* * A bookmark is a four-tuple that uniquely diff --git a/include/sys/zio_crypt.h b/include/sys/zio_crypt.h index d7a63d695..f1edd76f0 100644 --- a/include/sys/zio_crypt.h +++ b/include/sys/zio_crypt.h @@ -70,7 +70,7 @@ typedef struct zio_crypt_info { char *ci_name; } zio_crypt_info_t; -extern zio_crypt_info_t zio_crypt_table[ZIO_CRYPT_FUNCTIONS]; +extern const zio_crypt_info_t zio_crypt_table[ZIO_CRYPT_FUNCTIONS]; /* in memory representation of an unwrapped key that is loaded into memory */ typedef struct zio_crypt_key { diff --git a/include/sys/zvol_impl.h 
b/include/sys/zvol_impl.h index 89fe59800..223393de7 100644 --- a/include/sys/zvol_impl.h +++ b/include/sys/zvol_impl.h @@ -61,12 +61,11 @@ typedef struct zvol_state { } zvol_state_t; -extern list_t zvol_state_list; extern krwlock_t zvol_state_lock; #define ZVOL_HT_SIZE 1024 extern struct hlist_head *zvol_htable; #define ZVOL_HT_HEAD(hash) (&zvol_htable[(hash) & (ZVOL_HT_SIZE-1)]) -extern zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE]; +extern zil_replay_func_t *const zvol_replay_vector[TX_MAX_TYPE]; extern unsigned int zvol_volmode; extern unsigned int zvol_inhibit_dev; diff --git a/include/zfs_comutil.h b/include/zfs_comutil.h index 3e4716224..ea2da4a15 100644 --- a/include/zfs_comutil.h +++ b/include/zfs_comutil.h @@ -43,7 +43,7 @@ _ZFS_COMUTIL_H int zfs_spa_version_map(int zpl_version); _ZFS_COMUTIL_H boolean_t zfs_dataset_name_hidden(const char *); #define ZFS_NUM_LEGACY_HISTORY_EVENTS 41 -_ZFS_COMUTIL_H const char * +_ZFS_COMUTIL_H const char *const zfs_history_event_names[ZFS_NUM_LEGACY_HISTORY_EVENTS]; #ifdef __cplusplus diff --git a/include/zfs_deleg.h b/include/zfs_deleg.h index 1ae08850f..77f64786b 100644 --- a/include/zfs_deleg.h +++ b/include/zfs_deleg.h @@ -85,7 +85,7 @@ typedef struct zfs_deleg_perm_tab { zfs_deleg_note_t z_note; } zfs_deleg_perm_tab_t; -_ZFS_DELEG_H zfs_deleg_perm_tab_t zfs_deleg_perm_tab[]; +_ZFS_DELEG_H const zfs_deleg_perm_tab_t zfs_deleg_perm_tab[]; _ZFS_DELEG_H int zfs_deleg_verify_nvlist(nvlist_t *nvlist); _ZFS_DELEG_H void zfs_deleg_whokey(char *attr, zfs_deleg_who_type_t type, diff --git a/lib/libnvpair/libnvpair.abi b/lib/libnvpair/libnvpair.abi index 4a001c3dc..01be5785a 100644 --- a/lib/libnvpair/libnvpair.abi +++ b/lib/libnvpair/libnvpair.abi [libabigail XML hunks unrecoverable: the ABI markup was stripped to bare +/- markers during extraction, so only the regenerated ABI dump's headers survive] diff --git a/lib/libnvpair/nvpair_alloc_system.c b/lib/libnvpair/nvpair_alloc_system.c index 9771f58f6..59806ea4d 100644 --- a/lib/libnvpair/nvpair_alloc_system.c +++ b/lib/libnvpair/nvpair_alloc_system.c @@ -51,15 +51,15 @@ static const nv_alloc_ops_t system_ops = { NULL /* nv_ao_reset() */ }; -nv_alloc_t nv_alloc_sleep_def = { +static nv_alloc_t nv_alloc_sleep_def = { &system_ops, (void *)KM_SLEEP }; -nv_alloc_t nv_alloc_nosleep_def = { +static nv_alloc_t nv_alloc_nosleep_def = { &system_ops, (void *)KM_NOSLEEP }; -nv_alloc_t *nv_alloc_sleep = &nv_alloc_sleep_def; -nv_alloc_t *nv_alloc_nosleep = &nv_alloc_nosleep_def; +nv_alloc_t *const nv_alloc_sleep = &nv_alloc_sleep_def; +nv_alloc_t *const nv_alloc_nosleep = &nv_alloc_nosleep_def; diff --git a/lib/libspl/include/umem.h b/lib/libspl/include/umem.h index 65f12595e..eee0dc975 100644 --- a/lib/libspl/include/umem.h +++ b/lib/libspl/include/umem.h @@ -129,9 +129,9 @@ umem_zalloc(size_t size, int flags) } static inline void -umem_free(void *ptr, size_t size __maybe_unused) +umem_free(const void *ptr, size_t size __maybe_unused) { - free(ptr); + free((void *)ptr); } static inline void diff --git a/lib/libzfs/libzfs.abi b/lib/libzfs/libzfs.abi index 1defa9c24..b682f4f70 100644 --- a/lib/libzfs/libzfs.abi +++ b/lib/libzfs/libzfs.abi [libabigail XML hunks unrecoverable: markup stripped during extraction]
diff --git a/module/avl/avl.c b/module/avl/avl.c index e53e2fa5d..c86dc9835 100644 --- a/module/avl/avl.c +++ b/module/avl/avl.c @@ -119,7 +119,7 @@ * additional memory reference. Since the translation arrays are both very * small the data should remain efficiently in cache. */ -static const int avl_child2balance[2] = {-1, 1}; +static const int avl_child2balance[] = {-1, 1}; static const int avl_balance2child[] = {0, 0, 1}; diff --git a/module/icp/algs/aes/aes_impl.c b/module/icp/algs/aes/aes_impl.c index a5b88b8aa..c238bee21 100644 --- a/module/icp/algs/aes/aes_impl.c +++ b/module/icp/algs/aes/aes_impl.c @@ -225,7 +225,7 @@ static aes_impl_ops_t aes_fastest_impl = { }; /* All compiled in implementations */ -const aes_impl_ops_t *aes_all_impl[] = { +static const aes_impl_ops_t *aes_all_impl[] = { &aes_generic_impl, #if defined(__x86_64) &aes_x86_64_impl, diff --git a/module/icp/algs/edonr/edonr_byteorder.h b/module/icp/algs/edonr/edonr_byteorder.h index 2b5d48287..cd35e5e4c 100644 --- a/module/icp/algs/edonr/edonr_byteorder.h +++ b/module/icp/algs/edonr/edonr_byteorder.h @@ -61,7 +61,7 @@ #endif /* !MACHINE_IS_BIG_ENDIAN && !MACHINE_IS_LITTLE_ENDIAN */ #if !defined(MACHINE_IS_BIG_ENDIAN) && !defined(MACHINE_IS_LITTLE_ENDIAN) -#error unknown machine byte sex +#error unknown machine byte order #endif #define BYTEORDER_INCLUDED diff --git a/module/icp/algs/modes/gcm.c b/module/icp/algs/modes/gcm.c index 3a1660d93..d9796cd0e 100644 --- a/module/icp/algs/modes/gcm.c +++ b/module/icp/algs/modes/gcm.c @@ -779,7 +779,7 @@ static gcm_impl_ops_t gcm_fastest_impl = { }; /* All compiled in implementations */ -const gcm_impl_ops_t *gcm_all_impl[] = { +static const gcm_impl_ops_t *gcm_all_impl[] = { &gcm_generic_impl, #if defined(__x86_64) && defined(HAVE_PCLMULQDQ) &gcm_pclmulqdq_impl, @@ -1046,9 +1046,6 @@ MODULE_PARM_DESC(icp_gcm_impl, "Select gcm implementation."); #define GCM_AVX_MAX_CHUNK_SIZE \ (((128*1024)/GCM_AVX_MIN_DECRYPT_BYTES) * GCM_AVX_MIN_DECRYPT_BYTES) -/* Get the chunk size module parameter. */ -#define GCM_CHUNK_SIZE_READ *(volatile uint32_t *) &gcm_avx_chunk_size - /* Clear the FPU registers since they hold sensitive internal state. */ #define clear_fpu_regs() clear_fpu_regs_avx() #define GHASH_AVX(ctx, in, len) \ @@ -1057,6 +1054,9 @@ MODULE_PARM_DESC(icp_gcm_impl, "Select gcm implementation."); #define gcm_incr_counter_block(ctx) gcm_incr_counter_block_by(ctx, 1) +/* Get the chunk size module parameter. */ +#define GCM_CHUNK_SIZE_READ *(volatile uint32_t *) &gcm_avx_chunk_size + /* * Module parameter: number of bytes to process at once while owning the FPU.
* Rounded down to the next GCM_AVX_MIN_DECRYPT_BYTES byte boundary and is diff --git a/module/icp/algs/sha2/sha2.c b/module/icp/algs/sha2/sha2.c index 75f6a3c1a..6f1e9b719 100644 --- a/module/icp/algs/sha2/sha2.c +++ b/module/icp/algs/sha2/sha2.c @@ -65,7 +65,7 @@ static void SHA256Transform(SHA2_CTX *, const uint8_t *); static void SHA512Transform(SHA2_CTX *, const uint8_t *); #endif /* __amd64 && _KERNEL */ -static uint8_t PADDING[128] = { 0x80, /* all zeros */ }; +static const uint8_t PADDING[128] = { 0x80, /* all zeros */ }; /* * The low-level checksum routines use a lot of stack space. On systems where diff --git a/module/icp/algs/skein/skein_impl.h b/module/icp/algs/skein/skein_impl.h index 2f6307fa7..1fa249e95 100644 --- a/module/icp/algs/skein/skein_impl.h +++ b/module/icp/algs/skein/skein_impl.h @@ -263,8 +263,6 @@ extern const uint64_t SKEIN_256_IV_128[]; extern const uint64_t SKEIN_256_IV_160[]; extern const uint64_t SKEIN_256_IV_224[]; extern const uint64_t SKEIN_256_IV_256[]; -extern const uint64_t SKEIN_512_IV_128[]; -extern const uint64_t SKEIN_512_IV_160[]; extern const uint64_t SKEIN_512_IV_224[]; extern const uint64_t SKEIN_512_IV_256[]; extern const uint64_t SKEIN_512_IV_384[]; diff --git a/module/icp/algs/skein/skein_iv.c b/module/icp/algs/skein/skein_iv.c index 140d38f76..84cefe454 100644 --- a/module/icp/algs/skein/skein_iv.c +++ b/module/icp/algs/skein/skein_iv.c @@ -52,30 +52,6 @@ const uint64_t SKEIN_256_IV_256[] = { MK_64(0x6A54E920, 0xFDE8DA69) }; -/* blkSize = 512 bits. hashSize = 128 bits */ -const uint64_t SKEIN_512_IV_128[] = { - MK_64(0xA8BC7BF3, 0x6FBF9F52), - MK_64(0x1E9872CE, 0xBD1AF0AA), - MK_64(0x309B1790, 0xB32190D3), - MK_64(0xBCFBB854, 0x3F94805C), - MK_64(0x0DA61BCD, 0x6E31B11B), - MK_64(0x1A18EBEA, 0xD46A32E3), - MK_64(0xA2CC5B18, 0xCE84AA82), - MK_64(0x6982AB28, 0x9D46982D) -}; - -/* blkSize = 512 bits. hashSize = 160 bits */ -const uint64_t SKEIN_512_IV_160[] = { - MK_64(0x28B81A2A, 0xE013BD91), - MK_64(0xC2F11668, 0xB5BDF78F), - MK_64(0x1760D8F3, 0xF6A56F12), - MK_64(0x4FB74758, 0x8239904F), - MK_64(0x21EDE07F, 0x7EAF5056), - MK_64(0xD908922E, 0x63ED70B8), - MK_64(0xB8EC76FF, 0xECCB52FA), - MK_64(0x01A47BB8, 0xA3F27A6E) -}; - /* blkSize = 512 bits. hashSize = 224 bits */ const uint64_t SKEIN_512_IV_224[] = { MK_64(0xCCD06162, 0x48677224), diff --git a/module/icp/api/kcf_miscapi.c b/module/icp/api/kcf_miscapi.c index c0f415b26..5c0d60391 100644 --- a/module/icp/api/kcf_miscapi.c +++ b/module/icp/api/kcf_miscapi.c @@ -62,7 +62,7 @@ static kcf_ntfy_elem_t *ntfy_list_head; * CRYPTO_MECH_INVALID otherwise. 
*/ crypto_mech_type_t -crypto_mech2id(char *mechname) +crypto_mech2id(const char *mechname) { return (crypto_mech2id_common(mechname, B_TRUE)); } diff --git a/module/icp/asm-x86_64/aes/aes_amd64.S b/module/icp/asm-x86_64/aes/aes_amd64.S index 272720e51..d16cc9996 100644 --- a/module/icp/asm-x86_64/aes/aes_amd64.S +++ b/module/icp/asm-x86_64/aes/aes_amd64.S @@ -693,7 +693,7 @@ aes_decrypt_amd64(const uint32_t rk[], int Nr, const uint32_t ct[4], * int aes_encrypt(const unsigned char *in, * unsigned char *out, const aes_encrypt_ctx cx[1])/ */ -.data +.section .rodata .align 64 enc_tab: enc_vals(u8) @@ -798,7 +798,7 @@ ENTRY_NP(aes_encrypt_amd64) * int aes_decrypt(const unsigned char *in, * unsigned char *out, const aes_encrypt_ctx cx[1])/ */ -.data +.section .rodata .align 64 dec_tab: dec_vals(v8) diff --git a/module/icp/asm-x86_64/modes/gcm_pclmulqdq.S b/module/icp/asm-x86_64/modes/gcm_pclmulqdq.S index 0e1e04b78..74eacbbe6 100644 --- a/module/icp/asm-x86_64/modes/gcm_pclmulqdq.S +++ b/module/icp/asm-x86_64/modes/gcm_pclmulqdq.S @@ -101,7 +101,7 @@ gcm_mul_pclmulqdq(uint64_t *x_in, uint64_t *y, uint64_t *res) { // static uint8_t byte_swap16_mask[] = { // 15, 14, 13, 12, 11, 10, 9, 8, 7, 6 ,5, 4, 3, 2, 1, 0 }; -.data +.section .rodata .align XMM_ALIGN .Lbyte_swap16_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 diff --git a/module/icp/asm-x86_64/sha2/sha256_impl.S b/module/icp/asm-x86_64/sha2/sha256_impl.S index ccd4a3e6b..951297c72 100644 --- a/module/icp/asm-x86_64/sha2/sha256_impl.S +++ b/module/icp/asm-x86_64/sha2/sha256_impl.S @@ -2062,7 +2062,7 @@ ENTRY_NP(SHA256TransformBlocks) .cfi_endproc SET_SIZE(SHA256TransformBlocks) -.data +.section .rodata .align 64 .type K256,@object K256: diff --git a/module/icp/asm-x86_64/sha2/sha512_impl.S b/module/icp/asm-x86_64/sha2/sha512_impl.S index c6e7efd86..921d3d8cd 100644 --- a/module/icp/asm-x86_64/sha2/sha512_impl.S +++ b/module/icp/asm-x86_64/sha2/sha512_impl.S @@ -2063,7 +2063,7 @@ ENTRY_NP(SHA512TransformBlocks) .cfi_endproc SET_SIZE(SHA512TransformBlocks) -.data +.section .rodata .align 64 .type K512,@object K512: diff --git a/module/icp/core/kcf_callprov.c b/module/icp/core/kcf_callprov.c index fd2f7e1aa..345014d0a 100644 --- a/module/icp/core/kcf_callprov.c +++ b/module/icp/core/kcf_callprov.c @@ -116,7 +116,7 @@ kcf_get_hardware_provider(crypto_mech_type_t mech_type_1, kcf_provider_list_t *p; kcf_ops_class_t class; kcf_mech_entry_t *me; - kcf_mech_entry_tab_t *me_tab; + const kcf_mech_entry_tab_t *me_tab; int index, len, gqlen = INT_MAX, rv = CRYPTO_SUCCESS; /* get the mech entry for the specified mechanism */ @@ -258,7 +258,7 @@ kcf_get_mech_provider(crypto_mech_type_t mech_type, kcf_mech_entry_t **mepp, kcf_ops_class_t class; int index; kcf_mech_entry_t *me; - kcf_mech_entry_tab_t *me_tab; + const kcf_mech_entry_tab_t *me_tab; class = KCF_MECH2CLASS(mech_type); if ((class < KCF_FIRST_OPSCLASS) || (class > KCF_LAST_OPSCLASS)) { diff --git a/module/icp/core/kcf_mech_tabs.c b/module/icp/core/kcf_mech_tabs.c index 461b7946c..60055e78a 100644 --- a/module/icp/core/kcf_mech_tabs.c +++ b/module/icp/core/kcf_mech_tabs.c @@ -82,14 +82,14 @@ /* RFE 4687834 Will deal with the extensibility of these tables later */ -kcf_mech_entry_t kcf_digest_mechs_tab[KCF_MAXDIGEST]; -kcf_mech_entry_t kcf_cipher_mechs_tab[KCF_MAXCIPHER]; -kcf_mech_entry_t kcf_mac_mechs_tab[KCF_MAXMAC]; -kcf_mech_entry_t kcf_sign_mechs_tab[KCF_MAXSIGN]; -kcf_mech_entry_t kcf_keyops_mechs_tab[KCF_MAXKEYOPS]; -kcf_mech_entry_t kcf_misc_mechs_tab[KCF_MAXMISC]; 
+static kcf_mech_entry_t kcf_digest_mechs_tab[KCF_MAXDIGEST]; +static kcf_mech_entry_t kcf_cipher_mechs_tab[KCF_MAXCIPHER]; +static kcf_mech_entry_t kcf_mac_mechs_tab[KCF_MAXMAC]; +static kcf_mech_entry_t kcf_sign_mechs_tab[KCF_MAXSIGN]; +static kcf_mech_entry_t kcf_keyops_mechs_tab[KCF_MAXKEYOPS]; +static kcf_mech_entry_t kcf_misc_mechs_tab[KCF_MAXMISC]; -kcf_mech_entry_tab_t kcf_mech_tabs_tab[KCF_LAST_OPSCLASS + 1] = { +const kcf_mech_entry_tab_t kcf_mech_tabs_tab[KCF_LAST_OPSCLASS + 1] = { {0, NULL}, /* No class zero */ {KCF_MAXDIGEST, kcf_digest_mechs_tab}, {KCF_MAXCIPHER, kcf_cipher_mechs_tab}, @@ -108,22 +108,22 @@ kcf_mech_entry_tab_t kcf_mech_tabs_tab[KCF_LAST_OPSCLASS + 1] = { * There is room for refinement here. * */ -int kcf_md5_threshold = 512; -int kcf_sha1_threshold = 512; -int kcf_des_threshold = 512; -int kcf_des3_threshold = 512; -int kcf_aes_threshold = 512; -int kcf_bf_threshold = 512; -int kcf_rc4_threshold = 512; +static const int kcf_md5_threshold = 512; +static const int kcf_sha1_threshold = 512; +static const int kcf_des_threshold = 512; +static const int kcf_des3_threshold = 512; +static const int kcf_aes_threshold = 512; +static const int kcf_bf_threshold = 512; +static const int kcf_rc4_threshold = 512; -kmutex_t kcf_mech_tabs_lock; +static kmutex_t kcf_mech_tabs_lock; static uint32_t kcf_gen_swprov = 0; -int kcf_mech_hash_size = 256; -mod_hash_t *kcf_mech_hash; /* mech name to id hash */ +static const int kcf_mech_hash_size = 256; +static mod_hash_t *kcf_mech_hash; /* mech name to id hash */ static crypto_mech_type_t -kcf_mech_hash_find(char *mechname) +kcf_mech_hash_find(const char *mechname) { mod_hash_val_t hv; crypto_mech_type_t mt; @@ -166,7 +166,6 @@ kcf_destroy_mech_tabs(void) void kcf_init_mech_tabs(void) { - int i, max; kcf_ops_class_t class; kcf_mech_entry_t *me_tab; @@ -249,9 +248,9 @@ kcf_init_mech_tabs(void) kcf_mech_hash_size, mod_hash_null_valdtor); for (class = KCF_FIRST_OPSCLASS; class <= KCF_LAST_OPSCLASS; class++) { - max = kcf_mech_tabs_tab[class].met_size; + int max = kcf_mech_tabs_tab[class].met_size; me_tab = kcf_mech_tabs_tab[class].met_tab; - for (i = 0; i < max; i++) { + for (int i = 0; i < max; i++) { mutex_init(&(me_tab[i].me_mutex), NULL, MUTEX_DEFAULT, NULL); if (me_tab[i].me_name[0] != 0) { @@ -747,7 +746,7 @@ kcf_get_mech_entry(crypto_mech_type_t mech_type, kcf_mech_entry_t **mep) { kcf_ops_class_t class; int index; - kcf_mech_entry_tab_t *me_tab; + const kcf_mech_entry_tab_t *me_tab; ASSERT(mep != NULL); @@ -778,7 +777,7 @@ kcf_get_mech_entry(crypto_mech_type_t mech_type, kcf_mech_entry_t **mep) * to load it. 
*/ crypto_mech_type_t -crypto_mech2id_common(char *mechname, boolean_t load_module) +crypto_mech2id_common(const char *mechname, boolean_t load_module) { (void) load_module; return (kcf_mech_hash_find(mechname)); diff --git a/module/icp/core/kcf_prov_tabs.c b/module/icp/core/kcf_prov_tabs.c index 9d303d022..664e96da9 100644 --- a/module/icp/core/kcf_prov_tabs.c +++ b/module/icp/core/kcf_prov_tabs.c @@ -205,7 +205,8 @@ kcf_prov_tab_lookup(crypto_provider_id_t prov_id) } static void -allocate_ops_v1(crypto_ops_t *src, crypto_ops_t *dst, uint_t *mech_list_count) +allocate_ops_v1(const crypto_ops_t *src, crypto_ops_t *dst, + uint_t *mech_list_count) { if (src->co_control_ops != NULL) dst->co_control_ops = kmem_alloc(sizeof (crypto_control_ops_t), @@ -274,7 +275,7 @@ allocate_ops_v1(crypto_ops_t *src, crypto_ops_t *dst, uint_t *mech_list_count) } static void -allocate_ops_v2(crypto_ops_t *src, crypto_ops_t *dst) +allocate_ops_v2(const crypto_ops_t *src, crypto_ops_t *dst) { if (src->co_mech_ops != NULL) dst->co_mech_ops = kmem_alloc(sizeof (crypto_mech_ops_t), @@ -282,7 +283,7 @@ allocate_ops_v2(crypto_ops_t *src, crypto_ops_t *dst) } static void -allocate_ops_v3(crypto_ops_t *src, crypto_ops_t *dst) +allocate_ops_v3(const crypto_ops_t *src, crypto_ops_t *dst) { if (src->co_nostore_key_ops != NULL) dst->co_nostore_key_ops = @@ -297,12 +298,11 @@ allocate_ops_v3(crypto_ops_t *src, crypto_ops_t *dst) * since it is invoked from user context during provider registration. */ kcf_provider_desc_t * -kcf_alloc_provider_desc(crypto_provider_info_t *info) +kcf_alloc_provider_desc(const crypto_provider_info_t *info) { - int i, j; kcf_provider_desc_t *desc; uint_t mech_list_count = info->pi_mech_list_count; - crypto_ops_t *src_ops = info->pi_ops_vector; + const crypto_ops_t *src_ops = info->pi_ops_vector; desc = kmem_zalloc(sizeof (kcf_provider_desc_t), KM_SLEEP); @@ -330,21 +330,22 @@ kcf_alloc_provider_desc(crypto_provider_info_t *info) * KCF needs to allocate storage where copies of the ops * vectors are copied. 
*/ - desc->pd_ops_vector = kmem_zalloc(sizeof (crypto_ops_t), KM_SLEEP); + crypto_ops_t *opvec = kmem_zalloc(sizeof (crypto_ops_t), KM_SLEEP); if (info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER) { - allocate_ops_v1(src_ops, desc->pd_ops_vector, &mech_list_count); + allocate_ops_v1(src_ops, opvec, &mech_list_count); if (info->pi_interface_version >= CRYPTO_SPI_VERSION_2) - allocate_ops_v2(src_ops, desc->pd_ops_vector); + allocate_ops_v2(src_ops, opvec); if (info->pi_interface_version == CRYPTO_SPI_VERSION_3) - allocate_ops_v3(src_ops, desc->pd_ops_vector); + allocate_ops_v3(src_ops, opvec); } + desc->pd_ops_vector = opvec; desc->pd_mech_list_count = mech_list_count; desc->pd_mechanisms = kmem_zalloc(sizeof (crypto_mech_info_t) * mech_list_count, KM_SLEEP); - for (i = 0; i < KCF_OPS_CLASSSIZE; i++) - for (j = 0; j < KCF_MAXMECHTAB; j++) + for (int i = 0; i < KCF_OPS_CLASSSIZE; i++) + for (int j = 0; j < KCF_MAXMECHTAB; j++) desc->pd_mech_indx[i][j] = KCF_INVALID_INDX; desc->pd_prov_id = KCF_PROVID_INVALID; diff --git a/module/icp/core/kcf_sched.c b/module/icp/core/kcf_sched.c index e4ccdbde9..062e96059 100644 --- a/module/icp/core/kcf_sched.c +++ b/module/icp/core/kcf_sched.c @@ -35,15 +35,12 @@ #include #include -kcf_global_swq_t *gswq; /* Global software queue */ +static kcf_global_swq_t *gswq; /* Global software queue */ /* Thread pool related variables */ static kcf_pool_t *kcfpool; /* Thread pool of kcfd LWPs */ -int kcf_maxthreads = 2; -int kcf_minthreads = 1; -int kcf_thr_multiple = 2; /* Boot-time tunable for experimentation */ -static ulong_t kcf_idlethr_timeout; -#define KCF_DEFAULT_THRTIMEOUT 60000000 /* 60 seconds */ +static const int kcf_maxthreads = 2; +static const int kcf_minthreads = 1; /* kmem caches used by the scheduler */ static kmem_cache_t *kcf_sreq_cache; @@ -1289,8 +1286,6 @@ kcfpool_alloc() mutex_init(&kcfpool->kp_user_lock, NULL, MUTEX_DEFAULT, NULL); cv_init(&kcfpool->kp_user_cv, NULL, CV_DEFAULT, NULL); - - kcf_idlethr_timeout = KCF_DEFAULT_THRTIMEOUT; } /* diff --git a/module/icp/include/sys/crypto/impl.h b/module/icp/include/sys/crypto/impl.h index 8ebc4be48..4906549b5 100644 --- a/module/icp/include/sys/crypto/impl.h +++ b/module/icp/include/sys/crypto/impl.h @@ -210,7 +210,7 @@ typedef struct kcf_provider_desc { struct kcf_provider_list *pd_provider_list; kcondvar_t pd_resume_cv; crypto_provider_handle_t pd_prov_handle; - crypto_ops_t *pd_ops_vector; + const crypto_ops_t *pd_ops_vector; ushort_t pd_mech_indx[KCF_OPS_CLASSSIZE]\ [KCF_MAXMECHTAB]; crypto_mech_info_t *pd_mechanisms; @@ -397,19 +397,6 @@ extern kcf_soft_conf_entry_t *soft_config_list; #define KCF_MAXKEYOPS 116 /* Key generation and derivation */ #define KCF_MAXMISC 16 /* Others ... 
*/ -#define KCF_MAXMECHS KCF_MAXDIGEST + KCF_MAXCIPHER + KCF_MAXMAC + \ - KCF_MAXSIGN + KCF_MAXKEYOPS + \ - KCF_MAXMISC - -extern kcf_mech_entry_t kcf_digest_mechs_tab[]; -extern kcf_mech_entry_t kcf_cipher_mechs_tab[]; -extern kcf_mech_entry_t kcf_mac_mechs_tab[]; -extern kcf_mech_entry_t kcf_sign_mechs_tab[]; -extern kcf_mech_entry_t kcf_keyops_mechs_tab[]; -extern kcf_mech_entry_t kcf_misc_mechs_tab[]; - -extern kmutex_t kcf_mech_tabs_lock; - typedef enum { KCF_DIGEST_CLASS = 1, KCF_CIPHER_CLASS, @@ -429,7 +416,7 @@ typedef struct kcf_mech_entry_tab { kcf_mech_entry_t *met_tab; /* the table */ } kcf_mech_entry_tab_t; -extern kcf_mech_entry_tab_t kcf_mech_tabs_tab[]; +extern const kcf_mech_entry_tab_t kcf_mech_tabs_tab[]; #define KCF_MECHID(class, index) \ (((crypto_mech_type_t)(class) << 32) | (crypto_mech_type_t)(index)) @@ -1283,12 +1270,13 @@ extern int kcf_add_mech_provider(short, kcf_provider_desc_t *, kcf_prov_mech_desc_t **); extern void kcf_remove_mech_provider(char *, kcf_provider_desc_t *); extern int kcf_get_mech_entry(crypto_mech_type_t, kcf_mech_entry_t **); -extern kcf_provider_desc_t *kcf_alloc_provider_desc(crypto_provider_info_t *); +extern kcf_provider_desc_t *kcf_alloc_provider_desc( + const crypto_provider_info_t *); extern void kcf_provider_zero_refcnt(kcf_provider_desc_t *); extern void kcf_free_provider_desc(kcf_provider_desc_t *); extern void kcf_soft_config_init(void); extern int get_sw_provider_for_mech(crypto_mech_name_t, char **); -extern crypto_mech_type_t crypto_mech2id_common(char *, boolean_t); +extern crypto_mech_type_t crypto_mech2id_common(const char *, boolean_t); extern void undo_register_provider(kcf_provider_desc_t *, boolean_t); extern void redo_register_provider(kcf_provider_desc_t *); extern void kcf_rnd_init(void); diff --git a/module/icp/include/sys/crypto/sched_impl.h b/module/icp/include/sys/crypto/sched_impl.h index 85ea0ba1d..29ef8021f 100644 --- a/module/icp/include/sys/crypto/sched_impl.h +++ b/module/icp/include/sys/crypto/sched_impl.h @@ -457,12 +457,9 @@ typedef struct kcf_ntfy_elem { #define CRYPTO_TASKQ_MIN 64 #define CRYPTO_TASKQ_MAX 2 * 1024 * 1024 -extern int crypto_taskq_threads; -extern int crypto_taskq_minalloc; -extern int crypto_taskq_maxalloc; -extern kcf_global_swq_t *gswq; -extern int kcf_maxthreads; -extern int kcf_minthreads; +extern const int crypto_taskq_threads; +extern const int crypto_taskq_minalloc; +extern const int crypto_taskq_maxalloc; /* * All pending crypto bufcalls are put on a list. cbuf_list_lock diff --git a/module/icp/include/sys/crypto/spi.h b/module/icp/include/sys/crypto/spi.h index 2c62b5706..0f1b455c8 100644 --- a/module/icp/include/sys/crypto/spi.h +++ b/module/icp/include/sys/crypto/spi.h @@ -498,10 +498,10 @@ typedef struct crypto_nostore_key_ops { * by calling crypto_register_provider(9F). 
*/ typedef struct crypto_ops_v1 { - crypto_control_ops_t *co_control_ops; - crypto_digest_ops_t *co_digest_ops; - crypto_cipher_ops_t *co_cipher_ops; - crypto_mac_ops_t *co_mac_ops; + const crypto_control_ops_t *co_control_ops; + const crypto_digest_ops_t *co_digest_ops; + const crypto_cipher_ops_t *co_cipher_ops; + const crypto_mac_ops_t *co_mac_ops; crypto_sign_ops_t *co_sign_ops; crypto_verify_ops_t *co_verify_ops; crypto_dual_ops_t *co_dual_ops; @@ -511,7 +511,7 @@ typedef struct crypto_ops_v1 { crypto_object_ops_t *co_object_ops; crypto_key_ops_t *co_key_ops; crypto_provider_management_ops_t *co_provider_ops; - crypto_ctx_ops_t *co_ctx_ops; + const crypto_ctx_ops_t *co_ctx_ops; } crypto_ops_v1_t; typedef struct crypto_ops_v2 { @@ -653,9 +653,9 @@ typedef struct crypto_provider_info_v1 { char *pi_provider_description; crypto_provider_type_t pi_provider_type; crypto_provider_handle_t pi_provider_handle; - crypto_ops_t *pi_ops_vector; + const crypto_ops_t *pi_ops_vector; uint_t pi_mech_list_count; - crypto_mech_info_t *pi_mechanisms; + const crypto_mech_info_t *pi_mechanisms; uint_t pi_logical_provider_count; crypto_kcf_provider_handle_t *pi_logical_providers; } crypto_provider_info_v1_t; @@ -711,7 +711,7 @@ typedef struct crypto_provider_info { * of state changes, and notify the kernel when a asynchronous request * completed. */ -extern int crypto_register_provider(crypto_provider_info_t *, +extern int crypto_register_provider(const crypto_provider_info_t *, crypto_kcf_provider_handle_t *); extern int crypto_unregister_provider(crypto_kcf_provider_handle_t); extern void crypto_provider_notification(crypto_kcf_provider_handle_t, uint_t); diff --git a/module/icp/io/aes.c b/module/icp/io/aes.c index 2f24739a4..be1736864 100644 --- a/module/icp/io/aes.c +++ b/module/icp/io/aes.c @@ -39,7 +39,7 @@ /* * Mechanism info structure passed to KCF during registration. 
*/ -static crypto_mech_info_t aes_mech_info_tab[] = { +static const crypto_mech_info_t aes_mech_info_tab[] = { /* AES_ECB */ {SUN_CKM_AES_ECB, AES_ECB_MECH_INFO_TYPE, CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC | @@ -77,7 +77,7 @@ static crypto_mech_info_t aes_mech_info_tab[] = { static void aes_provider_status(crypto_provider_handle_t, uint_t *); -static crypto_control_ops_t aes_control_ops = { +static const crypto_control_ops_t aes_control_ops = { aes_provider_status }; @@ -110,7 +110,7 @@ static int aes_decrypt_atomic(crypto_provider_handle_t, crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t); -static crypto_cipher_ops_t aes_cipher_ops = { +static const crypto_cipher_ops_t aes_cipher_ops = { .encrypt_init = aes_encrypt_init, .encrypt = aes_encrypt, .encrypt_update = aes_encrypt_update, @@ -130,7 +130,7 @@ static int aes_mac_verify_atomic(crypto_provider_handle_t, crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t); -static crypto_mac_ops_t aes_mac_ops = { +static const crypto_mac_ops_t aes_mac_ops = { .mac_init = NULL, .mac = NULL, .mac_update = NULL, @@ -144,12 +144,12 @@ static int aes_create_ctx_template(crypto_provider_handle_t, size_t *, crypto_req_handle_t); static int aes_free_context(crypto_ctx_t *); -static crypto_ctx_ops_t aes_ctx_ops = { +static const crypto_ctx_ops_t aes_ctx_ops = { .create_ctx_template = aes_create_ctx_template, .free_context = aes_free_context }; -static crypto_ops_t aes_crypto_ops = {{{{{ +static const crypto_ops_t aes_crypto_ops = {{{{{ &aes_control_ops, NULL, &aes_cipher_ops, @@ -166,13 +166,13 @@ static crypto_ops_t aes_crypto_ops = {{{{{ &aes_ctx_ops }}}}}; -static crypto_provider_info_t aes_prov_info = {{{{ +static const crypto_provider_info_t aes_prov_info = {{{{ CRYPTO_SPI_VERSION_1, "AES Software Provider", CRYPTO_SW_PROVIDER, NULL, &aes_crypto_ops, - sizeof (aes_mech_info_tab)/sizeof (crypto_mech_info_t), + sizeof (aes_mech_info_tab) / sizeof (crypto_mech_info_t), aes_mech_info_tab }}}}; diff --git a/module/icp/io/sha2_mod.c b/module/icp/io/sha2_mod.c index c096a3ce1..a43c7c5b7 100644 --- a/module/icp/io/sha2_mod.c +++ b/module/icp/io/sha2_mod.c @@ -60,7 +60,7 @@ /* * Mechanism info structure passed to KCF during registration. 
*/ -static crypto_mech_info_t sha2_mech_info_tab[] = { +static const crypto_mech_info_t sha2_mech_info_tab[] = { /* SHA256 */ {SUN_CKM_SHA256, SHA256_MECH_INFO_TYPE, CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC, @@ -107,7 +107,7 @@ static crypto_mech_info_t sha2_mech_info_tab[] = { static void sha2_provider_status(crypto_provider_handle_t, uint_t *); -static crypto_control_ops_t sha2_control_ops = { +static const crypto_control_ops_t sha2_control_ops = { sha2_provider_status }; @@ -123,7 +123,7 @@ static int sha2_digest_atomic(crypto_provider_handle_t, crypto_session_id_t, crypto_mechanism_t *, crypto_data_t *, crypto_data_t *, crypto_req_handle_t); -static crypto_digest_ops_t sha2_digest_ops = { +static const crypto_digest_ops_t sha2_digest_ops = { .digest_init = sha2_digest_init, .digest = sha2_digest, .digest_update = sha2_digest_update, @@ -144,7 +144,7 @@ static int sha2_mac_verify_atomic(crypto_provider_handle_t, crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t); -static crypto_mac_ops_t sha2_mac_ops = { +static const crypto_mac_ops_t sha2_mac_ops = { .mac_init = sha2_mac_init, .mac = NULL, .mac_update = sha2_mac_update, @@ -158,12 +158,12 @@ static int sha2_create_ctx_template(crypto_provider_handle_t, size_t *, crypto_req_handle_t); static int sha2_free_context(crypto_ctx_t *); -static crypto_ctx_ops_t sha2_ctx_ops = { +static const crypto_ctx_ops_t sha2_ctx_ops = { .create_ctx_template = sha2_create_ctx_template, .free_context = sha2_free_context }; -static crypto_ops_t sha2_crypto_ops = {{{{{ +static const crypto_ops_t sha2_crypto_ops = {{{{{ &sha2_control_ops, &sha2_digest_ops, NULL, @@ -180,13 +180,13 @@ static crypto_ops_t sha2_crypto_ops = {{{{{ &sha2_ctx_ops }}}}}; -static crypto_provider_info_t sha2_prov_info = {{{{ +static const crypto_provider_info_t sha2_prov_info = {{{{ CRYPTO_SPI_VERSION_1, "SHA2 Software Provider", CRYPTO_SW_PROVIDER, NULL, &sha2_crypto_ops, - sizeof (sha2_mech_info_tab)/sizeof (crypto_mech_info_t), + sizeof (sha2_mech_info_tab) / sizeof (crypto_mech_info_t), sha2_mech_info_tab }}}}; diff --git a/module/icp/io/skein_mod.c b/module/icp/io/skein_mod.c index 250c0001a..d0917e71b 100644 --- a/module/icp/io/skein_mod.c +++ b/module/icp/io/skein_mod.c @@ -30,7 +30,7 @@ #define SKEIN_MODULE_IMPL #include -static crypto_mech_info_t skein_mech_info_tab[] = { +static const crypto_mech_info_t skein_mech_info_tab[] = { {CKM_SKEIN_256, SKEIN_256_MECH_INFO_TYPE, CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC, 0, 0, CRYPTO_KEYSIZE_UNIT_IN_BITS}, @@ -53,7 +53,7 @@ static crypto_mech_info_t skein_mech_info_tab[] = { static void skein_provider_status(crypto_provider_handle_t, uint_t *); -static crypto_control_ops_t skein_control_ops = { +static const crypto_control_ops_t skein_control_ops = { skein_provider_status }; @@ -67,7 +67,7 @@ static int skein_digest_atomic(crypto_provider_handle_t, crypto_session_id_t, crypto_mechanism_t *, crypto_data_t *, crypto_data_t *, crypto_req_handle_t); -static crypto_digest_ops_t skein_digest_ops = { +static const crypto_digest_ops_t skein_digest_ops = { .digest_init = skein_digest_init, .digest = skein_digest, .digest_update = skein_update, @@ -82,7 +82,7 @@ static int skein_mac_atomic(crypto_provider_handle_t, crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t); -static crypto_mac_ops_t skein_mac_ops = { +static const crypto_mac_ops_t skein_mac_ops = { 
.mac_init = skein_mac_init, .mac = NULL, .mac_update = skein_update, /* using regular digest update is OK here */ @@ -96,12 +96,12 @@ static int skein_create_ctx_template(crypto_provider_handle_t, size_t *, crypto_req_handle_t); static int skein_free_context(crypto_ctx_t *); -static crypto_ctx_ops_t skein_ctx_ops = { +static const crypto_ctx_ops_t skein_ctx_ops = { .create_ctx_template = skein_create_ctx_template, .free_context = skein_free_context }; -static crypto_ops_t skein_crypto_ops = {{{{{ +static const crypto_ops_t skein_crypto_ops = {{{{{ &skein_control_ops, &skein_digest_ops, NULL, @@ -118,7 +118,7 @@ static crypto_ops_t skein_crypto_ops = {{{{{ &skein_ctx_ops, }}}}}; -static crypto_provider_info_t skein_prov_info = {{{{ +static const crypto_provider_info_t skein_prov_info = {{{{ CRYPTO_SPI_VERSION_1, "Skein Software Provider", CRYPTO_SW_PROVIDER, diff --git a/module/icp/spi/kcf_spi.c b/module/icp/spi/kcf_spi.c index 34b36b81c..25fe9b5b6 100644 --- a/module/icp/spi/kcf_spi.c +++ b/module/icp/spi/kcf_spi.c @@ -39,18 +39,19 @@ /* * minalloc and maxalloc values to be used for taskq_create(). */ -int crypto_taskq_threads = CRYPTO_TASKQ_THREADS; -int crypto_taskq_minalloc = CRYPTO_TASKQ_MIN; -int crypto_taskq_maxalloc = CRYPTO_TASKQ_MAX; +const int crypto_taskq_threads = CRYPTO_TASKQ_THREADS; +const int crypto_taskq_minalloc = CRYPTO_TASKQ_MIN; +const int crypto_taskq_maxalloc = CRYPTO_TASKQ_MAX; static void remove_provider(kcf_provider_desc_t *); -static void process_logical_providers(crypto_provider_info_t *, +static void process_logical_providers(const crypto_provider_info_t *, + kcf_provider_desc_t *); +static int init_prov_mechs(const crypto_provider_info_t *, kcf_provider_desc_t *); -static int init_prov_mechs(crypto_provider_info_t *, kcf_provider_desc_t *); static int kcf_prov_kstat_update(kstat_t *, int); static void delete_kstat(kcf_provider_desc_t *); -static kcf_prov_stats_t kcf_stats_ks_data_template = { +static const kcf_prov_stats_t kcf_stats_ks_data_template = { { "kcf_ops_total", KSTAT_DATA_UINT64 }, { "kcf_ops_passed", KSTAT_DATA_UINT64 }, { "kcf_ops_failed", KSTAT_DATA_UINT64 }, @@ -58,7 +59,7 @@ static kcf_prov_stats_t kcf_stats_ks_data_template = { }; #define KCF_SPI_COPY_OPS(src, dst, ops) if ((src)->ops != NULL) \ - *((dst)->ops) = *((src)->ops); + memcpy((void *) (dst)->ops, (src)->ops, sizeof (*(src)->ops)); /* * Copy an ops vector from src to dst. Used during provider registration @@ -69,7 +70,7 @@ static kcf_prov_stats_t kcf_stats_ks_data_template = { * persistent. */ static void -copy_ops_vector_v1(crypto_ops_t *src_ops, crypto_ops_t *dst_ops) +copy_ops_vector_v1(const crypto_ops_t *src_ops, crypto_ops_t *dst_ops) { KCF_SPI_COPY_OPS(src_ops, dst_ops, co_control_ops); KCF_SPI_COPY_OPS(src_ops, dst_ops, co_digest_ops); @@ -88,13 +89,13 @@ copy_ops_vector_v1(crypto_ops_t *src_ops, crypto_ops_t *dst_ops) } static void -copy_ops_vector_v2(crypto_ops_t *src_ops, crypto_ops_t *dst_ops) +copy_ops_vector_v2(const crypto_ops_t *src_ops, crypto_ops_t *dst_ops) { KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mech_ops); } static void -copy_ops_vector_v3(crypto_ops_t *src_ops, crypto_ops_t *dst_ops) +copy_ops_vector_v3(const crypto_ops_t *src_ops, crypto_ops_t *dst_ops) { KCF_SPI_COPY_OPS(src_ops, dst_ops, co_nostore_key_ops); } @@ -108,7 +109,7 @@ copy_ops_vector_v3(crypto_ops_t *src_ops, crypto_ops_t *dst_ops) * routines. Software providers call this routine in their _init() routine. 
*/ int -crypto_register_provider(crypto_provider_info_t *info, +crypto_register_provider(const crypto_provider_info_t *info, crypto_kcf_provider_handle_t *handle) { char *ks_name; @@ -158,16 +159,14 @@ crypto_register_provider(crypto_provider_info_t *info, if (info->pi_ops_vector == NULL) { goto bail; } - copy_ops_vector_v1(info->pi_ops_vector, - prov_desc->pd_ops_vector); + crypto_ops_t *pvec = (crypto_ops_t *)prov_desc->pd_ops_vector; + copy_ops_vector_v1(info->pi_ops_vector, pvec); if (info->pi_interface_version >= CRYPTO_SPI_VERSION_2) { - copy_ops_vector_v2(info->pi_ops_vector, - prov_desc->pd_ops_vector); + copy_ops_vector_v2(info->pi_ops_vector, pvec); prov_desc->pd_flags = info->pi_flags; } if (info->pi_interface_version == CRYPTO_SPI_VERSION_3) { - copy_ops_vector_v3(info->pi_ops_vector, - prov_desc->pd_ops_vector); + copy_ops_vector_v3(info->pi_ops_vector, pvec); } } @@ -199,8 +198,8 @@ crypto_register_provider(crypto_provider_info_t *info, */ if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER) prov_desc->pd_sched_info.ks_taskq = taskq_create("kcf_taskq", - crypto_taskq_threads, minclsyspri, - crypto_taskq_minalloc, crypto_taskq_maxalloc, + CRYPTO_TASKQ_THREADS, minclsyspri, + CRYPTO_TASKQ_MIN, CRYPTO_TASKQ_MAX, TASKQ_PREPOPULATE); else prov_desc->pd_sched_info.ks_taskq = NULL; @@ -566,7 +565,7 @@ crypto_kmflag(crypto_req_handle_t handle) * if the table of mechanisms is full. */ static int -init_prov_mechs(crypto_provider_info_t *info, kcf_provider_desc_t *desc) +init_prov_mechs(const crypto_provider_info_t *info, kcf_provider_desc_t *desc) { uint_t mech_idx; uint_t cleanup_idx; @@ -811,7 +810,8 @@ remove_provider_from_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2) * descriptors (kcf_provider_desc_t) attached to a logical provider. */ static void -process_logical_providers(crypto_provider_info_t *info, kcf_provider_desc_t *hp) +process_logical_providers(const crypto_provider_info_t *info, + kcf_provider_desc_t *hp) { kcf_provider_desc_t *lp; crypto_provider_id_t handle; diff --git a/module/nvpair/nvpair.c b/module/nvpair/nvpair.c index 8230cca20..b4463dd73 100644 --- a/module/nvpair/nvpair.c +++ b/module/nvpair/nvpair.c @@ -146,12 +146,12 @@ static int nvlist_add_common(nvlist_t *nvl, const char *name, data_type_t type, ((i_nvp_t *)((size_t)(nvp) - offsetof(i_nvp_t, nvi_nvp))) #ifdef _KERNEL -int nvpair_max_recursion = 20; +static const int nvpair_max_recursion = 20; #else -int nvpair_max_recursion = 100; +static const int nvpair_max_recursion = 100; #endif -uint64_t nvlist_hashtable_init_size = (1 << 4); +static const uint64_t nvlist_hashtable_init_size = (1 << 4); int nv_alloc_init(nv_alloc_t *nva, const nv_alloc_ops_t *nvo, /* args */ ...) 
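Most declarations touched by this patch move a const to one side of the star or the other, as with nv_alloc_t *const nv_alloc_nosleep above and const nv_alloc_ops_t *const nv_fixed_ops below. As a reminder of what each position buys, a standalone sketch (box_t and friends are illustrative, not ZFS types):

/*
 * const box_t *p        - p may be retargeted, but *p is read-only
 * box_t *const p        - p is fixed at its initializer; *p stays writable
 * const box_t *const p  - both are fixed
 */
typedef struct { int refs; } box_t;

static box_t the_box;			/* private, writable definition */
box_t *const box_handle = &the_box;	/* exported: the pointer is const */

int
main(void)
{
	box_handle->refs++;		/* OK: the pointee stays writable */
	/* box_handle = 0;		   error: assignment to const pointer */
	return (box_handle->refs == 1 ? 0 : 1);
}

This split is why nv_alloc_nosleep_def can become static while nv_alloc_nosleep stays public: callers keep a stable, immutable handle, and the mutable definition is no longer visible outside its translation unit.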
diff --git a/module/nvpair/nvpair_alloc_fixed.c b/module/nvpair/nvpair_alloc_fixed.c
index ca3f68676..d7d3e7afd 100644
--- a/module/nvpair/nvpair_alloc_fixed.c
+++ b/module/nvpair/nvpair_alloc_fixed.c
@@ -100,7 +100,7 @@ nv_fixed_reset(nv_alloc_t *nva)
 	nvb->nvb_cur = (uintptr_t)&nvb[1];
 }

-const nv_alloc_ops_t nv_fixed_ops_def = {
+static const nv_alloc_ops_t nv_fixed_ops_def = {
 	.nv_ao_init = nv_fixed_init,
 	.nv_ao_fini = NULL,
 	.nv_ao_alloc = nv_fixed_alloc,
@@ -108,7 +108,7 @@ const nv_alloc_ops_t nv_fixed_ops_def = {
 	.nv_ao_reset = nv_fixed_reset
 };

-const nv_alloc_ops_t *nv_fixed_ops = &nv_fixed_ops_def;
+const nv_alloc_ops_t *const nv_fixed_ops = &nv_fixed_ops_def;

 #if defined(_KERNEL)
 EXPORT_SYMBOL(nv_fixed_ops);
diff --git a/module/nvpair/nvpair_alloc_spl.c b/module/nvpair/nvpair_alloc_spl.c
index ed8fa4d09..aa344b642 100644
--- a/module/nvpair/nvpair_alloc_spl.c
+++ b/module/nvpair/nvpair_alloc_spl.c
@@ -52,7 +52,7 @@ nv_free_spl(nv_alloc_t *nva, void *buf, size_t size)
 	kmem_free(buf, size);
 }

-const nv_alloc_ops_t spl_sleep_ops_def = {
+static const nv_alloc_ops_t spl_sleep_ops_def = {
 	.nv_ao_init = NULL,
 	.nv_ao_fini = NULL,
 	.nv_ao_alloc = nv_alloc_sleep_spl,
@@ -60,7 +60,7 @@ const nv_alloc_ops_t spl_sleep_ops_def = {
 	.nv_ao_reset = NULL
 };

-const nv_alloc_ops_t spl_pushpage_ops_def = {
+static const nv_alloc_ops_t spl_pushpage_ops_def = {
 	.nv_ao_init = NULL,
 	.nv_ao_fini = NULL,
 	.nv_ao_alloc = nv_alloc_pushpage_spl,
@@ -68,7 +68,7 @@ const nv_alloc_ops_t spl_pushpage_ops_def = {
 	.nv_ao_reset = NULL
 };

-const nv_alloc_ops_t spl_nosleep_ops_def = {
+static const nv_alloc_ops_t spl_nosleep_ops_def = {
 	.nv_ao_init = NULL,
 	.nv_ao_fini = NULL,
 	.nv_ao_alloc = nv_alloc_nosleep_spl,
@@ -76,21 +76,21 @@ const nv_alloc_ops_t spl_nosleep_ops_def = {
 	.nv_ao_reset = NULL
 };

-nv_alloc_t nv_alloc_sleep_def = {
+static nv_alloc_t nv_alloc_sleep_def = {
 	&spl_sleep_ops_def,
 	NULL
 };

-nv_alloc_t nv_alloc_pushpage_def = {
+static nv_alloc_t nv_alloc_pushpage_def = {
 	&spl_pushpage_ops_def,
 	NULL
 };

-nv_alloc_t nv_alloc_nosleep_def = {
+static nv_alloc_t nv_alloc_nosleep_def = {
 	&spl_nosleep_ops_def,
 	NULL
 };

-nv_alloc_t *nv_alloc_sleep = &nv_alloc_sleep_def;
-nv_alloc_t *nv_alloc_pushpage = &nv_alloc_pushpage_def;
-nv_alloc_t *nv_alloc_nosleep = &nv_alloc_nosleep_def;
+nv_alloc_t *const nv_alloc_sleep = &nv_alloc_sleep_def;
+nv_alloc_t *const nv_alloc_pushpage = &nv_alloc_pushpage_def;
+nv_alloc_t *const nv_alloc_nosleep = &nv_alloc_nosleep_def;
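The nvpair allocator hunks above all follow one pattern: the backing definitions (spl_sleep_ops_def and friends) become static, while the only names other translation units link against are pointers that are themselves const. The exported symbol is preserved, but both the pointer and — for the ops tables — the pointee can move into .rodata. Schematically, with made-up names:

/* widget.c -- illustrative, not the nvpair code */
struct widget_ops {
	int (*wo_alloc)(void);
};

static int
widget_alloc(void)
{
	return (0);
}

/* Private definition: internal linkage, read-only data. */
static const struct widget_ops widget_ops_def = {
	.wo_alloc = widget_alloc,
};

/*
 * Public handle: callers get a const pointer to const data, so
 * neither the pointer nor the table it names can be rebound.
 */
const struct widget_ops *const widget_ops = &widget_ops_def;

Callers keep writing widget_ops->wo_alloc() exactly as before; only the ability to re-point the handle or rewrite the table goes away.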
diff --git a/module/os/freebsd/zfs/abd_os.c b/module/os/freebsd/zfs/abd_os.c
index fa1034ff8..722a8898c 100644
--- a/module/os/freebsd/zfs/abd_os.c
+++ b/module/os/freebsd/zfs/abd_os.c
@@ -93,7 +93,7 @@ struct {
  * of multi-page linear ABDs are expensive operations due to KVA mapping and
  * unmapping, and with time they cause KVA fragmentations.
  */
-size_t zfs_abd_scatter_min_size = PAGE_SIZE + 1;
+static size_t zfs_abd_scatter_min_size = PAGE_SIZE + 1;

 #if defined(_KERNEL)
 SYSCTL_DECL(_vfs_zfs);
diff --git a/module/os/freebsd/zfs/crypto_os.c b/module/os/freebsd/zfs/crypto_os.c
index 6a67dbc9f..f971b62bd 100644
--- a/module/os/freebsd/zfs/crypto_os.c
+++ b/module/os/freebsd/zfs/crypto_os.c
@@ -198,7 +198,7 @@ zfs_crypto_dispatch(freebsd_crypt_session_t *session, struct cryptop *crp)
 static void
 freebsd_crypt_uio_debug_log(boolean_t encrypt,
     freebsd_crypt_session_t *input_sessionp,
-    struct zio_crypt_info *c_info,
+    const struct zio_crypt_info *c_info,
     zfs_uio_t *data_uio,
     crypto_key_t *key,
     uint8_t *ivbuf,
@@ -241,7 +241,7 @@ freebsd_crypt_uio_debug_log(boolean_t encrypt,
 #if __FreeBSD_version >= 1300087
 int
 freebsd_crypt_newsession(freebsd_crypt_session_t *sessp,
-    struct zio_crypt_info *c_info, crypto_key_t *key)
+    const struct zio_crypt_info *c_info, crypto_key_t *key)
 {
 	struct crypto_session_params csp;
 	int error = 0;
@@ -322,7 +322,7 @@ bad:
 int
 freebsd_crypt_uio(boolean_t encrypt,
     freebsd_crypt_session_t *input_sessionp,
-    struct zio_crypt_info *c_info,
+    const struct zio_crypt_info *c_info,
     zfs_uio_t *data_uio,
     crypto_key_t *key,
     uint8_t *ivbuf,
@@ -382,7 +382,7 @@ out:
 #else
 int
 freebsd_crypt_newsession(freebsd_crypt_session_t *sessp,
-    struct zio_crypt_info *c_info, crypto_key_t *key)
+    const struct zio_crypt_info *c_info, crypto_key_t *key)
 {
 	struct cryptoini cria, crie, *crip;
 	struct enc_xform *xform;
@@ -492,7 +492,7 @@ bad:
 int
 freebsd_crypt_uio(boolean_t encrypt,
     freebsd_crypt_session_t *input_sessionp,
-    struct zio_crypt_info *c_info,
+    const struct zio_crypt_info *c_info,
     zfs_uio_t *data_uio,
     crypto_key_t *key,
     uint8_t *ivbuf,
diff --git a/module/os/freebsd/zfs/vdev_file.c b/module/os/freebsd/zfs/vdev_file.c
index 2d9268136..ef87d6610 100644
--- a/module/os/freebsd/zfs/vdev_file.c
+++ b/module/os/freebsd/zfs/vdev_file.c
@@ -40,8 +40,8 @@

 static taskq_t *vdev_file_taskq;

-unsigned long vdev_file_logical_ashift = SPA_MINBLOCKSHIFT;
-unsigned long vdev_file_physical_ashift = SPA_MINBLOCKSHIFT;
+static unsigned long vdev_file_logical_ashift = SPA_MINBLOCKSHIFT;
+static unsigned long vdev_file_physical_ashift = SPA_MINBLOCKSHIFT;

 void
 vdev_file_init(void)
diff --git a/module/os/freebsd/zfs/zfs_acl.c b/module/os/freebsd/zfs/zfs_acl.c
index ae758bcef..bd22cda41 100644
--- a/module/os/freebsd/zfs/zfs_acl.c
+++ b/module/os/freebsd/zfs/zfs_acl.c
@@ -171,7 +171,7 @@ zfs_ace_v0_data(void *acep, void **datap)
 	return (0);
 }

-static acl_ops_t zfs_acl_v0_ops = {
+static const acl_ops_t zfs_acl_v0_ops = {
 	zfs_ace_v0_get_mask,
 	zfs_ace_v0_set_mask,
 	zfs_ace_v0_get_flags,
@@ -307,7 +307,7 @@ zfs_ace_fuid_data(void *acep, void **datap)
 	}
 }

-static acl_ops_t zfs_acl_fuid_ops = {
+static const acl_ops_t zfs_acl_fuid_ops = {
 	zfs_ace_fuid_get_mask,
 	zfs_ace_fuid_set_mask,
 	zfs_ace_fuid_get_flags,
diff --git a/module/os/freebsd/zfs/zfs_debug.c b/module/os/freebsd/zfs/zfs_debug.c
index dad342b06..0ff22cfe7 100644
--- a/module/os/freebsd/zfs/zfs_debug.c
+++ b/module/os/freebsd/zfs/zfs_debug.c
@@ -33,11 +33,11 @@ typedef struct zfs_dbgmsg {
 	char zdm_msg[1]; /* variable length allocation */
 } zfs_dbgmsg_t;

-list_t zfs_dbgmsgs;
-int zfs_dbgmsg_size = 0;
-kmutex_t zfs_dbgmsgs_lock;
+static list_t zfs_dbgmsgs;
+static int zfs_dbgmsg_size = 0;
+static kmutex_t zfs_dbgmsgs_lock;
 int zfs_dbgmsg_maxsize = 4<<20; /* 4MB */
-kstat_t *zfs_dbgmsg_kstat;
+static kstat_t *zfs_dbgmsg_kstat;

 /*
  * Internal ZFS debug messages are enabled by default.
@@ -51,7 +51,7 @@ kstat_t *zfs_dbgmsg_kstat;
  * # Disable the kernel debug message log.
  * sysctl vfs.zfs.dbgmsg_enable=0
  */
-int zfs_dbgmsg_enable = 1;
+int zfs_dbgmsg_enable = B_TRUE;

 static int
 zfs_dbgmsg_headers(char *buf, size_t size)
diff --git a/module/os/freebsd/zfs/zio_crypt.c b/module/os/freebsd/zfs/zio_crypt.c
index 038f2250e..c55c1ac25 100644
--- a/module/os/freebsd/zfs/zio_crypt.c
+++ b/module/os/freebsd/zfs/zio_crypt.c
@@ -185,13 +185,7 @@
 #define	ZFS_KEY_MAX_SALT_USES_DEFAULT	400000000
 #define	ZFS_CURRENT_MAX_SALT_USES	\
 	(MIN(zfs_key_max_salt_uses, ZFS_KEY_MAX_SALT_USES_DEFAULT))
-unsigned long zfs_key_max_salt_uses = ZFS_KEY_MAX_SALT_USES_DEFAULT;
-
-/*
- * Set to a nonzero value to cause zio_do_crypt_uio() to fail 1/this many
- * calls, to test decryption error handling code paths.
- */
-uint64_t zio_decrypt_fail_fraction = 0;
+static unsigned long zfs_key_max_salt_uses = ZFS_KEY_MAX_SALT_USES_DEFAULT;

 typedef struct blkptr_auth_buf {
 	uint64_t bab_prop;			/* blk_prop - portable mask */
@@ -199,7 +193,7 @@ typedef struct blkptr_auth_buf {
 	uint64_t bab_pad;			/* reserved for future use */
 } blkptr_auth_buf_t;

-zio_crypt_info_t zio_crypt_table[ZIO_CRYPT_FUNCTIONS] = {
+const zio_crypt_info_t zio_crypt_table[ZIO_CRYPT_FUNCTIONS] = {
 	{"", ZC_TYPE_NONE, 0, "inherit"},
 	{"", ZC_TYPE_NONE, 0, "on"},
 	{"", ZC_TYPE_NONE, 0, "off"},
@@ -237,7 +231,7 @@ zio_crypt_key_init(uint64_t crypt, zio_crypt_key_t *key)
 	int ret;
 	crypto_mechanism_t mech __unused;
 	uint_t keydata_len;
-	zio_crypt_info_t *ci = NULL;
+	const zio_crypt_info_t *ci = NULL;

 	ASSERT3P(key, !=, NULL);
 	ASSERT3U(crypt, <, ZIO_CRYPT_FUNCTIONS);
@@ -406,16 +400,13 @@ zio_do_crypt_uio_opencrypto(boolean_t encrypt, freebsd_crypt_session_t *sess,
     uint64_t crypt, crypto_key_t *key, uint8_t *ivbuf, uint_t datalen,
     zfs_uio_t *uio, uint_t auth_len)
 {
-	zio_crypt_info_t *ci;
-	int ret;
-
-	ci = &zio_crypt_table[crypt];
+	const zio_crypt_info_t *ci = &zio_crypt_table[crypt];
 	if (ci->ci_crypt_type != ZC_TYPE_GCM &&
 	    ci->ci_crypt_type != ZC_TYPE_CCM)
 		return (ENOTSUP);

-	ret = freebsd_crypt_uio(encrypt, sess, ci, uio, key, ivbuf,
+	int ret = freebsd_crypt_uio(encrypt, sess, ci, uio, key, ivbuf,
 	    datalen, auth_len);
 	if (ret != 0) {
 #ifdef FCRYPTO_DEBUG
diff --git a/module/os/linux/spl/spl-generic.c b/module/os/linux/spl/spl-generic.c
index 5ea4fc635..a72393859 100644
--- a/module/os/linux/spl/spl-generic.c
+++ b/module/os/linux/spl/spl-generic.c
@@ -44,17 +44,14 @@
 #include
 #include
 #include
-#include "zfs_gitrev.h"
 #include
 #include
 #include

-char spl_gitrev[64] = ZFS_META_GITREV;
-
 /* BEGIN CSTYLED */
 unsigned long spl_hostid = 0;
 EXPORT_SYMBOL(spl_hostid);
-/* BEGIN CSTYLED */
+
 module_param(spl_hostid, ulong, 0644);
 MODULE_PARM_DESC(spl_hostid, "The system hostid.");
 /* END CSTYLED */
@@ -632,7 +629,7 @@ spl_getattr(struct file *filp, struct kstat *stat)
  *
  */

-char *spl_hostid_path = HW_HOSTID_PATH;
+static char *spl_hostid_path = HW_HOSTID_PATH;
 module_param(spl_hostid_path, charp, 0444);
 MODULE_PARM_DESC(spl_hostid_path, "The system hostid file (/etc/hostid)");
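A recurring move in the Linux-side hunks that follow: a tunable loses external linkage but keeps its /sys/module knob, because module_param() only needs the variable's address within the same translation unit — C linkage is irrelevant to it. A skeletal module showing the shape (the module and parameter names are invented):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

/*
 * static: invisible to other .c files, yet still settable through
 * /sys/module/example/parameters/example_budget and at load time.
 */
static unsigned int example_budget = 16;
module_param(example_budget, uint, 0644);
MODULE_PARM_DESC(example_budget, "Illustrative tunable");

static int __init example_init(void)
{
	pr_info("example_budget=%u\n", example_budget);
	return 0;
}

static void __exit example_exit(void)
{
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");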
diff --git a/module/os/linux/spl/spl-kmem-cache.c b/module/os/linux/spl/spl-kmem-cache.c
index 2151ef008..3d9261736 100644
--- a/module/os/linux/spl/spl-kmem-cache.c
+++ b/module/os/linux/spl/spl-kmem-cache.c
@@ -72,7 +72,7 @@
  * will be limited to 2-256 objects per magazine (i.e per cpu).  Magazines
  * may never be entirely disabled in this implementation.
  */
-unsigned int spl_kmem_cache_magazine_size = 0;
+static unsigned int spl_kmem_cache_magazine_size = 0;
 module_param(spl_kmem_cache_magazine_size, uint, 0444);
 MODULE_PARM_DESC(spl_kmem_cache_magazine_size,
 	"Default magazine size (2-256), set automatically (0)");
@@ -84,15 +84,15 @@ MODULE_PARM_DESC(spl_kmem_cache_magazine_size,
  * setting this value to KMC_RECLAIM_ONCE limits how aggressively the cache
  * is reclaimed.  This may increase the likelihood of out of memory events.
  */
-unsigned int spl_kmem_cache_reclaim = 0 /* KMC_RECLAIM_ONCE */;
+static unsigned int spl_kmem_cache_reclaim = 0 /* KMC_RECLAIM_ONCE */;
 module_param(spl_kmem_cache_reclaim, uint, 0644);
 MODULE_PARM_DESC(spl_kmem_cache_reclaim, "Single reclaim pass (0x1)");

-unsigned int spl_kmem_cache_obj_per_slab = SPL_KMEM_CACHE_OBJ_PER_SLAB;
+static unsigned int spl_kmem_cache_obj_per_slab = SPL_KMEM_CACHE_OBJ_PER_SLAB;
 module_param(spl_kmem_cache_obj_per_slab, uint, 0644);
 MODULE_PARM_DESC(spl_kmem_cache_obj_per_slab, "Number of objects per slab");

-unsigned int spl_kmem_cache_max_size = SPL_KMEM_CACHE_MAX_SIZE;
+static unsigned int spl_kmem_cache_max_size = SPL_KMEM_CACHE_MAX_SIZE;
 module_param(spl_kmem_cache_max_size, uint, 0644);
 MODULE_PARM_DESC(spl_kmem_cache_max_size, "Maximum size of slab in MB");

@@ -103,7 +103,7 @@ MODULE_PARM_DESC(spl_kmem_cache_max_size, "Maximum size of slab in MB");
  * of 16K was determined to be optimal for architectures using 4K pages and
  * to also work well on architecutres using larger 64K page sizes.
  */
-unsigned int spl_kmem_cache_slab_limit = 16384;
+static unsigned int spl_kmem_cache_slab_limit = 16384;
 module_param(spl_kmem_cache_slab_limit, uint, 0644);
 MODULE_PARM_DESC(spl_kmem_cache_slab_limit,
 	"Objects less than N bytes use the Linux slab");
@@ -112,7 +112,7 @@ MODULE_PARM_DESC(spl_kmem_cache_slab_limit,
  * The number of threads available to allocate new slabs for caches.  This
  * should not need to be tuned but it is available for performance analysis.
  */
-unsigned int spl_kmem_cache_kmem_threads = 4;
+static unsigned int spl_kmem_cache_kmem_threads = 4;
 module_param(spl_kmem_cache_kmem_threads, uint, 0444);
 MODULE_PARM_DESC(spl_kmem_cache_kmem_threads,
 	"Number of spl_kmem_cache threads");
diff --git a/module/os/linux/spl/spl-kstat.c b/module/os/linux/spl/spl-kstat.c
index 0c4670832..a417d4d7c 100644
--- a/module/os/linux/spl/spl-kstat.c
+++ b/module/os/linux/spl/spl-kstat.c
@@ -358,7 +358,7 @@ kstat_seq_stop(struct seq_file *f, void *v)
 	mutex_exit(ksp->ks_lock);
 }

-static struct seq_operations kstat_seq_ops = {
+static const struct seq_operations kstat_seq_ops = {
 	.show = kstat_seq_show,
 	.start = kstat_seq_start,
 	.next = kstat_seq_next,
diff --git a/module/os/linux/spl/spl-proc.c b/module/os/linux/spl/spl-proc.c
index c4af27a7f..f500492ea 100644
--- a/module/os/linux/spl/spl-proc.c
+++ b/module/os/linux/spl/spl-proc.c
@@ -35,6 +35,7 @@
 #include
 #include
 #include
+#include "zfs_gitrev.h"

 #if defined(CONSTIFY_PLUGIN) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
 typedef struct ctl_table __no_const spl_ctl_table;
@@ -461,7 +462,7 @@ slab_seq_stop(struct seq_file *f, void *v)
 	up_read(&spl_kmem_cache_sem);
 }

-static struct seq_operations slab_seq_ops = {
+static const struct seq_operations slab_seq_ops = {
 	.show = slab_seq_show,
 	.start = slab_seq_start,
 	.next = slab_seq_next,
@@ -494,14 +495,14 @@ taskq_seq_stop(struct seq_file *f, void *v)
 	up_read(&tq_list_sem);
 }

-static struct seq_operations taskq_all_seq_ops = {
+static const struct seq_operations taskq_all_seq_ops = {
 	.show = taskq_all_seq_show,
 	.start = taskq_seq_start,
 	.next = taskq_seq_next,
 	.stop = taskq_seq_stop,
 };

-static struct seq_operations taskq_seq_ops = {
+static const struct seq_operations taskq_seq_ops = {
 	.show = taskq_seq_show,
 	.start = taskq_seq_start,
 	.next = taskq_seq_next,
@@ -612,8 +613,8 @@ static struct ctl_table spl_table[] = {
 	 */
 	{
 		.procname	= "gitrev",
-		.data		= spl_gitrev,
-		.maxlen		= sizeof (spl_gitrev),
+		.data		= (char *)ZFS_META_GITREV,
+		.maxlen		= sizeof (ZFS_META_GITREV),
 		.mode		= 0444,
 		.proc_handler	= &proc_dostring,
 	},
diff --git a/module/os/linux/spl/spl-procfs-list.c b/module/os/linux/spl/spl-procfs-list.c
index cae13228c..1922825c9 100644
--- a/module/os/linux/spl/spl-procfs-list.c
+++ b/module/os/linux/spl/spl-procfs-list.c
@@ -158,7 +158,7 @@ procfs_list_seq_stop(struct seq_file *f, void *p)
 	mutex_exit(&procfs_list->pl_lock);
 }

-static struct seq_operations procfs_list_seq_ops = {
+static const struct seq_operations procfs_list_seq_ops = {
 	.show	= procfs_list_seq_show,
 	.start	= procfs_list_seq_start,
 	.next	= procfs_list_seq_next,
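The seq_operations tables being const'd above work because the seq_file core only ever reads them — seq_open() takes the table as a const pointer — so the whole structure can live in .rodata. A minimal sketch of a const-qualified table driving a trivial one-record sequence (names invented):

#include <linux/seq_file.h>

static void *demo_seq_start(struct seq_file *f, loff_t *pos)
{
	/* Single-record sequence: position 0 exists, everything after is EOF. */
	return (*pos == 0) ? SEQ_START_TOKEN : NULL;
}

static void *demo_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	return NULL;
}

static void demo_seq_stop(struct seq_file *f, void *v)
{
}

static int demo_seq_show(struct seq_file *f, void *v)
{
	seq_puts(f, "hello\n");
	return 0;
}

/* Read-only ops table, hooked up via seq_open() in an .open handler. */
static const struct seq_operations demo_seq_ops = {
	.start	= demo_seq_start,
	.next	= demo_seq_next,
	.stop	= demo_seq_stop,
	.show	= demo_seq_show,
};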
diff --git a/module/os/linux/spl/spl-taskq.c b/module/os/linux/spl/spl-taskq.c
index a879c2856..0aab14897 100644
--- a/module/os/linux/spl/spl-taskq.c
+++ b/module/os/linux/spl/spl-taskq.c
@@ -32,21 +32,21 @@
 #include
 #endif

-int spl_taskq_thread_bind = 0;
+static int spl_taskq_thread_bind = 0;
 module_param(spl_taskq_thread_bind, int, 0644);
 MODULE_PARM_DESC(spl_taskq_thread_bind, "Bind taskq thread to CPU by default");

-int spl_taskq_thread_dynamic = 1;
+static int spl_taskq_thread_dynamic = 1;
 module_param(spl_taskq_thread_dynamic, int, 0444);
 MODULE_PARM_DESC(spl_taskq_thread_dynamic, "Allow dynamic taskq threads");

-int spl_taskq_thread_priority = 1;
+static int spl_taskq_thread_priority = 1;
 module_param(spl_taskq_thread_priority, int, 0644);
 MODULE_PARM_DESC(spl_taskq_thread_priority,
 	"Allow non-default priority for taskq threads");

-int spl_taskq_thread_sequential = 4;
+static int spl_taskq_thread_sequential = 4;
 module_param(spl_taskq_thread_sequential, int, 0644);
 MODULE_PARM_DESC(spl_taskq_thread_sequential,
 	"Create new taskq threads after N sequential tasks");
diff --git a/module/os/linux/spl/spl-xdr.c b/module/os/linux/spl/spl-xdr.c
index 5e763c256..6b7752418 100644
--- a/module/os/linux/spl/spl-xdr.c
+++ b/module/os/linux/spl/spl-xdr.c
@@ -127,8 +127,8 @@
  * space or MMIO space), the computer may explode.
  */

-static struct xdr_ops xdrmem_encode_ops;
-static struct xdr_ops xdrmem_decode_ops;
+static const struct xdr_ops xdrmem_encode_ops;
+static const struct xdr_ops xdrmem_decode_ops;

 void
 xdrmem_create(XDR *xdrs, const caddr_t addr, const uint_t size,
@@ -489,7 +489,7 @@ fail:
 	return (FALSE);
 }

-static struct xdr_ops xdrmem_encode_ops = {
+static const struct xdr_ops xdrmem_encode_ops = {
 	.xdr_control = xdrmem_control,
 	.xdr_char = xdrmem_enc_char,
 	.xdr_u_short = xdrmem_enc_ushort,
@@ -500,7 +500,7 @@ static struct xdr_ops xdrmem_encode_ops = {
 	.xdr_array = xdr_enc_array
 };

-static struct xdr_ops xdrmem_decode_ops = {
+static const struct xdr_ops xdrmem_decode_ops = {
 	.xdr_control = xdrmem_control,
 	.xdr_char = xdrmem_dec_char,
 	.xdr_u_short = xdrmem_dec_ushort,
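The spl-xdr.c hunk leans on a slightly obscure C feature to stay const: the two file-scope declarations near the top are tentative definitions, which lets xdrmem_create() reference the encode/decode tables hundreds of lines before their initialized definitions appear; at the end of the translation unit the tentative and real definitions unify. Distilled, with illustrative names:

#include <stdio.h>

struct vtbl {
	void (*hello)(void);
};

/* Tentative definition: usable below, initialized at the bottom. */
static const struct vtbl ops;

static void
greet(void)
{
	ops.hello();	/* refers to the table before its initializer */
}

static void
hello_impl(void)
{
	puts("hello");
}

/* The real (initialized) definition; C merges it with the tentative one. */
static const struct vtbl ops = {
	.hello = hello_impl,
};

int
main(void)
{
	greet();
	return (0);
}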
diff --git a/module/os/linux/zfs/abd_os.c b/module/os/linux/zfs/abd_os.c
index 372c1dadc..113aee585 100644
--- a/module/os/linux/zfs/abd_os.c
+++ b/module/os/linux/zfs/abd_os.c
@@ -149,8 +149,6 @@ struct {
 #define	abd_for_each_sg(abd, sg, n, i)	\
 	for_each_sg(ABD_SCATTER(abd).abd_sgl, sg, n, i)

-unsigned zfs_abd_scatter_max_order = MAX_ORDER - 1;
-
 /*
  * zfs_abd_scatter_min_size is the minimum allocation size to use scatter
  * ABD's.  Smaller allocations will use linear ABD's which uses
@@ -173,7 +171,7 @@ unsigned zfs_abd_scatter_max_order = MAX_ORDER - 1;
  * By default we use linear allocations for 512B and 1KB, and scatter
  * allocations for larger (1.5KB and up).
  */
-int zfs_abd_scatter_min_size = 512 * 3;
+static int zfs_abd_scatter_min_size = 512 * 3;

 /*
  * We use a scattered SPA_MAXBLOCKSIZE sized ABD whose pages are
@@ -221,6 +219,8 @@ abd_free_struct_impl(abd_t *abd)
 }

 #ifdef _KERNEL
+static unsigned zfs_abd_scatter_max_order = MAX_ORDER - 1;
+
 /*
  * Mark zfs data pages so they can be excluded from kernel crash dumps
  */
diff --git a/module/os/linux/zfs/vdev_disk.c b/module/os/linux/zfs/vdev_disk.c
index 46b459f5c..c06ba613b 100644
--- a/module/os/linux/zfs/vdev_disk.c
+++ b/module/os/linux/zfs/vdev_disk.c
@@ -919,7 +919,7 @@ param_set_vdev_scheduler(const char *val, zfs_kernel_param_t *kp)
 	return (error);
 }

-char *zfs_vdev_scheduler = "unused";
+static const char *zfs_vdev_scheduler = "unused";
 module_param_call(zfs_vdev_scheduler, param_set_vdev_scheduler,
     param_get_charp, &zfs_vdev_scheduler, 0644);
 MODULE_PARM_DESC(zfs_vdev_scheduler, "I/O scheduler");
diff --git a/module/os/linux/zfs/vdev_file.c b/module/os/linux/zfs/vdev_file.c
index 98338e604..f07314532 100644
--- a/module/os/linux/zfs/vdev_file.c
+++ b/module/os/linux/zfs/vdev_file.c
@@ -53,8 +53,8 @@ static taskq_t *vdev_file_taskq;
  * impact the vdev_ashift setting which can only be set at vdev creation
  * time.
  */
-unsigned long vdev_file_logical_ashift = SPA_MINBLOCKSHIFT;
-unsigned long vdev_file_physical_ashift = SPA_MINBLOCKSHIFT;
+static unsigned long vdev_file_logical_ashift = SPA_MINBLOCKSHIFT;
+static unsigned long vdev_file_physical_ashift = SPA_MINBLOCKSHIFT;

 static void
 vdev_file_hold(vdev_t *vd)
diff --git a/module/os/linux/zfs/zfs_acl.c b/module/os/linux/zfs/zfs_acl.c
index cf37aecf8..94b20dd6e 100644
--- a/module/os/linux/zfs/zfs_acl.c
+++ b/module/os/linux/zfs/zfs_acl.c
@@ -171,7 +171,7 @@ zfs_ace_v0_data(void *acep, void **datap)
 	return (0);
 }

-static acl_ops_t zfs_acl_v0_ops = {
+static const acl_ops_t zfs_acl_v0_ops = {
 	.ace_mask_get	= zfs_ace_v0_get_mask,
 	.ace_mask_set	= zfs_ace_v0_set_mask,
 	.ace_flags_get	= zfs_ace_v0_get_flags,
@@ -307,7 +307,7 @@ zfs_ace_fuid_data(void *acep, void **datap)
 	}
 }

-static acl_ops_t zfs_acl_fuid_ops = {
+static const acl_ops_t zfs_acl_fuid_ops = {
 	.ace_mask_get	= zfs_ace_fuid_get_mask,
 	.ace_mask_set	= zfs_ace_fuid_set_mask,
 	.ace_flags_get	= zfs_ace_fuid_get_flags,
@@ -2702,7 +2702,7 @@ zfs_zaccess_unix(znode_t *zp, mode_t mode, cred_t *cr)
 }

 /* See zfs_zaccess_delete() */
-int zfs_write_implies_delete_child = 1;
+static const boolean_t zfs_write_implies_delete_child = B_TRUE;

 /*
  * Determine whether delete access should be granted.
diff --git a/module/os/linux/zfs/zfs_ctldir.c b/module/os/linux/zfs/zfs_ctldir.c
index c58d851d7..f7e71461a 100644
--- a/module/os/linux/zfs/zfs_ctldir.c
+++ b/module/os/linux/zfs/zfs_ctldir.c
@@ -110,7 +110,7 @@ static krwlock_t zfs_snapshot_lock;
  * Control Directory Tunables (.zfs)
  */
 int zfs_expire_snapshot = ZFSCTL_EXPIRE_SNAPSHOT;
-int zfs_admin_snapshot = 0;
+static int zfs_admin_snapshot = 0;

 typedef struct {
 	char *se_name;	/* full snapshot name */
diff --git a/module/os/linux/zfs/zfs_debug.c b/module/os/linux/zfs/zfs_debug.c
index a32a4663e..be65f0a2e 100644
--- a/module/os/linux/zfs/zfs_debug.c
+++ b/module/os/linux/zfs/zfs_debug.c
@@ -33,8 +33,8 @@ typedef struct zfs_dbgmsg {
 	char zdm_msg[1]; /* variable length allocation */
 } zfs_dbgmsg_t;

-procfs_list_t zfs_dbgmsgs;
-int zfs_dbgmsg_size = 0;
+static procfs_list_t zfs_dbgmsgs;
+static int zfs_dbgmsg_size = 0;
 int zfs_dbgmsg_maxsize = 4<<20; /* 4MB */

 /*
@@ -49,7 +49,7 @@ int zfs_dbgmsg_maxsize = 4<<20; /* 4MB */
  * # Clear the kernel debug message log.
  * echo 0 >/proc/spl/kstat/zfs/dbgmsg
  */
-int zfs_dbgmsg_enable = 1;
+int zfs_dbgmsg_enable = B_TRUE;

 static int
 zfs_dbgmsg_show_header(struct seq_file *f)
diff --git a/module/os/linux/zfs/zfs_vnops_os.c b/module/os/linux/zfs/zfs_vnops_os.c
index 6c7de9830..aff3c4ad4 100644
--- a/module/os/linux/zfs/zfs_vnops_os.c
+++ b/module/os/linux/zfs/zfs_vnops_os.c
@@ -320,7 +320,7 @@ mappedread(znode_t *zp, int nbytes, zfs_uio_t *uio)
 }
 #endif /* _KERNEL */

-unsigned long zfs_delete_blocks = DMU_MAX_DELETEBLKCNT;
+static unsigned long zfs_delete_blocks = DMU_MAX_DELETEBLKCNT;

 /*
  * Write the bytes to a file.
diff --git a/module/os/linux/zfs/zfs_znode.c b/module/os/linux/zfs/zfs_znode.c
index 859c51baf..5b1573a6d 100644
--- a/module/os/linux/zfs/zfs_znode.c
+++ b/module/os/linux/zfs/zfs_znode.c
@@ -80,7 +80,7 @@ unsigned int zfs_object_mutex_size = ZFS_OBJ_MTX_SZ;
  * This is used by the test suite so that it can delay znodes from being
  * freed in order to inspect the unlinked set.
  */
-int zfs_unlink_suspend_progress = 0;
+static int zfs_unlink_suspend_progress = 0;

 /*
  * This callback is invoked when acquiring a RL_WRITER or RL_APPEND lock on
diff --git a/module/os/linux/zfs/zio_crypt.c b/module/os/linux/zfs/zio_crypt.c
index 2c82be2d7..9f8b9f53e 100644
--- a/module/os/linux/zfs/zio_crypt.c
+++ b/module/os/linux/zfs/zio_crypt.c
@@ -186,7 +186,7 @@
 #define	ZFS_KEY_MAX_SALT_USES_DEFAULT	400000000
 #define	ZFS_CURRENT_MAX_SALT_USES	\
 	(MIN(zfs_key_max_salt_uses, ZFS_KEY_MAX_SALT_USES_DEFAULT))
-unsigned long zfs_key_max_salt_uses = ZFS_KEY_MAX_SALT_USES_DEFAULT;
+static unsigned long zfs_key_max_salt_uses = ZFS_KEY_MAX_SALT_USES_DEFAULT;

 typedef struct blkptr_auth_buf {
 	uint64_t bab_prop;			/* blk_prop - portable mask */
@@ -194,7 +194,7 @@ typedef struct blkptr_auth_buf {
 	uint64_t bab_pad;			/* reserved for future use */
 } blkptr_auth_buf_t;

-zio_crypt_info_t zio_crypt_table[ZIO_CRYPT_FUNCTIONS] = {
+const zio_crypt_info_t zio_crypt_table[ZIO_CRYPT_FUNCTIONS] = {
 	{"", ZC_TYPE_NONE, 0, "inherit"},
 	{"", ZC_TYPE_NONE, 0, "on"},
 	{"", ZC_TYPE_NONE, 0, "off"},
diff --git a/module/os/linux/zfs/zpl_ctldir.c b/module/os/linux/zfs/zpl_ctldir.c
index 9b526afd0..a640930a0 100644
--- a/module/os/linux/zfs/zpl_ctldir.c
+++ b/module/os/linux/zfs/zpl_ctldir.c
@@ -201,7 +201,7 @@ zpl_snapdir_revalidate(struct dentry *dentry, unsigned int flags)
 	return (!!dentry->d_inode);
 }

-dentry_operations_t zpl_dops_snapdirs = {
+static const dentry_operations_t zpl_dops_snapdirs = {
 /*
  * Auto mounting of snapshots is only supported for 2.6.37 and
  * newer kernels. Prior to this kernel the ops->follow_link()
diff --git a/module/os/linux/zfs/zpl_file.c b/module/os/linux/zfs/zpl_file.c
index ff324222d..21926f170 100644
--- a/module/os/linux/zfs/zpl_file.c
+++ b/module/os/linux/zfs/zpl_file.c
@@ -41,7 +41,7 @@
  * When using fallocate(2) to preallocate space, inflate the requested
  * capacity check by 10% to account for the required metadata blocks.
  */
-unsigned int zfs_fallocate_reserve_percent = 110;
+static unsigned int zfs_fallocate_reserve_percent = 110;

 static int
 zpl_open(struct inode *ip, struct file *filp)
diff --git a/module/os/linux/zfs/zpl_xattr.c b/module/os/linux/zfs/zpl_xattr.c
index e7726e845..a1921ed08 100644
--- a/module/os/linux/zfs/zpl_xattr.c
+++ b/module/os/linux/zfs/zpl_xattr.c
@@ -746,7 +746,7 @@ __zpl_xattr_user_set(struct inode *ip, const char *name,
 }
 ZPL_XATTR_SET_WRAPPER(zpl_xattr_user_set);

-xattr_handler_t zpl_xattr_user_handler =
+static xattr_handler_t zpl_xattr_user_handler =
 {
 	.prefix	= XATTR_USER_PREFIX,
 	.list	= zpl_xattr_user_list,
@@ -815,8 +815,7 @@ __zpl_xattr_trusted_set(struct inode *ip, const char *name,
 }
 ZPL_XATTR_SET_WRAPPER(zpl_xattr_trusted_set);

-xattr_handler_t zpl_xattr_trusted_handler =
-{
+static xattr_handler_t zpl_xattr_trusted_handler = {
 	.prefix	= XATTR_TRUSTED_PREFIX,
 	.list	= zpl_xattr_trusted_list,
 	.get	= zpl_xattr_trusted_get,
@@ -910,7 +909,7 @@ zpl_xattr_security_init(struct inode *ip, struct inode *dip,
 /*
  * Security xattr namespace handlers.
  */
-xattr_handler_t zpl_xattr_security_handler = {
+static xattr_handler_t zpl_xattr_security_handler = {
 	.prefix	= XATTR_SECURITY_PREFIX,
 	.list	= zpl_xattr_security_list,
 	.get	= zpl_xattr_security_get,
@@ -1333,8 +1332,7 @@ ZPL_XATTR_SET_WRAPPER(zpl_xattr_acl_set_default);
  * Use .name instead of .prefix when available. xattr_resolve_name will match
  * whole name and reject anything that has .name only as prefix.
  */
-xattr_handler_t zpl_xattr_acl_access_handler =
-{
+static xattr_handler_t zpl_xattr_acl_access_handler = {
 #ifdef HAVE_XATTR_HANDLER_NAME
 	.name	= XATTR_NAME_POSIX_ACL_ACCESS,
 #else
@@ -1356,8 +1354,7 @@ xattr_handler_t zpl_xattr_acl_access_handler =
  * Use .name instead of .prefix when available. xattr_resolve_name will match
  * whole name and reject anything that has .name only as prefix.
  */
-xattr_handler_t zpl_xattr_acl_default_handler =
-{
+static xattr_handler_t zpl_xattr_acl_default_handler = {
 #ifdef HAVE_XATTR_HANDLER_NAME
 	.name	= XATTR_NAME_POSIX_ACL_DEFAULT,
 #else
diff --git a/module/os/linux/zfs/zvol_os.c b/module/os/linux/zfs/zvol_os.c
index 69479b3f7..cef52e224 100644
--- a/module/os/linux/zfs/zvol_os.c
+++ b/module/os/linux/zfs/zvol_os.c
@@ -41,12 +41,12 @@
 #include
 #include

-unsigned int zvol_major = ZVOL_MAJOR;
-unsigned int zvol_request_sync = 0;
-unsigned int zvol_prefetch_bytes = (128 * 1024);
-unsigned long zvol_max_discard_blocks = 16384;
-unsigned int zvol_threads = 32;
-unsigned int zvol_open_timeout_ms = 1000;
+static unsigned int zvol_major = ZVOL_MAJOR;
+static unsigned int zvol_request_sync = 0;
+static unsigned int zvol_prefetch_bytes = (128 * 1024);
+static unsigned long zvol_max_discard_blocks = 16384;
+static unsigned int zvol_threads = 32;
+static const unsigned int zvol_open_timeout_ms = 1000;

 struct zvol_state_os {
 	struct gendisk *zvo_disk;	/* generic disk */
@@ -802,7 +802,7 @@ zvol_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 	return (0);
 }

-static struct block_device_operations zvol_ops = {
+static const struct block_device_operations zvol_ops = {
 	.open			= zvol_open,
 	.release		= zvol_release,
 	.ioctl			= zvol_ioctl,
diff --git a/module/unicode/u8_textprep.c b/module/unicode/u8_textprep.c
index bce5f1962..863f69f7b 100644
--- a/module/unicode/u8_textprep.c
+++ b/module/unicode/u8_textprep.c
@@ -200,7 +200,7 @@ typedef enum {
 #define	I_ U8_ILLEGAL_CHAR
 #define	O_ U8_OUT_OF_RANGE_CHAR

-const int8_t u8_number_of_bytes[0x100] = {
+static const int8_t u8_number_of_bytes[0x100] = {
 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
@@ -238,7 +238,7 @@ const int8_t u8_number_of_bytes[0x100] = {
 #undef	I_
 #undef	O_

-const uint8_t u8_valid_min_2nd_byte[0x100] = {
+static const uint8_t u8_valid_min_2nd_byte[0x100] = {
 	0,    0,    0,    0,    0,    0,    0,    0,
 	0,    0,    0,    0,    0,    0,    0,    0,
 	0,    0,    0,    0,    0,    0,    0,    0,
@@ -280,7 +280,7 @@ const uint8_t u8_valid_min_2nd_byte[0x100] = {
 	0,    0,    0,    0,    0,    0,    0,    0,
 };

-const uint8_t u8_valid_max_2nd_byte[0x100] = {
+static const uint8_t u8_valid_max_2nd_byte[0x100] = {
 	0,    0,    0,    0,    0,    0,    0,    0,
 	0,    0,    0,    0,    0,    0,    0,    0,
diff --git a/module/unicode/uconv.c b/module/unicode/uconv.c
index fe84979d0..6854aeab2 100644
--- a/module/unicode/uconv.c
+++ b/module/unicode/uconv.c
@@ -139,11 +139,7 @@ static const uchar_t remaining_bytes_tbl[0x100] = {
  * the first byte of a UTF-8 character. Index is remaining bytes at above of
  * the character.
  */
-#ifdef	_KERNEL
-const uchar_t u8_masks_tbl[6] = { 0x00, 0x1f, 0x0f, 0x07, 0x03, 0x01 };
-#else
 static const uchar_t u8_masks_tbl[6] = { 0x00, 0x1f, 0x0f, 0x07, 0x03, 0x01 };
-#endif	/* _KERNEL */

 /*
  * The following two vectors are to provide valid minimum and
diff --git a/module/zcommon/zfeature_common.c b/module/zcommon/zfeature_common.c
index c7278fa00..529c52316 100644
--- a/module/zcommon/zfeature_common.c
+++ b/module/zcommon/zfeature_common.c
@@ -325,7 +325,7 @@ zfeature_register(spa_feature_t fid, const char *guid, const char *name,
     const struct zfs_mod_supported_features *sfeatures)
 {
 	zfeature_info_t *feature = &spa_feature_table[fid];
-	static spa_feature_t nodeps[] = { SPA_FEATURE_NONE };
+	static const spa_feature_t nodeps[] = { SPA_FEATURE_NONE };

 	ASSERT(name != NULL);
 	ASSERT(desc != NULL);
diff --git a/module/zcommon/zfs_comutil.c b/module/zcommon/zfs_comutil.c
index 886167759..020e7e86c 100644
--- a/module/zcommon/zfs_comutil.c
+++ b/module/zcommon/zfs_comutil.c
@@ -158,13 +158,11 @@ static zfs_version_spa_map_t zfs_version_table[] = {
 int
 zfs_zpl_version_map(int spa_version)
 {
-	int i;
 	int version = -1;

-	for (i = 0; zfs_version_table[i].version_spa; i++) {
+	for (int i = 0; zfs_version_table[i].version_spa; i++)
 		if (spa_version >= zfs_version_table[i].version_spa)
 			version = zfs_version_table[i].version_zpl;
-	}

 	return (version);
 }
@@ -176,22 +174,18 @@ zfs_zpl_version_map(int spa_version)
 int
 zfs_spa_version_map(int zpl_version)
 {
-	int i;
-	int version = -1;
-
-	for (i = 0; zfs_version_table[i].version_zpl; i++) {
+	for (int i = 0; zfs_version_table[i].version_zpl; i++)
 		if (zfs_version_table[i].version_zpl >= zpl_version)
 			return (zfs_version_table[i].version_spa);
-	}

-	return (version);
+	return (-1);
 }

 /*
  * This is the table of legacy internal event names; it should not be modified.
  * The internal events are now stored in the history log as strings.
  */
-const char *zfs_history_event_names[ZFS_NUM_LEGACY_HISTORY_EVENTS] = {
+const char *const zfs_history_event_names[ZFS_NUM_LEGACY_HISTORY_EVENTS] = {
 	"invalid event",
 	"pool create",
 	"vdev add",
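The zfs_spa_version_map() rewrite above drops the dead `version` accumulator: since the table is ordered, the first zpl entry at or above the requested version decides the answer, so the function can return from inside the loop and fall through to -1. The contrast with zfs_zpl_version_map(), which intentionally keeps scanning to find the last matching entry, is easy to miss; a self-contained rendering of both shapes (table values abridged and illustrative, not the full zfs_version_table):

#include <stdio.h>

typedef struct {
	int version_zpl;
	int version_spa;
} version_map_t;

/* Ascending, zero-terminated, as in zfs_comutil.c (abridged). */
static const version_map_t version_table[] = {
	{ 1, 1 },
	{ 2, 10 },
	{ 3, 11 },
	{ 4, 15 },
	{ 5, 26 },
	{ 0, 0 }
};

/* Highest ZPL version supported by an SPA version: scan to the end. */
static int
zpl_version_map(int spa_version)
{
	int version = -1;

	for (int i = 0; version_table[i].version_spa; i++)
		if (spa_version >= version_table[i].version_spa)
			version = version_table[i].version_zpl;

	return (version);
}

/* Lowest SPA version supporting a ZPL version: first hit wins. */
static int
spa_version_map(int zpl_version)
{
	for (int i = 0; version_table[i].version_zpl; i++)
		if (version_table[i].version_zpl >= zpl_version)
			return (version_table[i].version_spa);

	return (-1);
}

int
main(void)
{
	printf("%d %d\n", zpl_version_map(20), spa_version_map(3));
	/* prints "4 11" with the abridged table above */
	return (0);
}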
@@ -243,9 +237,7 @@ zfs_dataset_name_hidden(const char *name)
 	 * internal datasets (which have a $ in their name), and
 	 * temporary datasets (which have a % in their name).
 	 */
-	if (strchr(name, '$') != NULL)
-		return (B_TRUE);
-	if (strchr(name, '%') != NULL)
+	if (strpbrk(name, "$%") != NULL)
 		return (B_TRUE);
 	if (!INGLOBALZONE(curproc) && !zone_dataset_visible(name, NULL))
 		return (B_TRUE);
diff --git a/module/zcommon/zfs_deleg.c b/module/zcommon/zfs_deleg.c
index e1f5a353b..8a4a6ca86 100644
--- a/module/zcommon/zfs_deleg.c
+++ b/module/zcommon/zfs_deleg.c
@@ -42,7 +42,7 @@
 #include "zfs_deleg.h"
 #include "zfs_namecheck.h"

-zfs_deleg_perm_tab_t zfs_deleg_perm_tab[] = {
+const zfs_deleg_perm_tab_t zfs_deleg_perm_tab[] = {
 	{ZFS_DELEG_PERM_ALLOW},
 	{ZFS_DELEG_PERM_BOOKMARK},
 	{ZFS_DELEG_PERM_CLONE},
@@ -89,15 +89,12 @@ zfs_valid_permission_name(const char *perm)
 const char *
 zfs_deleg_canonicalize_perm(const char *perm)
 {
-	int i;
-	zfs_prop_t prop;
-
-	for (i = 0; zfs_deleg_perm_tab[i].z_perm != NULL; i++) {
+	for (int i = 0; zfs_deleg_perm_tab[i].z_perm != NULL; i++) {
 		if (strcmp(perm, zfs_deleg_perm_tab[i].z_perm) == 0)
 			return (perm);
 	}

-	prop = zfs_name_to_prop(perm);
+	zfs_prop_t prop = zfs_name_to_prop(perm);
 	if (prop != ZPROP_INVAL && zfs_prop_delegatable(prop))
 		return (zfs_prop_to_name(prop));
 	return (NULL);
diff --git a/module/zcommon/zfs_prop.c b/module/zcommon/zfs_prop.c
index 800885ee6..36f30859d 100644
--- a/module/zcommon/zfs_prop.c
+++ b/module/zcommon/zfs_prop.c
@@ -50,7 +50,7 @@ static zprop_desc_t zfs_prop_table[ZFS_NUM_PROPS];

 /* Note this is indexed by zfs_userquota_prop_t, keep the order the same */
-const char *zfs_userquota_prop_prefixes[] = {
+const char *const zfs_userquota_prop_prefixes[] = {
 	"userused@",
 	"userquota@",
 	"groupused@",
diff --git a/module/zfs/arc.c b/module/zfs/arc.c
index 46d7788d6..bf019df16 100644
--- a/module/zfs/arc.c
+++ b/module/zfs/arc.c
@@ -352,7 +352,7 @@ static list_t arc_evict_waiters;
  * can still happen, even during the potentially long time that arc_size is
  * more than arc_c.
  */
-int zfs_arc_eviction_pct = 200;
+static int zfs_arc_eviction_pct = 200;

 /*
  * The number of headers to evict in arc_evict_state_impl() before
@@ -361,7 +361,7 @@ int zfs_arc_eviction_pct = 200;
  * oldest header in the arc state), but comes with higher overhead
  * (i.e. more invocations of arc_evict_state_impl()).
  */
-int zfs_arc_evict_batch_limit = 10;
+static int zfs_arc_evict_batch_limit = 10;

 /* number of seconds before growing cache again */
 int arc_grow_retry = 5;

@@ -369,13 +369,13 @@
 /*
  * Minimum time between calls to arc_kmem_reap_soon().
  */
-int arc_kmem_cache_reap_retry_ms = 1000;
+static const int arc_kmem_cache_reap_retry_ms = 1000;

 /* shift of arc_c for calculating overflow limit in arc_get_data_impl */
-int zfs_arc_overflow_shift = 8;
+static int zfs_arc_overflow_shift = 8;

 /* shift of arc_c for calculating both min and max arc_p */
-int arc_p_min_shift = 4;
+static int arc_p_min_shift = 4;

 /* log2(fraction of arc to reclaim) */
 int arc_shrink_shift = 7;
@@ -421,19 +421,22 @@ unsigned long zfs_arc_max = 0;
 unsigned long zfs_arc_min = 0;
 unsigned long zfs_arc_meta_limit = 0;
 unsigned long zfs_arc_meta_min = 0;
-unsigned long zfs_arc_dnode_limit = 0;
-unsigned long zfs_arc_dnode_reduce_percent = 10;
-int zfs_arc_grow_retry = 0;
-int zfs_arc_shrink_shift = 0;
-int zfs_arc_p_min_shift = 0;
+static unsigned long zfs_arc_dnode_limit = 0;
+static unsigned long zfs_arc_dnode_reduce_percent = 10;
+static int zfs_arc_grow_retry = 0;
+static int zfs_arc_shrink_shift = 0;
+static int zfs_arc_p_min_shift = 0;
 int zfs_arc_average_blocksize = 8 * 1024; /* 8KB */

 /*
- * ARC dirty data constraints for arc_tempreserve_space() throttle.
+ * ARC dirty data constraints for arc_tempreserve_space() throttle:
+ * * total dirty data limit
+ * * anon block dirty limit
+ * * each pool's anon allowance
  */
-unsigned long zfs_arc_dirty_limit_percent = 50;	/* total dirty data limit */
-unsigned long zfs_arc_anon_limit_percent = 25;	/* anon block dirty limit */
-unsigned long zfs_arc_pool_dirty_percent = 20;	/* each pool's anon allowance */
+static const unsigned long zfs_arc_dirty_limit_percent = 50;
+static const unsigned long zfs_arc_anon_limit_percent = 25;
+static const unsigned long zfs_arc_pool_dirty_percent = 20;

 /*
  * Enable or disable compressed arc buffers.
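The three percentages just consolidated into static const feed the arc_tempreserve_space() throttle; each is a cap expressed against zfs_dirty_data_max. The arithmetic reduces to the following (a sketch only — the real check also folds in the current anon ARC state, which this omits):

#include <stdio.h>

static const unsigned long dirty_limit_percent = 50;
static const unsigned long anon_limit_percent = 25;
static const unsigned long pool_dirty_percent = 20;

int
main(void)
{
	unsigned long dirty_data_max = 1UL << 30;	/* pretend 1 GiB cap */

	unsigned long total_limit =
	    dirty_data_max * dirty_limit_percent / 100;
	unsigned long anon_limit =
	    dirty_data_max * anon_limit_percent / 100;
	unsigned long pool_allowance =
	    dirty_data_max * pool_dirty_percent / 100;

	printf("total=%lu anon=%lu per-pool=%lu\n",
	    total_limit, anon_limit, pool_allowance);
	return (0);
}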
@@ -444,24 +447,24 @@ int zfs_compressed_arc_enabled = B_TRUE;
  * ARC will evict meta buffers that exceed arc_meta_limit. This
  * tunable make arc_meta_limit adjustable for different workloads.
  */
-unsigned long zfs_arc_meta_limit_percent = 75;
+static unsigned long zfs_arc_meta_limit_percent = 75;

 /*
  * Percentage that can be consumed by dnodes of ARC meta buffers.
  */
-unsigned long zfs_arc_dnode_limit_percent = 10;
+static unsigned long zfs_arc_dnode_limit_percent = 10;

 /*
- * These tunables are Linux specific
+ * These tunables are Linux-specific
  */
-unsigned long zfs_arc_sys_free = 0;
-int zfs_arc_min_prefetch_ms = 0;
-int zfs_arc_min_prescient_prefetch_ms = 0;
-int zfs_arc_p_dampener_disable = 1;
-int zfs_arc_meta_prune = 10000;
-int zfs_arc_meta_strategy = ARC_STRATEGY_META_BALANCED;
-int zfs_arc_meta_adjust_restarts = 4096;
-int zfs_arc_lotsfree_percent = 10;
+static unsigned long zfs_arc_sys_free = 0;
+static int zfs_arc_min_prefetch_ms = 0;
+static int zfs_arc_min_prescient_prefetch_ms = 0;
+static int zfs_arc_p_dampener_disable = 1;
+static int zfs_arc_meta_prune = 10000;
+static int zfs_arc_meta_strategy = ARC_STRATEGY_META_BALANCED;
+static int zfs_arc_meta_adjust_restarts = 4096;
+static int zfs_arc_lotsfree_percent = 10;

 /*
  * Number of arc_prune threads
@@ -651,7 +654,7 @@ arc_sums_t arc_sums;
 	ARCSTAT(stat) = x; \
 } while (0)

-kstat_t *arc_ksp;
+static kstat_t *arc_ksp;

 /*
  * There are several ARC variables that are critical to export as kstats --
@@ -785,7 +788,7 @@ unsigned long l2arc_feed_min_ms = L2ARC_FEED_MIN_MS;	/* min interval msecs */
 int l2arc_noprefetch = B_TRUE;			/* don't cache prefetch bufs */
 int l2arc_feed_again = B_TRUE;			/* turbo warmup */
 int l2arc_norw = B_FALSE;			/* no reads during writes */
-int l2arc_meta_percent = 33;			/* limit on headers size */
+static int l2arc_meta_percent = 33;		/* limit on headers size */

 /*
  * L2ARC Internals
@@ -886,7 +889,7 @@ int l2arc_exclude_special = 0;
  * l2arc_mfuonly : A ZFS module parameter that controls whether only MFU
  * metadata and data are cached from ARC into L2ARC.
  */
-int l2arc_mfuonly = 0;
+static int l2arc_mfuonly = 0;

 /*
  * L2ARC TRIM
@@ -903,7 +906,7 @@ int l2arc_mfuonly = 0;
  * will vary depending of how well the specific device handles
  * these commands.
  */
-unsigned long l2arc_trim_ahead = 0;
+static unsigned long l2arc_trim_ahead = 0;

 /*
  * Performance tuning of L2ARC persistence:
@@ -918,8 +921,8 @@ unsigned long l2arc_trim_ahead = 0;
  * data. In this case do not write log blocks in L2ARC in order
  * not to waste space.
  */
-int l2arc_rebuild_enabled = B_TRUE;
-unsigned long l2arc_rebuild_blocks_min_l2size = 1024 * 1024 * 1024;
+static int l2arc_rebuild_enabled = B_TRUE;
+static unsigned long l2arc_rebuild_blocks_min_l2size = 1024 * 1024 * 1024;

 /* L2ARC persistence rebuild control routines. */
 void l2arc_rebuild_vdev(vdev_t *vd, boolean_t reopen);
@@ -4810,8 +4813,6 @@ arc_kmem_reap_soon(void)
 	size_t i;
 	kmem_cache_t *prev_cache = NULL;
 	kmem_cache_t *prev_data_cache = NULL;
-	extern kmem_cache_t *zio_buf_cache[];
-	extern kmem_cache_t *zio_data_buf_cache[];

 #ifdef _KERNEL
 	if ((aggsum_compare(&arc_sums.arcstat_meta_used,
diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c
index 8443704c7..96dcb2564 100644
--- a/module/zfs/dbuf.c
+++ b/module/zfs/dbuf.c
@@ -55,7 +55,7 @@
 #include
 #include

-kstat_t *dbuf_ksp;
+static kstat_t *dbuf_ksp;

 typedef struct dbuf_stats {
 	/*
@@ -225,12 +225,12 @@ typedef struct dbuf_cache {
 dbuf_cache_t dbuf_caches[DB_CACHE_MAX];

 /* Size limits for the caches */
-unsigned long dbuf_cache_max_bytes = ULONG_MAX;
-unsigned long dbuf_metadata_cache_max_bytes = ULONG_MAX;
+static unsigned long dbuf_cache_max_bytes = ULONG_MAX;
+static unsigned long dbuf_metadata_cache_max_bytes = ULONG_MAX;

 /* Set the default sizes of the caches to log2 fraction of arc size */
-int dbuf_cache_shift = 5;
-int dbuf_metadata_cache_shift = 6;
+static int dbuf_cache_shift = 5;
+static int dbuf_metadata_cache_shift = 6;

 static unsigned long dbuf_cache_target_bytes(void);
 static unsigned long dbuf_metadata_cache_target_bytes(void);
@@ -277,8 +277,8 @@ static unsigned long dbuf_metadata_cache_target_bytes(void);
 /*
  * The percentage above and below the maximum cache size.
  */
-uint_t dbuf_cache_hiwater_pct = 10;
-uint_t dbuf_cache_lowater_pct = 10;
+static uint_t dbuf_cache_hiwater_pct = 10;
+static uint_t dbuf_cache_lowater_pct = 10;

 static int
 dbuf_cons(void *vdb, void *unused, int kmflag)
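dbuf_cache_shift and the hiwater/lowater percentages above combine into the dbuf cache's operating band: the target is a log2 fraction of the ARC target, and eviction effort switches on and off around target plus or minus 10%. Roughly — the helper below mirrors the arithmetic, not dbuf.c's actual functions:

#include <stdio.h>

static const int dbuf_cache_shift = 5;		/* target = arc_target >> 5 */
static const unsigned int hiwater_pct = 10;
static const unsigned int lowater_pct = 10;

int
main(void)
{
	unsigned long arc_target = 4UL << 30;	/* pretend 4 GiB ARC */
	unsigned long target = arc_target >> dbuf_cache_shift;
	unsigned long hiwater = target + target * hiwater_pct / 100;
	unsigned long lowater = target - target * lowater_pct / 100;

	/* Evict above hiwater; stop once the cache falls below lowater. */
	printf("target=%lu hiwater=%lu lowater=%lu\n",
	    target, hiwater, lowater);
	return (0);
}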
diff --git a/module/zfs/ddt_zap.c b/module/zfs/ddt_zap.c
index c5c9eda0b..d0127f22e 100644
--- a/module/zfs/ddt_zap.c
+++ b/module/zfs/ddt_zap.c
@@ -31,8 +31,8 @@
 #include
 #include

-int ddt_zap_leaf_blockshift = 12;
-int ddt_zap_indirect_blockshift = 12;
+static const int ddt_zap_leaf_blockshift = 12;
+static const int ddt_zap_indirect_blockshift = 12;

 static int
 ddt_zap_create(objset_t *os, uint64_t *objectp, dmu_tx_t *tx, boolean_t prehash)
diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c
index e6f391066..d802ce557 100644
--- a/module/zfs/dmu.c
+++ b/module/zfs/dmu.c
@@ -62,7 +62,7 @@
 /*
  * Enable/disable nopwrite feature.
  */
-int zfs_nopwrite_enabled = 1;
+static int zfs_nopwrite_enabled = 1;

 /*
  * Tunable to control percentage of dirtied L1 blocks from frees allowed into
@@ -70,7 +70,7 @@ int zfs_nopwrite_enabled = 1;
  * will wait until the next TXG.
  * A value of zero will disable this throttle.
  */
-unsigned long zfs_per_txg_dirty_frees_percent = 5;
+static unsigned long zfs_per_txg_dirty_frees_percent = 5;

 /*
  * Enable/disable forcing txg sync when dirty checking for holes with lseek().
@@ -79,14 +79,14 @@ unsigned long zfs_per_txg_dirty_frees_percent = 5;
  * Disabling this option will result in holes never being reported in dirty
  * files which is always safe.
  */
-int zfs_dmu_offset_next_sync = 1;
+static int zfs_dmu_offset_next_sync = 1;

 /*
  * Limit the amount we can prefetch with one call to this amount.  This
  * helps to limit the amount of memory that can be used by prefetching.
  * Larger objects should be prefetched a bit at a time.
  */
-int dmu_prefetch_max = 8 * SPA_MAXBLOCKSIZE;
+static int dmu_prefetch_max = 8 * SPA_MAXBLOCKSIZE;

 const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
 	{DMU_BSWAP_UINT8,  TRUE,  FALSE, FALSE, "unallocated"		},
@@ -1940,7 +1940,7 @@ dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
  * When the "redundant_metadata" property is set to "most", only indirect
  * blocks of this level and higher will have an additional ditto block.
  */
-int zfs_redundant_metadata_most_ditto_level = 2;
+static const int zfs_redundant_metadata_most_ditto_level = 2;

 void
 dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp)
diff --git a/module/zfs/dmu_objset.c b/module/zfs/dmu_objset.c
index a8975797e..9a74fa9ce 100644
--- a/module/zfs/dmu_objset.c
+++ b/module/zfs/dmu_objset.c
@@ -78,16 +78,16 @@ krwlock_t os_lock;
  * datasets.
  * Default is 4 times the number of leaf vdevs.
  */
-int dmu_find_threads = 0;
+static const int dmu_find_threads = 0;

 /*
  * Backfill lower metadnode objects after this many have been freed.
  * Backfilling negatively impacts object creation rates, so only do it
  * if there are enough holes to fill.
  */
-int dmu_rescan_dnode_threshold = 1 << DN_MAX_INDBLKSHIFT;
+static const int dmu_rescan_dnode_threshold = 1 << DN_MAX_INDBLKSHIFT;

-static char *upgrade_tag = "upgrade_tag";
+static const char *upgrade_tag = "upgrade_tag";

 static void dmu_objset_find_dp_cb(void *arg);
diff --git a/module/zfs/dmu_recv.c b/module/zfs/dmu_recv.c
index 0ec46bdb4..f132219c9 100644
--- a/module/zfs/dmu_recv.c
+++ b/module/zfs/dmu_recv.c
@@ -64,12 +64,12 @@
 #endif
 #include

-int zfs_recv_queue_length = SPA_MAXBLOCKSIZE;
-int zfs_recv_queue_ff = 20;
-int zfs_recv_write_batch_size = 1024 * 1024;
+static int zfs_recv_queue_length = SPA_MAXBLOCKSIZE;
+static int zfs_recv_queue_ff = 20;
+static int zfs_recv_write_batch_size = 1024 * 1024;

-static char *dmu_recv_tag = "dmu_recv_tag";
-const char *recv_clone_name = "%recv";
+static void *const dmu_recv_tag = "dmu_recv_tag";
+const char *const recv_clone_name = "%recv";

 static int receive_read_payload_and_next_header(dmu_recv_cookie_t *ra, int len,
     void *buf);
diff --git a/module/zfs/dmu_redact.c b/module/zfs/dmu_redact.c
index 7efe423d3..46f498289 100644
--- a/module/zfs/dmu_redact.c
+++ b/module/zfs/dmu_redact.c
@@ -40,13 +40,14 @@
  * This controls the number of entries in the buffer the redaction_list_update
  * synctask uses to buffer writes to the redaction list.
  */
-int redact_sync_bufsize = 1024;
+static const int redact_sync_bufsize = 1024;

 /*
  * Controls how often to update the redaction list when creating a redaction
  * list.
  */
-uint64_t redaction_list_update_interval_ns = 1000 * 1000 * 1000ULL; /* NS */
+static const uint64_t redaction_list_update_interval_ns =
+    1000 * 1000 * 1000ULL; /* 1s */

 /*
  * This tunable controls the length of the queues that zfs redact worker threads
@@ -56,7 +57,7 @@ uint64_t redaction_list_update_interval_ns = 1000 * 1000 * 1000ULL; /* NS */
  * available IO resources, or the queues are consuming too much memory, this
  * variable may need to be decreased.
  */
-int zfs_redact_queue_length = 1024 * 1024;
+static const int zfs_redact_queue_length = 1024 * 1024;

 /*
  * These tunables control the fill fraction of the queues by zfs redact. The
@@ -65,7 +66,7 @@ int zfs_redact_queue_length = 1024 * 1024;
  * should be tuned down. If the queues empty before the signalled thread can
  * catch up, then these should be tuned up.
  */
-uint64_t zfs_redact_queue_ff = 20;
+static const uint64_t zfs_redact_queue_ff = 20;

 struct redact_record {
 	bqueue_node_t ln;
diff --git a/module/zfs/dmu_send.c b/module/zfs/dmu_send.c
index 6cff7fd58..fbb1947a5 100644
--- a/module/zfs/dmu_send.c
+++ b/module/zfs/dmu_send.c
@@ -67,7 +67,7 @@
 #endif

 /* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
-int zfs_send_corrupt_data = B_FALSE;
+static int zfs_send_corrupt_data = B_FALSE;

 /*
  * This tunable controls the amount of data (measured in bytes) that will be
  * prefetched by zfs send.  If the main thread is blocking on reads that haven't
@@ -75,7 +75,7 @@ int zfs_send_corrupt_data = B_FALSE;
  * thread is issuing new reads because the prefetches have fallen out of the
  * cache, this may need to be decreased.
  */
-int zfs_send_queue_length = SPA_MAXBLOCKSIZE;
+static int zfs_send_queue_length = SPA_MAXBLOCKSIZE;

 /*
  * This tunable controls the length of the queues that zfs send worker threads
  * use to communicate.  If the send_main_thread is blocking on these queues,
@@ -83,7 +83,7 @@ int zfs_send_queue_length = SPA_MAXBLOCKSIZE;
  * at the start of a send as these threads consume all the available IO
  * resources, this variable may need to be decreased.
  */
-int zfs_send_no_prefetch_queue_length = 1024 * 1024;
+static int zfs_send_no_prefetch_queue_length = 1024 * 1024;

 /*
  * These tunables control the fill fraction of the queues by zfs send. The fill
  * fraction controls the frequency with which threads have to be cv_signaled.
@@ -91,19 +91,19 @@ int zfs_send_no_prefetch_queue_length = 1024 * 1024;
  * down. If the queues empty before the signalled thread can catch up, then
  * these should be tuned up.
  */
-int zfs_send_queue_ff = 20;
-int zfs_send_no_prefetch_queue_ff = 20;
+static int zfs_send_queue_ff = 20;
+static int zfs_send_no_prefetch_queue_ff = 20;

 /*
  * Use this to override the recordsize calculation for fast zfs send estimates.
  */
-int zfs_override_estimate_recordsize = 0;
+static int zfs_override_estimate_recordsize = 0;

 /* Set this tunable to FALSE to disable setting of DRR_FLAG_FREERECORDS */
-int zfs_send_set_freerecords_bit = B_TRUE;
+static const boolean_t zfs_send_set_freerecords_bit = B_TRUE;

 /* Set this tunable to FALSE is disable sending unmodified spill blocks. */
-int zfs_send_unmodified_spill_blocks = B_TRUE;
+static int zfs_send_unmodified_spill_blocks = B_TRUE;

 static inline boolean_t
 overflow_multiply(uint64_t a, uint64_t b, uint64_t *c)
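The fill-fraction comments in dmu_send.c and dmu_redact.c describe a throttle on wakeups: the consumer only cv_signals the producer once the queue has drained below capacity divided by the fill fraction, so a larger ff means more frequent signalling. A stripped-down pthread rendition of that policy (not OpenZFS's bqueue implementation):

#include <pthread.h>
#include <stdio.h>

#define	CAPACITY	64
#define	FILL_FRACTION	20	/* signal when below CAPACITY/20 */

static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t qcv = PTHREAD_COND_INITIALIZER;
static int fill;		/* current queue occupancy */

/* Producer side: block while the queue is full. */
static void
enqueue(void)
{
	pthread_mutex_lock(&qlock);
	while (fill >= CAPACITY)
		pthread_cond_wait(&qcv, &qlock);
	fill++;
	pthread_mutex_unlock(&qlock);
}

/*
 * Consumer side: drain one entry, but only wake the producer once
 * occupancy drops below the fill fraction, batching the wakeups.
 */
static void
dequeue(void)
{
	pthread_mutex_lock(&qlock);
	if (fill > 0)
		fill--;
	if (fill < CAPACITY / FILL_FRACTION)
		pthread_cond_signal(&qcv);
	pthread_mutex_unlock(&qlock);
}

int
main(void)
{
	for (int i = 0; i < CAPACITY; i++)
		enqueue();
	for (int i = 0; i < CAPACITY; i++)
		dequeue();
	printf("fill=%d\n", fill);	/* prints fill=0 */
	return (0);
}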
*/ -hrtime_t zfs_delay_max_ns = 100 * MICROSEC; /* 100 milliseconds */ -int zfs_delay_resolution_ns = 100 * 1000; /* 100 microseconds */ +static const hrtime_t zfs_delay_max_ns = 100 * MICROSEC; /* 100 milliseconds */ /* * We delay transactions when we've determined that the backend storage diff --git a/module/zfs/dmu_zfetch.c b/module/zfs/dmu_zfetch.c index f31728eae..fdf0a1759 100644 --- a/module/zfs/dmu_zfetch.c +++ b/module/zfs/dmu_zfetch.c @@ -43,12 +43,12 @@ * so it can't hurt performance. */ -int zfs_prefetch_disable = B_FALSE; +static int zfs_prefetch_disable = B_FALSE; /* max # of streams per zfetch */ -unsigned int zfetch_max_streams = 8; +static unsigned int zfetch_max_streams = 8; /* min time before stream reclaim */ -unsigned int zfetch_min_sec_reap = 2; +static unsigned int zfetch_min_sec_reap = 2; /* max bytes to prefetch per stream (default 8MB) */ unsigned int zfetch_max_distance = 8 * 1024 * 1024; /* max bytes to prefetch indirects for per stream (default 64MB) */ @@ -83,7 +83,7 @@ struct { wmsum_add(&zfetch_sums.stat, val) -kstat_t *zfetch_ksp; +static kstat_t *zfetch_ksp; static int zfetch_kstats_update(kstat_t *ksp, int rw) diff --git a/module/zfs/dsl_dataset.c b/module/zfs/dsl_dataset.c index 4623bcec4..115f3df5d 100644 --- a/module/zfs/dsl_dataset.c +++ b/module/zfs/dsl_dataset.c @@ -79,7 +79,7 @@ * of this setting. */ int zfs_max_recordsize = 1 * 1024 * 1024; -int zfs_allow_redacted_dataset_mount = 0; +static int zfs_allow_redacted_dataset_mount = 0; #define SWITCH64(x, y) \ { \ @@ -896,14 +896,14 @@ dsl_dataset_own(dsl_pool_t *dp, const char *name, ds_hold_flags_t flags, * and accessed. */ void -dsl_dataset_long_hold(dsl_dataset_t *ds, void *tag) +dsl_dataset_long_hold(dsl_dataset_t *ds, const void *tag) { ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool)); (void) zfs_refcount_add(&ds->ds_longholds, tag); } void -dsl_dataset_long_rele(dsl_dataset_t *ds, void *tag) +dsl_dataset_long_rele(dsl_dataset_t *ds, const void *tag) { (void) zfs_refcount_remove(&ds->ds_longholds, tag); } diff --git a/module/zfs/dsl_pool.c b/module/zfs/dsl_pool.c index 5d1522a7b..ef7e9d2d2 100644 --- a/module/zfs/dsl_pool.c +++ b/module/zfs/dsl_pool.c @@ -117,7 +117,7 @@ unsigned long zfs_wrlog_data_max = 0; * zfs_dirty_data_max), push out a txg. This should be less than * zfs_vdev_async_write_active_min_dirty_percent. */ -int zfs_dirty_data_sync_percent = 20; +static int zfs_dirty_data_sync_percent = 20; /* * Once there is this amount of dirty data, the dmu_tx_delay() will kick in @@ -144,7 +144,7 @@ unsigned long zfs_delay_scale = 1000 * 1000 * 1000 / 2000; /* * This determines the number of threads used by the dp_sync_taskq. */ -int zfs_sync_taskq_batch_pct = 75; +static int zfs_sync_taskq_batch_pct = 75; /* * These tunables determine the behavior of how zil_itxg_clean() is @@ -172,9 +172,9 @@ int zfs_sync_taskq_batch_pct = 75; * Additionally, the number of threads used by the taskq can be * configured via the "zfs_zil_clean_taskq_nthr_pct" tunable. 
*/ -int zfs_zil_clean_taskq_nthr_pct = 100; -int zfs_zil_clean_taskq_minalloc = 1024; -int zfs_zil_clean_taskq_maxalloc = 1024 * 1024; +static int zfs_zil_clean_taskq_nthr_pct = 100; +static int zfs_zil_clean_taskq_minalloc = 1024; +static int zfs_zil_clean_taskq_maxalloc = 1024 * 1024; int dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **ddp) diff --git a/module/zfs/dsl_scan.c b/module/zfs/dsl_scan.c index 859a865dd..677d320ed 100644 --- a/module/zfs/dsl_scan.c +++ b/module/zfs/dsl_scan.c @@ -136,7 +136,7 @@ extern int zfs_vdev_async_write_active_min_dirty_percent; * this value can be set to 1 to enable checking before scanning each * block. */ -int zfs_scan_strict_mem_lim = B_FALSE; +static int zfs_scan_strict_mem_lim = B_FALSE; /* * Maximum number of parallelly executed bytes per leaf vdev. We attempt @@ -146,41 +146,42 @@ int zfs_scan_strict_mem_lim = B_FALSE; * overload the drives with I/O, since that is protected by * zfs_vdev_scrub_max_active. */ -unsigned long zfs_scan_vdev_limit = 4 << 20; +static unsigned long zfs_scan_vdev_limit = 4 << 20; -int zfs_scan_issue_strategy = 0; -int zfs_scan_legacy = B_FALSE; /* don't queue & sort zios, go direct */ -unsigned long zfs_scan_max_ext_gap = 2 << 20; /* in bytes */ +static int zfs_scan_issue_strategy = 0; +static int zfs_scan_legacy = B_FALSE; /* don't queue & sort zios, go direct */ +static unsigned long zfs_scan_max_ext_gap = 2 << 20; /* in bytes */ /* * fill_weight is non-tunable at runtime, so we copy it at module init from * zfs_scan_fill_weight. Runtime adjustments to zfs_scan_fill_weight would * break queue sorting. */ -int zfs_scan_fill_weight = 3; +static int zfs_scan_fill_weight = 3; static uint64_t fill_weight; /* See dsl_scan_should_clear() for details on the memory limit tunables */ -uint64_t zfs_scan_mem_lim_min = 16 << 20; /* bytes */ -uint64_t zfs_scan_mem_lim_soft_max = 128 << 20; /* bytes */ -int zfs_scan_mem_lim_fact = 20; /* fraction of physmem */ -int zfs_scan_mem_lim_soft_fact = 20; /* fraction of mem lim above */ +static const uint64_t zfs_scan_mem_lim_min = 16 << 20; /* bytes */ +static const uint64_t zfs_scan_mem_lim_soft_max = 128 << 20; /* bytes */ +static int zfs_scan_mem_lim_fact = 20; /* fraction of physmem */ +static int zfs_scan_mem_lim_soft_fact = 20; /* fraction of mem lim above */ -int zfs_scrub_min_time_ms = 1000; /* min millisecs to scrub per txg */ -int zfs_obsolete_min_time_ms = 500; /* min millisecs to obsolete per txg */ -int zfs_free_min_time_ms = 1000; /* min millisecs to free per txg */ -int zfs_resilver_min_time_ms = 3000; /* min millisecs to resilver per txg */ -int zfs_scan_checkpoint_intval = 7200; /* in seconds */ +static int zfs_scrub_min_time_ms = 1000; /* min millis to scrub per txg */ +static int zfs_obsolete_min_time_ms = 500; /* min millis to obsolete per txg */ +static int zfs_free_min_time_ms = 1000; /* min millis to free per txg */ +static int zfs_resilver_min_time_ms = 3000; /* min millis to resilver per txg */ +static int zfs_scan_checkpoint_intval = 7200; /* in seconds */ int zfs_scan_suspend_progress = 0; /* set to prevent scans from progressing */ -int zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */ -int zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetch */ -enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE; +static int zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */ +static int zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetch */ +static const enum ddt_class zfs_scrub_ddt_class_max = 
DDT_CLASS_DUPLICATE; /* max number of blocks to free in a single TXG */ -unsigned long zfs_async_block_max_blocks = ULONG_MAX; +static unsigned long zfs_async_block_max_blocks = ULONG_MAX; /* max number of dedup blocks to free in a single TXG */ -unsigned long zfs_max_async_dedup_frees = 100000; +static unsigned long zfs_max_async_dedup_frees = 100000; -int zfs_resilver_disable_defer = 0; /* set to disable resilver deferring */ +/* set to disable resilver deferring */ +static int zfs_resilver_disable_defer = B_FALSE; /* * We wait a few txgs after importing a pool to begin scanning so that @@ -201,7 +202,7 @@ int zfs_resilver_disable_defer = 0; /* set to disable resilver deferring */ /* * Enable/disable the processing of the free_bpobj object. */ -int zfs_free_bpobj_enabled = 1; +static int zfs_free_bpobj_enabled = 1; /* the order has to match pool_scan_type */ static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = { diff --git a/module/zfs/fm.c b/module/zfs/fm.c index f4b001b27..5a52a881c 100644 --- a/module/zfs/fm.c +++ b/module/zfs/fm.c @@ -68,7 +68,7 @@ #include #include -int zfs_zevent_len_max = 512; +static int zfs_zevent_len_max = 512; static int zevent_len_cur = 0; static int zevent_waiters = 0; @@ -497,7 +497,7 @@ i_fm_free(nv_alloc_t *nva, void *buf, size_t size) kmem_free(buf, size); } -const nv_alloc_ops_t fm_mem_alloc_ops = { +static const nv_alloc_ops_t fm_mem_alloc_ops = { .nv_ao_init = NULL, .nv_ao_fini = NULL, .nv_ao_alloc = i_fm_alloc, diff --git a/module/zfs/metaslab.c b/module/zfs/metaslab.c index 9e216c38d..145f79fae 100644 --- a/module/zfs/metaslab.c +++ b/module/zfs/metaslab.c @@ -51,7 +51,7 @@ * operation, we will try to write this amount of data to a top-level vdev * before moving on to the next one. */ -unsigned long metaslab_aliquot = 512 << 10; +static unsigned long metaslab_aliquot = 512 << 10; /* * For testing, make some blocks above a certain size be gang blocks. @@ -96,7 +96,7 @@ int zfs_condense_pct = 200; * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold * blocks. */ -int zfs_metaslab_condense_block_threshold = 4; +static const int zfs_metaslab_condense_block_threshold = 4; /* * The zfs_mg_noalloc_threshold defines which metaslab groups should @@ -111,7 +111,7 @@ int zfs_metaslab_condense_block_threshold = 4; * eligible to allocate on any metaslab group. The default value of 0 means * no metaslab group will be excluded based on this criterion. */ -int zfs_mg_noalloc_threshold = 0; +static int zfs_mg_noalloc_threshold = 0; /* * Metaslab groups are considered eligible for allocations if their @@ -135,7 +135,7 @@ int zfs_mg_noalloc_threshold = 0; * enough to avoid hitting the speed bump on pools that are being pushed * to the edge. */ -int zfs_mg_fragmentation_threshold = 95; +static int zfs_mg_fragmentation_threshold = 95; /* * Allow metaslabs to keep their active state as long as their fragmentation @@ -143,17 +143,17 @@ int zfs_mg_fragmentation_threshold = 95; * active metaslab that exceeds this threshold will no longer keep its active * status allowing better metaslabs to be selected. */ -int zfs_metaslab_fragmentation_threshold = 70; +static int zfs_metaslab_fragmentation_threshold = 70; /* * When set will load all metaslabs when pool is first opened. */ -int metaslab_debug_load = 0; +int metaslab_debug_load = B_FALSE; /* * When set will prevent metaslabs from being unloaded. 
*/ -int metaslab_debug_unload = 0; +static int metaslab_debug_unload = B_FALSE; /* * Minimum size which forces the dynamic allocator to change @@ -184,14 +184,14 @@ int metaslab_df_free_pct = 4; * With the default setting of 16MB this is 16*1024 (with ashift=9) or * 2048 (with ashift=12). */ -int metaslab_df_max_search = 16 * 1024 * 1024; +static int metaslab_df_max_search = 16 * 1024 * 1024; /* * Forces the metaslab_block_picker function to search for at least this many * segments forwards until giving up on finding a segment that the allocation * will fit into. */ -uint32_t metaslab_min_search_count = 100; +static const uint32_t metaslab_min_search_count = 100; /* * If we are not searching forward (due to metaslab_df_max_search, @@ -200,7 +200,7 @@ uint32_t metaslab_min_search_count = 100; * segment. If it is not set, we will use a segment of exactly the requested * size (or larger). */ -int metaslab_df_use_largest_segment = B_FALSE; +static int metaslab_df_use_largest_segment = B_FALSE; /* * Percentage of all cpus that can be used by the metaslab taskq. @@ -215,8 +215,8 @@ int metaslab_load_pct = 50; * unloaded sooner. These settings are intended to be generous -- to keep * metaslabs loaded for a long time, reducing the rate of metaslab loading. */ -int metaslab_unload_delay = 32; -int metaslab_unload_delay_ms = 10 * 60 * 1000; /* ten minutes */ +static int metaslab_unload_delay = 32; +static int metaslab_unload_delay_ms = 10 * 60 * 1000; /* ten minutes */ /* * Max number of metaslabs per group to preload. @@ -226,45 +226,45 @@ int metaslab_preload_limit = 10; /* * Enable/disable preloading of metaslab. */ -int metaslab_preload_enabled = B_TRUE; +static int metaslab_preload_enabled = B_TRUE; /* * Enable/disable fragmentation weighting on metaslabs. */ -int metaslab_fragmentation_factor_enabled = B_TRUE; +static int metaslab_fragmentation_factor_enabled = B_TRUE; /* * Enable/disable lba weighting (i.e. outer tracks are given preference). */ -int metaslab_lba_weighting_enabled = B_TRUE; +static int metaslab_lba_weighting_enabled = B_TRUE; /* * Enable/disable metaslab group biasing. */ -int metaslab_bias_enabled = B_TRUE; +static int metaslab_bias_enabled = B_TRUE; /* * Enable/disable remapping of indirect DVAs to their concrete vdevs. */ -boolean_t zfs_remap_blkptr_enable = B_TRUE; +static const boolean_t zfs_remap_blkptr_enable = B_TRUE; /* * Enable/disable segment-based metaslab selection. */ -int zfs_metaslab_segment_weight_enabled = B_TRUE; +static int zfs_metaslab_segment_weight_enabled = B_TRUE; /* * When using segment-based metaslab selection, we will continue * allocating from the active metaslab until we have exhausted * zfs_metaslab_switch_threshold of its buckets. */ -int zfs_metaslab_switch_threshold = 2; +static int zfs_metaslab_switch_threshold = 2; /* * Internal switch to enable/disable the metaslab allocation tracing * facility. */ -boolean_t metaslab_trace_enabled = B_FALSE; +static const boolean_t metaslab_trace_enabled = B_FALSE; /* * Maximum entries that the metaslab allocation tracing facility will keep @@ -274,32 +274,32 @@ boolean_t metaslab_trace_enabled = B_FALSE; * to every exceed this value. In debug mode, the system will panic if this * limit is ever reached allowing for further investigation. */ -uint64_t metaslab_trace_max_entries = 5000; +static const uint64_t metaslab_trace_max_entries = 5000; /* * Maximum number of metaslabs per group that can be disabled * simultaneously. 
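/*
 * A hedged aside on why several flags above (metaslab_trace_enabled,
 * zfs_remap_blkptr_enable) became `static const` rather than just `static`:
 * with a const-qualified compile-time initializer the compiler can fold the
 * test and drop the guarded code entirely, so a disabled debug facility
 * costs nothing at runtime. Stand-in names below.
 */
#include <stdio.h>

static const int demo_trace_enabled = 0;	/* cf. metaslab_trace_enabled */

static void
demo_trace_alloc(const char *what)
{
	if (demo_trace_enabled)		/* folds to `if (0)`; body is elided */
		(void) printf("alloc: %s\n", what);
}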
*/ -int max_disabled_ms = 3; +static const int max_disabled_ms = 3; /* * Time (in seconds) to respect ms_max_size when the metaslab is not loaded. * To avoid 64-bit overflow, don't set above UINT32_MAX. */ -unsigned long zfs_metaslab_max_size_cache_sec = 3600; /* 1 hour */ +static unsigned long zfs_metaslab_max_size_cache_sec = 1 * 60 * 60; /* 1 hour */ /* * Maximum percentage of memory to use on storing loaded metaslabs. If loading * a metaslab would take it over this percentage, the oldest selected metaslab * is automatically unloaded. */ -int zfs_metaslab_mem_limit = 25; +static int zfs_metaslab_mem_limit = 25; /* * Force the per-metaslab range trees to use 64-bit integers to store * segments. Used for debugging purposes. */ -boolean_t zfs_metaslab_force_large_segs = B_FALSE; +static const boolean_t zfs_metaslab_force_large_segs = B_FALSE; /* * By default we only store segments over a certain size in the size-sorted @@ -308,7 +308,7 @@ boolean_t zfs_metaslab_force_large_segs = B_FALSE; * improves load and unload times at the cost of causing us to use slightly * larger segments than we would otherwise in some cases. */ -uint32_t metaslab_by_size_min_shift = 14; +static const uint32_t metaslab_by_size_min_shift = 14; /* * If not set, we will first try normal allocation. If that fails then @@ -321,7 +321,7 @@ uint32_t metaslab_by_size_min_shift = 14; * allocation. If that fails we will do a "try hard" gang allocation. If * that fails then we will have a multi-layer gang block. */ -int zfs_metaslab_try_hard_before_gang = B_FALSE; +static int zfs_metaslab_try_hard_before_gang = B_FALSE; /* * When not trying hard, we only consider the best zfs_metaslab_find_max_tries @@ -337,7 +337,7 @@ int zfs_metaslab_try_hard_before_gang = B_FALSE; * subsequent metaslab has ms_max_size >60KB (but fewer segments in this * bucket, and therefore a lower weight). 
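/*
 * A minimal reading of the "don't set above UINT32_MAX" caveat quoted for
 * zfs_metaslab_max_size_cache_sec above. Sketch only; the nanosecond scale
 * factor is an assumption for illustration, not the actual arithmetic in
 * metaslab.c: if the second count is later multiplied into a finer unit in
 * 64-bit math, a UINT32_MAX cap keeps the product representable.
 */
#include <stdint.h>

#define DEMO_NSEC_PER_SEC	1000000000ULL

static uint64_t
demo_sec_to_nsec(uint64_t sec)
{
	/* UINT32_MAX * 1e9 is about 4.3e18, safely under UINT64_MAX ~1.8e19 */
	return (sec * DEMO_NSEC_PER_SEC);
}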
*/ -int zfs_metaslab_find_max_tries = 100; +static int zfs_metaslab_find_max_tries = 100; static uint64_t metaslab_weight(metaslab_t *, boolean_t); static void metaslab_set_fragmentation(metaslab_t *, boolean_t); @@ -370,7 +370,7 @@ static metaslab_stats_t metaslab_stats = { atomic_inc_64(&metaslab_stats.stat.value.ui64); -kstat_t *metaslab_ksp; +static kstat_t *metaslab_ksp; void metaslab_stat_init(void) @@ -406,7 +406,7 @@ metaslab_stat_fini(void) * ========================================================================== */ metaslab_class_t * -metaslab_class_create(spa_t *spa, metaslab_ops_t *ops) +metaslab_class_create(spa_t *spa, const metaslab_ops_t *ops) { metaslab_class_t *mc; @@ -1478,7 +1478,7 @@ metaslab_rt_vacate(range_tree_t *rt, void *arg) metaslab_rt_create(rt, arg); } -static range_tree_ops_t metaslab_rt_ops = { +static const range_tree_ops_t metaslab_rt_ops = { .rtop_create = metaslab_rt_create, .rtop_destroy = metaslab_rt_destroy, .rtop_add = metaslab_rt_add, @@ -1712,11 +1712,9 @@ metaslab_df_alloc(metaslab_t *msp, uint64_t size) return (offset); } -static metaslab_ops_t metaslab_df_ops = { +const metaslab_ops_t zfs_metaslab_ops = { metaslab_df_alloc }; - -metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops; #endif /* WITH_DF_BLOCK_ALLOCATOR */ #if defined(WITH_CF_BLOCK_ALLOCATOR) @@ -1762,11 +1760,9 @@ metaslab_cf_alloc(metaslab_t *msp, uint64_t size) return (offset); } -static metaslab_ops_t metaslab_cf_ops = { +const metaslab_ops_t zfs_metaslab_ops = { metaslab_cf_alloc }; - -metaslab_ops_t *zfs_metaslab_ops = &metaslab_cf_ops; #endif /* WITH_CF_BLOCK_ALLOCATOR */ #if defined(WITH_NDF_BLOCK_ALLOCATOR) @@ -1826,11 +1822,9 @@ metaslab_ndf_alloc(metaslab_t *msp, uint64_t size) return (-1ULL); } -static metaslab_ops_t metaslab_ndf_ops = { +const metaslab_ops_t zfs_metaslab_ops = { metaslab_ndf_alloc }; - -metaslab_ops_t *zfs_metaslab_ops = &metaslab_ndf_ops; #endif /* WITH_NDF_BLOCK_ALLOCATOR */ @@ -2237,7 +2231,7 @@ metaslab_potentially_evict(metaslab_class_t *mc) } } #else - (void) mc; + (void) mc, (void) zfs_metaslab_mem_limit; #endif } @@ -2855,7 +2849,7 @@ metaslab_fini(metaslab_t *msp) * of the table. Since the fragmentation value is never stored on disk, it * is possible to change these calculations in the future. 
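/*
 * The allocator hunks above replace a mutable pointer indirection
 * (`metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops;`) with a single
 * exported const object, defined directly by whichever allocator is
 * compiled in. Reduced sketch of the pattern; the types and the WITH_*
 * macro are stand-ins:
 */
#include <stdint.h>

typedef struct demo_ops {
	uint64_t (*msop_alloc)(void *msp, uint64_t size);
} demo_ops_t;

#ifdef WITH_DF_DEMO
static uint64_t
demo_df_alloc(void *msp, uint64_t size)
{
	(void) msp, (void) size;
	return (-1ULL);		/* stub */
}
const demo_ops_t demo_metaslab_ops = { demo_df_alloc };
#else
static uint64_t
demo_cf_alloc(void *msp, uint64_t size)
{
	(void) msp, (void) size;
	return (-1ULL);		/* stub */
}
const demo_ops_t demo_metaslab_ops = { demo_cf_alloc };
#endif
/* Callers take its address, as in metaslab_class_create(spa, &zfs_metaslab_ops). */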
*/ -int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = { +static const int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = { 100, /* 512B */ 100, /* 1K */ 98, /* 2K */ diff --git a/module/zfs/mmp.c b/module/zfs/mmp.c index f67a4eb22..abdce3a32 100644 --- a/module/zfs/mmp.c +++ b/module/zfs/mmp.c @@ -186,7 +186,7 @@ uint_t zfs_multihost_import_intervals = MMP_DEFAULT_IMPORT_INTERVALS; */ uint_t zfs_multihost_fail_intervals = MMP_DEFAULT_FAIL_INTERVALS; -char *mmp_tag = "mmp_write_uberblock"; +static void *const mmp_tag = "mmp_write_uberblock"; static void mmp_thread(void *arg); void diff --git a/module/zfs/range_tree.c b/module/zfs/range_tree.c index 67910f9ff..f9fb97476 100644 --- a/module/zfs/range_tree.c +++ b/module/zfs/range_tree.c @@ -188,8 +188,8 @@ range_tree_seg_gap_compare(const void *x1, const void *x2) } range_tree_t * -range_tree_create_impl(range_tree_ops_t *ops, range_seg_type_t type, void *arg, - uint64_t start, uint64_t shift, +range_tree_create_impl(const range_tree_ops_t *ops, range_seg_type_t type, + void *arg, uint64_t start, uint64_t shift, int (*zfs_btree_compare) (const void *, const void *), uint64_t gap) { @@ -232,7 +232,7 @@ range_tree_create_impl(range_tree_ops_t *ops, range_seg_type_t type, void *arg, } range_tree_t * -range_tree_create(range_tree_ops_t *ops, range_seg_type_t type, +range_tree_create(const range_tree_ops_t *ops, range_seg_type_t type, void *arg, uint64_t start, uint64_t shift) { return (range_tree_create_impl(ops, type, arg, start, shift, NULL, 0)); @@ -801,7 +801,7 @@ rt_btree_vacate(range_tree_t *rt, void *arg) rt_btree_create(rt, arg); } -range_tree_ops_t rt_btree_ops = { +const range_tree_ops_t rt_btree_ops = { .rtop_create = rt_btree_create, .rtop_destroy = rt_btree_destroy, .rtop_add = rt_btree_add, diff --git a/module/zfs/refcount.c b/module/zfs/refcount.c index 35a379dde..390b2fded 100644 --- a/module/zfs/refcount.c +++ b/module/zfs/refcount.c @@ -26,15 +26,15 @@ #include #include +#ifdef ZFS_DEBUG /* * Reference count tracking is disabled by default. It's memory requirements * are reasonable, however as implemented it consumes a significant amount of * cpu time. Until its performance is improved it should be manually enabled. */ -int reference_tracking_enable = FALSE; -int reference_history = 3; /* tunable */ +int reference_tracking_enable = B_FALSE; +static int reference_history = 3; /* tunable */ -#ifdef ZFS_DEBUG static kmem_cache_t *reference_cache; static kmem_cache_t *reference_history_cache; @@ -327,10 +327,10 @@ EXPORT_SYMBOL(zfs_refcount_remove); EXPORT_SYMBOL(zfs_refcount_held); /* BEGIN CSTYLED */ -ZFS_MODULE_PARAM(zfs, ,reference_tracking_enable, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs, , reference_tracking_enable, INT, ZMOD_RW, "Track reference holders to refcount_t objects"); -ZFS_MODULE_PARAM(zfs, ,reference_history, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs, , reference_history, INT, ZMOD_RW, "Maximum reference holders being tracked"); /* END CSTYLED */ #endif /* ZFS_DEBUG */ diff --git a/module/zfs/sa.c b/module/zfs/sa.c index b69b0c68f..a078af159 100644 --- a/module/zfs/sa.c +++ b/module/zfs/sa.c @@ -141,7 +141,7 @@ static int sa_modify_attrs(sa_handle_t *hdl, sa_attr_type_t newattr, sa_data_op_t action, sa_data_locator_t *locator, void *datastart, uint16_t buflen, dmu_tx_t *tx); -arc_byteswap_func_t sa_bswap_table[] = { +static const arc_byteswap_func_t sa_bswap_table[] = { byteswap_uint64_array, byteswap_uint32_array, byteswap_uint16_array, @@ -178,7 +178,7 @@ do { \ * won't have the registry. 
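/*
 * A note on why making these tunables `static` does not hide them from
 * administrators: the ZFS_MODULE_PARAM() invocations (as in the refcount.c
 * hunk above) register the variable with the platform by name, so external
 * linkage is unnecessary. As a rough analogy only -- not the actual macro
 * expansion -- a plain Linux kernel module exposes a static the same way:
 */
#include <linux/module.h>
#include <linux/moduleparam.h>

static int demo_tracking_enable = 0;

/* world-readable, root-writable under /sys/module/<mod>/parameters/ */
module_param(demo_tracking_enable, int, 0644);
MODULE_PARM_DESC(demo_tracking_enable,
	"Track reference holders to refcount_t objects");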
Only objsets of type ZFS_TYPE_FILESYSTEM will * use this static table. */ -sa_attr_reg_t sa_legacy_attrs[] = { +static const sa_attr_reg_t sa_legacy_attrs[] = { {"ZPL_ATIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 0}, {"ZPL_MTIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 1}, {"ZPL_CTIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 2}, @@ -200,16 +200,16 @@ sa_attr_reg_t sa_legacy_attrs[] = { /* * This is only used for objects of type DMU_OT_ZNODE */ -sa_attr_type_t sa_legacy_zpl_layout[] = { +static const sa_attr_type_t sa_legacy_zpl_layout[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }; /* * Special dummy layout used for buffers with no attributes. */ -sa_attr_type_t sa_dummy_zpl_layout[] = { 0 }; +static const sa_attr_type_t sa_dummy_zpl_layout[] = { 0 }; -static int sa_legacy_attr_count = ARRAY_SIZE(sa_legacy_attrs); +static const size_t sa_legacy_attr_count = ARRAY_SIZE(sa_legacy_attrs); static kmem_cache_t *sa_cache = NULL; static int @@ -285,12 +285,11 @@ sa_layout_equal(sa_lot_t *tbf, sa_attr_type_t *attrs, int count) #define SA_ATTR_HASH(attr) (zfs_crc64_table[(-1ULL ^ attr) & 0xFF]) static uint64_t -sa_layout_info_hash(sa_attr_type_t *attrs, int attr_count) +sa_layout_info_hash(const sa_attr_type_t *attrs, int attr_count) { - int i; uint64_t crc = -1ULL; - for (i = 0; i != attr_count; i++) + for (int i = 0; i != attr_count; i++) crc ^= SA_ATTR_HASH(attrs[i]); return (crc); @@ -402,7 +401,7 @@ sa_attr_op(sa_handle_t *hdl, sa_bulk_attr_t *bulk, int count, } static sa_lot_t * -sa_add_layout_entry(objset_t *os, sa_attr_type_t *attrs, int attr_count, +sa_add_layout_entry(objset_t *os, const sa_attr_type_t *attrs, int attr_count, uint64_t lot_num, uint64_t hash, boolean_t zapadd, dmu_tx_t *tx) { sa_os_t *sa = os->os_sa; @@ -831,7 +830,7 @@ sa_free_attr_table(sa_os_t *sa) } static int -sa_attr_table_setup(objset_t *os, sa_attr_reg_t *reg_attrs, int count) +sa_attr_table_setup(objset_t *os, const sa_attr_reg_t *reg_attrs, int count) { sa_os_t *sa = os->os_sa; uint64_t sa_attr_count = 0; @@ -992,8 +991,8 @@ bail: } int -sa_setup(objset_t *os, uint64_t sa_obj, sa_attr_reg_t *reg_attrs, int count, - sa_attr_type_t **user_table) +sa_setup(objset_t *os, uint64_t sa_obj, const sa_attr_reg_t *reg_attrs, + int count, sa_attr_type_t **user_table) { zap_cursor_t zc; zap_attribute_t za; diff --git a/module/zfs/spa.c b/module/zfs/spa.c index 3c0316990..01af91b7b 100644 --- a/module/zfs/spa.c +++ b/module/zfs/spa.c @@ -150,7 +150,7 @@ static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = { * and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that * need to be handled with minimum delay. 
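/*
 * Standalone sketch of the hashing shape in sa_layout_info_hash() above:
 * each attribute id selects a 64-bit constant from a lookup table and the
 * selections are XOR-folded into the running value. The 4-entry table here
 * is a stand-in for zfs_crc64_table. Note that XOR-folding makes the result
 * independent of attribute order, so exact layout matches are confirmed
 * separately (cf. sa_layout_equal() in the hunk above).
 */
#include <stdint.h>

static const uint64_t demo_hash_tab[4] = {
	0x9afce626ce85b507ULL, 0xc5629c0de3ea58b4ULL,
	0x2f938f2763f629bdULL, 0x4cfd118c12c18a0eULL,
};

static uint64_t
demo_layout_hash(const uint16_t *attrs, int attr_count)
{
	uint64_t crc = -1ULL;

	for (int i = 0; i < attr_count; i++)
		crc ^= demo_hash_tab[(-1ULL ^ attrs[i]) & 0x3];

	return (crc);
}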
*/ -const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = { +static const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = { /* ISSUE ISSUE_HIGH INTR INTR_HIGH */ { ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* NULL */ { ZTI_N(8), ZTI_NULL, ZTI_SCALE, ZTI_NULL }, /* READ */ @@ -167,12 +167,12 @@ static boolean_t spa_has_active_shared_spare(spa_t *spa); static int spa_load_impl(spa_t *spa, spa_import_type_t type, char **ereport); static void spa_vdev_resilver_done(spa_t *spa); -uint_t zio_taskq_batch_pct = 80; /* 1 thread per cpu in pset */ -uint_t zio_taskq_batch_tpq; /* threads per taskq */ -boolean_t zio_taskq_sysdc = B_TRUE; /* use SDC scheduling class */ -uint_t zio_taskq_basedc = 80; /* base duty cycle */ +static uint_t zio_taskq_batch_pct = 80; /* 1 thread per cpu in pset */ +static uint_t zio_taskq_batch_tpq; /* threads per taskq */ +static const boolean_t zio_taskq_sysdc = B_TRUE; /* use SDC scheduling class */ +static const uint_t zio_taskq_basedc = 80; /* base duty cycle */ -boolean_t spa_create_process = B_TRUE; /* no process ==> no sysdc */ +static const boolean_t spa_create_process = B_TRUE; /* no process => no sysdc */ /* * Report any spa_load_verify errors found, but do not fail spa_load. @@ -195,7 +195,7 @@ boolean_t spa_mode_readable_spacemaps = B_FALSE; /* * For debugging purposes: print out vdev tree during pool import. */ -int spa_load_print_vdev_tree = B_FALSE; +static int spa_load_print_vdev_tree = B_FALSE; /* * A non-zero value for zfs_max_missing_tvds means that we allow importing @@ -244,28 +244,28 @@ uint64_t zfs_max_missing_tvds_scan = 0; /* * Debugging aid that pauses spa_sync() towards the end. */ -boolean_t zfs_pause_spa_sync = B_FALSE; +static const boolean_t zfs_pause_spa_sync = B_FALSE; /* * Variables to indicate the livelist condense zthr func should wait at certain * points for the livelist to be removed - used to test condense/destroy races */ -int zfs_livelist_condense_zthr_pause = 0; -int zfs_livelist_condense_sync_pause = 0; +static int zfs_livelist_condense_zthr_pause = 0; +static int zfs_livelist_condense_sync_pause = 0; /* * Variables to track whether or not condense cancellation has been * triggered in testing. 
*/ -int zfs_livelist_condense_sync_cancel = 0; -int zfs_livelist_condense_zthr_cancel = 0; +static int zfs_livelist_condense_sync_cancel = 0; +static int zfs_livelist_condense_zthr_cancel = 0; /* * Variable to track whether or not extra ALLOC blkptrs were added to a * livelist entry while it was being condensed (caused by the way we track * remapped blkptrs in dbuf_remap_impl) */ -int zfs_livelist_condense_new_alloc = 0; +static int zfs_livelist_condense_new_alloc = 0; /* * ========================================================================== @@ -1048,6 +1048,7 @@ spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q) if (batch) flags |= TASKQ_DC_BATCH; + (void) zio_taskq_basedc; tq = taskq_create_sysdc(name, value, 50, INT_MAX, spa->spa_proc, zio_taskq_basedc, flags); } else { @@ -1249,12 +1250,12 @@ spa_activate(spa_t *spa, spa_mode_t mode) spa->spa_mode = mode; spa->spa_read_spacemaps = spa_mode_readable_spacemaps; - spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops); - spa->spa_log_class = metaslab_class_create(spa, zfs_metaslab_ops); + spa->spa_normal_class = metaslab_class_create(spa, &zfs_metaslab_ops); + spa->spa_log_class = metaslab_class_create(spa, &zfs_metaslab_ops); spa->spa_embedded_log_class = - metaslab_class_create(spa, zfs_metaslab_ops); - spa->spa_special_class = metaslab_class_create(spa, zfs_metaslab_ops); - spa->spa_dedup_class = metaslab_class_create(spa, zfs_metaslab_ops); + metaslab_class_create(spa, &zfs_metaslab_ops); + spa->spa_special_class = metaslab_class_create(spa, &zfs_metaslab_ops); + spa->spa_dedup_class = metaslab_class_create(spa, &zfs_metaslab_ops); /* Try to create a covering process */ mutex_enter(&spa->spa_proc_lock); @@ -1262,6 +1263,7 @@ spa_activate(spa_t *spa, spa_mode_t mode) ASSERT(spa->spa_proc == &p0); spa->spa_did = 0; + (void) spa_create_process; #ifdef HAVE_SPA_THREAD /* Only create a process if we're going to be around a while. */ if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) { @@ -2281,9 +2283,9 @@ spa_load_verify_done(zio_t *zio) * Maximum number of inflight bytes is the log2 fraction of the arc size. * By default, we set it to 1/16th of the arc. */ -int spa_load_verify_shift = 4; -int spa_load_verify_metadata = B_TRUE; -int spa_load_verify_data = B_TRUE; +static int spa_load_verify_shift = 4; +static int spa_load_verify_metadata = B_TRUE; +static int spa_load_verify_data = B_TRUE; static int spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, diff --git a/module/zfs/spa_checkpoint.c b/module/zfs/spa_checkpoint.c index ddcdb6801..68c3ae2e0 100644 --- a/module/zfs/spa_checkpoint.c +++ b/module/zfs/spa_checkpoint.c @@ -158,7 +158,7 @@ * amount of checkpointed data that has been freed within them while * the pool had a checkpoint. */ -unsigned long zfs_spa_discard_memory_limit = 16 * 1024 * 1024; +static unsigned long zfs_spa_discard_memory_limit = 16 * 1024 * 1024; int spa_checkpoint_get_stats(spa_t *spa, pool_checkpoint_stat_t *pcs) diff --git a/module/zfs/spa_config.c b/module/zfs/spa_config.c index ad82932ce..b9c6ac042 100644 --- a/module/zfs/spa_config.c +++ b/module/zfs/spa_config.c @@ -68,7 +68,9 @@ static uint64_t spa_config_generation = 1; * userland pools when doing testing. 
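/*
 * On the `(void) zio_taskq_basedc;` and `(void) spa_create_process;`
 * statements added above: both variables are consumed only in some
 * configurations (the sysdc macro and the HAVE_SPA_THREAD block), so once
 * they lose external linkage the other builds would warn about an unused
 * static. A no-op reference keeps every configuration warning-clean.
 * Reduced example with hypothetical names:
 */
#define DEMO_CREATE_WORKER(name, dc)	demo_create_worker_impl(name)

extern int demo_create_worker_impl(const char *name);

static const int demo_duty_cycle = 80;	/* dropped by the macro above */

static int
demo_start_worker(void)
{
	(void) demo_duty_cycle;	/* keeps the variable referenced everywhere */
	return (DEMO_CREATE_WORKER("demo", demo_duty_cycle));
}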
*/ char *spa_config_path = ZPOOL_CACHE; -int zfs_autoimport_disable = 1; +#ifdef _KERNEL +static int zfs_autoimport_disable = B_TRUE; +#endif /* * Called when the module is first loaded, this routine loads the configuration diff --git a/module/zfs/spa_log_spacemap.c b/module/zfs/spa_log_spacemap.c index 6fd302b8d..341917a6d 100644 --- a/module/zfs/spa_log_spacemap.c +++ b/module/zfs/spa_log_spacemap.c @@ -177,7 +177,7 @@ * block size as we expect to be writing a lot of data to them at * once. */ -unsigned long zfs_log_sm_blksz = 1ULL << 17; +static const unsigned long zfs_log_sm_blksz = 1ULL << 17; /* * Percentage of the overall system's memory that ZFS allows to be @@ -188,13 +188,13 @@ unsigned long zfs_log_sm_blksz = 1ULL << 17; * (thus the _ppm suffix; reads as "parts per million"). As an example, * the default of 1000 allows 0.1% of memory to be used. */ -unsigned long zfs_unflushed_max_mem_ppm = 1000; +static unsigned long zfs_unflushed_max_mem_ppm = 1000; /* * Specific hard-limit in memory that ZFS allows to be used for * unflushed changes. */ -unsigned long zfs_unflushed_max_mem_amt = 1ULL << 30; +static unsigned long zfs_unflushed_max_mem_amt = 1ULL << 30; /* * The following tunable determines the number of blocks that can be used for @@ -243,28 +243,28 @@ unsigned long zfs_unflushed_max_mem_amt = 1ULL << 30; * provide upper and lower bounds for the log block limit. * [see zfs_unflushed_log_block_{min,max}] */ -unsigned long zfs_unflushed_log_block_pct = 400; +static unsigned long zfs_unflushed_log_block_pct = 400; /* * If the number of metaslabs is small and our incoming rate is high, we could * get into a situation that we are flushing all our metaslabs every TXG. Thus * we always allow at least this many log blocks. */ -unsigned long zfs_unflushed_log_block_min = 1000; +static unsigned long zfs_unflushed_log_block_min = 1000; /* * If the log becomes too big, the import time of the pool can take a hit in * terms of performance. Thus we have a hard limit in the size of the log in * terms of blocks. */ -unsigned long zfs_unflushed_log_block_max = (1ULL << 18); +static unsigned long zfs_unflushed_log_block_max = (1ULL << 18); /* * Max # of rows allowed for the log_summary. The tradeoff here is accuracy and * stability of the flushing algorithm (longer summary) vs its runtime overhead * (smaller summary is faster to traverse). */ -unsigned long zfs_max_logsm_summary_length = 10; +static unsigned long zfs_max_logsm_summary_length = 10; /* * Tunable that sets the lower bound on the metaslabs to flush every TXG. @@ -277,7 +277,7 @@ unsigned long zfs_max_logsm_summary_length = 10; * The point of this tunable is to be used in extreme cases where we really * want to flush more metaslabs than our adaptable heuristic plans to flush. */ -unsigned long zfs_min_metaslabs_to_flush = 1; +static unsigned long zfs_min_metaslabs_to_flush = 1; /* * Tunable that specifies how far in the past do we want to look when trying to @@ -288,7 +288,7 @@ unsigned long zfs_min_metaslabs_to_flush = 1; * average over all the blocks that we walk * [see spa_estimate_incoming_log_blocks]. */ -unsigned long zfs_max_log_walking = 5; +static unsigned long zfs_max_log_walking = 5; /* * This tunable exists solely for testing purposes. 
It ensures that the log diff --git a/module/zfs/spa_misc.c b/module/zfs/spa_misc.c index 6ab602deb..a04766e7e 100644 --- a/module/zfs/spa_misc.c +++ b/module/zfs/spa_misc.c @@ -235,14 +235,13 @@ static avl_tree_t spa_namespace_avl; kmutex_t spa_namespace_lock; static kcondvar_t spa_namespace_cv; -int spa_max_replication_override = SPA_DVAS_PER_BP; +static const int spa_max_replication_override = SPA_DVAS_PER_BP; static kmutex_t spa_spare_lock; static avl_tree_t spa_spare_avl; static kmutex_t spa_l2cache_lock; static avl_tree_t spa_l2cache_avl; -kmem_cache_t *spa_buffer_pool; spa_mode_t spa_mode_global = SPA_MODE_UNINIT; #ifdef ZFS_DEBUG @@ -304,25 +303,25 @@ int zfs_free_leak_on_eio = B_FALSE; * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting * in one of three behaviors controlled by zfs_deadman_failmode. */ -unsigned long zfs_deadman_synctime_ms = 600000UL; +unsigned long zfs_deadman_synctime_ms = 600000UL; /* 10 min. */ /* * This value controls the maximum amount of time zio_wait() will block for an * outstanding IO. By default this is 300 seconds at which point the "hung" * behavior will be applied as described for zfs_deadman_synctime_ms. */ -unsigned long zfs_deadman_ziotime_ms = 300000UL; +unsigned long zfs_deadman_ziotime_ms = 300000UL; /* 5 min. */ /* * Check time in milliseconds. This defines the frequency at which we check * for hung I/O. */ -unsigned long zfs_deadman_checktime_ms = 60000UL; +unsigned long zfs_deadman_checktime_ms = 60000UL; /* 1 min. */ /* * By default the deadman is enabled. */ -int zfs_deadman_enabled = 1; +int zfs_deadman_enabled = B_TRUE; /* * Controls the behavior of the deadman when it detects a "hung" I/O. @@ -332,7 +331,7 @@ int zfs_deadman_enabled = 1; * continue - Attempt to recover from a "hung" I/O * panic - Panic the system */ -char *zfs_deadman_failmode = "wait"; +const char *zfs_deadman_failmode = "wait"; /* * The worst case is single-sector max-parity RAID-Z blocks, in which @@ -384,9 +383,9 @@ int spa_asize_inflation = 24; * See also the comments in zfs_space_check_t. */ int spa_slop_shift = 5; -uint64_t spa_min_slop = 128ULL * 1024 * 1024; -uint64_t spa_max_slop = 128ULL * 1024 * 1024 * 1024; -int spa_allocators = 4; +static const uint64_t spa_min_slop = 128ULL * 1024 * 1024; +static const uint64_t spa_max_slop = 128ULL * 1024 * 1024 * 1024; +static const int spa_allocators = 4; void @@ -420,15 +419,15 @@ spa_load_note(spa_t *spa, const char *fmt, ...) /* * By default dedup and user data indirects land in the special class */ -int zfs_ddt_data_is_special = B_TRUE; -int zfs_user_indirect_is_special = B_TRUE; +static int zfs_ddt_data_is_special = B_TRUE; +static int zfs_user_indirect_is_special = B_TRUE; /* * The percentage of special class final space reserved for metadata only. * Once we allocate 100 - zfs_special_class_metadata_reserve_pct we only * let metadata into the class. */ -int zfs_special_class_metadata_reserve_pct = 25; +static int zfs_special_class_metadata_reserve_pct = 25; /* * ========================================================================== diff --git a/module/zfs/spa_stats.c b/module/zfs/spa_stats.c index 534ac72fe..d89f79480 100644 --- a/module/zfs/spa_stats.c +++ b/module/zfs/spa_stats.c @@ -28,22 +28,22 @@ /* * Keeps stats on last N reads per spa_t, disabled by default. */ -int zfs_read_history = 0; +static int zfs_read_history = B_FALSE; /* * Include cache hits in history, disabled by default. 
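/*
 * The deadman defaults above gained human-unit comments (10 min., 5 min.,
 * 1 min.). An equivalent, sketched alternative -- the DEMO_MSEC macros are
 * assumptions, not ZFS definitions -- is to spell the values as arithmetic
 * so the comment and the number cannot drift apart:
 */
#define DEMO_MSEC_PER_SEC	1000UL
#define DEMO_MSEC_PER_MIN	(60UL * DEMO_MSEC_PER_SEC)

static unsigned long demo_deadman_synctime_ms = 10 * DEMO_MSEC_PER_MIN;
static unsigned long demo_deadman_ziotime_ms = 5 * DEMO_MSEC_PER_MIN;
static unsigned long demo_deadman_checktime_ms = 1 * DEMO_MSEC_PER_MIN;
/* 600000UL, 300000UL and 60000UL respectively, matching the patch. */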
*/ -int zfs_read_history_hits = 0; +static int zfs_read_history_hits = B_FALSE; /* * Keeps stats on the last 100 txgs by default. */ -int zfs_txg_history = 100; +static int zfs_txg_history = 100; /* * Keeps stats on the last N MMP updates, disabled by default. */ -int zfs_multihost_history = 0; +static int zfs_multihost_history = B_FALSE; /* * ========================================================================== @@ -830,7 +830,7 @@ spa_health_destroy(spa_t *spa) mutex_destroy(&shk->lock); } -static spa_iostats_t spa_iostats_template = { +static const spa_iostats_t spa_iostats_template = { { "trim_extents_written", KSTAT_DATA_UINT64 }, { "trim_bytes_written", KSTAT_DATA_UINT64 }, { "trim_extents_skipped", KSTAT_DATA_UINT64 }, diff --git a/module/zfs/vdev.c b/module/zfs/vdev.c index 451d1b9a0..5481902f2 100644 --- a/module/zfs/vdev.c +++ b/module/zfs/vdev.c @@ -81,22 +81,22 @@ * 1 << (spa_slop_shift + 1), on small pools the usable space may be reduced * (by more than 1<vdev_op_type, type) == 0) diff --git a/module/zfs/vdev_cache.c b/module/zfs/vdev_cache.c index 35ed1a335..b6e680334 100644 --- a/module/zfs/vdev_cache.c +++ b/module/zfs/vdev_cache.c @@ -83,13 +83,13 @@ * it by setting the zfs_vdev_cache_size to zero. Note that Solaris 11 * has made these same changes. */ -int zfs_vdev_cache_max = 1<<14; /* 16KB */ -int zfs_vdev_cache_size = 0; -int zfs_vdev_cache_bshift = 16; +static int zfs_vdev_cache_max = 1<<14; /* 16KB */ +static int zfs_vdev_cache_size = 0; +static int zfs_vdev_cache_bshift = 16; #define VCBS (1 << zfs_vdev_cache_bshift) /* 64KB */ -kstat_t *vdc_ksp = NULL; +static kstat_t *vdc_ksp = NULL; typedef struct vdc_stats { kstat_named_t vdc_stat_delegations; diff --git a/module/zfs/vdev_indirect.c b/module/zfs/vdev_indirect.c index 8762855d4..aa4801e67 100644 --- a/module/zfs/vdev_indirect.c +++ b/module/zfs/vdev_indirect.c @@ -172,7 +172,7 @@ * object. */ -int zfs_condense_indirect_vdevs_enable = B_TRUE; +static int zfs_condense_indirect_vdevs_enable = B_TRUE; /* * Condense if at least this percent of the bytes in the mapping is @@ -181,7 +181,7 @@ int zfs_condense_indirect_vdevs_enable = B_TRUE; * condenses. Higher values will condense less often (causing less * i/o); lower values will reduce the mapping size more quickly. */ -int zfs_condense_indirect_obsolete_pct = 25; +static int zfs_condense_indirect_obsolete_pct = 25; /* * Condense if the obsolete space map takes up more than this amount of @@ -189,14 +189,14 @@ int zfs_condense_indirect_obsolete_pct = 25; * consumed by the obsolete space map; the default of 1GB is small enough * that we typically don't mind "wasting" it. */ -unsigned long zfs_condense_max_obsolete_bytes = 1024 * 1024 * 1024; +static unsigned long zfs_condense_max_obsolete_bytes = 1024 * 1024 * 1024; /* * Don't bother condensing if the mapping uses less than this amount of * memory. The default of 128KB is considered a "trivial" amount of * memory and not worth reducing. */ -unsigned long zfs_condense_min_mapping_bytes = 128 * 1024; +static unsigned long zfs_condense_min_mapping_bytes = 128 * 1024; /* * This is used by the test suite so that it can ensure that certain @@ -204,7 +204,7 @@ unsigned long zfs_condense_min_mapping_bytes = 128 * 1024; * complete too quickly). If used to reduce the performance impact of * condensing in production, a maximum value of 1 should be sufficient. 
*/ -int zfs_condense_indirect_commit_entry_delay_ms = 0; +static int zfs_condense_indirect_commit_entry_delay_ms = 0; /* * If an indirect split block contains more than this many possible unique diff --git a/module/zfs/vdev_initialize.c b/module/zfs/vdev_initialize.c index 6ffd0d618..eda71faea 100644 --- a/module/zfs/vdev_initialize.c +++ b/module/zfs/vdev_initialize.c @@ -37,16 +37,16 @@ * Value that is written to disk during initialization. */ #ifdef _ILP32 -unsigned long zfs_initialize_value = 0xdeadbeefUL; +static unsigned long zfs_initialize_value = 0xdeadbeefUL; #else -unsigned long zfs_initialize_value = 0xdeadbeefdeadbeeeULL; +static unsigned long zfs_initialize_value = 0xdeadbeefdeadbeeeULL; #endif /* maximum number of I/Os outstanding per leaf vdev */ -int zfs_initialize_limit = 1; +static const int zfs_initialize_limit = 1; /* size of initializing writes; default 1MiB, see zfs_remove_max_segment */ -unsigned long zfs_initialize_chunk_size = 1024 * 1024; +static unsigned long zfs_initialize_chunk_size = 1024 * 1024; static boolean_t vdev_initialize_should_stop(vdev_t *vd) diff --git a/module/zfs/vdev_mirror.c b/module/zfs/vdev_mirror.c index 45b744b2e..30d0e7de5 100644 --- a/module/zfs/vdev_mirror.c +++ b/module/zfs/vdev_mirror.c @@ -121,7 +121,7 @@ typedef struct mirror_map { mirror_child_t mm_child[]; } mirror_map_t; -static int vdev_mirror_shift = 21; +static const int vdev_mirror_shift = 21; /* * The load configuration settings below are tuned by default for diff --git a/module/zfs/vdev_queue.c b/module/zfs/vdev_queue.c index af612ba9c..ae0a322f8 100644 --- a/module/zfs/vdev_queue.c +++ b/module/zfs/vdev_queue.c @@ -141,24 +141,24 @@ uint32_t zfs_vdev_max_active = 1000; * more quickly, but reads and writes to have higher latency and lower * throughput. 
*/ -uint32_t zfs_vdev_sync_read_min_active = 10; -uint32_t zfs_vdev_sync_read_max_active = 10; -uint32_t zfs_vdev_sync_write_min_active = 10; -uint32_t zfs_vdev_sync_write_max_active = 10; -uint32_t zfs_vdev_async_read_min_active = 1; -uint32_t zfs_vdev_async_read_max_active = 3; -uint32_t zfs_vdev_async_write_min_active = 2; -uint32_t zfs_vdev_async_write_max_active = 10; -uint32_t zfs_vdev_scrub_min_active = 1; -uint32_t zfs_vdev_scrub_max_active = 3; -uint32_t zfs_vdev_removal_min_active = 1; -uint32_t zfs_vdev_removal_max_active = 2; -uint32_t zfs_vdev_initializing_min_active = 1; -uint32_t zfs_vdev_initializing_max_active = 1; -uint32_t zfs_vdev_trim_min_active = 1; -uint32_t zfs_vdev_trim_max_active = 2; -uint32_t zfs_vdev_rebuild_min_active = 1; -uint32_t zfs_vdev_rebuild_max_active = 3; +static uint32_t zfs_vdev_sync_read_min_active = 10; +static uint32_t zfs_vdev_sync_read_max_active = 10; +static uint32_t zfs_vdev_sync_write_min_active = 10; +static uint32_t zfs_vdev_sync_write_max_active = 10; +static uint32_t zfs_vdev_async_read_min_active = 1; +/* */ uint32_t zfs_vdev_async_read_max_active = 3; +static uint32_t zfs_vdev_async_write_min_active = 2; +/* */ uint32_t zfs_vdev_async_write_max_active = 10; +static uint32_t zfs_vdev_scrub_min_active = 1; +static uint32_t zfs_vdev_scrub_max_active = 3; +static uint32_t zfs_vdev_removal_min_active = 1; +static uint32_t zfs_vdev_removal_max_active = 2; +static uint32_t zfs_vdev_initializing_min_active = 1; +static uint32_t zfs_vdev_initializing_max_active = 1; +static uint32_t zfs_vdev_trim_min_active = 1; +static uint32_t zfs_vdev_trim_max_active = 2; +static uint32_t zfs_vdev_rebuild_min_active = 1; +static uint32_t zfs_vdev_rebuild_max_active = 3; /* * When the pool has less than zfs_vdev_async_write_active_min_dirty_percent @@ -178,7 +178,7 @@ int zfs_vdev_async_write_active_max_dirty_percent = 60; * interactive I/O, then the vdev is considered to be "idle", and the number * of concurrently-active non-interactive I/O's is increased to *_max_active. */ -uint_t zfs_vdev_nia_delay = 5; +static uint_t zfs_vdev_nia_delay = 5; /* * Some HDDs tend to prioritize sequential I/O so high that concurrent @@ -190,7 +190,7 @@ uint_t zfs_vdev_nia_delay = 5; * I/Os. This enforced wait ensures the HDD services the interactive I/O * within a reasonable amount of time. */ -uint_t zfs_vdev_nia_credit = 5; +static uint_t zfs_vdev_nia_credit = 5; /* * To reduce IOPs, we aggregate small adjacent I/Os into one large I/O. @@ -198,10 +198,10 @@ uint_t zfs_vdev_nia_credit = 5; * we include spans of optional I/Os to aid aggregation at the disk even when * they aren't able to help us aggregate at this level. */ -int zfs_vdev_aggregation_limit = 1 << 20; -int zfs_vdev_aggregation_limit_non_rotating = SPA_OLD_MAXBLOCKSIZE; -int zfs_vdev_read_gap_limit = 32 << 10; -int zfs_vdev_write_gap_limit = 4 << 10; +static int zfs_vdev_aggregation_limit = 1 << 20; +static int zfs_vdev_aggregation_limit_non_rotating = SPA_OLD_MAXBLOCKSIZE; +static int zfs_vdev_read_gap_limit = 32 << 10; +static int zfs_vdev_write_gap_limit = 4 << 10; /* * Define the queue depth percentage for each top-level. This percentage is @@ -233,7 +233,7 @@ int zfs_vdev_def_queue_depth = 32; * TRIM I/O for extents up to zfs_trim_extent_bytes_max (128M) can be submitted * by the TRIM code in zfs_trim.c. 
*/ -int zfs_vdev_aggregate_trim = 0; +static int zfs_vdev_aggregate_trim = 0; static int vdev_queue_offset_compare(const void *x1, const void *x2) diff --git a/module/zfs/vdev_raidz_math.c b/module/zfs/vdev_raidz_math.c index 03df2df5a..50b8dab74 100644 --- a/module/zfs/vdev_raidz_math.c +++ b/module/zfs/vdev_raidz_math.c @@ -43,7 +43,7 @@ static raidz_impl_ops_t vdev_raidz_fastest_impl = { }; /* All compiled in implementations */ -const raidz_impl_ops_t *raidz_all_maths[] = { +static const raidz_impl_ops_t *const raidz_all_maths[] = { &vdev_raidz_original_impl, &vdev_raidz_scalar_impl, #if defined(__x86_64) && defined(HAVE_SSE2) /* only x86_64 for now */ @@ -268,10 +268,10 @@ vdev_raidz_math_reconstruct(raidz_map_t *rm, raidz_row_t *rr, return (rec_fn(rr, dt)); } -const char *raidz_gen_name[] = { +const char *const raidz_gen_name[] = { "gen_p", "gen_pq", "gen_pqr" }; -const char *raidz_rec_name[] = { +const char *const raidz_rec_name[] = { "rec_p", "rec_q", "rec_r", "rec_pq", "rec_pr", "rec_qr", "rec_pqr" }; @@ -283,18 +283,15 @@ const char *raidz_rec_name[] = { static int raidz_math_kstat_headers(char *buf, size_t size) { - int i; - ssize_t off; - ASSERT3U(size, >=, RAIDZ_KSTAT_LINE_LEN); - off = snprintf(buf, size, "%-17s", "implementation"); + ssize_t off = snprintf(buf, size, "%-17s", "implementation"); - for (i = 0; i < ARRAY_SIZE(raidz_gen_name); i++) + for (int i = 0; i < ARRAY_SIZE(raidz_gen_name); i++) off += snprintf(buf + off, size - off, "%-16s", raidz_gen_name[i]); - for (i = 0; i < ARRAY_SIZE(raidz_rec_name); i++) + for (int i = 0; i < ARRAY_SIZE(raidz_rec_name); i++) off += snprintf(buf + off, size - off, "%-16s", raidz_rec_name[i]); diff --git a/module/zfs/vdev_rebuild.c b/module/zfs/vdev_rebuild.c index 4d7de0c6c..fd2490c0a 100644 --- a/module/zfs/vdev_rebuild.c +++ b/module/zfs/vdev_rebuild.c @@ -103,7 +103,7 @@ * Size of rebuild reads; defaults to 1MiB per data disk and is capped at * SPA_MAXBLOCKSIZE. */ -unsigned long zfs_rebuild_max_segment = 1024 * 1024; +static unsigned long zfs_rebuild_max_segment = 1024 * 1024; /* * Maximum number of parallelly executed bytes per leaf vdev caused by a @@ -121,14 +121,14 @@ unsigned long zfs_rebuild_max_segment = 1024 * 1024; * With a value of 32MB the sequential resilver write rate was measured at * 800MB/s sustained while rebuilding to a distributed spare. */ -unsigned long zfs_rebuild_vdev_limit = 32 << 20; +static unsigned long zfs_rebuild_vdev_limit = 32 << 20; /* * Automatically start a pool scrub when the last active sequential resilver * completes in order to verify the checksums of all blocks which have been * resilvered. This option is enabled by default and is strongly recommended. */ -int zfs_rebuild_scrub_enabled = 1; +static int zfs_rebuild_scrub_enabled = 1; /* * For vdev_rebuild_initiate_sync() and vdev_rebuild_reset_sync(). diff --git a/module/zfs/vdev_removal.c b/module/zfs/vdev_removal.c index 2a3ee90fe..149de633d 100644 --- a/module/zfs/vdev_removal.c +++ b/module/zfs/vdev_removal.c @@ -94,7 +94,7 @@ typedef struct vdev_copy_arg { * doing a device removal. This determines how much i/o we can have * in flight concurrently. */ -int zfs_remove_max_copy_bytes = 64 * 1024 * 1024; +static const int zfs_remove_max_copy_bytes = 64 * 1024 * 1024; /* * The largest contiguous segment that we will attempt to allocate when @@ -112,7 +112,7 @@ int zfs_remove_max_segment = SPA_MAXBLOCKSIZE; * not be cancelled. This can result in a normally recoverable block * becoming permanently damaged and is not recommended. 
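/*
 * A caveat worth noting on the raidz kstat header loop above: snprintf()
 * returns the length the output would have had, so after truncation `off`
 * can exceed `size`, and the next `size - off` goes negative and is
 * converted to a huge size_t at the call. The ASSERT3U on the buffer size
 * is what makes the pattern safe there. A defensive variant of the same
 * accumulation (sketch, hypothetical names):
 */
#include <stdio.h>

static size_t
demo_append_cell(char *buf, size_t size, size_t off, const char *cell)
{
	if (off >= size)
		return (off);	/* buffer already full; keep the tally */
	return (off + (size_t)snprintf(buf + off, size - off, "%-16s", cell));
}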
*/ -int zfs_removal_ignore_errors = 0; +static int zfs_removal_ignore_errors = 0; /* * Allow a remap segment to span free chunks of at most this size. The main diff --git a/module/zfs/vdev_trim.c b/module/zfs/vdev_trim.c index 2bae33b2b..18aa2b3bf 100644 --- a/module/zfs/vdev_trim.c +++ b/module/zfs/vdev_trim.c @@ -96,12 +96,12 @@ /* * Maximum size of TRIM I/O, ranges will be chunked in to 128MiB lengths. */ -unsigned int zfs_trim_extent_bytes_max = 128 * 1024 * 1024; +static unsigned int zfs_trim_extent_bytes_max = 128 * 1024 * 1024; /* * Minimum size of TRIM I/O, extents smaller than 32Kib will be skipped. */ -unsigned int zfs_trim_extent_bytes_min = 32 * 1024; +static unsigned int zfs_trim_extent_bytes_min = 32 * 1024; /* * Skip uninitialized metaslabs during the TRIM process. This option is @@ -118,7 +118,7 @@ unsigned int zfs_trim_metaslab_skip = 0; * concurrent TRIM I/Os issued to the device is controlled by the * zfs_vdev_trim_min_active and zfs_vdev_trim_max_active module options. */ -unsigned int zfs_trim_queue_limit = 10; +static unsigned int zfs_trim_queue_limit = 10; /* * The minimum number of transaction groups between automatic trims of a @@ -134,7 +134,7 @@ unsigned int zfs_trim_queue_limit = 10; * has the opposite effect. The default value of 32 was determined though * testing to be a reasonable compromise. */ -unsigned int zfs_trim_txg_batch = 32; +static unsigned int zfs_trim_txg_batch = 32; /* * The trim_args are a control structure which describe how a leaf vdev diff --git a/module/zfs/zap.c b/module/zfs/zap.c index 6f03beef3..98ed284c9 100644 --- a/module/zfs/zap.c +++ b/module/zfs/zap.c @@ -76,7 +76,7 @@ * the zfs-specific implementation of the directory's st_size (which is * the number of entries). */ -int zap_iterate_prefetch = B_TRUE; +static int zap_iterate_prefetch = B_TRUE; int fzap_default_block_shift = 14; /* 16k blocksize */ diff --git a/module/zfs/zcp.c b/module/zfs/zcp.c index f200b928b..0a7906838 100644 --- a/module/zfs/zcp.c +++ b/module/zfs/zcp.c @@ -108,7 +108,7 @@ #define ZCP_NVLIST_MAX_DEPTH 20 -uint64_t zfs_lua_check_instrlimit_interval = 100; +static const uint64_t zfs_lua_check_instrlimit_interval = 100; unsigned long zfs_lua_max_instrlimit = ZCP_MAX_INSTRLIMIT; unsigned long zfs_lua_max_memlimit = ZCP_MAX_MEMLIMIT; @@ -631,11 +631,11 @@ zcp_dataset_hold(lua_State *state, dsl_pool_t *dp, const char *dsname, } static int zcp_debug(lua_State *); -static zcp_lib_info_t zcp_debug_info = { +static const zcp_lib_info_t zcp_debug_info = { .name = "debug", .func = zcp_debug, .pargs = { - { .za_name = "debug string", .za_lua_type = LUA_TSTRING}, + { .za_name = "debug string", .za_lua_type = LUA_TSTRING }, {NULL, 0} }, .kwargs = { @@ -648,7 +648,7 @@ zcp_debug(lua_State *state) { const char *dbgstring; zcp_run_info_t *ri = zcp_run_info(state); - zcp_lib_info_t *libinfo = &zcp_debug_info; + const zcp_lib_info_t *libinfo = &zcp_debug_info; zcp_parse_args(state, libinfo->name, libinfo->pargs, libinfo->kwargs); @@ -661,11 +661,11 @@ zcp_debug(lua_State *state) } static int zcp_exists(lua_State *); -static zcp_lib_info_t zcp_exists_info = { +static const zcp_lib_info_t zcp_exists_info = { .name = "exists", .func = zcp_exists, .pargs = { - { .za_name = "dataset", .za_lua_type = LUA_TSTRING}, + { .za_name = "dataset", .za_lua_type = LUA_TSTRING }, {NULL, 0} }, .kwargs = { @@ -678,7 +678,7 @@ zcp_exists(lua_State *state) { zcp_run_info_t *ri = zcp_run_info(state); dsl_pool_t *dp = ri->zri_pool; - zcp_lib_info_t *libinfo = &zcp_exists_info; + const zcp_lib_info_t 
*libinfo = &zcp_exists_info; zcp_parse_args(state, libinfo->name, libinfo->pargs, libinfo->kwargs); diff --git a/module/zfs/zcp_get.c b/module/zfs/zcp_get.c index 7256e4de1..fe712afd7 100644 --- a/module/zfs/zcp_get.c +++ b/module/zfs/zcp_get.c @@ -743,12 +743,12 @@ zcp_get_written_prop(lua_State *state, dsl_pool_t *dp, } static int zcp_get_prop(lua_State *state); -static zcp_lib_info_t zcp_get_prop_info = { +static const zcp_lib_info_t zcp_get_prop_info = { .name = "get_prop", .func = zcp_get_prop, .pargs = { - { .za_name = "dataset", .za_lua_type = LUA_TSTRING}, - { .za_name = "property", .za_lua_type = LUA_TSTRING}, + { .za_name = "dataset", .za_lua_type = LUA_TSTRING }, + { .za_name = "property", .za_lua_type = LUA_TSTRING }, {NULL, 0} }, .kwargs = { @@ -762,7 +762,7 @@ zcp_get_prop(lua_State *state) const char *dataset_name; const char *property_name; dsl_pool_t *dp = zcp_run_info(state)->zri_pool; - zcp_lib_info_t *libinfo = &zcp_get_prop_info; + const zcp_lib_info_t *libinfo = &zcp_get_prop_info; zcp_parse_args(state, libinfo->name, libinfo->pargs, libinfo->kwargs); diff --git a/module/zfs/zcp_iter.c b/module/zfs/zcp_iter.c index f727c56f2..ed575738f 100644 --- a/module/zfs/zcp_iter.c +++ b/module/zfs/zcp_iter.c @@ -107,12 +107,12 @@ zcp_clones_iter(lua_State *state) } static int zcp_clones_list(lua_State *); -static zcp_list_info_t zcp_clones_list_info = { +static const zcp_list_info_t zcp_clones_list_info = { .name = "clones", .func = zcp_clones_list, .gc = NULL, .pargs = { - { .za_name = "snapshot", .za_lua_type = LUA_TSTRING}, + { .za_name = "snapshot", .za_lua_type = LUA_TSTRING }, {NULL, 0} }, .kwargs = { @@ -194,12 +194,12 @@ zcp_snapshots_iter(lua_State *state) } static int zcp_snapshots_list(lua_State *); -static zcp_list_info_t zcp_snapshots_list_info = { +static const zcp_list_info_t zcp_snapshots_list_info = { .name = "snapshots", .func = zcp_snapshots_list, .gc = NULL, .pargs = { - { .za_name = "filesystem | volume", .za_lua_type = LUA_TSTRING}, + { .za_name = "filesystem | volume", .za_lua_type = LUA_TSTRING }, {NULL, 0} }, .kwargs = { @@ -281,12 +281,12 @@ zcp_children_iter(lua_State *state) } static int zcp_children_list(lua_State *); -static zcp_list_info_t zcp_children_list_info = { +static const zcp_list_info_t zcp_children_list_info = { .name = "children", .func = zcp_children_list, .gc = NULL, .pargs = { - { .za_name = "filesystem | volume", .za_lua_type = LUA_TSTRING}, + { .za_name = "filesystem | volume", .za_lua_type = LUA_TSTRING }, {NULL, 0} }, .kwargs = { @@ -361,13 +361,13 @@ zcp_user_props_iter(lua_State *state) } static int zcp_user_props_list(lua_State *); -static zcp_list_info_t zcp_user_props_list_info = { +static const zcp_list_info_t zcp_user_props_list_info = { .name = "user_properties", .func = zcp_user_props_list, .gc = zcp_user_props_list_gc, .pargs = { { .za_name = "filesystem | snapshot | volume", - .za_lua_type = LUA_TSTRING}, + .za_lua_type = LUA_TSTRING }, {NULL, 0} }, .kwargs = { @@ -383,13 +383,13 @@ static zcp_list_info_t zcp_user_props_list_info = { * versions of ZFS, we declare 'properties' as an alias for * 'user_properties'. 
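/*
 * The zcp argument and function tables above end with {NULL, 0} sentinels
 * rather than carrying explicit counts, which is what lets the loader loop
 * that follows iterate with `zcp_list_funcs[i] != NULL`. Reduced sketch
 * with stand-in types and values:
 */
#include <stddef.h>

typedef struct demo_arg {
	const char *za_name;
	int za_lua_type;
} demo_arg_t;

static const demo_arg_t demo_pargs[] = {
	{ "dataset", 1 },
	{ "property", 1 },
	{ NULL, 0 }		/* sentinel terminates iteration */
};

static int
demo_count_args(const demo_arg_t *pargs)
{
	int n = 0;

	while (pargs[n].za_name != NULL)
		n++;
	return (n);
}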
*/ -static zcp_list_info_t zcp_props_list_info = { +static const zcp_list_info_t zcp_props_list_info = { .name = "properties", .func = zcp_user_props_list, .gc = zcp_user_props_list_gc, .pargs = { { .za_name = "filesystem | snapshot | volume", - .za_lua_type = LUA_TSTRING}, + .za_lua_type = LUA_TSTRING }, {NULL, 0} }, .kwargs = { @@ -444,11 +444,11 @@ zcp_dataset_system_props(dsl_dataset_t *ds, nvlist_t *nv) } static int zcp_system_props_list(lua_State *); -static zcp_list_info_t zcp_system_props_list_info = { +static const zcp_list_info_t zcp_system_props_list_info = { .name = "system_properties", .func = zcp_system_props_list, .pargs = { - { .za_name = "dataset", .za_lua_type = LUA_TSTRING}, + { .za_name = "dataset", .za_lua_type = LUA_TSTRING }, {NULL, 0} }, .kwargs = { @@ -467,7 +467,7 @@ zcp_system_props_list(lua_State *state) char errbuf[128]; const char *dataset_name; dsl_pool_t *dp = zcp_run_info(state)->zri_pool; - zcp_list_info_t *libinfo = &zcp_system_props_list_info; + const zcp_list_info_t *libinfo = &zcp_system_props_list_info; zcp_parse_args(state, libinfo->name, libinfo->pargs, libinfo->kwargs); dataset_name = lua_tostring(state, 1); nvlist_t *nv = fnvlist_alloc(); @@ -566,11 +566,11 @@ zcp_bookmarks_iter(lua_State *state) } static int zcp_bookmarks_list(lua_State *); -static zcp_list_info_t zcp_bookmarks_list_info = { +static const zcp_list_info_t zcp_bookmarks_list_info = { .name = "bookmarks", .func = zcp_bookmarks_list, .pargs = { - { .za_name = "dataset", .za_lua_type = LUA_TSTRING}, + { .za_name = "dataset", .za_lua_type = LUA_TSTRING }, {NULL, 0} }, .kwargs = { @@ -654,12 +654,12 @@ zcp_holds_iter(lua_State *state) } static int zcp_holds_list(lua_State *); -static zcp_list_info_t zcp_holds_list_info = { +static const zcp_list_info_t zcp_holds_list_info = { .name = "holds", .func = zcp_holds_list, .gc = NULL, .pargs = { - { .za_name = "snapshot", .za_lua_type = LUA_TSTRING}, + { .za_name = "snapshot", .za_lua_type = LUA_TSTRING }, {NULL, 0} }, .kwargs = { @@ -710,8 +710,7 @@ zcp_list_func(lua_State *state) int zcp_load_list_lib(lua_State *state) { - int i; - zcp_list_info_t *zcp_list_funcs[] = { + const zcp_list_info_t *zcp_list_funcs[] = { &zcp_children_list_info, &zcp_snapshots_list_info, &zcp_user_props_list_info, @@ -725,8 +724,8 @@ zcp_load_list_lib(lua_State *state) lua_newtable(state); - for (i = 0; zcp_list_funcs[i] != NULL; i++) { - zcp_list_info_t *info = zcp_list_funcs[i]; + for (int i = 0; zcp_list_funcs[i] != NULL; i++) { + const zcp_list_info_t *info = zcp_list_funcs[i]; if (info->gc != NULL) { /* @@ -741,10 +740,9 @@ zcp_load_list_lib(lua_State *state) lua_pop(state, 1); } - lua_pushlightuserdata(state, info); + lua_pushlightuserdata(state, (void *)(uintptr_t)info); lua_pushcclosure(state, &zcp_list_func, 1); lua_setfield(state, -2, info->name); - info++; } return (1); diff --git a/module/zfs/zcp_synctask.c b/module/zfs/zcp_synctask.c index bfcdbcf9c..403856ae3 100644 --- a/module/zfs/zcp_synctask.c +++ b/module/zfs/zcp_synctask.c @@ -114,15 +114,15 @@ zcp_sync_task(lua_State *state, dsl_checkfunc_t *checkfunc, static int zcp_synctask_destroy(lua_State *, boolean_t, nvlist_t *); -static zcp_synctask_info_t zcp_synctask_destroy_info = { +static const zcp_synctask_info_t zcp_synctask_destroy_info = { .name = "destroy", .func = zcp_synctask_destroy, .pargs = { - {.za_name = "filesystem | snapshot", .za_lua_type = LUA_TSTRING}, + {.za_name = "filesystem | snapshot", .za_lua_type = LUA_TSTRING }, {NULL, 0} }, .kwargs = { - {.za_name = "defer", .za_lua_type = 
LUA_TBOOLEAN}, + {.za_name = "defer", .za_lua_type = LUA_TBOOLEAN }, {NULL, 0} }, .space_check = ZFS_SPACE_CHECK_DESTROY, @@ -167,11 +167,11 @@ zcp_synctask_destroy(lua_State *state, boolean_t sync, nvlist_t *err_details) } static int zcp_synctask_promote(lua_State *, boolean_t, nvlist_t *); -static zcp_synctask_info_t zcp_synctask_promote_info = { +static const zcp_synctask_info_t zcp_synctask_promote_info = { .name = "promote", .func = zcp_synctask_promote, .pargs = { - {.za_name = "clone", .za_lua_type = LUA_TSTRING}, + {.za_name = "clone", .za_lua_type = LUA_TSTRING }, {NULL, 0} }, .kwargs = { @@ -205,13 +205,13 @@ zcp_synctask_promote(lua_State *state, boolean_t sync, nvlist_t *err_details) } static int zcp_synctask_rollback(lua_State *, boolean_t, nvlist_t *err_details); -static zcp_synctask_info_t zcp_synctask_rollback_info = { +static const zcp_synctask_info_t zcp_synctask_rollback_info = { .name = "rollback", .func = zcp_synctask_rollback, .space_check = ZFS_SPACE_CHECK_RESERVED, .blocks_modified = 1, .pargs = { - {.za_name = "filesystem", .za_lua_type = LUA_TSTRING}, + {.za_name = "filesystem", .za_lua_type = LUA_TSTRING }, {0, 0} }, .kwargs = { @@ -236,12 +236,12 @@ zcp_synctask_rollback(lua_State *state, boolean_t sync, nvlist_t *err_details) } static int zcp_synctask_snapshot(lua_State *, boolean_t, nvlist_t *); -static zcp_synctask_info_t zcp_synctask_snapshot_info = { +static const zcp_synctask_info_t zcp_synctask_snapshot_info = { .name = "snapshot", .func = zcp_synctask_snapshot, .pargs = { {.za_name = "filesystem@snapname | volume@snapname", - .za_lua_type = LUA_TSTRING}, + .za_lua_type = LUA_TSTRING }, {NULL, 0} }, .kwargs = { @@ -304,7 +304,7 @@ zcp_synctask_snapshot(lua_State *state, boolean_t sync, nvlist_t *err_details) static int zcp_synctask_inherit_prop(lua_State *, boolean_t, nvlist_t *err_details); -static zcp_synctask_info_t zcp_synctask_inherit_prop_info = { +static const zcp_synctask_info_t zcp_synctask_inherit_prop_info = { .name = "inherit", .func = zcp_synctask_inherit_prop, .space_check = ZFS_SPACE_CHECK_RESERVED, @@ -382,12 +382,12 @@ zcp_synctask_inherit_prop(lua_State *state, boolean_t sync, } static int zcp_synctask_bookmark(lua_State *, boolean_t, nvlist_t *); -static zcp_synctask_info_t zcp_synctask_bookmark_info = { +static const zcp_synctask_info_t zcp_synctask_bookmark_info = { .name = "bookmark", .func = zcp_synctask_bookmark, .pargs = { - {.za_name = "snapshot | bookmark", .za_lua_type = LUA_TSTRING}, - {.za_name = "bookmark", .za_lua_type = LUA_TSTRING}, + {.za_name = "snapshot | bookmark", .za_lua_type = LUA_TSTRING }, + {.za_name = "bookmark", .za_lua_type = LUA_TSTRING }, {NULL, 0} }, .kwargs = { @@ -425,15 +425,15 @@ zcp_synctask_bookmark(lua_State *state, boolean_t sync, nvlist_t *err_details) } static int zcp_synctask_set_prop(lua_State *, boolean_t, nvlist_t *err_details); -static zcp_synctask_info_t zcp_synctask_set_prop_info = { +static const zcp_synctask_info_t zcp_synctask_set_prop_info = { .name = "set_prop", .func = zcp_synctask_set_prop, .space_check = ZFS_SPACE_CHECK_RESERVED, .blocks_modified = 2, .pargs = { - { .za_name = "dataset", .za_lua_type = LUA_TSTRING}, - { .za_name = "property", .za_lua_type = LUA_TSTRING}, - { .za_name = "value", .za_lua_type = LUA_TSTRING}, + { .za_name = "dataset", .za_lua_type = LUA_TSTRING }, + { .za_name = "property", .za_lua_type = LUA_TSTRING }, + { .za_name = "value", .za_lua_type = LUA_TSTRING }, { NULL, 0 } }, .kwargs = { @@ -524,8 +524,7 @@ zcp_synctask_wrapper(lua_State *state) int 
zcp_load_synctask_lib(lua_State *state, boolean_t sync) { - int i; - zcp_synctask_info_t *zcp_synctask_funcs[] = { + const zcp_synctask_info_t *zcp_synctask_funcs[] = { &zcp_synctask_destroy_info, &zcp_synctask_promote_info, &zcp_synctask_rollback_info, @@ -538,13 +537,12 @@ zcp_load_synctask_lib(lua_State *state, boolean_t sync) lua_newtable(state); - for (i = 0; zcp_synctask_funcs[i] != NULL; i++) { - zcp_synctask_info_t *info = zcp_synctask_funcs[i]; - lua_pushlightuserdata(state, info); + for (int i = 0; zcp_synctask_funcs[i] != NULL; i++) { + const zcp_synctask_info_t *info = zcp_synctask_funcs[i]; + lua_pushlightuserdata(state, (void *)(uintptr_t)info); lua_pushboolean(state, sync); lua_pushcclosure(state, &zcp_synctask_wrapper, 2); lua_setfield(state, -2, info->name); - info++; } return (1); diff --git a/module/zfs/zfs_fm.c b/module/zfs/zfs_fm.c index a05ff7330..828385b43 100644 --- a/module/zfs/zfs_fm.c +++ b/module/zfs/zfs_fm.c @@ -124,14 +124,14 @@ static taskqid_t recent_events_cleaner_tqid; * This setting can be changed dynamically and setting it to zero * disables duplicate detection. */ -unsigned int zfs_zevent_retain_max = 2000; +static unsigned int zfs_zevent_retain_max = 2000; /* * The lifespan for a recent ereport entry. The default of 15 minutes is * intended to outlive the zfs diagnosis engine's threshold of 10 errors * over a period of 10 minutes. */ -unsigned int zfs_zevent_retain_expire_secs = 900; +static unsigned int zfs_zevent_retain_expire_secs = 900; typedef enum zfs_subclass { ZSC_IO, diff --git a/module/zfs/zfs_ioctl.c b/module/zfs/zfs_ioctl.c index ca2da5612..e592734ee 100644 --- a/module/zfs/zfs_ioctl.c +++ b/module/zfs/zfs_ioctl.c @@ -222,7 +222,7 @@ #include kmutex_t zfsdev_state_lock; -zfsdev_state_t *zfsdev_state_list; +static zfsdev_state_t *zfsdev_state_list; /* * Limit maximum nvlist size. We don't want users passing in insane values @@ -236,7 +236,7 @@ unsigned long zfs_max_nvlist_src_size = 0; * the logged size to this many bytes. This must be less than DMU_MAX_ACCESS. * This applies primarily to zfs_ioc_channel_program(). */ -unsigned long zfs_history_output_max = 1024 * 1024; +static unsigned long zfs_history_output_max = 1024 * 1024; uint_t zfs_fsyncer_key; uint_t zfs_allow_log_key; @@ -6095,10 +6095,6 @@ zfs_ioc_share(zfs_cmd_t *zc) return (SET_ERROR(ENOSYS)); } -ace_t full_access[] = { - {(uid_t)-1, ACE_ALL_PERMS, ACE_EVERYONE, 0} -}; - /* * inputs: * zc_name name of containing filesystem diff --git a/module/zfs/zfs_log.c b/module/zfs/zfs_log.c index e248dc3cc..2f3eab679 100644 --- a/module/zfs/zfs_log.c +++ b/module/zfs/zfs_log.c @@ -530,7 +530,7 @@ zfs_log_rename(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype, znode_t *sdzp, * called as soon as the write is on stable storage (be it via a DMU sync or a * ZIL commit). 
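/*
 * On the `(void *)(uintptr_t)info` casts above: lua_pushlightuserdata()
 * takes a plain `void *`, and the info tables are now const. Casting
 * directly to `void *` would discard the qualifier (and warn under
 * -Wcast-qual); laundering the address through uintptr_t makes the intent
 * explicit. Stand-in sketch; demo_push_userdata() models the Lua call:
 */
#include <stdint.h>

extern void demo_push_userdata(void *p);

static const int demo_info = 42;

static void
demo_register(void)
{
	/* the consumer must cast back to `const int *` before reading */
	demo_push_userdata((void *)(uintptr_t)&demo_info);
}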
*/ -long zfs_immediate_write_sz = 32768; +static long zfs_immediate_write_sz = 32768; void zfs_log_write(zilog_t *zilog, dmu_tx_t *tx, int txtype, diff --git a/module/zfs/zfs_replay.c b/module/zfs/zfs_replay.c index e6ed3e738..f76725f0b 100644 --- a/module/zfs/zfs_replay.c +++ b/module/zfs/zfs_replay.c @@ -967,7 +967,7 @@ zfs_replay_acl(void *arg1, void *arg2, boolean_t byteswap) /* * Callback vectors for replaying records */ -zil_replay_func_t *zfs_replay_vector[TX_MAX_TYPE] = { +zil_replay_func_t *const zfs_replay_vector[TX_MAX_TYPE] = { zfs_replay_error, /* no such type */ zfs_replay_create, /* TX_CREATE */ zfs_replay_create, /* TX_MKDIR */ diff --git a/module/zfs/zfs_sa.c b/module/zfs/zfs_sa.c index 67be131da..817f63048 100644 --- a/module/zfs/zfs_sa.c +++ b/module/zfs/zfs_sa.c @@ -43,7 +43,7 @@ * this version of ZFS won't change or delete them. */ -sa_attr_reg_t zfs_attr_table[ZPL_END+1] = { +const sa_attr_reg_t zfs_attr_table[ZPL_END+1] = { {"ZPL_ATIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 0}, {"ZPL_MTIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 1}, {"ZPL_CTIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 2}, diff --git a/module/zfs/zil.c b/module/zfs/zil.c index b9f177dae..85a17f10b 100644 --- a/module/zfs/zil.c +++ b/module/zfs/zil.c @@ -89,12 +89,12 @@ * committed to stable storage. Please refer to the zil_commit_waiter() * function (and the comments within it) for more details. */ -int zfs_commit_timeout_pct = 5; +static int zfs_commit_timeout_pct = 5; /* * See zil.h for more information about these fields. */ -zil_stats_t zil_stats = { +static zil_stats_t zil_stats = { { "zil_commit_count", KSTAT_DATA_UINT64 }, { "zil_commit_writer_count", KSTAT_DATA_UINT64 }, { "zil_itx_count", KSTAT_DATA_UINT64 }, @@ -123,14 +123,14 @@ int zil_replay_disable = 0; * will cause ZIL corruption on power loss if a volatile out-of-order * write cache is enabled. */ -int zil_nocacheflush = 0; +static int zil_nocacheflush = 0; /* * Limit SLOG write size per commit executed with synchronous priority. * Any writes above that will be executed with lower (asynchronous) priority * to limit potential SLOG device abuse by single active ZIL writer. */ -unsigned long zil_slog_bulk = 768 * 1024; +static unsigned long zil_slog_bulk = 768 * 1024; static kmem_cache_t *zil_lwb_cache; static kmem_cache_t *zil_zcw_cache; @@ -1451,7 +1451,7 @@ zil_lwb_write_open(zilog_t *zilog, lwb_t *lwb) * aligned to 4KB) actually gets written. However, we can't always just * allocate SPA_OLD_MAXBLOCKSIZE as the slog space could be exhausted. */ -struct { +static const struct { uint64_t limit; uint64_t blksz; } zil_block_buckets[] = { @@ -1469,7 +1469,7 @@ struct { * initialized. Otherwise this should not be used directly; see * zl_max_block_size instead. */ -int zil_maxblocksize = SPA_OLD_MAXBLOCKSIZE; +static int zil_maxblocksize = SPA_OLD_MAXBLOCKSIZE; /* * Start a log block write and advance to the next log block. @@ -3509,7 +3509,7 @@ zil_resume(void *cookie) } typedef struct zil_replay_arg { - zil_replay_func_t **zr_replay; + zil_replay_func_t *const *zr_replay; void *zr_arg; boolean_t zr_byteswap; char *zr_lr; @@ -3630,7 +3630,8 @@ zil_incr_blks(zilog_t *zilog, const blkptr_t *bp, void *arg, uint64_t claim_txg) * If this dataset has a non-empty intent log, replay it and destroy it. 
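/*
 * Reading the new replay-vector declarations above: in
 * `zil_replay_func_t *const zfs_replay_vector[TX_MAX_TYPE]` the const binds
 * to the array elements -- immutable pointers to functions -- so the whole
 * table can live in read-only storage, and code holding a handle to it
 * needs the matching `*const *` type, as zil_replay_arg_t now does.
 * Minimal standalone demonstration with stand-in types:
 */
typedef int demo_replay_func_t(void *arg1, void *arg2, int byteswap);

static int
demo_replay_err(void *arg1, void *arg2, int byteswap)
{
	(void) arg1, (void) arg2, (void) byteswap;
	return (-1);
}

/* array of const function pointers; entries cannot be reassigned */
static demo_replay_func_t *const demo_replay_vector[2] = {
	demo_replay_err,	/* no such transaction type */
	demo_replay_err,	/* TX_CREATE stand-in */
};

/* a handle to such a table, as in the reworked zil_replay_arg_t */
static demo_replay_func_t *const *demo_handle = demo_replay_vector;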
diff --git a/module/zfs/zil.c b/module/zfs/zil.c
index b9f177dae..85a17f10b 100644
--- a/module/zfs/zil.c
+++ b/module/zfs/zil.c
@@ -89,12 +89,12 @@
  * committed to stable storage. Please refer to the zil_commit_waiter()
  * function (and the comments within it) for more details.
  */
-int zfs_commit_timeout_pct = 5;
+static int zfs_commit_timeout_pct = 5;
 
 /*
  * See zil.h for more information about these fields.
  */
-zil_stats_t zil_stats = {
+static zil_stats_t zil_stats = {
 	{ "zil_commit_count",		KSTAT_DATA_UINT64 },
 	{ "zil_commit_writer_count",	KSTAT_DATA_UINT64 },
 	{ "zil_itx_count",		KSTAT_DATA_UINT64 },
@@ -123,14 +123,14 @@ int zil_replay_disable = 0;
  * will cause ZIL corruption on power loss if a volatile out-of-order
  * write cache is enabled.
  */
-int zil_nocacheflush = 0;
+static int zil_nocacheflush = 0;
 
 /*
  * Limit SLOG write size per commit executed with synchronous priority.
  * Any writes above that will be executed with lower (asynchronous) priority
  * to limit potential SLOG device abuse by single active ZIL writer.
  */
-unsigned long zil_slog_bulk = 768 * 1024;
+static unsigned long zil_slog_bulk = 768 * 1024;
 
 static kmem_cache_t *zil_lwb_cache;
 static kmem_cache_t *zil_zcw_cache;
@@ -1451,7 +1451,7 @@ zil_lwb_write_open(zilog_t *zilog, lwb_t *lwb)
  * aligned to 4KB) actually gets written. However, we can't always just
  * allocate SPA_OLD_MAXBLOCKSIZE as the slog space could be exhausted.
  */
-struct {
+static const struct {
 	uint64_t	limit;
 	uint64_t	blksz;
 } zil_block_buckets[] = {
@@ -1469,7 +1469,7 @@ struct {
  * initialized. Otherwise this should not be used directly; see
  * zl_max_block_size instead.
  */
-int zil_maxblocksize = SPA_OLD_MAXBLOCKSIZE;
+static int zil_maxblocksize = SPA_OLD_MAXBLOCKSIZE;
 
 /*
  * Start a log block write and advance to the next log block.
@@ -3509,7 +3509,7 @@ zil_resume(void *cookie)
 }
 
 typedef struct zil_replay_arg {
-	zil_replay_func_t **zr_replay;
+	zil_replay_func_t *const *zr_replay;
 	void		*zr_arg;
 	boolean_t	zr_byteswap;
 	char		*zr_lr;
@@ -3630,7 +3630,8 @@ zil_incr_blks(zilog_t *zilog, const blkptr_t *bp, void *arg, uint64_t claim_txg)
  * If this dataset has a non-empty intent log, replay it and destroy it.
  */
 void
-zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE])
+zil_replay(objset_t *os, void *arg,
+    zil_replay_func_t *const replay_func[TX_MAX_TYPE])
 {
 	zilog_t *zilog = dmu_objset_zil(os);
 	const zil_header_t *zh = zilog->zl_header;
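The first zio.c hunk below adds a second const to zio_type_name. The two
levels do different jobs: "const char *" freezes the strings, while the
trailing "const" freezes the pointer array itself, so the whole table can
live in read-only memory. A minimal sketch of the difference (names_writable
and names_frozen are hypothetical):

/* sketch only -- not part of the patch */
static const char *names_writable[] = { "z_null", "z_rd", "z_wr" };
static const char *const names_frozen[] = { "z_null", "z_rd", "z_wr" };

int
main(void)
{
	names_writable[0] = "z_ioctl";	/* fine: only the strings are const */
	/* names_frozen[0] = "z_ioctl";    error: the pointers are const too */
	(void) names_frozen[0];
	return (0);
}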
diff --git a/module/zfs/zio.c b/module/zfs/zio.c
index e04be06bf..92115488c 100644
--- a/module/zfs/zio.c
+++ b/module/zfs/zio.c
@@ -57,7 +57,7 @@
  * I/O type descriptions
  * ==========================================================================
  */
-const char *zio_type_name[ZIO_TYPES] = {
+const char *const zio_type_name[ZIO_TYPES] = {
 	/*
 	 * Note: Linux kernel thread name length is limited
 	 * so these names will differ from upstream open zfs.
@@ -66,24 +66,24 @@ const char *zio_type_name[ZIO_TYPES] = {
 };
 
 int zio_dva_throttle_enabled = B_TRUE;
-int zio_deadman_log_all = B_FALSE;
+static int zio_deadman_log_all = B_FALSE;
 
 /*
  * ==========================================================================
  * I/O kmem caches
  * ==========================================================================
  */
-kmem_cache_t *zio_cache;
-kmem_cache_t *zio_link_cache;
+static kmem_cache_t *zio_cache;
+static kmem_cache_t *zio_link_cache;
 kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
 kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
 #if defined(ZFS_DEBUG) && !defined(_KERNEL)
-uint64_t zio_buf_cache_allocs[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
-uint64_t zio_buf_cache_frees[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
+static uint64_t zio_buf_cache_allocs[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
+static uint64_t zio_buf_cache_frees[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
 #endif
 
 /* Mark IOs as "slow" if they take longer than 30 seconds */
-int zio_slow_io_ms = (30 * MILLISEC);
+static int zio_slow_io_ms = (30 * MILLISEC);
 
 #define	BP_SPANB(indblkshift, level) \
 	(((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT)))
@@ -115,8 +115,8 @@ int zio_slow_io_ms = (30 * MILLISEC);
  * and may need to load new metaslabs to satisfy 128K allocations.
  */
 int zfs_sync_pass_deferred_free = 2; /* defer frees starting in this pass */
-int zfs_sync_pass_dont_compress = 8; /* don't compress starting in this pass */
-int zfs_sync_pass_rewrite = 2; /* rewrite new bps starting in this pass */
+static int zfs_sync_pass_dont_compress = 8; /* don't compress s. i. t. p. */
+static int zfs_sync_pass_rewrite = 2; /* rewrite new bps s. i. t. p. */
 
 /*
  * An allocating zio is one that either currently has the DVA allocate
@@ -129,12 +129,12 @@ int zfs_sync_pass_rewrite = 2; /* rewrite new bps starting in this pass */
  * allocations as well.
  */
 int zio_exclude_metadata = 0;
-int zio_requeue_io_start_cut_in_line = 1;
+static int zio_requeue_io_start_cut_in_line = 1;
 
 #ifdef ZFS_DEBUG
-int zio_buf_debug_limit = 16384;
+static const int zio_buf_debug_limit = 16384;
 #else
-int zio_buf_debug_limit = 0;
+static const int zio_buf_debug_limit = 0;
 #endif
 
 static inline void __zio_execute(zio_t *zio);
diff --git a/module/zfs/zvol.c b/module/zfs/zvol.c
index 31b3b91d6..e7010e77a 100644
--- a/module/zfs/zvol.c
+++ b/module/zfs/zvol.c
@@ -90,9 +90,9 @@ unsigned int zvol_inhibit_dev = 0;
 unsigned int zvol_volmode = ZFS_VOLMODE_GEOM;
 
 struct hlist_head *zvol_htable;
-list_t zvol_state_list;
+static list_t zvol_state_list;
 krwlock_t zvol_state_lock;
-const zvol_platform_ops_t *ops;
+static const zvol_platform_ops_t *ops;
 
 typedef enum {
 	ZVOL_ASYNC_REMOVE_MINORS,
@@ -486,6 +486,7 @@ zvol_replay_write(void *arg1, void *arg2, boolean_t byteswap)
 static int
 zvol_replay_err(void *arg1, void *arg2, boolean_t byteswap)
 {
+	(void) arg1, (void) arg2, (void) byteswap;
 	return (SET_ERROR(ENOTSUP));
 }
 
@@ -493,7 +494,7 @@ zvol_replay_err(void *arg1, void *arg2, boolean_t byteswap)
  * Callback vectors for replaying records.
  * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
  */
-zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
+zil_replay_func_t *const zvol_replay_vector[TX_MAX_TYPE] = {
 	zvol_replay_err,	/* no such transaction type */
 	zvol_replay_err,	/* TX_CREATE */
 	zvol_replay_err,	/* TX_MKDIR */
@@ -521,7 +522,7 @@ zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
  * We store data in the log buffers if it's small enough.
  * Otherwise we will later flush the data out via dmu_sync().
  */
-ssize_t zvol_immediate_write_sz = 32768;
+static const ssize_t zvol_immediate_write_sz = 32768;
 
 void
 zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, uint64_t offset,