mirror of https://git.proxmox.com/git/mirror_zfs.git
synced 2025-10-26 18:05:04 +03:00
	module: zfs: fix unused, remove argsused
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Ahelenia Ziemiańska <nabijaczleweli@nabijaczleweli.xyz>
Closes #12844
parent ccbe9efd6b
commit 94a4b7ec3d
				| @ -287,10 +287,10 @@ vdev_file_io_start(zio_t *zio) | ||||
| 	    TQ_SLEEP), !=, 0); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| vdev_file_io_done(zio_t *zio) | ||||
| { | ||||
| 	(void) zio; | ||||
| } | ||||
| 
 | ||||
| vdev_ops_t vdev_file_ops = { | ||||
|  | ||||
| @ -37,6 +37,8 @@ zfs_racct_read(uint64_t size, uint64_t iops) | ||||
| 		racct_add_force(curproc, RACCT_READIOPS, iops); | ||||
| 		PROC_UNLOCK(curproc); | ||||
| 	} | ||||
| #else | ||||
| 	(void) size; | ||||
| #endif /* RACCT */ | ||||
| } | ||||
| 
 | ||||
| @ -51,5 +53,7 @@ zfs_racct_write(uint64_t size, uint64_t iops) | ||||
| 		racct_add_force(curproc, RACCT_WRITEIOPS, iops); | ||||
| 		PROC_UNLOCK(curproc); | ||||
| 	} | ||||
| #else | ||||
| 	(void) size; | ||||
| #endif /* RACCT */ | ||||
| } | ||||
|  | ||||
| @ -1033,7 +1033,6 @@ error: | ||||
|  * and le_bswap indicates whether a byteswap is needed to get this block | ||||
|  * into little endian format. | ||||
|  */ | ||||
| /* ARGSUSED */ | ||||
| int | ||||
| zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen, | ||||
|     boolean_t should_bswap, uint8_t *portable_mac, uint8_t *local_mac) | ||||
| @ -1259,13 +1258,13 @@ zio_crypt_do_indirect_mac_checksum_abd(boolean_t generate, abd_t *abd, | ||||
|  * It also means we'll only return one zfs_uio_t. | ||||
|  */ | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf, | ||||
|     uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap, zfs_uio_t *puio, | ||||
|     zfs_uio_t *out_uio, uint_t *enc_len, uint8_t **authbuf, uint_t *auth_len, | ||||
|     boolean_t *no_crypt) | ||||
| { | ||||
| 	(void) puio; | ||||
| 	uint8_t *aadbuf = zio_buf_alloc(datalen); | ||||
| 	uint8_t *src, *dst, *slrp, *dlrp, *blkend, *aadp; | ||||
| 	iovec_t *dst_iovecs; | ||||
| @ -1562,12 +1561,12 @@ zio_crypt_init_uios_dnode(boolean_t encrypt, uint64_t version, | ||||
| 	return (0); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| zio_crypt_init_uios_normal(boolean_t encrypt, uint8_t *plainbuf, | ||||
|     uint8_t *cipherbuf, uint_t datalen, zfs_uio_t *puio, zfs_uio_t *out_uio, | ||||
|     uint_t *enc_len) | ||||
| { | ||||
| 	(void) puio; | ||||
| 	int ret; | ||||
| 	uint_t nr_plain = 1, nr_cipher = 2; | ||||
| 	iovec_t *plain_iovecs = NULL, *cipher_iovecs = NULL; | ||||
|  | ||||
| @ -298,10 +298,10 @@ vdev_file_io_start(zio_t *zio) | ||||
| 	    TQ_SLEEP), !=, TASKQID_INVALID); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| vdev_file_io_done(zio_t *zio) | ||||
| { | ||||
| 	(void) zio; | ||||
| } | ||||
| 
 | ||||
| vdev_ops_t vdev_file_ops = { | ||||
|  | ||||
| @ -28,9 +28,11 @@ | ||||
| void | ||||
| zfs_racct_read(uint64_t size, uint64_t iops) | ||||
| { | ||||
| 	(void) size, (void) iops; | ||||
| } | ||||
| 
 | ||||
| void | ||||
| zfs_racct_write(uint64_t size, uint64_t iops) | ||||
| { | ||||
| 	(void) size, (void) iops; | ||||
| } | ||||
|  | ||||
| @ -1776,6 +1776,7 @@ zio_crypt_init_uios_normal(boolean_t encrypt, uint8_t *plainbuf, | ||||
|     uint8_t *cipherbuf, uint_t datalen, zfs_uio_t *puio, zfs_uio_t *cuio, | ||||
|     uint_t *enc_len) | ||||
| { | ||||
| 	(void) encrypt; | ||||
| 	int ret; | ||||
| 	uint_t nr_plain = 1, nr_cipher = 2; | ||||
| 	iovec_t *plain_iovecs = NULL, *cipher_iovecs = NULL; | ||||
|  | ||||
| @ -889,10 +889,10 @@ abd_copy_from_buf_off(abd_t *abd, const void *buf, size_t off, size_t size) | ||||
| 	    &ba_ptr); | ||||
| } | ||||
| 
 | ||||
| /*ARGSUSED*/ | ||||
| static int | ||||
| abd_zero_off_cb(void *buf, size_t size, void *private) | ||||
| { | ||||
| 	(void) private; | ||||
| 	(void) memset(buf, 0, size); | ||||
| 	return (0); | ||||
| } | ||||
| @ -967,10 +967,10 @@ abd_iterate_func2(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff, | ||||
| 	return (ret); | ||||
| } | ||||
| 
 | ||||
| /*ARGSUSED*/ | ||||
| static int | ||||
| abd_copy_off_cb(void *dbuf, void *sbuf, size_t size, void *private) | ||||
| { | ||||
| 	(void) private; | ||||
| 	(void) memcpy(dbuf, sbuf, size); | ||||
| 	return (0); | ||||
| } | ||||
| @ -985,10 +985,10 @@ abd_copy_off(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff, size_t size) | ||||
| 	    abd_copy_off_cb, NULL); | ||||
| } | ||||
| 
 | ||||
| /*ARGSUSED*/ | ||||
| static int | ||||
| abd_cmp_cb(void *bufa, void *bufb, size_t size, void *private) | ||||
| { | ||||
| 	(void) private; | ||||
| 	return (memcmp(bufa, bufb, size)); | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -1093,8 +1093,6 @@ static kmem_cache_t *buf_cache; | ||||
| static void | ||||
| buf_fini(void) | ||||
| { | ||||
| 	int i; | ||||
| 
 | ||||
| #if defined(_KERNEL) | ||||
| 	/*
 | ||||
| 	 * Large allocations which do not require contiguous pages | ||||
| @ -1106,7 +1104,7 @@ buf_fini(void) | ||||
| 	kmem_free(buf_hash_table.ht_table, | ||||
| 	    (buf_hash_table.ht_mask + 1) * sizeof (void *)); | ||||
| #endif | ||||
| 	for (i = 0; i < BUF_LOCKS; i++) | ||||
| 	for (int i = 0; i < BUF_LOCKS; i++) | ||||
| 		mutex_destroy(BUF_HASH_LOCK(i)); | ||||
| 	kmem_cache_destroy(hdr_full_cache); | ||||
| 	kmem_cache_destroy(hdr_full_crypt_cache); | ||||
| @ -1118,10 +1116,10 @@ buf_fini(void) | ||||
|  * Constructor callback - called when the cache is empty | ||||
|  * and a new buf is requested. | ||||
|  */ | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| hdr_full_cons(void *vbuf, void *unused, int kmflag) | ||||
| { | ||||
| 	(void) unused, (void) kmflag; | ||||
| 	arc_buf_hdr_t *hdr = vbuf; | ||||
| 
 | ||||
| 	bzero(hdr, HDR_FULL_SIZE); | ||||
| @ -1137,10 +1135,10 @@ hdr_full_cons(void *vbuf, void *unused, int kmflag) | ||||
| 	return (0); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| hdr_full_crypt_cons(void *vbuf, void *unused, int kmflag) | ||||
| { | ||||
| 	(void) unused; | ||||
| 	arc_buf_hdr_t *hdr = vbuf; | ||||
| 
 | ||||
| 	hdr_full_cons(vbuf, unused, kmflag); | ||||
| @ -1150,10 +1148,10 @@ hdr_full_crypt_cons(void *vbuf, void *unused, int kmflag) | ||||
| 	return (0); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| hdr_l2only_cons(void *vbuf, void *unused, int kmflag) | ||||
| { | ||||
| 	(void) unused, (void) kmflag; | ||||
| 	arc_buf_hdr_t *hdr = vbuf; | ||||
| 
 | ||||
| 	bzero(hdr, HDR_L2ONLY_SIZE); | ||||
| @ -1162,10 +1160,10 @@ hdr_l2only_cons(void *vbuf, void *unused, int kmflag) | ||||
| 	return (0); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| buf_cons(void *vbuf, void *unused, int kmflag) | ||||
| { | ||||
| 	(void) unused, (void) kmflag; | ||||
| 	arc_buf_t *buf = vbuf; | ||||
| 
 | ||||
| 	bzero(buf, sizeof (arc_buf_t)); | ||||
| @ -1179,10 +1177,10 @@ buf_cons(void *vbuf, void *unused, int kmflag) | ||||
|  * Destructor callback - called when a cached buf is | ||||
|  * no longer required. | ||||
|  */ | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| hdr_full_dest(void *vbuf, void *unused) | ||||
| { | ||||
| 	(void) unused; | ||||
| 	arc_buf_hdr_t *hdr = vbuf; | ||||
| 
 | ||||
| 	ASSERT(HDR_EMPTY(hdr)); | ||||
| @ -1193,30 +1191,30 @@ hdr_full_dest(void *vbuf, void *unused) | ||||
| 	arc_space_return(HDR_FULL_SIZE, ARC_SPACE_HDRS); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| hdr_full_crypt_dest(void *vbuf, void *unused) | ||||
| { | ||||
| 	(void) unused; | ||||
| 	arc_buf_hdr_t *hdr = vbuf; | ||||
| 
 | ||||
| 	hdr_full_dest(vbuf, unused); | ||||
| 	arc_space_return(sizeof (hdr->b_crypt_hdr), ARC_SPACE_HDRS); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| hdr_l2only_dest(void *vbuf, void *unused) | ||||
| { | ||||
| 	arc_buf_hdr_t *hdr __maybe_unused = vbuf; | ||||
| 	(void) unused; | ||||
| 	arc_buf_hdr_t *hdr = vbuf; | ||||
| 
 | ||||
| 	ASSERT(HDR_EMPTY(hdr)); | ||||
| 	arc_space_return(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| buf_dest(void *vbuf, void *unused) | ||||
| { | ||||
| 	(void) unused; | ||||
| 	arc_buf_t *buf = vbuf; | ||||
| 
 | ||||
| 	mutex_destroy(&buf->b_evict_lock); | ||||
| @ -1512,11 +1510,11 @@ arc_cksum_compute(arc_buf_t *buf) | ||||
| void | ||||
| arc_buf_sigsegv(int sig, siginfo_t *si, void *unused) | ||||
| { | ||||
| 	(void) sig, (void) unused; | ||||
| 	panic("Got SIGSEGV at address: 0x%lx\n", (long)si->si_addr); | ||||
| } | ||||
| #endif | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| arc_buf_unwatch(arc_buf_t *buf) | ||||
| { | ||||
| @ -1525,10 +1523,11 @@ arc_buf_unwatch(arc_buf_t *buf) | ||||
| 		ASSERT0(mprotect(buf->b_data, arc_buf_size(buf), | ||||
| 		    PROT_READ | PROT_WRITE)); | ||||
| 	} | ||||
| #else | ||||
| 	(void) buf; | ||||
| #endif | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| arc_buf_watch(arc_buf_t *buf) | ||||
| { | ||||
| @ -1536,6 +1535,8 @@ arc_buf_watch(arc_buf_t *buf) | ||||
| 	if (arc_watch) | ||||
| 		ASSERT0(mprotect(buf->b_data, arc_buf_size(buf), | ||||
| 		    PROT_READ)); | ||||
| #else | ||||
| 	(void) buf; | ||||
| #endif | ||||
| } | ||||
| 
 | ||||
| @ -1947,7 +1948,7 @@ error: | ||||
|  * arc_buf_fill(). | ||||
|  */ | ||||
| static void | ||||
| arc_buf_untransform_in_place(arc_buf_t *buf, kmutex_t *hash_lock) | ||||
| arc_buf_untransform_in_place(arc_buf_t *buf) | ||||
| { | ||||
| 	arc_buf_hdr_t *hdr = buf->b_hdr; | ||||
| 
 | ||||
| @ -2051,7 +2052,7 @@ arc_buf_fill(arc_buf_t *buf, spa_t *spa, const zbookmark_phys_t *zb, | ||||
| 
 | ||||
| 			if (hash_lock != NULL) | ||||
| 				mutex_enter(hash_lock); | ||||
| 			arc_buf_untransform_in_place(buf, hash_lock); | ||||
| 			arc_buf_untransform_in_place(buf); | ||||
| 			if (hash_lock != NULL) | ||||
| 				mutex_exit(hash_lock); | ||||
| 
 | ||||
| @ -2337,6 +2338,7 @@ remove_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag) | ||||
| void | ||||
| arc_buf_info(arc_buf_t *ab, arc_buf_info_t *abi, int state_index) | ||||
| { | ||||
| 	(void) state_index; | ||||
| 	arc_buf_hdr_t *hdr = ab->b_hdr; | ||||
| 	l1arc_buf_hdr_t *l1hdr = NULL; | ||||
| 	l2arc_buf_hdr_t *l2hdr = NULL; | ||||
| @ -4872,10 +4874,11 @@ arc_kmem_reap_soon(void) | ||||
| 	abd_cache_reap_now(); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static boolean_t | ||||
| arc_evict_cb_check(void *arg, zthr_t *zthr) | ||||
| { | ||||
| 	(void) arg, (void) zthr; | ||||
| 
 | ||||
| #ifdef ZFS_DEBUG | ||||
| 	/*
 | ||||
| 	 * This is necessary in order to keep the kstat information | ||||
| @ -4915,10 +4918,11 @@ arc_evict_cb_check(void *arg, zthr_t *zthr) | ||||
|  * Keep arc_size under arc_c by running arc_evict which evicts data | ||||
|  * from the ARC. | ||||
|  */ | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| arc_evict_cb(void *arg, zthr_t *zthr) | ||||
| { | ||||
| 	(void) arg, (void) zthr; | ||||
| 
 | ||||
| 	uint64_t evicted = 0; | ||||
| 	fstrans_cookie_t cookie = spl_fstrans_mark(); | ||||
| 
 | ||||
| @ -4955,10 +4959,11 @@ arc_evict_cb(void *arg, zthr_t *zthr) | ||||
| 	spl_fstrans_unmark(cookie); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static boolean_t | ||||
| arc_reap_cb_check(void *arg, zthr_t *zthr) | ||||
| { | ||||
| 	(void) arg, (void) zthr; | ||||
| 
 | ||||
| 	int64_t free_memory = arc_available_memory(); | ||||
| 	static int reap_cb_check_counter = 0; | ||||
| 
 | ||||
| @ -5002,10 +5007,11 @@ arc_reap_cb_check(void *arg, zthr_t *zthr) | ||||
|  * target size of the cache (arc_c), causing the arc_evict_cb() | ||||
|  * to free more buffers. | ||||
|  */ | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| arc_reap_cb(void *arg, zthr_t *zthr) | ||||
| { | ||||
| 	(void) arg, (void) zthr; | ||||
| 
 | ||||
| 	int64_t free_memory; | ||||
| 	fstrans_cookie_t cookie = spl_fstrans_mark(); | ||||
| 
 | ||||
| @ -5620,11 +5626,12 @@ arc_buf_access(arc_buf_t *buf) | ||||
| } | ||||
| 
 | ||||
| /* a generic arc_read_done_func_t which you can use */ | ||||
| /* ARGSUSED */ | ||||
| void | ||||
| arc_bcopy_func(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp, | ||||
|     arc_buf_t *buf, void *arg) | ||||
| { | ||||
| 	(void) zio, (void) zb, (void) bp; | ||||
| 
 | ||||
| 	if (buf == NULL) | ||||
| 		return; | ||||
| 
 | ||||
| @ -5633,11 +5640,11 @@ arc_bcopy_func(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp, | ||||
| } | ||||
| 
 | ||||
| /* a generic arc_read_done_func_t */ | ||||
| /* ARGSUSED */ | ||||
| void | ||||
| arc_getbuf_func(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp, | ||||
|     arc_buf_t *buf, void *arg) | ||||
| { | ||||
| 	(void) zb, (void) bp; | ||||
| 	arc_buf_t **bufp = arg; | ||||
| 
 | ||||
| 	if (buf == NULL) { | ||||
| @ -9687,10 +9694,10 @@ l2arc_hdr_limit_reached(void) | ||||
|  * This thread feeds the L2ARC at regular intervals.  This is the beating | ||||
|  * heart of the L2ARC. | ||||
|  */ | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| l2arc_feed_thread(void *unused) | ||||
| { | ||||
| 	(void) unused; | ||||
| 	callb_cpr_t cpr; | ||||
| 	l2arc_dev_t *dev; | ||||
| 	spa_t *spa; | ||||
|  | ||||
| @ -860,10 +860,10 @@ struct space_range_arg { | ||||
| 	uint64_t uncomp; | ||||
| }; | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| space_range_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, dmu_tx_t *tx) | ||||
| { | ||||
| 	(void) bp_freed, (void) tx; | ||||
| 	struct space_range_arg *sra = arg; | ||||
| 
 | ||||
| 	if (bp->blk_birth > sra->mintxg && bp->blk_birth <= sra->maxtxg) { | ||||
| @ -932,11 +932,11 @@ bpobj_space_range(bpobj_t *bpo, uint64_t mintxg, uint64_t maxtxg, | ||||
|  * bpobj are designated as free or allocated that information is not preserved | ||||
|  * in bplists. | ||||
|  */ | ||||
| /* ARGSUSED */ | ||||
| int | ||||
| bplist_append_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, | ||||
|     dmu_tx_t *tx) | ||||
| { | ||||
| 	(void) bp_freed, (void) tx; | ||||
| 	bplist_t *bpl = arg; | ||||
| 	bplist_append(bpl, bp); | ||||
| 	return (0); | ||||
|  | ||||
| @ -147,11 +147,11 @@ bptree_add(objset_t *os, uint64_t obj, blkptr_t *bp, uint64_t birth_txg, | ||||
| 	dmu_buf_rele(db, FTAG); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| bptree_visit_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, | ||||
|     const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg) | ||||
| { | ||||
| 	(void) zilog, (void) dnp; | ||||
| 	int err; | ||||
| 	struct bptree_args *ba = arg; | ||||
| 
 | ||||
|  | ||||
| @ -279,10 +279,10 @@ static unsigned long dbuf_metadata_cache_target_bytes(void); | ||||
| uint_t dbuf_cache_hiwater_pct = 10; | ||||
| uint_t dbuf_cache_lowater_pct = 10; | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| dbuf_cons(void *vdb, void *unused, int kmflag) | ||||
| { | ||||
| 	(void) unused, (void) kmflag; | ||||
| 	dmu_buf_impl_t *db = vdb; | ||||
| 	bzero(db, sizeof (dmu_buf_impl_t)); | ||||
| 
 | ||||
| @ -295,10 +295,10 @@ dbuf_cons(void *vdb, void *unused, int kmflag) | ||||
| 	return (0); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| dbuf_dest(void *vdb, void *unused) | ||||
| { | ||||
| 	(void) unused; | ||||
| 	dmu_buf_impl_t *db = vdb; | ||||
| 	mutex_destroy(&db->db_mtx); | ||||
| 	rw_destroy(&db->db_rwlock); | ||||
| @ -720,10 +720,10 @@ dbuf_evict_one(void) | ||||
|  * of the dbuf cache is at or below the maximum size. Once the dbuf is aged | ||||
|  * out of the cache it is destroyed and becomes eligible for arc eviction. | ||||
|  */ | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| dbuf_evict_thread(void *unused) | ||||
| { | ||||
| 	(void) unused; | ||||
| 	callb_cpr_t cpr; | ||||
| 
 | ||||
| 	CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG); | ||||
| @ -1276,6 +1276,7 @@ static void | ||||
| dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp, | ||||
|     arc_buf_t *buf, void *vdb) | ||||
| { | ||||
| 	(void) zb, (void) bp; | ||||
| 	dmu_buf_impl_t *db = vdb; | ||||
| 
 | ||||
| 	mutex_enter(&db->db_mtx); | ||||
| @ -1369,7 +1370,7 @@ dbuf_handle_indirect_hole(dmu_buf_impl_t *db, dnode_t *dn) | ||||
|  * was taken, ENOENT if no action was taken. | ||||
|  */ | ||||
| static int | ||||
| dbuf_read_hole(dmu_buf_impl_t *db, dnode_t *dn, uint32_t flags) | ||||
| dbuf_read_hole(dmu_buf_impl_t *db, dnode_t *dn) | ||||
| { | ||||
| 	ASSERT(MUTEX_HELD(&db->db_mtx)); | ||||
| 
 | ||||
| @ -1487,7 +1488,7 @@ dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags, | ||||
| 		goto early_unlock; | ||||
| 	} | ||||
| 
 | ||||
| 	err = dbuf_read_hole(db, dn, flags); | ||||
| 	err = dbuf_read_hole(db, dn); | ||||
| 	if (err == 0) | ||||
| 		goto early_unlock; | ||||
| 
 | ||||
| @ -2617,10 +2618,10 @@ dbuf_override_impl(dmu_buf_impl_t *db, const blkptr_t *bp, dmu_tx_t *tx) | ||||
| 	dl->dr_overridden_by.blk_birth = dr->dr_txg; | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| void | ||||
| dmu_buf_fill_done(dmu_buf_t *dbuf, dmu_tx_t *tx) | ||||
| { | ||||
| 	(void) tx; | ||||
| 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf; | ||||
| 	dbuf_states_t old_state; | ||||
| 	mutex_enter(&db->db_mtx); | ||||
| @ -3137,6 +3138,7 @@ static void | ||||
| dbuf_issue_final_prefetch_done(zio_t *zio, const zbookmark_phys_t *zb, | ||||
|     const blkptr_t *iobp, arc_buf_t *abuf, void *private) | ||||
| { | ||||
| 	(void) zio, (void) zb, (void) iobp; | ||||
| 	dbuf_prefetch_arg_t *dpa = private; | ||||
| 
 | ||||
| 	dbuf_prefetch_fini(dpa, B_TRUE); | ||||
| @ -3185,6 +3187,7 @@ static void | ||||
| dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb, | ||||
|     const blkptr_t *iobp, arc_buf_t *abuf, void *private) | ||||
| { | ||||
| 	(void) zb, (void) iobp; | ||||
| 	dbuf_prefetch_arg_t *dpa = private; | ||||
| 
 | ||||
| 	ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel); | ||||
| @ -4451,10 +4454,10 @@ dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb) | ||||
| { | ||||
| 	(void) buf; | ||||
| 	dmu_buf_impl_t *db = vdb; | ||||
| 	dnode_t *dn; | ||||
| 	blkptr_t *bp = zio->io_bp; | ||||
| @ -4542,7 +4545,6 @@ dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb) | ||||
| 	dmu_buf_unlock_parent(db, dblt, FTAG); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| /*
 | ||||
|  * This function gets called just prior to running through the compression | ||||
|  * stage of the zio pipeline. If we're an indirect block comprised of only | ||||
| @ -4553,6 +4555,7 @@ dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb) | ||||
| static void | ||||
| dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb) | ||||
| { | ||||
| 	(void) zio, (void) buf; | ||||
| 	dmu_buf_impl_t *db = vdb; | ||||
| 	dnode_t *dn; | ||||
| 	blkptr_t *bp; | ||||
| @ -4596,10 +4599,10 @@ dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb) | ||||
|  * so this callback allows us to retire dirty space gradually, as the physical | ||||
|  * i/os complete. | ||||
|  */ | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg) | ||||
| { | ||||
| 	(void) buf; | ||||
| 	dmu_buf_impl_t *db = arg; | ||||
| 	objset_t *os = db->db_objset; | ||||
| 	dsl_pool_t *dp = dmu_objset_pool(os); | ||||
| @ -4618,10 +4621,10 @@ dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg) | ||||
| 	dsl_pool_undirty_space(dp, delta, zio->io_txg); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb) | ||||
| { | ||||
| 	(void) buf; | ||||
| 	dmu_buf_impl_t *db = vdb; | ||||
| 	blkptr_t *bp_orig = &zio->io_bp_orig; | ||||
| 	blkptr_t *bp = db->db_blkptr; | ||||
|  | ||||
| @ -812,13 +812,14 @@ get_next_chunk(dnode_t *dn, uint64_t *start, uint64_t minimum, uint64_t *l1blks) | ||||
|  * otherwise return false. | ||||
|  * Used below in dmu_free_long_range_impl() to enable abort when unmounting | ||||
|  */ | ||||
| /*ARGSUSED*/ | ||||
| static boolean_t | ||||
| dmu_objset_zfs_unmounting(objset_t *os) | ||||
| { | ||||
| #ifdef _KERNEL | ||||
| 	if (dmu_objset_type(os) == DMU_OST_ZFS) | ||||
| 		return (zfs_get_vfs_flag_unmounted(os)); | ||||
| #else | ||||
| 	(void) os; | ||||
| #endif | ||||
| 	return (B_FALSE); | ||||
| } | ||||
| @ -1502,10 +1503,10 @@ typedef struct { | ||||
| 	dmu_tx_t		*dsa_tx; | ||||
| } dmu_sync_arg_t; | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| dmu_sync_ready(zio_t *zio, arc_buf_t *buf, void *varg) | ||||
| { | ||||
| 	(void) buf; | ||||
| 	dmu_sync_arg_t *dsa = varg; | ||||
| 	dmu_buf_t *db = dsa->dsa_zgd->zgd_db; | ||||
| 	blkptr_t *bp = zio->io_bp; | ||||
| @ -1530,10 +1531,10 @@ dmu_sync_late_arrival_ready(zio_t *zio) | ||||
| 	dmu_sync_ready(zio, NULL, zio->io_private); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| dmu_sync_done(zio_t *zio, arc_buf_t *buf, void *varg) | ||||
| { | ||||
| 	(void) buf; | ||||
| 	dmu_sync_arg_t *dsa = varg; | ||||
| 	dbuf_dirty_record_t *dr = dsa->dsa_dr; | ||||
| 	dmu_buf_impl_t *db = dr->dr_dbuf; | ||||
| @ -2274,10 +2275,10 @@ byteswap_uint16_array(void *vbuf, size_t size) | ||||
| 		buf[i] = BSWAP_16(buf[i]); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| void | ||||
| byteswap_uint8_array(void *vbuf, size_t size) | ||||
| { | ||||
| 	(void) vbuf, (void) size; | ||||
| } | ||||
| 
 | ||||
| void | ||||
|  | ||||
| @ -108,11 +108,11 @@ report_dnode(dmu_diffarg_t *da, uint64_t object, dnode_phys_t *dnp) | ||||
| 	(((uint64_t)dnp->dn_datablkszsec) << (SPA_MINBLOCKSHIFT + \ | ||||
| 	(level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT))) | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| diff_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, | ||||
|     const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg) | ||||
| { | ||||
| 	(void) zilog; | ||||
| 	dmu_diffarg_t *da = arg; | ||||
| 	int err = 0; | ||||
| 
 | ||||
|  | ||||
| @ -721,9 +721,9 @@ static int | ||||
| dmu_objset_own_impl(dsl_dataset_t *ds, dmu_objset_type_t type, | ||||
|     boolean_t readonly, boolean_t decrypt, void *tag, objset_t **osp) | ||||
| { | ||||
| 	int err; | ||||
| 	(void) tag; | ||||
| 
 | ||||
| 	err = dmu_objset_from_ds(ds, osp); | ||||
| 	int err = dmu_objset_from_ds(ds, osp); | ||||
| 	if (err != 0) { | ||||
| 		return (err); | ||||
| 	} else if (type != DMU_OST_ANY && type != (*osp)->os_phys->os_type) { | ||||
| @ -1127,7 +1127,6 @@ typedef struct dmu_objset_create_arg { | ||||
| 	dsl_crypto_params_t *doca_dcp; | ||||
| } dmu_objset_create_arg_t; | ||||
| 
 | ||||
| /*ARGSUSED*/ | ||||
| static int | ||||
| dmu_objset_create_check(void *arg, dmu_tx_t *tx) | ||||
| { | ||||
| @ -1323,7 +1322,6 @@ typedef struct dmu_objset_clone_arg { | ||||
| 	proc_t *doca_proc; | ||||
| } dmu_objset_clone_arg_t; | ||||
| 
 | ||||
| /*ARGSUSED*/ | ||||
| static int | ||||
| dmu_objset_clone_check(void *arg, dmu_tx_t *tx) | ||||
| { | ||||
| @ -1535,10 +1533,10 @@ dmu_objset_sync_dnodes(multilist_sublist_t *list, dmu_tx_t *tx) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| dmu_objset_write_ready(zio_t *zio, arc_buf_t *abuf, void *arg) | ||||
| { | ||||
| 	(void) abuf; | ||||
| 	blkptr_t *bp = zio->io_bp; | ||||
| 	objset_t *os = arg; | ||||
| 	dnode_phys_t *dnp = &os->os_phys->os_meta_dnode; | ||||
| @ -1566,10 +1564,10 @@ dmu_objset_write_ready(zio_t *zio, arc_buf_t *abuf, void *arg) | ||||
| 		rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| dmu_objset_write_done(zio_t *zio, arc_buf_t *abuf, void *arg) | ||||
| { | ||||
| 	(void) abuf; | ||||
| 	blkptr_t *bp = zio->io_bp; | ||||
| 	blkptr_t *bp_orig = &zio->io_bp_orig; | ||||
| 	objset_t *os = arg; | ||||
|  | ||||
| @ -249,11 +249,11 @@ zfs_get_deleteq(objset_t *os) | ||||
|  * Third, if there is a deleted object, we need to create a redaction record for | ||||
|  * all of the blocks in that object. | ||||
|  */ | ||||
| /*ARGSUSED*/ | ||||
| static int | ||||
| redact_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, | ||||
|     const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg) | ||||
| { | ||||
| 	(void) spa, (void) zilog; | ||||
| 	struct redact_thread_arg *rta = arg; | ||||
| 	struct redact_record *record; | ||||
| 
 | ||||
|  | ||||
| @ -1099,11 +1099,11 @@ range_alloc(enum type type, uint64_t object, uint64_t start_blkid, | ||||
|  * This is the callback function to traverse_dataset that acts as a worker | ||||
|  * thread for dmu_send_impl. | ||||
|  */ | ||||
| /*ARGSUSED*/ | ||||
| static int | ||||
| send_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, | ||||
|     const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg) | ||||
| { | ||||
| 	(void) zilog; | ||||
| 	struct send_thread_arg *sta = arg; | ||||
| 	struct send_range *record; | ||||
| 
 | ||||
| @ -2157,6 +2157,7 @@ setup_resume_points(struct dmu_send_params *dspp, | ||||
|     struct send_merge_thread_arg *smt_arg, boolean_t resuming, objset_t *os, | ||||
|     redaction_list_t *redact_rl, nvlist_t *nvl) | ||||
| { | ||||
| 	(void) smt_arg; | ||||
| 	dsl_dataset_t *to_ds = dspp->to_ds; | ||||
| 	int err = 0; | ||||
| 
 | ||||
|  | ||||
| @ -560,11 +560,11 @@ traverse_dnode(traverse_data_t *td, const blkptr_t *bp, const dnode_phys_t *dnp, | ||||
| 	return (err); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| traverse_prefetcher(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, | ||||
|     const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg) | ||||
| { | ||||
| 	(void) zilog, (void) dnp; | ||||
| 	prefetch_data_t *pfd = arg; | ||||
| 	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE; | ||||
| 	arc_flags_t aflags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH | | ||||
|  | ||||
| @ -260,6 +260,7 @@ dmu_zfetch_stream_create(zfetch_t *zf, uint64_t blkid) | ||||
| static void | ||||
| dmu_zfetch_stream_done(void *arg, boolean_t io_issued) | ||||
| { | ||||
| 	(void) io_issued; | ||||
| 	zstream_t *zs = arg; | ||||
| 
 | ||||
| 	if (zfs_refcount_remove(&zs->zs_refs, NULL) == 0) | ||||
|  | ||||
| @ -108,12 +108,11 @@ dbuf_compare(const void *x1, const void *x2) | ||||
| 	return (TREE_PCMP(d1, d2)); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| dnode_cons(void *arg, void *unused, int kmflag) | ||||
| { | ||||
| 	(void) unused, (void) kmflag; | ||||
| 	dnode_t *dn = arg; | ||||
| 	int i; | ||||
| 
 | ||||
| 	rw_init(&dn->dn_struct_rwlock, NULL, RW_NOLOCKDEP, NULL); | ||||
| 	mutex_init(&dn->dn_mtx, NULL, MUTEX_DEFAULT, NULL); | ||||
| @ -139,7 +138,7 @@ dnode_cons(void *arg, void *unused, int kmflag) | ||||
| 	bzero(&dn->dn_next_blksz[0], sizeof (dn->dn_next_blksz)); | ||||
| 	bzero(&dn->dn_next_maxblkid[0], sizeof (dn->dn_next_maxblkid)); | ||||
| 
 | ||||
| 	for (i = 0; i < TXG_SIZE; i++) { | ||||
| 	for (int i = 0; i < TXG_SIZE; i++) { | ||||
| 		multilist_link_init(&dn->dn_dirty_link[i]); | ||||
| 		dn->dn_free_ranges[i] = NULL; | ||||
| 		list_create(&dn->dn_dirty_records[i], | ||||
| @ -174,11 +173,10 @@ dnode_cons(void *arg, void *unused, int kmflag) | ||||
| 	return (0); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| dnode_dest(void *arg, void *unused) | ||||
| { | ||||
| 	int i; | ||||
| 	(void) unused; | ||||
| 	dnode_t *dn = arg; | ||||
| 
 | ||||
| 	rw_destroy(&dn->dn_struct_rwlock); | ||||
| @ -190,7 +188,7 @@ dnode_dest(void *arg, void *unused) | ||||
| 	zfs_refcount_destroy(&dn->dn_tx_holds); | ||||
| 	ASSERT(!list_link_active(&dn->dn_link)); | ||||
| 
 | ||||
| 	for (i = 0; i < TXG_SIZE; i++) { | ||||
| 	for (int i = 0; i < TXG_SIZE; i++) { | ||||
| 		ASSERT(!multilist_link_active(&dn->dn_dirty_link[i])); | ||||
| 		ASSERT3P(dn->dn_free_ranges[i], ==, NULL); | ||||
| 		list_destroy(&dn->dn_dirty_records[i]); | ||||
| @ -889,7 +887,6 @@ dnode_move_impl(dnode_t *odn, dnode_t *ndn) | ||||
| 	odn->dn_moved = (uint8_t)-1; | ||||
| } | ||||
| 
 | ||||
| /*ARGSUSED*/ | ||||
| static kmem_cbrc_t | ||||
| dnode_move(void *buf, void *newbuf, size_t size, void *arg) | ||||
| { | ||||
|  | ||||
| @ -1203,7 +1203,6 @@ dsl_redaction_list_long_rele(redaction_list_t *rl, void *tag) | ||||
| 	(void) zfs_refcount_remove(&rl->rl_longholds, tag); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| redaction_list_evict_sync(void *rlu) | ||||
| { | ||||
| @ -1470,10 +1469,11 @@ dsl_bookmark_next_changed(dsl_dataset_t *head, dsl_dataset_t *origin, | ||||
|  * Adjust the FBN of any bookmarks that reference this block, whose "next" | ||||
|  * is the head dataset. | ||||
|  */ | ||||
| /* ARGSUSED */ | ||||
| void | ||||
| dsl_bookmark_block_killed(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx) | ||||
| { | ||||
| 	(void) tx; | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Iterate over bookmarks whose "next" is the head dataset. | ||||
| 	 */ | ||||
|  | ||||
| @ -2943,11 +2943,11 @@ typedef struct dsl_dataset_rename_snapshot_arg { | ||||
| 	dmu_tx_t *ddrsa_tx; | ||||
| } dsl_dataset_rename_snapshot_arg_t; | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| dsl_dataset_rename_snapshot_check_impl(dsl_pool_t *dp, | ||||
|     dsl_dataset_t *hds, void *arg) | ||||
| { | ||||
| 	(void) dp; | ||||
| 	dsl_dataset_rename_snapshot_arg_t *ddrsa = arg; | ||||
| 	int error; | ||||
| 	uint64_t val; | ||||
| @ -4305,7 +4305,6 @@ typedef struct dsl_dataset_set_qr_arg { | ||||
| } dsl_dataset_set_qr_arg_t; | ||||
| 
 | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| dsl_dataset_set_refquota_check(void *arg, dmu_tx_t *tx) | ||||
| { | ||||
| @ -4512,7 +4511,6 @@ typedef struct dsl_dataset_set_compression_arg { | ||||
| 	uint64_t ddsca_value; | ||||
| } dsl_dataset_set_compression_arg_t; | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| dsl_dataset_set_compression_check(void *arg, dmu_tx_t *tx) | ||||
| { | ||||
|  | ||||
| @ -699,11 +699,11 @@ struct killarg { | ||||
| 	dmu_tx_t *tx; | ||||
| }; | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, | ||||
|     const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg) | ||||
| { | ||||
| 	(void) spa, (void) dnp; | ||||
| 	struct killarg *ka = arg; | ||||
| 	dmu_tx_t *tx = ka->tx; | ||||
| 
 | ||||
| @ -1246,10 +1246,10 @@ dsl_destroy_head(const char *name) | ||||
|  * inconsistent datasets, even if we encounter an error trying to | ||||
|  * process one of them. | ||||
|  */ | ||||
| /* ARGSUSED */ | ||||
| int | ||||
| dsl_destroy_inconsistent(const char *dsname, void *arg) | ||||
| { | ||||
| 	(void) arg; | ||||
| 	objset_t *os; | ||||
| 
 | ||||
| 	if (dmu_objset_hold(dsname, FTAG, &os) == 0) { | ||||
|  | ||||
| @ -764,6 +764,8 @@ dsl_enforce_ds_ss_limits(dsl_dir_t *dd, zfs_prop_t prop, | ||||
| 	 */ | ||||
| 	if (secpolicy_zfs_proc(cr, proc) == 0) | ||||
| 		return (ENFORCE_NEVER); | ||||
| #else | ||||
| 	(void) proc; | ||||
| #endif | ||||
| 
 | ||||
| 	if ((obj = dsl_dir_phys(dd)->dd_head_dataset_obj) == 0) | ||||
| @ -1895,10 +1897,10 @@ typedef struct dsl_valid_rename_arg { | ||||
| 	int nest_delta; | ||||
| } dsl_valid_rename_arg_t; | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| dsl_valid_rename(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg) | ||||
| { | ||||
| 	(void) dp; | ||||
| 	dsl_valid_rename_arg_t *dvra = arg; | ||||
| 	char namebuf[ZFS_MAX_DATASET_NAME_LEN]; | ||||
| 
 | ||||
| @ -2395,6 +2397,7 @@ dsl_dir_activity_in_progress(dsl_dir_t *dd, dsl_dataset_t *ds, | ||||
| 		 * The delete queue is ZPL specific, and libzpool doesn't have | ||||
| 		 * it. It doesn't make sense to wait for it. | ||||
| 		 */ | ||||
| 		(void) ds; | ||||
| 		*in_progress = B_FALSE; | ||||
| 		break; | ||||
| #endif | ||||
|  | ||||
| @ -455,8 +455,8 @@ dsl_pool_destroy_obsolete_bpobj(dsl_pool_t *dp, dmu_tx_t *tx) | ||||
| } | ||||
| 
 | ||||
| dsl_pool_t * | ||||
| dsl_pool_create(spa_t *spa, nvlist_t *zplprops, dsl_crypto_params_t *dcp, | ||||
|     uint64_t txg) | ||||
| dsl_pool_create(spa_t *spa, nvlist_t *zplprops __attribute__((unused)), | ||||
|     dsl_crypto_params_t *dcp, uint64_t txg) | ||||
| { | ||||
| 	int err; | ||||
| 	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg); | ||||
|  | ||||
| @ -504,10 +504,10 @@ dsl_prop_hascb(dsl_dataset_t *ds) | ||||
| 	return (!list_is_empty(&ds->ds_prop_cbs)); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| dsl_prop_notify_all_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg) | ||||
| { | ||||
| 	(void) arg; | ||||
| 	dsl_dir_t *dd = ds->ds_dir; | ||||
| 	dsl_prop_record_t *pr; | ||||
| 	dsl_prop_cb_record_t *cbr; | ||||
|  | ||||
| @ -700,10 +700,10 @@ dsl_scan_sync_state(dsl_scan_t *scn, dmu_tx_t *tx, state_sync_type_t sync_type) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| int | ||||
| dsl_scan_setup_check(void *arg, dmu_tx_t *tx) | ||||
| { | ||||
| 	(void) arg; | ||||
| 	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan; | ||||
| 	vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev; | ||||
| 
 | ||||
| @ -854,7 +854,6 @@ dsl_scan(dsl_pool_t *dp, pool_scan_func_t func) | ||||
| 	    dsl_scan_setup_sync, &func, 0, ZFS_SPACE_CHECK_EXTRA_RESERVED)); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx) | ||||
| { | ||||
| @ -1001,10 +1000,10 @@ dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx) | ||||
| 	ASSERT(!dsl_scan_is_running(scn)); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| dsl_scan_cancel_check(void *arg, dmu_tx_t *tx) | ||||
| { | ||||
| 	(void) arg; | ||||
| 	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan; | ||||
| 
 | ||||
| 	if (!dsl_scan_is_running(scn)) | ||||
| @ -1012,10 +1011,10 @@ dsl_scan_cancel_check(void *arg, dmu_tx_t *tx) | ||||
| 	return (0); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| dsl_scan_cancel_sync(void *arg, dmu_tx_t *tx) | ||||
| { | ||||
| 	(void) arg; | ||||
| 	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan; | ||||
| 
 | ||||
| 	dsl_scan_done(scn, B_FALSE, tx); | ||||
| @ -1377,11 +1376,11 @@ typedef struct zil_scan_arg { | ||||
| 	zil_header_t	*zsa_zh; | ||||
| } zil_scan_arg_t; | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| dsl_scan_zil_block(zilog_t *zilog, const blkptr_t *bp, void *arg, | ||||
|     uint64_t claim_txg) | ||||
| { | ||||
| 	(void) zilog; | ||||
| 	zil_scan_arg_t *zsa = arg; | ||||
| 	dsl_pool_t *dp = zsa->zsa_dp; | ||||
| 	dsl_scan_t *scn = dp->dp_scan; | ||||
| @ -1408,11 +1407,11 @@ dsl_scan_zil_block(zilog_t *zilog, const blkptr_t *bp, void *arg, | ||||
| 	return (0); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| dsl_scan_zil_record(zilog_t *zilog, const lr_t *lrc, void *arg, | ||||
|     uint64_t claim_txg) | ||||
| { | ||||
| 	(void) zilog; | ||||
| 	if (lrc->lrc_txtype == TX_WRITE) { | ||||
| 		zil_scan_arg_t *zsa = arg; | ||||
| 		dsl_pool_t *dp = zsa->zsa_dp; | ||||
| @ -1637,6 +1636,7 @@ static void | ||||
| dsl_scan_prefetch_cb(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp, | ||||
|     arc_buf_t *buf, void *private) | ||||
| { | ||||
| 	(void) zio; | ||||
| 	scan_prefetch_ctx_t *spc = private; | ||||
| 	dsl_scan_t *scn = spc->spc_scn; | ||||
| 	spa_t *spa = scn->scn_dp->dp_spa; | ||||
| @ -1696,7 +1696,6 @@ out: | ||||
| 	scan_prefetch_ctx_rele(spc, scn); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| dsl_scan_prefetch_thread(void *arg) | ||||
| { | ||||
| @ -2336,7 +2335,6 @@ dsl_scan_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx) | ||||
| 	dsl_scan_sync_state(scn, tx, SYNC_CACHED); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| enqueue_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg) | ||||
| { | ||||
| @ -2520,10 +2518,10 @@ out: | ||||
| 	dsl_dataset_rele(ds, FTAG); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| enqueue_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg) | ||||
| { | ||||
| 	(void) arg; | ||||
| 	dsl_dataset_t *ds; | ||||
| 	int err; | ||||
| 	dsl_scan_t *scn = dp->dp_scan; | ||||
| @ -2559,16 +2557,15 @@ enqueue_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg) | ||||
| 	return (0); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| void | ||||
| dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum, | ||||
|     ddt_entry_t *dde, dmu_tx_t *tx) | ||||
| { | ||||
| 	(void) tx; | ||||
| 	const ddt_key_t *ddk = &dde->dde_key; | ||||
| 	ddt_phys_t *ddp = dde->dde_phys; | ||||
| 	blkptr_t bp; | ||||
| 	zbookmark_phys_t zb = { 0 }; | ||||
| 	int p; | ||||
| 
 | ||||
| 	if (!dsl_scan_is_running(scn)) | ||||
| 		return; | ||||
| @ -2587,7 +2584,7 @@ dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum, | ||||
| 	if (scn->scn_done_txg != 0) | ||||
| 		return; | ||||
| 
 | ||||
| 	for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { | ||||
| 	for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { | ||||
| 		if (ddp->ddp_phys_birth == 0 || | ||||
| 		    ddp->ddp_phys_birth > scn->scn_phys.scn_max_txg) | ||||
| 			continue; | ||||
|  | ||||
| @ -32,10 +32,10 @@ | ||||
| 
 | ||||
| #define	DST_AVG_BLKSHIFT 14 | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| dsl_null_checkfunc(void *arg, dmu_tx_t *tx) | ||||
| { | ||||
| 	(void) arg, (void) tx; | ||||
| 	return (0); | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -483,17 +483,17 @@ zfs_zevent_destroy(zfs_zevent_t *ze) | ||||
| /*
 | ||||
|  * Wrappers for FM nvlist allocators | ||||
|  */ | ||||
| /* ARGSUSED */ | ||||
| static void * | ||||
| i_fm_alloc(nv_alloc_t *nva, size_t size) | ||||
| { | ||||
| 	(void) nva; | ||||
| 	return (kmem_zalloc(size, KM_SLEEP)); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| i_fm_free(nv_alloc_t *nva, void *buf, size_t size) | ||||
| { | ||||
| 	(void) nva; | ||||
| 	kmem_free(buf, size); | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -83,10 +83,10 @@ gzip_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n) | ||||
| 	return ((size_t)dstlen); | ||||
| } | ||||
| 
 | ||||
| /*ARGSUSED*/ | ||||
| int | ||||
| gzip_decompress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n) | ||||
| { | ||||
| 	(void) n; | ||||
| 	zlen_t dstlen = d_len; | ||||
| 
 | ||||
| 	ASSERT(d_len >= s_len); | ||||
|  | ||||
| @ -46,11 +46,11 @@ static int LZ4_compress64kCtx(void *ctx, const char *source, char *dest, | ||||
| 
 | ||||
| static kmem_cache_t *lz4_cache; | ||||
| 
 | ||||
| /*ARGSUSED*/ | ||||
| size_t | ||||
| lz4_compress_zfs(void *s_start, void *d_start, size_t s_len, | ||||
|     size_t d_len, int n) | ||||
| { | ||||
| 	(void) n; | ||||
| 	uint32_t bufsiz; | ||||
| 	char *dest = d_start; | ||||
| 
 | ||||
| @ -74,11 +74,11 @@ lz4_compress_zfs(void *s_start, void *d_start, size_t s_len, | ||||
| 	return (bufsiz + sizeof (bufsiz)); | ||||
| } | ||||
| 
 | ||||
| /*ARGSUSED*/ | ||||
| int | ||||
| lz4_decompress_zfs(void *s_start, void *d_start, size_t s_len, | ||||
|     size_t d_len, int n) | ||||
| { | ||||
| 	(void) n; | ||||
| 	const char *src = s_start; | ||||
| 	uint32_t bufsiz = BE_IN32(src); | ||||
| 
 | ||||
| @ -463,7 +463,6 @@ LZ4_NbCommonBytes(register U32 val) | ||||
| 
 | ||||
| /* Compression functions */ | ||||
| 
 | ||||
| /*ARGSUSED*/ | ||||
| static int | ||||
| LZ4_compressCtx(void *ctx, const char *source, char *dest, int isize, | ||||
|     int osize) | ||||
| @ -654,7 +653,6 @@ LZ4_compressCtx(void *ctx, const char *source, char *dest, int isize, | ||||
| 	HASHLOG64K)) | ||||
| #define	LZ4_HASH64K_VALUE(p)	LZ4_HASH64K_FUNCTION(A32(p)) | ||||
| 
 | ||||
| /*ARGSUSED*/ | ||||
| static int | ||||
| LZ4_compress64kCtx(void *ctx, const char *source, char *dest, int isize, | ||||
|     int osize) | ||||
|  | ||||
| @ -45,10 +45,10 @@ | ||||
| #define	OFFSET_MASK	((1 << (16 - MATCH_BITS)) - 1) | ||||
| #define	LEMPEL_SIZE	1024 | ||||
| 
 | ||||
| /*ARGSUSED*/ | ||||
| size_t | ||||
| lzjb_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n) | ||||
| { | ||||
| 	(void) n; | ||||
| 	uchar_t *src = s_start; | ||||
| 	uchar_t *dst = d_start; | ||||
| 	uchar_t *cpy; | ||||
| @ -100,10 +100,10 @@ lzjb_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n) | ||||
| 	return (dst - (uchar_t *)d_start); | ||||
| } | ||||
| 
 | ||||
| /*ARGSUSED*/ | ||||
| int | ||||
| lzjb_decompress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n) | ||||
| { | ||||
| 	(void) s_len, (void) n; | ||||
| 	uchar_t *src = s_start; | ||||
| 	uchar_t *dst = d_start; | ||||
| 	uchar_t *d_end = (uchar_t *)d_start + d_len; | ||||
|  | ||||
| @ -1406,7 +1406,6 @@ metaslab_size_tree_full_load(range_tree_t *rt) | ||||
|  * Create any block allocator specific components. The current allocators | ||||
|  * rely on using both a size-ordered range_tree_t and an array of uint64_t's. | ||||
|  */ | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| metaslab_rt_create(range_tree_t *rt, void *arg) | ||||
| { | ||||
| @ -1431,10 +1430,10 @@ metaslab_rt_create(range_tree_t *rt, void *arg) | ||||
| 	mrap->mra_floor_shift = metaslab_by_size_min_shift; | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| metaslab_rt_destroy(range_tree_t *rt, void *arg) | ||||
| { | ||||
| 	(void) rt; | ||||
| 	metaslab_rt_arg_t *mrap = arg; | ||||
| 	zfs_btree_t *size_tree = mrap->mra_bt; | ||||
| 
 | ||||
| @ -1442,7 +1441,6 @@ metaslab_rt_destroy(range_tree_t *rt, void *arg) | ||||
| 	kmem_free(mrap, sizeof (*mrap)); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg) | ||||
| { | ||||
| @ -1456,7 +1454,6 @@ metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg) | ||||
| 	zfs_btree_add(size_tree, rs); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg) | ||||
| { | ||||
| @ -1470,7 +1467,6 @@ metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg) | ||||
| 	zfs_btree_remove(size_tree, rs); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| metaslab_rt_vacate(range_tree_t *rt, void *arg) | ||||
| { | ||||
| @ -2240,6 +2236,8 @@ metaslab_potentially_evict(metaslab_class_t *mc) | ||||
| 			inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache); | ||||
| 		} | ||||
| 	} | ||||
| #else | ||||
| 	(void) mc; | ||||
| #endif | ||||
| } | ||||
| 
 | ||||
| @ -4719,7 +4717,6 @@ metaslab_active_mask_verify(metaslab_t *msp) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static uint64_t | ||||
| metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal, | ||||
|     uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d, | ||||
| @ -5345,11 +5342,11 @@ metaslab_free_concrete(vdev_t *vd, uint64_t offset, uint64_t asize, | ||||
| 	mutex_exit(&msp->ms_lock); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| void | ||||
| metaslab_free_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset, | ||||
|     uint64_t size, void *arg) | ||||
| { | ||||
| 	(void) inner_offset; | ||||
| 	boolean_t *checkpoint = arg; | ||||
| 
 | ||||
| 	ASSERT3P(checkpoint, !=, NULL); | ||||
| @ -5715,11 +5712,11 @@ typedef struct metaslab_claim_cb_arg_t { | ||||
| 	int		mcca_error; | ||||
| } metaslab_claim_cb_arg_t; | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| metaslab_claim_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset, | ||||
|     uint64_t size, void *arg) | ||||
| { | ||||
| 	(void) inner_offset; | ||||
| 	metaslab_claim_cb_arg_t *mcca_arg = arg; | ||||
| 
 | ||||
| 	if (mcca_arg->mcca_error == 0) { | ||||
| @ -5971,11 +5968,12 @@ metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp) | ||||
| 	spa_config_exit(spa, SCL_VDEV, FTAG); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| metaslab_check_free_impl_cb(uint64_t inner, vdev_t *vd, uint64_t offset, | ||||
|     uint64_t size, void *arg) | ||||
| { | ||||
| 	(void) inner, (void) arg; | ||||
| 
 | ||||
| 	if (vd->vdev_ops == &vdev_indirect_ops) | ||||
| 		return; | ||||
| 
 | ||||
|  | ||||
| @ -741,7 +741,6 @@ range_tree_is_empty(range_tree_t *rt) | ||||
| 	return (range_tree_space(rt) == 0); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| void | ||||
| rt_btree_create(range_tree_t *rt, void *arg) | ||||
| { | ||||
| @ -764,35 +763,34 @@ rt_btree_create(range_tree_t *rt, void *arg) | ||||
| 	zfs_btree_create(size_tree, rt->rt_btree_compare, size); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| void | ||||
| rt_btree_destroy(range_tree_t *rt, void *arg) | ||||
| { | ||||
| 	(void) rt; | ||||
| 	zfs_btree_t *size_tree = arg; | ||||
| 	ASSERT0(zfs_btree_numnodes(size_tree)); | ||||
| 
 | ||||
| 	zfs_btree_destroy(size_tree); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| void | ||||
| rt_btree_add(range_tree_t *rt, range_seg_t *rs, void *arg) | ||||
| { | ||||
| 	(void) rt; | ||||
| 	zfs_btree_t *size_tree = arg; | ||||
| 
 | ||||
| 	zfs_btree_add(size_tree, rs); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| void | ||||
| rt_btree_remove(range_tree_t *rt, range_seg_t *rs, void *arg) | ||||
| { | ||||
| 	(void) rt; | ||||
| 	zfs_btree_t *size_tree = arg; | ||||
| 
 | ||||
| 	zfs_btree_remove(size_tree, rs); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| void | ||||
| rt_btree_vacate(range_tree_t *rt, void *arg) | ||||
| { | ||||
|  | ||||
| @ -212,20 +212,20 @@ sa_attr_type_t sa_dummy_zpl_layout[] = { 0 }; | ||||
| static int sa_legacy_attr_count = ARRAY_SIZE(sa_legacy_attrs); | ||||
| static kmem_cache_t *sa_cache = NULL; | ||||
| 
 | ||||
| /*ARGSUSED*/ | ||||
| static int | ||||
| sa_cache_constructor(void *buf, void *unused, int kmflag) | ||||
| { | ||||
| 	(void) unused, (void) kmflag; | ||||
| 	sa_handle_t *hdl = buf; | ||||
| 
 | ||||
| 	mutex_init(&hdl->sa_lock, NULL, MUTEX_DEFAULT, NULL); | ||||
| 	return (0); | ||||
| } | ||||
| 
 | ||||
| /*ARGSUSED*/ | ||||
| static void | ||||
| sa_cache_destructor(void *buf, void *unused) | ||||
| { | ||||
| 	(void) unused; | ||||
| 	sa_handle_t *hdl = buf; | ||||
| 	mutex_destroy(&hdl->sa_lock); | ||||
| } | ||||
| @ -1218,11 +1218,11 @@ sa_attr_iter(objset_t *os, sa_hdr_phys_t *hdr, dmu_object_type_t type, | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /*ARGSUSED*/ | ||||
| static void | ||||
| sa_byteswap_cb(void *hdr, void *attr_addr, sa_attr_type_t attr, | ||||
|     uint16_t length, int length_idx, boolean_t variable_length, void *userp) | ||||
| { | ||||
| 	(void) hdr, (void) length_idx, (void) variable_length; | ||||
| 	sa_handle_t *hdl = userp; | ||||
| 	sa_os_t *sa = hdl->sa_os->os_sa; | ||||
| 
 | ||||
| @ -1309,10 +1309,10 @@ sa_build_index(sa_handle_t *hdl, sa_buf_type_t buftype) | ||||
| 	return (0); | ||||
| } | ||||
| 
 | ||||
| /*ARGSUSED*/ | ||||
| static void | ||||
| sa_evict_sync(void *dbu) | ||||
| { | ||||
| 	(void) dbu; | ||||
| 	panic("evicting sa dbuf\n"); | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -41,11 +41,11 @@ sha_incremental(void *buf, size_t size, void *arg) | ||||
| 	return (0); | ||||
| } | ||||
| 
 | ||||
| /*ARGSUSED*/ | ||||
| void | ||||
| abd_checksum_SHA256(abd_t *abd, uint64_t size, | ||||
|     const void *ctx_template, zio_cksum_t *zcp) | ||||
| { | ||||
| 	(void) ctx_template; | ||||
| 	int ret; | ||||
| 	SHA2_CTX ctx; | ||||
| 	zio_cksum_t tmp; | ||||
| @ -78,11 +78,11 @@ bswap: | ||||
| 	zcp->zc_word[3] = BE_64(tmp.zc_word[3]); | ||||
| } | ||||
| 
 | ||||
| /*ARGSUSED*/ | ||||
| void | ||||
| abd_checksum_SHA512_native(abd_t *abd, uint64_t size, | ||||
|     const void *ctx_template, zio_cksum_t *zcp) | ||||
| { | ||||
| 	(void) ctx_template; | ||||
| 	SHA2_CTX	ctx; | ||||
| 
 | ||||
| 	SHA2Init(SHA512_256, &ctx); | ||||
| @ -90,7 +90,6 @@ abd_checksum_SHA512_native(abd_t *abd, uint64_t size, | ||||
| 	SHA2Final(zcp, &ctx); | ||||
| } | ||||
| 
 | ||||
| /*ARGSUSED*/ | ||||
| void | ||||
| abd_checksum_SHA512_byteswap(abd_t *abd, uint64_t size, | ||||
|     const void *ctx_template, zio_cksum_t *zcp) | ||||
|  | ||||
| @ -834,7 +834,6 @@ spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /*ARGSUSED*/ | ||||
| static int | ||||
| spa_change_guid_check(void *arg, dmu_tx_t *tx) | ||||
| { | ||||
| @ -2281,11 +2280,12 @@ int spa_load_verify_shift = 4; | ||||
| int spa_load_verify_metadata = B_TRUE; | ||||
| int spa_load_verify_data = B_TRUE; | ||||
| 
 | ||||
| /*ARGSUSED*/ | ||||
| static int | ||||
| spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, | ||||
|     const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg) | ||||
| { | ||||
| 	(void) zilog, (void) dnp; | ||||
| 
 | ||||
| 	if (zb->zb_level == ZB_DNODE_LEVEL || BP_IS_HOLE(bp) || | ||||
| 	    BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp)) | ||||
| 		return (0); | ||||
| @ -2317,10 +2317,11 @@ spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, | ||||
| 	return (0); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| verify_dataset_name_len(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg) | ||||
| { | ||||
| 	(void) dp, (void) arg; | ||||
| 
 | ||||
| 	if (dsl_dataset_namelen(ds) >= ZFS_MAX_DATASET_NAME_LEN) | ||||
| 		return (SET_ERROR(ENAMETOOLONG)); | ||||
| 
 | ||||
| @ -2450,10 +2451,10 @@ spa_livelist_delete_check(spa_t *spa) | ||||
| 	return (spa->spa_livelists_to_delete != 0); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static boolean_t | ||||
| spa_livelist_delete_cb_check(void *arg, zthr_t *z) | ||||
| { | ||||
| 	(void) z; | ||||
| 	spa_t *spa = arg; | ||||
| 	return (spa_livelist_delete_check(spa)); | ||||
| } | ||||
| @ -2545,7 +2546,6 @@ livelist_delete_sync(void *arg, dmu_tx_t *tx) | ||||
|  * be freed. Then, call a synctask which performs the actual frees and updates | ||||
|  * the pool-wide livelist data. | ||||
|  */ | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| spa_livelist_delete_cb(void *arg, zthr_t *z) | ||||
| { | ||||
| @ -2791,7 +2791,6 @@ spa_livelist_condense_cb(void *arg, zthr_t *t) | ||||
| 		zfs_livelist_condense_zthr_cancel++; | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| /*
 | ||||
|  * Check that there is something to condense but that a condense is not | ||||
|  * already in progress and that condensing has not been cancelled. | ||||
| @ -2799,6 +2798,7 @@ spa_livelist_condense_cb(void *arg, zthr_t *t) | ||||
| static boolean_t | ||||
| spa_livelist_condense_cb_check(void *arg, zthr_t *z) | ||||
| { | ||||
| 	(void) z; | ||||
| 	spa_t *spa = arg; | ||||
| 	if ((spa->spa_to_condense.ds != NULL) && | ||||
| 	    (spa->spa_to_condense.syncing == B_FALSE) && | ||||
| @ -9474,6 +9474,7 @@ spa_upgrade(spa_t *spa, uint64_t version) | ||||
| boolean_t | ||||
| spa_has_spare(spa_t *spa, uint64_t guid) | ||||
| { | ||||
| 	(void) spa; | ||||
| 	int i; | ||||
| 	uint64_t spareguid; | ||||
| 	spa_aux_vdev_t *sav = &spa->spa_spares; | ||||
| @ -9826,6 +9827,8 @@ spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name) | ||||
| 		ev = kmem_alloc(sizeof (sysevent_t), KM_SLEEP); | ||||
| 		ev->resource = resource; | ||||
| 	} | ||||
| #else | ||||
| 	(void) spa, (void) vd, (void) hist_nvl, (void) name; | ||||
| #endif | ||||
| 	return (ev); | ||||
| } | ||||
| @ -9838,6 +9841,8 @@ spa_event_post(sysevent_t *ev) | ||||
| 		zfs_zevent_post(ev->resource, NULL, zfs_zevent_post_cb); | ||||
| 		kmem_free(ev, sizeof (*ev)); | ||||
| 	} | ||||
| #else | ||||
| 	(void) ev; | ||||
| #endif | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -380,10 +380,10 @@ spa_checkpoint_discard_is_done(spa_t *spa) | ||||
| 	return (B_TRUE); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| boolean_t | ||||
| spa_checkpoint_discard_thread_check(void *arg, zthr_t *zthr) | ||||
| { | ||||
| 	(void) zthr; | ||||
| 	spa_t *spa = arg; | ||||
| 
 | ||||
| 	if (!spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) | ||||
| @ -450,10 +450,10 @@ spa_checkpoint_discard_thread(void *arg, zthr_t *zthr) | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| spa_checkpoint_check(void *arg, dmu_tx_t *tx) | ||||
| { | ||||
| 	(void) arg; | ||||
| 	spa_t *spa = dmu_tx_pool(tx)->dp_spa; | ||||
| 
 | ||||
| 	if (!spa_feature_is_enabled(spa, SPA_FEATURE_POOL_CHECKPOINT)) | ||||
| @ -474,10 +474,10 @@ spa_checkpoint_check(void *arg, dmu_tx_t *tx) | ||||
| 	return (0); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| spa_checkpoint_sync(void *arg, dmu_tx_t *tx) | ||||
| { | ||||
| 	(void) arg; | ||||
| 	dsl_pool_t *dp = dmu_tx_pool(tx); | ||||
| 	spa_t *spa = dp->dp_spa; | ||||
| 	uberblock_t checkpoint = spa->spa_ubsync; | ||||
| @ -571,10 +571,10 @@ spa_checkpoint(const char *pool) | ||||
| 	return (error); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| spa_checkpoint_discard_check(void *arg, dmu_tx_t *tx) | ||||
| { | ||||
| 	(void) arg; | ||||
| 	spa_t *spa = dmu_tx_pool(tx)->dp_spa; | ||||
| 
 | ||||
| 	if (!spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) | ||||
| @ -589,10 +589,10 @@ spa_checkpoint_discard_check(void *arg, dmu_tx_t *tx) | ||||
| 	return (0); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| spa_checkpoint_discard_sync(void *arg, dmu_tx_t *tx) | ||||
| { | ||||
| 	(void) arg; | ||||
| 	spa_t *spa = dmu_tx_pool(tx)->dp_spa; | ||||
| 
 | ||||
| 	VERIFY0(zap_remove(spa_meta_objset(spa), DMU_POOL_DIRECTORY_OBJECT, | ||||
|  | ||||
| @ -252,6 +252,8 @@ spa_get_errlog(spa_t *spa, void *uaddr, size_t *count) | ||||
| 	mutex_exit(&spa->spa_errlist_lock); | ||||
| 
 | ||||
| 	mutex_exit(&spa->spa_errlog_lock); | ||||
| #else | ||||
| 	(void) spa, (void) uaddr, (void) count; | ||||
| #endif | ||||
| 
 | ||||
| 	return (ret); | ||||
|  | ||||
| @ -497,6 +497,7 @@ spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw) | ||||
| void | ||||
| spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw) | ||||
| { | ||||
| 	(void) tag; | ||||
| 	int wlocks_held = 0; | ||||
| 
 | ||||
| 	ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY); | ||||
| @ -530,6 +531,7 @@ spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw) | ||||
| void | ||||
| spa_config_exit(spa_t *spa, int locks, const void *tag) | ||||
| { | ||||
| 	(void) tag; | ||||
| 	for (int i = SCL_LOCKS - 1; i >= 0; i--) { | ||||
| 		spa_config_lock_t *scl = &spa->spa_config_lock[i]; | ||||
| 		if (!(locks & (1 << i))) | ||||
|  | ||||
| @ -262,11 +262,12 @@ vdev_get_mg(vdev_t *vd, metaslab_class_t *mc) | ||||
| 		return (vd->vdev_mg); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| void | ||||
| vdev_default_xlate(vdev_t *vd, const range_seg64_t *logical_rs, | ||||
|     range_seg64_t *physical_rs, range_seg64_t *remain_rs) | ||||
| { | ||||
| 	(void) vd, (void) remain_rs; | ||||
| 
 | ||||
| 	physical_rs->rs_start = logical_rs->rs_start; | ||||
| 	physical_rs->rs_end = logical_rs->rs_end; | ||||
| } | ||||
| @ -1768,6 +1769,7 @@ vdev_uses_zvols(vdev_t *vd) | ||||
| static boolean_t | ||||
| vdev_default_open_children_func(vdev_t *vd) | ||||
| { | ||||
| 	(void) vd; | ||||
| 	return (B_TRUE); | ||||
| } | ||||
| 
 | ||||
| @ -2849,6 +2851,8 @@ boolean_t | ||||
| vdev_default_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize, | ||||
|     uint64_t phys_birth) | ||||
| { | ||||
| 	(void) dva, (void) psize; | ||||
| 
 | ||||
| 	/* Set by sequential resilver. */ | ||||
| 	if (phys_birth == TXG_UNKNOWN) | ||||
| 		return (B_TRUE); | ||||
| @ -4291,6 +4295,8 @@ vdev_get_child_stat(vdev_t *cvd, vdev_stat_t *vs, vdev_stat_t *cvs) | ||||
| static void | ||||
| vdev_get_child_stat_ex(vdev_t *cvd, vdev_stat_ex_t *vsx, vdev_stat_ex_t *cvsx) | ||||
| { | ||||
| 	(void) cvd; | ||||
| 
 | ||||
| 	int t, b; | ||||
| 	for (t = 0; t < ZIO_TYPES; t++) { | ||||
| 		for (b = 0; b < ARRAY_SIZE(vsx->vsx_disk_histo[0]); b++) | ||||
| @ -4727,6 +4733,7 @@ void | ||||
| vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta, | ||||
|     int64_t space_delta) | ||||
| { | ||||
| 	(void) defer_delta; | ||||
| 	int64_t dspace_delta; | ||||
| 	spa_t *spa = vd->vdev_spa; | ||||
| 	vdev_t *rvd = spa->spa_root_vdev; | ||||
|  | ||||
| @ -2201,6 +2201,7 @@ vdev_draid_config_generate(vdev_t *vd, nvlist_t *nv) | ||||
| static int | ||||
| vdev_draid_init(spa_t *spa, nvlist_t *nv, void **tsd) | ||||
| { | ||||
| 	(void) spa; | ||||
| 	uint64_t ndata, nparity, nspares, ngroups; | ||||
| 	int error; | ||||
| 
 | ||||
| @ -2429,7 +2430,6 @@ vdev_draid_spare_get_child(vdev_t *vd, uint64_t physical_offset) | ||||
| 	return (cvd); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| vdev_draid_spare_close(vdev_t *vd) | ||||
| { | ||||
| @ -2688,10 +2688,10 @@ vdev_draid_spare_io_start(zio_t *zio) | ||||
| 	zio_execute(zio); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| vdev_draid_spare_io_done(zio_t *zio) | ||||
| { | ||||
| 	(void) zio; | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  | ||||
| @ -637,16 +637,15 @@ spa_condense_indirect_generate_new_mapping(vdev_t *vd, | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static boolean_t | ||||
| spa_condense_indirect_thread_check(void *arg, zthr_t *zthr) | ||||
| { | ||||
| 	(void) zthr; | ||||
| 	spa_t *spa = arg; | ||||
| 
 | ||||
| 	return (spa->spa_condensing_indirect != NULL); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| spa_condense_indirect_thread(void *arg, zthr_t *zthr) | ||||
| { | ||||
| @ -941,13 +940,12 @@ vdev_obsolete_counts_are_precise(vdev_t *vd, boolean_t *are_precise) | ||||
| 	return (error); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| vdev_indirect_close(vdev_t *vd) | ||||
| { | ||||
| 	(void) vd; | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| vdev_indirect_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize, | ||||
|     uint64_t *logical_ashift, uint64_t *physical_ashift) | ||||
|  | ||||
| @ -255,10 +255,11 @@ vdev_initialize_write(vdev_t *vd, uint64_t start, uint64_t size, abd_t *data) | ||||
|  * divisible by sizeof (uint64_t), and buf must be 8-byte aligned. The ABD | ||||
|  * allocation will guarantee these for us. | ||||
|  */ | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| vdev_initialize_block_fill(void *buf, size_t len, void *unused) | ||||
| { | ||||
| 	(void) unused; | ||||
| 
 | ||||
| 	ASSERT0(len % sizeof (uint64_t)); | ||||
| #ifdef _ILP32 | ||||
| 	for (uint64_t i = 0; i < len; i += sizeof (uint32_t)) { | ||||
| @ -624,6 +625,7 @@ vdev_initialize_stop_wait_impl(vdev_t *vd) | ||||
| void | ||||
| vdev_initialize_stop_wait(spa_t *spa, list_t *vd_list) | ||||
| { | ||||
| 	(void) spa; | ||||
| 	vdev_t *vd; | ||||
| 
 | ||||
| 	ASSERT(MUTEX_HELD(&spa_namespace_lock)); | ||||
|  | ||||
| @ -880,6 +880,8 @@ static uint64_t | ||||
| vdev_mirror_rebuild_asize(vdev_t *vd, uint64_t start, uint64_t asize, | ||||
|     uint64_t max_segment) | ||||
| { | ||||
| 	(void) start; | ||||
| 
 | ||||
| 	uint64_t psize = MIN(P2ROUNDUP(max_segment, 1 << vd->vdev_ashift), | ||||
| 	    SPA_MAXBLOCKSIZE); | ||||
| 
 | ||||
|  | ||||
| @ -42,7 +42,6 @@ | ||||
| #include <sys/fs/zfs.h> | ||||
| #include <sys/zio.h> | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| vdev_missing_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize, | ||||
|     uint64_t *ashift, uint64_t *pshift) | ||||
| @ -53,6 +52,7 @@ vdev_missing_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize, | ||||
| 	 * VDEV_AUX_BAD_GUID_SUM.  So we pretend to succeed, knowing that we | ||||
| 	 * will fail the GUID sum check before ever trying to open the pool. | ||||
| 	 */ | ||||
| 	(void) vd; | ||||
| 	*psize = 0; | ||||
| 	*max_psize = 0; | ||||
| 	*ashift = 0; | ||||
| @ -60,13 +60,12 @@ vdev_missing_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize, | ||||
| 	return (0); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| vdev_missing_close(vdev_t *vd) | ||||
| { | ||||
| 	(void) vd; | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| vdev_missing_io_start(zio_t *zio) | ||||
| { | ||||
| @ -74,10 +73,10 @@ vdev_missing_io_start(zio_t *zio) | ||||
| 	zio_execute(zio); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| vdev_missing_io_done(zio_t *zio) | ||||
| { | ||||
| 	(void) zio; | ||||
| } | ||||
| 
 | ||||
| vdev_ops_t vdev_missing_ops = { | ||||
|  | ||||
| @ -545,10 +545,10 @@ vdev_raidz_generate_parity(raidz_map_t *rm) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| vdev_raidz_reconst_p_func(void *dbuf, void *sbuf, size_t size, void *private) | ||||
| { | ||||
| 	(void) private; | ||||
| 	uint64_t *dst = dbuf; | ||||
| 	uint64_t *src = sbuf; | ||||
| 	int cnt = size / sizeof (src[0]); | ||||
| @ -560,11 +560,11 @@ vdev_raidz_reconst_p_func(void *dbuf, void *sbuf, size_t size, void *private) | ||||
| 	return (0); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| vdev_raidz_reconst_q_pre_func(void *dbuf, void *sbuf, size_t size, | ||||
|     void *private) | ||||
| { | ||||
| 	(void) private; | ||||
| 	uint64_t *dst = dbuf; | ||||
| 	uint64_t *src = sbuf; | ||||
| 	uint64_t mask; | ||||
| @ -578,10 +578,10 @@ vdev_raidz_reconst_q_pre_func(void *dbuf, void *sbuf, size_t size, | ||||
| 	return (0); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| vdev_raidz_reconst_q_pre_tail_func(void *buf, size_t size, void *private) | ||||
| { | ||||
| 	(void) private; | ||||
| 	uint64_t *dst = buf; | ||||
| 	uint64_t mask; | ||||
| 	int cnt = size / sizeof (dst[0]); | ||||
| @ -2404,6 +2404,8 @@ static void | ||||
| vdev_raidz_xlate(vdev_t *cvd, const range_seg64_t *logical_rs, | ||||
|     range_seg64_t *physical_rs, range_seg64_t *remain_rs) | ||||
| { | ||||
| 	(void) remain_rs; | ||||
| 
 | ||||
| 	vdev_t *raidvd = cvd->vdev_parent; | ||||
| 	ASSERT(raidvd->vdev_ops == &vdev_raidz_ops); | ||||
| 
 | ||||
|  | ||||
| @ -1623,10 +1623,10 @@ spa_vdev_remove_suspend(spa_t *spa) | ||||
| 	mutex_exit(&svr->svr_lock); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| spa_vdev_remove_cancel_check(void *arg, dmu_tx_t *tx) | ||||
| { | ||||
| 	(void) arg; | ||||
| 	spa_t *spa = dmu_tx_pool(tx)->dp_spa; | ||||
| 
 | ||||
| 	if (spa->spa_vdev_removal == NULL) | ||||
| @ -1638,10 +1638,10 @@ spa_vdev_remove_cancel_check(void *arg, dmu_tx_t *tx) | ||||
|  * Cancel a removal by freeing all entries from the partial mapping | ||||
|  * and marking the vdev as no longer being removing. | ||||
|  */ | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| spa_vdev_remove_cancel_sync(void *arg, dmu_tx_t *tx) | ||||
| { | ||||
| 	(void) arg; | ||||
| 	spa_t *spa = dmu_tx_pool(tx)->dp_spa; | ||||
| 	spa_vdev_removal_t *svr = spa->spa_vdev_removal; | ||||
| 	vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id); | ||||
|  | ||||
| @ -1003,6 +1003,7 @@ vdev_trim_stop_wait_impl(vdev_t *vd) | ||||
| void | ||||
| vdev_trim_stop_wait(spa_t *spa, list_t *vd_list) | ||||
| { | ||||
| 	(void) spa; | ||||
| 	vdev_t *vd; | ||||
| 
 | ||||
| 	ASSERT(MUTEX_HELD(&spa_namespace_lock)); | ||||
|  | ||||
| @ -769,10 +769,10 @@ zcp_lua_alloc(void *ud, void *ptr, size_t osize, size_t nsize) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| zcp_lua_counthook(lua_State *state, lua_Debug *ar) | ||||
| { | ||||
| 	(void) ar; | ||||
| 	lua_getfield(state, LUA_REGISTRYINDEX, ZCP_RUN_INFO_KEY); | ||||
| 	zcp_run_info_t *ri = lua_touserdata(state, -1); | ||||
| 
 | ||||
| @ -974,10 +974,10 @@ zcp_pool_error(zcp_run_info_t *ri, const char *poolname) | ||||
|  * The txg_wait_synced_sig will continue to wait for the txg to complete | ||||
|  * after calling this callback. | ||||
|  */ | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| zcp_eval_sig(void *arg, dmu_tx_t *tx) | ||||
| { | ||||
| 	(void) tx; | ||||
| 	zcp_run_info_t *ri = arg; | ||||
| 
 | ||||
| 	ri->zri_canceled = B_TRUE; | ||||
|  | ||||
| @ -129,10 +129,10 @@ static zcp_synctask_info_t zcp_synctask_destroy_info = { | ||||
| 	.blocks_modified = 0 | ||||
| }; | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| zcp_synctask_destroy(lua_State *state, boolean_t sync, nvlist_t *err_details) | ||||
| { | ||||
| 	(void) err_details; | ||||
| 	int err; | ||||
| 	const char *dsname = lua_tostring(state, 1); | ||||
| 
 | ||||
| @ -251,10 +251,10 @@ static zcp_synctask_info_t zcp_synctask_snapshot_info = { | ||||
| 	.blocks_modified = 3 | ||||
| }; | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| zcp_synctask_snapshot(lua_State *state, boolean_t sync, nvlist_t *err_details) | ||||
| { | ||||
| 	(void) err_details; | ||||
| 	int err; | ||||
| 	dsl_dataset_snapshot_arg_t ddsa = { 0 }; | ||||
| 	const char *dsname = lua_tostring(state, 1); | ||||
| @ -354,6 +354,7 @@ static int | ||||
| zcp_synctask_inherit_prop(lua_State *state, boolean_t sync, | ||||
|     nvlist_t *err_details) | ||||
| { | ||||
| 	(void) err_details; | ||||
| 	int err; | ||||
| 	zcp_inherit_prop_arg_t zipa = { 0 }; | ||||
| 	dsl_props_set_arg_t *dpsa = &zipa.zipa_dpsa; | ||||
| @ -396,10 +397,10 @@ static zcp_synctask_info_t zcp_synctask_bookmark_info = { | ||||
| 	.blocks_modified = 1, | ||||
| }; | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| zcp_synctask_bookmark(lua_State *state, boolean_t sync, nvlist_t *err_details) | ||||
| { | ||||
| 	(void) err_details; | ||||
| 	int err; | ||||
| 	const char *source = lua_tostring(state, 1); | ||||
| 	const char *new = lua_tostring(state, 2); | ||||
| @ -443,6 +444,7 @@ static zcp_synctask_info_t zcp_synctask_set_prop_info = { | ||||
| static int | ||||
| zcp_synctask_set_prop(lua_State *state, boolean_t sync, nvlist_t *err_details) | ||||
| { | ||||
| 	(void) err_details; | ||||
| 	int err; | ||||
| 	zcp_set_prop_arg_t args = { 0 }; | ||||
| 
 | ||||
|  | ||||
| @ -205,7 +205,6 @@ static void zfs_ereport_schedule_cleaner(void); | ||||
| /*
 | ||||
|  * background task to clean stale recent event nodes. | ||||
|  */ | ||||
| /*ARGSUSED*/ | ||||
| static void | ||||
| zfs_ereport_cleaner(void *arg) | ||||
| { | ||||
| @ -992,10 +991,10 @@ annotate_ecksum(nvlist_t *ereport, zio_bad_cksum_t *info, | ||||
| 	return (eip); | ||||
| } | ||||
| #else | ||||
| /*ARGSUSED*/ | ||||
| void | ||||
| zfs_ereport_clear(spa_t *spa, vdev_t *vd) | ||||
| { | ||||
| 	(void) spa, (void) vd; | ||||
| } | ||||
| #endif | ||||
| 
 | ||||
| @ -1072,6 +1071,8 @@ zfs_ereport_is_valid(const char *subclass, spa_t *spa, vdev_t *vd, zio_t *zio) | ||||
| 	    (zio != NULL) && (!zio->io_timestamp)) { | ||||
| 		return (B_FALSE); | ||||
| 	} | ||||
| #else | ||||
| 	(void) subclass, (void) spa, (void) vd, (void) zio; | ||||
| #endif | ||||
| 	return (B_TRUE); | ||||
| } | ||||
| @ -1112,6 +1113,9 @@ zfs_ereport_post(const char *subclass, spa_t *spa, vdev_t *vd, | ||||
| 
 | ||||
| 	/* Cleanup is handled by the callback function */ | ||||
| 	rc = zfs_zevent_post(ereport, detector, zfs_zevent_post_cb); | ||||
| #else | ||||
| 	(void) subclass, (void) spa, (void) vd, (void) zb, (void) zio, | ||||
| 	    (void) state; | ||||
| #endif | ||||
| 	return (rc); | ||||
| } | ||||
| @ -1141,6 +1145,8 @@ zfs_ereport_start_checksum(spa_t *spa, vdev_t *vd, const zbookmark_phys_t *zb, | ||||
| 
 | ||||
| 	if (zfs_is_ratelimiting_event(FM_EREPORT_ZFS_CHECKSUM, vd)) | ||||
| 		return (SET_ERROR(EBUSY)); | ||||
| #else | ||||
| 	(void) zb, (void) offset; | ||||
| #endif | ||||
| 
 | ||||
| 	report = kmem_zalloc(sizeof (*report), KM_SLEEP); | ||||
| @ -1193,6 +1199,9 @@ zfs_ereport_finish_checksum(zio_cksum_report_t *report, const abd_t *good_data, | ||||
| 	report->zcr_ereport = report->zcr_detector = NULL; | ||||
| 	if (info != NULL) | ||||
| 		kmem_free(info, sizeof (*info)); | ||||
| #else | ||||
| 	(void) report, (void) good_data, (void) bad_data, | ||||
| 	    (void) drop_if_identical; | ||||
| #endif | ||||
| } | ||||
| 
 | ||||
| @ -1257,6 +1266,9 @@ zfs_ereport_post_checksum(spa_t *spa, vdev_t *vd, const zbookmark_phys_t *zb, | ||||
| 		rc = zfs_zevent_post(ereport, detector, zfs_zevent_post_cb); | ||||
| 		kmem_free(info, sizeof (*info)); | ||||
| 	} | ||||
| #else | ||||
| 	(void) spa, (void) vd, (void) zb, (void) zio, (void) offset, | ||||
| 	    (void) length, (void) good_data, (void) bad_data, (void) zbc; | ||||
| #endif | ||||
| 	return (rc); | ||||
| } | ||||
| @ -1321,7 +1333,8 @@ zfs_event_create(spa_t *spa, vdev_t *vd, const char *type, const char *name, | ||||
| 		while ((elem = nvlist_next_nvpair(aux, elem)) != NULL) | ||||
| 			(void) nvlist_add_nvpair(resource, elem); | ||||
| 	} | ||||
| 
 | ||||
| #else | ||||
| 	(void) spa, (void) vd, (void) type, (void) name, (void) aux; | ||||
| #endif | ||||
| 	return (resource); | ||||
| } | ||||
| @ -1336,6 +1349,8 @@ zfs_post_common(spa_t *spa, vdev_t *vd, const char *type, const char *name, | ||||
| 	resource = zfs_event_create(spa, vd, type, name, aux); | ||||
| 	if (resource) | ||||
| 		zfs_zevent_post(resource, NULL, zfs_zevent_post_cb); | ||||
| #else | ||||
| 	(void) spa, (void) vd, (void) type, (void) name, (void) aux; | ||||
| #endif | ||||
| } | ||||
| 
 | ||||
| @ -1399,6 +1414,8 @@ zfs_post_state_change(spa_t *spa, vdev_t *vd, uint64_t laststate) | ||||
| 
 | ||||
| 	if (aux) | ||||
| 		fm_nvlist_destroy(aux, FM_NVA_FREE); | ||||
| #else | ||||
| 	(void) spa, (void) vd, (void) laststate; | ||||
| #endif | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -432,11 +432,11 @@ done: | ||||
| 	return (error); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| zil_clear_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx, | ||||
|     uint64_t first_txg) | ||||
| { | ||||
| 	(void) tx; | ||||
| 	ASSERT(!BP_IS_HOLE(bp)); | ||||
| 
 | ||||
| 	/*
 | ||||
| @ -455,11 +455,11 @@ zil_clear_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx, | ||||
| 	return (0); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| zil_noop_log_record(zilog_t *zilog, const lr_t *lrc, void *tx, | ||||
|     uint64_t first_txg) | ||||
| { | ||||
| 	(void) zilog, (void) lrc, (void) tx, (void) first_txg; | ||||
| 	return (0); | ||||
| } | ||||
| 
 | ||||
| @ -507,11 +507,12 @@ zil_claim_log_record(zilog_t *zilog, const lr_t *lrc, void *tx, | ||||
| 	return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg)); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| zil_free_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx, | ||||
|     uint64_t claim_txg) | ||||
| { | ||||
| 	(void) claim_txg; | ||||
| 
 | ||||
| 	zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp); | ||||
| 
 | ||||
| 	return (0); | ||||
| @ -911,10 +912,10 @@ zil_claim(dsl_pool_t *dp, dsl_dataset_t *ds, void *txarg) | ||||
|  * Checksum errors are ok as they indicate the end of the chain. | ||||
|  * Any other error (no device or read failure) returns an error. | ||||
|  */ | ||||
| /* ARGSUSED */ | ||||
| int | ||||
| zil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx) | ||||
| { | ||||
| 	(void) dp; | ||||
| 	zilog_t *zilog; | ||||
| 	objset_t *os; | ||||
| 	blkptr_t *bp; | ||||
| @ -3127,10 +3128,10 @@ zil_sync(zilog_t *zilog, dmu_tx_t *tx) | ||||
| 	mutex_exit(&zilog->zl_lock); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| zil_lwb_cons(void *vbuf, void *unused, int kmflag) | ||||
| { | ||||
| 	(void) unused, (void) kmflag; | ||||
| 	lwb_t *lwb = vbuf; | ||||
| 	list_create(&lwb->lwb_itxs, sizeof (itx_t), offsetof(itx_t, itx_node)); | ||||
| 	list_create(&lwb->lwb_waiters, sizeof (zil_commit_waiter_t), | ||||
| @ -3141,10 +3142,10 @@ zil_lwb_cons(void *vbuf, void *unused, int kmflag) | ||||
| 	return (0); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static void | ||||
| zil_lwb_dest(void *vbuf, void *unused) | ||||
| { | ||||
| 	(void) unused; | ||||
| 	lwb_t *lwb = vbuf; | ||||
| 	mutex_destroy(&lwb->lwb_vdev_lock); | ||||
| 	avl_destroy(&lwb->lwb_vdev_tree); | ||||
| @ -3615,10 +3616,11 @@ zil_replay_log_record(zilog_t *zilog, const lr_t *lr, void *zra, | ||||
| 	return (0); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static int | ||||
| zil_incr_blks(zilog_t *zilog, const blkptr_t *bp, void *arg, uint64_t claim_txg) | ||||
| { | ||||
| 	(void) bp, (void) arg, (void) claim_txg; | ||||
| 
 | ||||
| 	zilog->zl_replay_blks++; | ||||
| 
 | ||||
| 	return (0); | ||||
| @ -3677,13 +3679,12 @@ zil_replaying(zilog_t *zilog, dmu_tx_t *tx) | ||||
| 	return (B_FALSE); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| int | ||||
| zil_reset(const char *osname, void *arg) | ||||
| { | ||||
| 	int error; | ||||
| 	(void) arg; | ||||
| 
 | ||||
| 	error = zil_suspend(osname, NULL); | ||||
| 	int error = zil_suspend(osname, NULL); | ||||
| 	/* EACCES means crypto key not loaded */ | ||||
| 	if ((error == EACCES) || (error == EBUSY)) | ||||
| 		return (SET_ERROR(error)); | ||||
|  | ||||
| @ -369,6 +369,7 @@ zio_data_buf_free(void *buf, size_t size) | ||||
| static void | ||||
| zio_abd_free(void *abd, size_t size) | ||||
| { | ||||
| 	(void) size; | ||||
| 	abd_free((abd_t *)abd); | ||||
| } | ||||
| 
 | ||||
| @ -1072,6 +1073,7 @@ zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp, boolean_t config_held, | ||||
| boolean_t | ||||
| zfs_dva_valid(spa_t *spa, const dva_t *dva, const blkptr_t *bp) | ||||
| { | ||||
| 	(void) bp; | ||||
| 	uint64_t vdevid = DVA_GET_VDEV(dva); | ||||
| 
 | ||||
| 	if (vdevid >= spa->spa_root_vdev->vdev_children) | ||||
| @ -2143,6 +2145,8 @@ zio_execute_stack_check(zio_t *zio) | ||||
| 	    !zio_taskq_member(zio, ZIO_TASKQ_ISSUE) && | ||||
| 	    !zio_taskq_member(zio, ZIO_TASKQ_ISSUE_HIGH)) | ||||
| 		return (B_TRUE); | ||||
| #else | ||||
| 	(void) zio; | ||||
| #endif /* HAVE_LARGE_STACKS */ | ||||
| 
 | ||||
| 	return (B_FALSE); | ||||
| @ -2555,11 +2559,12 @@ zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, | ||||
| 	return (zio); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static zio_t * | ||||
| zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, | ||||
|     uint64_t offset) | ||||
| { | ||||
| 	(void) gn, (void) data, (void) offset; | ||||
| 
 | ||||
| 	zio_t *zio = zio_free_sync(pio, pio->io_spa, pio->io_txg, bp, | ||||
| 	    ZIO_GANG_CHILD_FLAGS(pio)); | ||||
| 	if (zio == NULL) { | ||||
| @ -2569,11 +2574,11 @@ zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, | ||||
| 	return (zio); | ||||
| } | ||||
| 
 | ||||
| /* ARGSUSED */ | ||||
| static zio_t * | ||||
| zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, | ||||
|     uint64_t offset) | ||||
| { | ||||
| 	(void) gn, (void) data, (void) offset; | ||||
| 	return (zio_claim(pio, pio->io_spa, pio->io_txg, bp, | ||||
| 	    NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio))); | ||||
| } | ||||
| @ -3964,7 +3969,6 @@ zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr, | ||||
| 	zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE); | ||||
| } | ||||
| 
 | ||||
| /*ARGSUSED*/ | ||||
| void | ||||
| zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr) | ||||
| { | ||||
|  | ||||
| @ -91,29 +91,29 @@ | ||||
|  * invocation and passed to the checksum function. | ||||
|  */ | ||||
| 
 | ||||
| /*ARGSUSED*/ | ||||
| static void | ||||
| abd_checksum_off(abd_t *abd, uint64_t size, | ||||
|     const void *ctx_template, zio_cksum_t *zcp) | ||||
| { | ||||
| 	(void) abd, (void) size, (void) ctx_template; | ||||
| 	ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0); | ||||
| } | ||||
| 
 | ||||
| /*ARGSUSED*/ | ||||
| static void | ||||
| abd_fletcher_2_native(abd_t *abd, uint64_t size, | ||||
|     const void *ctx_template, zio_cksum_t *zcp) | ||||
| { | ||||
| 	(void) ctx_template; | ||||
| 	fletcher_init(zcp); | ||||
| 	(void) abd_iterate_func(abd, 0, size, | ||||
| 	    fletcher_2_incremental_native, zcp); | ||||
| } | ||||
| 
 | ||||
| /*ARGSUSED*/ | ||||
| static void | ||||
| abd_fletcher_2_byteswap(abd_t *abd, uint64_t size, | ||||
|     const void *ctx_template, zio_cksum_t *zcp) | ||||
| { | ||||
| 	(void) ctx_template; | ||||
| 	fletcher_init(zcp); | ||||
| 	(void) abd_iterate_func(abd, 0, size, | ||||
| 	    fletcher_2_incremental_byteswap, zcp); | ||||
| @ -127,11 +127,11 @@ abd_fletcher_4_impl(abd_t *abd, uint64_t size, zio_abd_checksum_data_t *acdp) | ||||
| 	fletcher_4_abd_ops.acf_fini(acdp); | ||||
| } | ||||
| 
 | ||||
| /*ARGSUSED*/ | ||||
| void | ||||
| abd_fletcher_4_native(abd_t *abd, uint64_t size, | ||||
|     const void *ctx_template, zio_cksum_t *zcp) | ||||
| { | ||||
| 	(void) ctx_template; | ||||
| 	fletcher_4_ctx_t ctx; | ||||
| 
 | ||||
| 	zio_abd_checksum_data_t acd = { | ||||
| @ -144,11 +144,11 @@ abd_fletcher_4_native(abd_t *abd, uint64_t size, | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| /*ARGSUSED*/ | ||||
| void | ||||
| abd_fletcher_4_byteswap(abd_t *abd, uint64_t size, | ||||
|     const void *ctx_template, zio_cksum_t *zcp) | ||||
| { | ||||
| 	(void) ctx_template; | ||||
| 	fletcher_4_ctx_t ctx; | ||||
| 
 | ||||
| 	zio_abd_checksum_data_t acd = { | ||||
|  | ||||
| @ -74,6 +74,7 @@ uint8_t | ||||
| zio_complevel_select(spa_t *spa, enum zio_compress compress, uint8_t child, | ||||
|     uint8_t parent) | ||||
| { | ||||
| 	(void) spa; | ||||
| 	uint8_t result; | ||||
| 
 | ||||
| 	if (!ZIO_COMPRESS_HASLEVEL(compress)) | ||||
| @ -110,10 +111,11 @@ zio_compress_select(spa_t *spa, enum zio_compress child, | ||||
| 	return (result); | ||||
| } | ||||
| 
 | ||||
| /*ARGSUSED*/ | ||||
| static int | ||||
| zio_compress_zeroed_cb(void *data, size_t len, void *private) | ||||
| { | ||||
| 	(void) private; | ||||
| 
 | ||||
| 	uint64_t *end = (uint64_t *)((char *)data + len); | ||||
| 	for (uint64_t *word = (uint64_t *)data; word < end; word++) | ||||
| 		if (*word != 0) | ||||
|  | ||||