diff --git a/module/zfs/arc.c b/module/zfs/arc.c
index 04d275dd8..3484fff3b 100644
--- a/module/zfs/arc.c
+++ b/module/zfs/arc.c
@@ -7454,9 +7454,10 @@ arc_state_multilist_index_func(multilist_t *ml, void *obj)
 	 * Also, the low order bits of the hash value are thought to be
 	 * distributed evenly. Otherwise, in the case that the multilist
 	 * has a power of two number of sublists, each sublists' usage
-	 * would not be evenly distributed.
+	 * would not be evenly distributed. In this context full 64bit
+	 * division would be a waste of time, so limit it to 32 bits.
 	 */
-	return (buf_hash(hdr->b_spa, &hdr->b_dva, hdr->b_birth) %
+	return ((unsigned int)buf_hash(hdr->b_spa, &hdr->b_dva, hdr->b_birth) %
 	    multilist_get_num_sublists(ml));
 }
 
diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c
index f9bcd9313..9ce091b80 100644
--- a/module/zfs/dbuf.c
+++ b/module/zfs/dbuf.c
@@ -622,9 +622,10 @@ dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
 	 * Also, the low order bits of the hash value are thought to be
 	 * distributed evenly. Otherwise, in the case that the multilist
 	 * has a power of two number of sublists, each sublists' usage
-	 * would not be evenly distributed.
+	 * would not be evenly distributed. In this context full 64bit
+	 * division would be a waste of time, so limit it to 32 bits.
 	 */
-	return (dbuf_hash(db->db_objset, db->db.db_object,
+	return ((unsigned int)dbuf_hash(db->db_objset, db->db.db_object,
 	    db->db_level, db->db_blkid) %
 	    multilist_get_num_sublists(ml));
 }
diff --git a/module/zfs/dmu_objset.c b/module/zfs/dmu_objset.c
index 22deee7f3..af107fb8a 100644
--- a/module/zfs/dmu_objset.c
+++ b/module/zfs/dmu_objset.c
@@ -399,7 +399,15 @@ static unsigned int
 dnode_multilist_index_func(multilist_t *ml, void *obj)
 {
 	dnode_t *dn = obj;
-	return (dnode_hash(dn->dn_objset, dn->dn_object) %
+
+	/*
+	 * The low order bits of the hash value are thought to be
+	 * distributed evenly. Otherwise, in the case that the multilist
+	 * has a power of two number of sublists, each sublists' usage
+	 * would not be evenly distributed. In this context full 64bit
+	 * division would be a waste of time, so limit it to 32 bits.
+	 */
+	return ((unsigned int)dnode_hash(dn->dn_objset, dn->dn_object) %
 	    multilist_get_num_sublists(ml));
 }
 
diff --git a/module/zfs/metaslab.c b/module/zfs/metaslab.c
index 92f51806a..23f3e2989 100644
--- a/module/zfs/metaslab.c
+++ b/module/zfs/metaslab.c
@@ -1874,7 +1874,12 @@ static unsigned int
 metaslab_idx_func(multilist_t *ml, void *arg)
 {
 	metaslab_t *msp = arg;
-	return (msp->ms_id % multilist_get_num_sublists(ml));
+
+	/*
+	 * ms_id values are allocated sequentially, so full 64bit
+	 * division would be a waste of time, so limit it to 32 bits.
+	 */
+	return ((unsigned int)msp->ms_id % multilist_get_num_sublists(ml));
 }
 
 uint64_t