config: remove HAVE_BLK_MQ

Sponsored-by: https://despairlabs.com/sponsor/
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Tony Hutter <hutter2@llnl.gov>
Reviewed-by: Tino Reichardt <milky-zfs@mcmilk.de>
Signed-off-by: Rob Norris <robn@despairlabs.com>
Closes #16479
Authored by Rob Norris, 2024-08-24 21:33:35 +10:00; committed by Tony Hutter
parent 56a82c6b6f
commit 9601eeea1c
4 changed files with 8 additions and 106 deletions
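
The same pattern repeats across all four files: each bio/request helper had an #ifdef HAVE_BLK_MQ arm for the blk-mq request path and an ASSERT-only fallback for kernels without blk-mq. Every kernel ZFS still supports provides blk-mq, so the configure check always passed and the fallback arms were dead code. A before/after sketch of the shape of the change, using io_is_flush() from the blkdev compat header below:

/* Before: request path guarded, fallback asserts rq is never set. */
static inline int
io_is_flush(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
	if (rq != NULL)
		return (req_op(rq) == REQ_OP_FLUSH);
#else
	ASSERT3P(rq, ==, NULL);
#endif
	return (bio_is_flush(bio));
}

/* After: blk-mq is assumed present on every supported kernel. */
static inline int
io_is_flush(struct bio *bio, struct request *rq)
{
	if (rq != NULL)
		return (req_op(rq) == REQ_OP_FLUSH);
	return (bio_is_flush(bio));
}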

config/kernel-blk-queue.m4

@@ -248,24 +248,7 @@ AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_MAX_SEGMENTS], [
 	])
 ])
 
-dnl #
-dnl # See if kernel supports block multi-queue and blk_status_t.
-dnl # blk_status_t represents the new status codes introduced in the 4.13
-dnl # kernel patch:
-dnl #
-dnl #	block: introduce new block status code type
-dnl #
-dnl # We do not currently support the "old" block multi-queue interfaces from
-dnl # prior kernels.
-dnl #
-AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_MQ], [
-	ZFS_LINUX_TEST_SRC([blk_mq], [
-		#include <linux/blk-mq.h>
-	], [
-		struct blk_mq_tag_set tag_set __attribute__ ((unused)) = {0};
-		(void) blk_mq_alloc_tag_set(&tag_set);
-		return BLK_STS_OK;
-	], [])
-
+AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_MQ_RQ_HCTX], [
 	ZFS_LINUX_TEST_SRC([blk_mq_rq_hctx], [
 		#include <linux/blk-mq.h>
 		#include <linux/blkdev.h>
@@ -276,11 +259,7 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_MQ], [
 	], [])
 ])
 
-AC_DEFUN([ZFS_AC_KERNEL_BLK_MQ], [
-	AC_MSG_CHECKING([whether block multiqueue with blk_status_t is available])
-	ZFS_LINUX_TEST_RESULT([blk_mq], [
-		AC_MSG_RESULT(yes)
-		AC_DEFINE(HAVE_BLK_MQ, 1, [block multiqueue is available])
-		AC_MSG_CHECKING([whether block multiqueue hardware context is cached in struct request])
-		ZFS_LINUX_TEST_RESULT([blk_mq_rq_hctx], [
-			AC_MSG_RESULT(yes)
+AC_DEFUN([ZFS_AC_KERNEL_BLK_MQ_RQ_HCTX], [
+	AC_MSG_CHECKING([whether block multiqueue hardware context is cached in struct request])
+	ZFS_LINUX_TEST_RESULT([blk_mq_rq_hctx], [
+		AC_MSG_RESULT(yes)
@@ -288,9 +267,6 @@ AC_DEFUN([ZFS_AC_KERNEL_BLK_MQ], [
-		], [
-			AC_MSG_RESULT(no)
-		])
-	], [
-		AC_MSG_RESULT(no)
-	])
+	], [
+		AC_MSG_RESULT(no)
+	])
 ])
 
 AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE], [
@@ -302,7 +278,7 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE], [
 	ZFS_AC_KERNEL_SRC_BLK_QUEUE_SECURE_ERASE
 	ZFS_AC_KERNEL_SRC_BLK_QUEUE_MAX_HW_SECTORS
 	ZFS_AC_KERNEL_SRC_BLK_QUEUE_MAX_SEGMENTS
-	ZFS_AC_KERNEL_SRC_BLK_MQ
+	ZFS_AC_KERNEL_SRC_BLK_MQ_RQ_HCTX
 ])
 
 AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE], [
@@ -314,5 +290,5 @@ AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE], [
 	ZFS_AC_KERNEL_BLK_QUEUE_SECURE_ERASE
 	ZFS_AC_KERNEL_BLK_QUEUE_MAX_HW_SECTORS
 	ZFS_AC_KERNEL_BLK_QUEUE_MAX_SEGMENTS
-	ZFS_AC_KERNEL_BLK_MQ
+	ZFS_AC_KERNEL_BLK_MQ_RQ_HCTX
 ])
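
For context, ZFS_LINUX_TEST_SRC registers a small C program that configure compiles against the target kernel's headers, and ZFS_LINUX_TEST_RESULT checks whether that compile succeeded; on success the macro body issues the AC_DEFINE. The deleted blk_mq probe amounted to the following C fragment (the conftest scaffolding around it is an assumption; the body is taken verbatim from the removed macro):

#include <linux/blk-mq.h>

/* Hypothetical conftest body generated from the removed blk_mq probe. */
static blk_status_t
probe_blk_mq(void)
{
	struct blk_mq_tag_set tag_set __attribute__ ((unused)) = {0};

	(void) blk_mq_alloc_tag_set(&tag_set);
	return (BLK_STS_OK);
}

If this fragment failed to compile, HAVE_BLK_MQ stayed undefined and the fallback arms below were built instead; since every supported kernel compiles it, the probe only cost configure time.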

include/os/linux/kernel/linux/blkdev_compat.h

@@ -35,11 +35,7 @@
 #include <linux/major.h>
 #include <linux/msdos_fs.h>	/* for SECTOR_* */
 #include <linux/bio.h>
-
-#ifdef HAVE_BLK_MQ
 #include <linux/blk-mq.h>
-#endif
 
 /*
  * 6.11 API
@@ -531,7 +527,6 @@ blk_generic_alloc_queue(make_request_fn make_request, int node_id)
 static inline int
 io_data_dir(struct bio *bio, struct request *rq)
 {
-#ifdef HAVE_BLK_MQ
 	if (rq != NULL) {
 		if (op_is_write(req_op(rq))) {
 			return (WRITE);
@@ -539,57 +534,38 @@ io_data_dir(struct bio *bio, struct request *rq)
 			return (READ);
 		}
 	}
-#else
-	ASSERT3P(rq, ==, NULL);
-#endif
 	return (bio_data_dir(bio));
 }
 
 static inline int
 io_is_flush(struct bio *bio, struct request *rq)
 {
-#ifdef HAVE_BLK_MQ
 	if (rq != NULL)
 		return (req_op(rq) == REQ_OP_FLUSH);
-#else
-	ASSERT3P(rq, ==, NULL);
-#endif
 	return (bio_is_flush(bio));
 }
 
 static inline int
 io_is_discard(struct bio *bio, struct request *rq)
 {
-#ifdef HAVE_BLK_MQ
 	if (rq != NULL)
 		return (req_op(rq) == REQ_OP_DISCARD);
-#else
-	ASSERT3P(rq, ==, NULL);
-#endif
 	return (bio_is_discard(bio));
 }
 
 static inline int
 io_is_secure_erase(struct bio *bio, struct request *rq)
 {
-#ifdef HAVE_BLK_MQ
 	if (rq != NULL)
 		return (req_op(rq) == REQ_OP_SECURE_ERASE);
-#else
-	ASSERT3P(rq, ==, NULL);
-#endif
 	return (bio_is_secure_erase(bio));
 }
 
 static inline int
 io_is_fua(struct bio *bio, struct request *rq)
 {
-#ifdef HAVE_BLK_MQ
 	if (rq != NULL)
 		return (rq->cmd_flags & REQ_FUA);
-#else
-	ASSERT3P(rq, ==, NULL);
-#endif
 	return (bio_is_fua(bio));
 }
 
@@ -597,36 +573,24 @@ io_is_fua(struct bio *bio, struct request *rq)
 static inline uint64_t
 io_offset(struct bio *bio, struct request *rq)
 {
-#ifdef HAVE_BLK_MQ
 	if (rq != NULL)
 		return (blk_rq_pos(rq) << 9);
-#else
-	ASSERT3P(rq, ==, NULL);
-#endif
 	return (BIO_BI_SECTOR(bio) << 9);
 }
 
 static inline uint64_t
 io_size(struct bio *bio, struct request *rq)
 {
-#ifdef HAVE_BLK_MQ
 	if (rq != NULL)
 		return (blk_rq_bytes(rq));
-#else
-	ASSERT3P(rq, ==, NULL);
-#endif
 	return (BIO_BI_SIZE(bio));
 }
 
 static inline int
 io_has_data(struct bio *bio, struct request *rq)
 {
-#ifdef HAVE_BLK_MQ
 	if (rq != NULL)
 		return (bio_has_data(rq->bio));
-#else
-	ASSERT3P(rq, ==, NULL);
-#endif
 	return (bio_has_data(bio));
 }
 
 #endif	/* _ZFS_BLKDEV_H */
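
All of these helpers follow one convention: exactly one of bio and rq is non-NULL, and the request path wins when rq is set. io_offset() also normalizes both sources to bytes (blk_rq_pos() and BIO_BI_SECTOR() return 512-byte sectors, hence the << 9). A usage sketch; handle_io() is illustrative, the real dispatch lives in zvol_request_impl():

/* Illustrative caller of the dual-path helpers above. */
static void
handle_io(struct bio *bio, struct request *rq)
{
	uint64_t offset = io_offset(bio, rq);	/* bytes, from either source */
	uint64_t size = io_size(bio, rq);	/* bytes, from either source */

	if (io_is_flush(bio, rq)) {
		/* flush carries no data to move */
		return;
	}
	/* ... read or write 'size' bytes at 'offset' ... */
	(void) offset;
	(void) size;
}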

module/os/linux/zfs/zfs_uio.c

@@ -161,7 +161,6 @@ zfs_uiomove_bvec_impl(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio)
 	return (0);
 }
 
-#ifdef HAVE_BLK_MQ
 static void
 zfs_copy_bvec(void *p, size_t skip, size_t cnt, zfs_uio_rw_t rw,
     struct bio_vec *bv)
@@ -253,17 +252,12 @@ zfs_uiomove_bvec_rq(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio)
 	}
 	return (0);
 }
-#endif
 
 static int
 zfs_uiomove_bvec(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio)
 {
-#ifdef HAVE_BLK_MQ
 	if (uio->rq != NULL)
 		return (zfs_uiomove_bvec_rq(p, n, rw, uio));
-#else
-	ASSERT3P(uio->rq, ==, NULL);
-#endif
 	return (zfs_uiomove_bvec_impl(p, n, rw, uio));
 }
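
Previously a zfs_uio_t backed by a struct request could only exist when HAVE_BLK_MQ was defined, which is why the non-blk-mq build merely asserted uio->rq was NULL. Now the dispatch is unconditional: a request-backed uio takes zfs_uiomove_bvec_rq(), which walks the request's bio_vec segments through zfs_copy_bvec(); everything else falls through to the original single-bio path. A sketch of the per-segment copy, assuming kmap_local_page() semantics (the real zfs_copy_bvec() may differ in detail):

/* Sketch only: copy 'cnt' bytes between 'p' and one bio_vec segment. */
static void
copy_bvec_sketch(void *p, size_t skip, size_t cnt, zfs_uio_rw_t rw,
    struct bio_vec *bv)
{
	void *paddr = kmap_local_page(bv->bv_page);

	if (rw == UIO_READ)
		memcpy(paddr + bv->bv_offset + skip, p, cnt);
	else
		memcpy(p, paddr + bv->bv_offset + skip, cnt);
	kunmap_local(paddr);
}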

module/os/linux/zfs/zvol_os.c

@@ -44,10 +44,7 @@
 #include <linux/blkdev_compat.h>
 #include <linux/task_io_accounting_ops.h>
 #include <linux/workqueue.h>
-#ifdef HAVE_BLK_MQ
 #include <linux/blk-mq.h>
-#endif
 
 static void zvol_request_impl(zvol_state_t *zv, struct bio *bio,
     struct request *rq, boolean_t force_sync);
@@ -68,7 +65,6 @@ static unsigned int zvol_open_timeout_ms = 1000;
 #endif
 
 static unsigned int zvol_threads = 0;
-#ifdef HAVE_BLK_MQ
 static unsigned int zvol_blk_mq_threads = 0;
 static unsigned int zvol_blk_mq_actual_threads;
 static boolean_t zvol_use_blk_mq = B_FALSE;
@@ -84,7 +80,6 @@ static boolean_t zvol_use_blk_mq = B_FALSE;
  * read and write tests to a zvol in an NVMe pool (with 16 CPUs).
  */
 static unsigned int zvol_blk_mq_blocks_per_thread = 8;
-#endif
 
 static unsigned int zvol_num_taskqs = 0;
@@ -96,7 +91,6 @@ static unsigned int zvol_num_taskqs = 0;
 /*
  * Finalize our BIO or request.
  */
-#ifdef HAVE_BLK_MQ
 #define	END_IO(zv, bio, rq, error)	do { \
 	if (bio) { \
 		bio->bi_status = errno_to_bi_status(-error); \
@@ -105,26 +99,16 @@ static unsigned int zvol_num_taskqs = 0;
 		blk_mq_end_request(rq, errno_to_bi_status(error)); \
 	} \
 } while (0)
-#else
-#define	END_IO(zv, bio, rq, error)	do { \
-	bio->bi_status = errno_to_bi_status(-error); \
-	bio_endio(bio); \
-} while (0)
-#endif
 
-#ifdef HAVE_BLK_MQ
 static unsigned int zvol_blk_mq_queue_depth = BLKDEV_DEFAULT_RQ;
 static unsigned int zvol_actual_blk_mq_queue_depth;
-#endif
 
 struct zvol_state_os {
 	struct gendisk *zvo_disk;	/* generic disk */
 	struct request_queue *zvo_queue;	/* request queue */
 	dev_t zvo_dev;			/* device id */
-#ifdef HAVE_BLK_MQ
 	struct blk_mq_tag_set tag_set;
-#endif
 
 	/* Set from the global 'zvol_use_blk_mq' at zvol load */
 	boolean_t use_blk_mq;
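
The surviving END_IO handles both completion paths in one macro: bio_endio() for BIO submissions and blk_mq_end_request() for blk-mq requests. Reassembled from the hunks above (the two lines at the hunk seam, the bio_endio() call and the else, are inferred from context):

#define	END_IO(zv, bio, rq, error)	do { \
	if (bio) { \
		bio->bi_status = errno_to_bi_status(-error); \
		bio_endio(bio); \
	} else { \
		blk_mq_end_request(rq, errno_to_bi_status(error)); \
	} \
} while (0)

Note the sign difference visible in the diff: the bio path negates the errno for errno_to_bi_status(), while the request path passes it through.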
@@ -169,8 +153,6 @@ zv_request_task_free(zv_request_task_t *task)
 	kmem_free(task, sizeof (*task));
 }
 
-#ifdef HAVE_BLK_MQ
-
 /*
  * This is called when a new block multiqueue request comes in. A request
  * contains one or more BIOs.
@@ -223,7 +205,6 @@ static int zvol_blk_mq_alloc_tag_set(zvol_state_t *zv)
 	return (blk_mq_alloc_tag_set(&zso->tag_set));
 }
-#endif /* HAVE_BLK_MQ */
 
 /*
  * Given a path, return TRUE if path is a ZVOL.
@@ -561,14 +542,12 @@ zvol_request_impl(zvol_state_t *zv, struct bio *bio, struct request *rq,
 	uint_t blk_mq_hw_queue = 0;
 	uint_t tq_idx;
 	uint_t taskq_hash;
-#ifdef HAVE_BLK_MQ
 	if (rq)
 #ifdef HAVE_BLK_MQ_RQ_HCTX
 		blk_mq_hw_queue = rq->mq_hctx->queue_num;
 #else
 		blk_mq_hw_queue =
 		    rq->q->queue_hw_ctx[rq->q->mq_map[rq->cpu]]->queue_num;
-#endif
 #endif
 	taskq_hash = cityhash4((uintptr_t)zv, offset >> ZVOL_TASKQ_OFFSET_SHIFT,
 	    blk_mq_hw_queue, 0);
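
This hunk is why HAVE_BLK_MQ_RQ_HCTX survives while HAVE_BLK_MQ goes away: on kernels that do not cache the hardware context in struct request, the queue number must still be looked up through the queue map. That number then feeds taskq selection, so I/O arriving on one hardware queue maps stably onto one taskq. A sketch of the selection (cityhash4 and ZVOL_TASKQ_OFFSET_SHIFT are the symbols used above; tqs_cnt is an assumed name for the taskq count):

/* Stable taskq choice per (zvol, offset bucket, hw queue). */
taskq_hash = cityhash4((uintptr_t)zv, offset >> ZVOL_TASKQ_OFFSET_SHIFT,
    blk_mq_hw_queue, 0);
tq_idx = taskq_hash % tqs_cnt;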
@@ -1174,7 +1153,6 @@ zvol_queue_limits_init(zvol_queue_limits_t *limits, zvol_state_t *zv,
 	 * the correct number of segments for the volblocksize and
 	 * number of chunks you want.
 	 */
-#ifdef HAVE_BLK_MQ
 	if (zvol_blk_mq_blocks_per_thread != 0) {
 		unsigned int chunks;
 		chunks = MIN(zvol_blk_mq_blocks_per_thread, UINT16_MAX);
@@ -1191,7 +1169,6 @@ zvol_queue_limits_init(zvol_queue_limits_t *limits, zvol_state_t *zv,
 			limits->zql_max_segment_size = UINT_MAX;
 		}
 	} else {
-#endif
 		limits->zql_max_segments = UINT16_MAX;
 		limits->zql_max_segment_size = UINT_MAX;
 	}
@@ -1304,7 +1281,6 @@ zvol_alloc_non_blk_mq(struct zvol_state_os *zso, zvol_queue_limits_t *limits)
 static int
 zvol_alloc_blk_mq(zvol_state_t *zv, zvol_queue_limits_t *limits)
 {
-#ifdef HAVE_BLK_MQ
 	struct zvol_state_os *zso = zv->zv_zso;
 
 	/* Allocate our blk-mq tag_set */
@@ -1351,7 +1327,6 @@ zvol_alloc_blk_mq(zvol_state_t *zv, zvol_queue_limits_t *limits)
 #endif
 	zvol_queue_limits_apply(limits, zso->zvo_queue);
-#endif
 
 	return (0);
 }
@@ -1387,9 +1362,7 @@ zvol_alloc(dev_t dev, const char *name, uint64_t volblocksize)
 	mutex_init(&zv->zv_state_lock, NULL, MUTEX_DEFAULT, NULL);
 	cv_init(&zv->zv_removing_cv, NULL, CV_DEFAULT, NULL);
 
-#ifdef HAVE_BLK_MQ
 	zv->zv_zso->use_blk_mq = zvol_use_blk_mq;
-#endif
 
 	zvol_queue_limits_t limits;
 	zvol_queue_limits_init(&limits, zv, zv->zv_zso->use_blk_mq);
@@ -1498,10 +1471,8 @@ zvol_os_free(zvol_state_t *zv)
 	put_disk(zv->zv_zso->zvo_disk);
 #endif
 
-#ifdef HAVE_BLK_MQ
 	if (zv->zv_zso->use_blk_mq)
 		blk_mq_free_tag_set(&zv->zv_zso->tag_set);
-#endif
 
 	ida_simple_remove(&zvol_ida,
 	    MINOR(zv->zv_zso->zvo_dev) >> ZVOL_MINOR_BITS);
@@ -1859,7 +1830,6 @@ zvol_init(void)
 		return (error);
 	}
 
-#ifdef HAVE_BLK_MQ
 	if (zvol_blk_mq_queue_depth == 0) {
 		zvol_actual_blk_mq_queue_depth = BLKDEV_DEFAULT_RQ;
 	} else {
@@ -1873,7 +1843,7 @@ zvol_init(void)
 		zvol_blk_mq_actual_threads = MIN(MAX(zvol_blk_mq_threads, 1),
 		    1024);
 	}
-#endif
+
 	for (uint_t i = 0; i < num_tqs; i++) {
 		char name[32];
 		(void) snprintf(name, sizeof (name), "%s_tq-%u",
@@ -1945,7 +1915,6 @@ MODULE_PARM_DESC(zvol_prefetch_bytes, "Prefetch N bytes at zvol start+end");
 module_param(zvol_volmode, uint, 0644);
 MODULE_PARM_DESC(zvol_volmode, "Default volmode property value");
 
-#ifdef HAVE_BLK_MQ
 module_param(zvol_blk_mq_queue_depth, uint, 0644);
 MODULE_PARM_DESC(zvol_blk_mq_queue_depth, "Default blk-mq queue depth");
 
@@ -1955,7 +1924,6 @@ MODULE_PARM_DESC(zvol_use_blk_mq, "Use the blk-mq API for zvols");
 module_param(zvol_blk_mq_blocks_per_thread, uint, 0644);
 MODULE_PARM_DESC(zvol_blk_mq_blocks_per_thread,
     "Process volblocksize blocks per thread");
-#endif
 
 #ifndef HAVE_BLKDEV_GET_ERESTARTSYS
 module_param(zvol_open_timeout_ms, uint, 0644);
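
Taken together, the zvol blk-mq machinery is now always compiled but still gated at runtime by the zvol_use_blk_mq module parameter: zvol_alloc() copies the global into zv_zso->use_blk_mq, zvol_alloc_blk_mq() sets up the tag set, and zvol_os_free() releases it. A condensed sketch of that pairing, based on the hunks above (function names here are hypothetical; error handling and the tag_set field setup are elided):

/* Allocation side, as in zvol_alloc_blk_mq(). */
static int
zvol_tag_set_setup(zvol_state_t *zv)
{
	struct zvol_state_os *zso = zv->zv_zso;

	/* zvol_blk_mq_alloc_tag_set() fills zso->tag_set first, then: */
	return (blk_mq_alloc_tag_set(&zso->tag_set));
}

/* Teardown side, as in zvol_os_free(): only free what was allocated. */
static void
zvol_tag_set_teardown(zvol_state_t *zv)
{
	if (zv->zv_zso->use_blk_mq)
		blk_mq_free_tag_set(&zv->zv_zso->tag_set);
}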