config: remove HAVE_BLK_MQ

Sponsored-by: https://despairlabs.com/sponsor/
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Tony Hutter <hutter2@llnl.gov>
Reviewed-by: Tino Reichardt <milky-zfs@mcmilk.de>
Signed-off-by: Rob Norris <robn@despairlabs.com>
Closes #16479
Rob Norris, 2024-08-24 21:33:35 +10:00, committed by Brian Behlendorf
parent 1bf93713d8
commit dcb8e5ec7c
4 changed files with 8 additions and 106 deletions


@@ -248,24 +248,7 @@ AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_MAX_SEGMENTS], [
])
])
dnl #
dnl # See if kernel supports block multi-queue and blk_status_t.
dnl # blk_status_t represents the new status codes introduced in the 4.13
dnl # kernel patch:
dnl #
dnl # block: introduce new block status code type
dnl #
dnl # We do not currently support the "old" block multi-queue interfaces from
dnl # prior kernels.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_MQ], [
ZFS_LINUX_TEST_SRC([blk_mq], [
#include <linux/blk-mq.h>
], [
struct blk_mq_tag_set tag_set __attribute__ ((unused)) = {0};
(void) blk_mq_alloc_tag_set(&tag_set);
return BLK_STS_OK;
], [])
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_MQ_RQ_HCTX], [
ZFS_LINUX_TEST_SRC([blk_mq_rq_hctx], [
#include <linux/blk-mq.h>
#include <linux/blkdev.h>
@@ -276,11 +259,7 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_MQ], [
], [])
])
AC_DEFUN([ZFS_AC_KERNEL_BLK_MQ], [
AC_MSG_CHECKING([whether block multiqueue with blk_status_t is available])
ZFS_LINUX_TEST_RESULT([blk_mq], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BLK_MQ, 1, [block multiqueue is available])
AC_DEFUN([ZFS_AC_KERNEL_BLK_MQ_RQ_HCTX], [
AC_MSG_CHECKING([whether block multiqueue hardware context is cached in struct request])
ZFS_LINUX_TEST_RESULT([blk_mq_rq_hctx], [
AC_MSG_RESULT(yes)
@@ -288,9 +267,6 @@ AC_DEFUN([ZFS_AC_KERNEL_BLK_MQ], [
], [
AC_MSG_RESULT(no)
])
], [
AC_MSG_RESULT(no)
])
])
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE], [
@@ -302,7 +278,7 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE], [
ZFS_AC_KERNEL_SRC_BLK_QUEUE_SECURE_ERASE
ZFS_AC_KERNEL_SRC_BLK_QUEUE_MAX_HW_SECTORS
ZFS_AC_KERNEL_SRC_BLK_QUEUE_MAX_SEGMENTS
ZFS_AC_KERNEL_SRC_BLK_MQ
ZFS_AC_KERNEL_SRC_BLK_MQ_RQ_HCTX
])
AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE], [
@@ -314,5 +290,5 @@ AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE], [
ZFS_AC_KERNEL_BLK_QUEUE_SECURE_ERASE
ZFS_AC_KERNEL_BLK_QUEUE_MAX_HW_SECTORS
ZFS_AC_KERNEL_BLK_QUEUE_MAX_SEGMENTS
ZFS_AC_KERNEL_BLK_MQ
ZFS_AC_KERNEL_BLK_MQ_RQ_HCTX
])
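For context on the surviving check: these macros work by compiling small throwaway programs against the target kernel's headers. ZFS_LINUX_TEST_SRC registers the probe source, and ZFS_LINUX_TEST_RESULT defines the corresponding HAVE_* symbol only if the probe compiled. With HAVE_BLK_MQ gone, only the blk_mq_rq_hctx probe remains; its body is elided by the hunk above. A minimal sketch of what such a probe's C payload could look like, assuming it only needs to touch the mq_hctx member (the real test body and the probe name here are illustrative):

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

/*
 * Compile-only probe: builds on kernels where struct request caches its
 * blk-mq hardware context, i.e. where the rq->mq_hctx member exists.
 */
static void
blk_mq_rq_hctx_probe(void)
{
	struct request rq = {0};
	struct blk_mq_hw_ctx *hctx = NULL;

	rq.mq_hctx = hctx;
}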


@@ -35,11 +35,7 @@
#include <linux/major.h>
#include <linux/msdos_fs.h> /* for SECTOR_* */
#include <linux/bio.h>
#ifdef HAVE_BLK_MQ
#include <linux/blk-mq.h>
#endif
/*
* 6.11 API
@@ -558,7 +554,6 @@ blk_generic_alloc_queue(make_request_fn make_request, int node_id)
static inline int
io_data_dir(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
if (rq != NULL) {
if (op_is_write(req_op(rq))) {
return (WRITE);
@@ -566,57 +561,38 @@ io_data_dir(struct bio *bio, struct request *rq)
return (READ);
}
}
#else
ASSERT3P(rq, ==, NULL);
#endif
return (bio_data_dir(bio));
}
static inline int
io_is_flush(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
if (rq != NULL)
return (req_op(rq) == REQ_OP_FLUSH);
#else
ASSERT3P(rq, ==, NULL);
#endif
return (bio_is_flush(bio));
}
static inline int
io_is_discard(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
if (rq != NULL)
return (req_op(rq) == REQ_OP_DISCARD);
#else
ASSERT3P(rq, ==, NULL);
#endif
return (bio_is_discard(bio));
}
static inline int
io_is_secure_erase(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
if (rq != NULL)
return (req_op(rq) == REQ_OP_SECURE_ERASE);
#else
ASSERT3P(rq, ==, NULL);
#endif
return (bio_is_secure_erase(bio));
}
static inline int
io_is_fua(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
if (rq != NULL)
return (rq->cmd_flags & REQ_FUA);
#else
ASSERT3P(rq, ==, NULL);
#endif
return (bio_is_fua(bio));
}
@@ -624,36 +600,24 @@ io_is_fua(struct bio *bio, struct request *rq)
static inline uint64_t
io_offset(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
if (rq != NULL)
return (blk_rq_pos(rq) << 9);
#else
ASSERT3P(rq, ==, NULL);
#endif
return (BIO_BI_SECTOR(bio) << 9);
}
static inline uint64_t
io_size(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
if (rq != NULL)
return (blk_rq_bytes(rq));
#else
ASSERT3P(rq, ==, NULL);
#endif
return (BIO_BI_SIZE(bio));
}
static inline int
io_has_data(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
if (rq != NULL)
return (bio_has_data(rq->bio));
#else
ASSERT3P(rq, ==, NULL);
#endif
return (bio_has_data(bio));
}
#endif /* _ZFS_BLKDEV_H */
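The net effect on the compat helpers above: each io_* helper takes both a bio and a request, and the HAVE_BLK_MQ fallback branches that asserted rq == NULL are gone, so the request path is always compiled in. Reconstructed from the surviving lines, io_offset and io_size now read:

static inline uint64_t
io_offset(struct bio *bio, struct request *rq)
{
	if (rq != NULL)
		return (blk_rq_pos(rq) << 9);	/* request position in 512-byte sectors, converted to bytes */
	return (BIO_BI_SECTOR(bio) << 9);	/* bio path; BIO_BI_SECTOR is an OpenZFS compat macro */
}

static inline uint64_t
io_size(struct bio *bio, struct request *rq)
{
	if (rq != NULL)
		return (blk_rq_bytes(rq));	/* request payload size in bytes */
	return (BIO_BI_SIZE(bio));		/* bio path; BIO_BI_SIZE is an OpenZFS compat macro */
}

Every other helper in the hunk follows the same shape: prefer the request when one is attached, otherwise fall back to the bio.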


@@ -168,7 +168,6 @@ zfs_uiomove_bvec_impl(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio)
return (0);
}
#ifdef HAVE_BLK_MQ
static void
zfs_copy_bvec(void *p, size_t skip, size_t cnt, zfs_uio_rw_t rw,
struct bio_vec *bv)
@@ -260,17 +259,12 @@ zfs_uiomove_bvec_rq(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio)
}
return (0);
}
#endif
static int
zfs_uiomove_bvec(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio)
{
#ifdef HAVE_BLK_MQ
if (uio->rq != NULL)
return (zfs_uiomove_bvec_rq(p, n, rw, uio));
#else
ASSERT3P(uio->rq, ==, NULL);
#endif
return (zfs_uiomove_bvec_impl(p, n, rw, uio));
}
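The same pattern applies in the uio code above: zfs_uiomove_bvec_rq is no longer conditionally compiled, so the dispatcher reduces to a NULL check on uio->rq. Reconstructed from the surviving lines:

static int
zfs_uiomove_bvec(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio)
{
	/* blk-mq request-backed uio: walk the request's bios */
	if (uio->rq != NULL)
		return (zfs_uiomove_bvec_rq(p, n, rw, uio));
	/* plain bio-backed uio */
	return (zfs_uiomove_bvec_impl(p, n, rw, uio));
}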


@@ -44,10 +44,7 @@
#include <linux/blkdev_compat.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/workqueue.h>
#ifdef HAVE_BLK_MQ
#include <linux/blk-mq.h>
#endif
static void zvol_request_impl(zvol_state_t *zv, struct bio *bio,
struct request *rq, boolean_t force_sync);
@@ -68,7 +65,6 @@ static unsigned int zvol_open_timeout_ms = 1000;
#endif
static unsigned int zvol_threads = 0;
#ifdef HAVE_BLK_MQ
static unsigned int zvol_blk_mq_threads = 0;
static unsigned int zvol_blk_mq_actual_threads;
static boolean_t zvol_use_blk_mq = B_FALSE;
@@ -84,7 +80,6 @@ static boolean_t zvol_use_blk_mq = B_FALSE;
* read and write tests to a zvol in an NVMe pool (with 16 CPUs).
*/
static unsigned int zvol_blk_mq_blocks_per_thread = 8;
#endif
static unsigned int zvol_num_taskqs = 0;
@@ -96,7 +91,6 @@ static unsigned int zvol_num_taskqs = 0;
/*
* Finalize our BIO or request.
*/
#ifdef HAVE_BLK_MQ
#define END_IO(zv, bio, rq, error) do { \
if (bio) { \
bio->bi_status = errno_to_bi_status(-error); \
@@ -105,26 +99,16 @@ static unsigned int zvol_num_taskqs = 0;
blk_mq_end_request(rq, errno_to_bi_status(error)); \
} \
} while (0)
#else
#define END_IO(zv, bio, rq, error) do { \
bio->bi_status = errno_to_bi_status(-error); \
bio_endio(bio); \
} while (0)
#endif
#ifdef HAVE_BLK_MQ
static unsigned int zvol_blk_mq_queue_depth = BLKDEV_DEFAULT_RQ;
static unsigned int zvol_actual_blk_mq_queue_depth;
#endif
struct zvol_state_os {
struct gendisk *zvo_disk; /* generic disk */
struct request_queue *zvo_queue; /* request queue */
dev_t zvo_dev; /* device id */
#ifdef HAVE_BLK_MQ
struct blk_mq_tag_set tag_set;
#endif
/* Set from the global 'zvol_use_blk_mq' at zvol load */
boolean_t use_blk_mq;
@@ -169,8 +153,6 @@ zv_request_task_free(zv_request_task_t *task)
kmem_free(task, sizeof (*task));
}
#ifdef HAVE_BLK_MQ
/*
* This is called when a new block multiqueue request comes in. A request
* contains one or more BIOs.
@@ -223,7 +205,6 @@ static int zvol_blk_mq_alloc_tag_set(zvol_state_t *zv)
return (blk_mq_alloc_tag_set(&zso->tag_set));
}
#endif /* HAVE_BLK_MQ */
/*
* Given a path, return TRUE if path is a ZVOL.
@@ -561,14 +542,12 @@ zvol_request_impl(zvol_state_t *zv, struct bio *bio, struct request *rq,
uint_t blk_mq_hw_queue = 0;
uint_t tq_idx;
uint_t taskq_hash;
#ifdef HAVE_BLK_MQ
if (rq)
#ifdef HAVE_BLK_MQ_RQ_HCTX
blk_mq_hw_queue = rq->mq_hctx->queue_num;
#else
blk_mq_hw_queue =
rq->q->queue_hw_ctx[rq->q->mq_map[rq->cpu]]->queue_num;
#endif
#endif
taskq_hash = cityhash4((uintptr_t)zv, offset >> ZVOL_TASKQ_OFFSET_SHIFT,
blk_mq_hw_queue, 0);
@@ -1175,7 +1154,6 @@ zvol_queue_limits_init(zvol_queue_limits_t *limits, zvol_state_t *zv,
* the correct number of segments for the volblocksize and
* number of chunks you want.
*/
#ifdef HAVE_BLK_MQ
if (zvol_blk_mq_blocks_per_thread != 0) {
unsigned int chunks;
chunks = MIN(zvol_blk_mq_blocks_per_thread, UINT16_MAX);
@@ -1192,7 +1170,6 @@ zvol_queue_limits_init(zvol_queue_limits_t *limits, zvol_state_t *zv,
limits->zql_max_segment_size = UINT_MAX;
}
} else {
#endif
limits->zql_max_segments = UINT16_MAX;
limits->zql_max_segment_size = UINT_MAX;
}
@@ -1305,7 +1282,6 @@ zvol_alloc_non_blk_mq(struct zvol_state_os *zso, zvol_queue_limits_t *limits)
static int
zvol_alloc_blk_mq(zvol_state_t *zv, zvol_queue_limits_t *limits)
{
#ifdef HAVE_BLK_MQ
struct zvol_state_os *zso = zv->zv_zso;
/* Allocate our blk-mq tag_set */
@@ -1352,7 +1328,6 @@ zvol_alloc_blk_mq(zvol_state_t *zv, zvol_queue_limits_t *limits)
#endif
zvol_queue_limits_apply(limits, zso->zvo_queue);
#endif
return (0);
}
@@ -1388,9 +1363,7 @@ zvol_alloc(dev_t dev, const char *name, uint64_t volblocksize)
mutex_init(&zv->zv_state_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&zv->zv_removing_cv, NULL, CV_DEFAULT, NULL);
#ifdef HAVE_BLK_MQ
zv->zv_zso->use_blk_mq = zvol_use_blk_mq;
#endif
zvol_queue_limits_t limits;
zvol_queue_limits_init(&limits, zv, zv->zv_zso->use_blk_mq);
@@ -1499,10 +1472,8 @@ zvol_os_free(zvol_state_t *zv)
put_disk(zv->zv_zso->zvo_disk);
#endif
#ifdef HAVE_BLK_MQ
if (zv->zv_zso->use_blk_mq)
blk_mq_free_tag_set(&zv->zv_zso->tag_set);
#endif
ida_simple_remove(&zvol_ida,
MINOR(zv->zv_zso->zvo_dev) >> ZVOL_MINOR_BITS);
@@ -1867,7 +1838,6 @@ zvol_init(void)
return (error);
}
#ifdef HAVE_BLK_MQ
if (zvol_blk_mq_queue_depth == 0) {
zvol_actual_blk_mq_queue_depth = BLKDEV_DEFAULT_RQ;
} else {
@@ -1881,7 +1851,7 @@ zvol_init(void)
zvol_blk_mq_actual_threads = MIN(MAX(zvol_blk_mq_threads, 1),
1024);
}
#endif
for (uint_t i = 0; i < num_tqs; i++) {
char name[32];
(void) snprintf(name, sizeof (name), "%s_tq-%u",
@@ -1953,7 +1923,6 @@ MODULE_PARM_DESC(zvol_prefetch_bytes, "Prefetch N bytes at zvol start+end");
module_param(zvol_volmode, uint, 0644);
MODULE_PARM_DESC(zvol_volmode, "Default volmode property value");
#ifdef HAVE_BLK_MQ
module_param(zvol_blk_mq_queue_depth, uint, 0644);
MODULE_PARM_DESC(zvol_blk_mq_queue_depth, "Default blk-mq queue depth");
@@ -1963,7 +1932,6 @@ MODULE_PARM_DESC(zvol_use_blk_mq, "Use the blk-mq API for zvols");
module_param(zvol_blk_mq_blocks_per_thread, uint, 0644);
MODULE_PARM_DESC(zvol_blk_mq_blocks_per_thread,
"Process volblocksize blocks per thread");
#endif
#ifndef HAVE_BLKDEV_GET_ERESTARTSYS
module_param(zvol_open_timeout_ms, uint, 0644);
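
Finally, in the zvol code the unified END_IO macro is now the only definition; the bio-only fallback under the old #else is dropped. Reconstructed from the hunk above, with the two lines hidden by the hunk boundary filled in the obvious way:

/* Finalize whichever of the two this I/O carries: a BIO or a blk-mq request. */
#define	END_IO(zv, bio, rq, error)	do { \
	if (bio) { \
		bio->bi_status = errno_to_bi_status(-error); \
		bio_endio(bio); \
	} else { \
		blk_mq_end_request(rq, errno_to_bi_status(error)); \
	} \
} while (0)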