/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright 2012 Garrett D'Amore <garrett@damore.org>. All rights reserved.
 * Copyright (c) 2014 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>

int taskq_now;
taskq_t *system_taskq;
taskq_t *system_delay_taskq;

static pthread_key_t taskq_tsd;

#define	TASKQ_ACTIVE	0x00010000

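/*
 * Allocate a taskq entry, reusing one from the free list when possible.
 * When the queue is already at tq_maxalloc, a KM_SLEEP caller waits up to
 * one second (or until task_free() signals) before retrying; a caller
 * without KM_SLEEP gets NULL immediately.
 */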
static taskq_ent_t *
task_alloc(taskq_t *tq, int tqflags)
{
	taskq_ent_t *t;
	int rv;

again:	if ((t = tq->tq_freelist) != NULL && tq->tq_nalloc >= tq->tq_minalloc) {
		ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
		tq->tq_freelist = t->tqent_next;
	} else {
		if (tq->tq_nalloc >= tq->tq_maxalloc) {
			if (!(tqflags & KM_SLEEP))
				return (NULL);

			/*
			 * We don't want to exceed tq_maxalloc, but we can't
			 * wait for other tasks to complete (and thus free up
			 * task structures) without risking deadlock with
			 * the caller. So, we just delay for one second
			 * to throttle the allocation rate. If we have tasks
			 * complete before one second timeout expires then
			 * taskq_ent_free will signal us and we will
			 * immediately retry the allocation.
			 */
			tq->tq_maxalloc_wait++;
			rv = cv_timedwait(&tq->tq_maxalloc_cv,
			    &tq->tq_lock, ddi_get_lbolt() + hz);
			tq->tq_maxalloc_wait--;
			if (rv > 0)
				goto again;		/* signaled */
		}
		mutex_exit(&tq->tq_lock);

		t = kmem_alloc(sizeof (taskq_ent_t), tqflags);

		mutex_enter(&tq->tq_lock);
		if (t != NULL) {
			/* Make sure we start without any flags */
			t->tqent_flags = 0;
			tq->tq_nalloc++;
		}
	}
	return (t);
}

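/*
 * Return a task entry to the free list if the queue is below tq_minalloc,
 * otherwise free it outright, and wake any allocator waiting in task_alloc().
 */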
static void
task_free(taskq_t *tq, taskq_ent_t *t)
{
	if (tq->tq_nalloc <= tq->tq_minalloc) {
		t->tqent_next = tq->tq_freelist;
		tq->tq_freelist = t;
	} else {
		tq->tq_nalloc--;
		mutex_exit(&tq->tq_lock);
		kmem_free(t, sizeof (taskq_ent_t));
		mutex_enter(&tq->tq_lock);
	}

	if (tq->tq_maxalloc_wait)
		cv_signal(&tq->tq_maxalloc_cv);
}

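/*
 * Dispatch func(arg) to the taskq. Returns a nonzero taskqid on success and
 * 0 if no task entry could be allocated. When taskq_now is set, the function
 * is simply run in the caller's context.
 */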
taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t tqflags)
{
	taskq_ent_t *t;

	if (taskq_now) {
		func(arg);
		return (1);
	}

	mutex_enter(&tq->tq_lock);
	ASSERT(tq->tq_flags & TASKQ_ACTIVE);
	if ((t = task_alloc(tq, tqflags)) == NULL) {
		mutex_exit(&tq->tq_lock);
		return (0);
	}
	if (tqflags & TQ_FRONT) {
		t->tqent_next = tq->tq_task.tqent_next;
		t->tqent_prev = &tq->tq_task;
	} else {
		t->tqent_next = &tq->tq_task;
		t->tqent_prev = tq->tq_task.tqent_prev;
	}
	t->tqent_next->tqent_prev = t;
	t->tqent_prev->tqent_next = t;
	t->tqent_func = func;
	t->tqent_arg = arg;
	t->tqent_flags = 0;
	cv_signal(&tq->tq_dispatch_cv);
	mutex_exit(&tq->tq_lock);
	return (1);
}

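/*
 * Delayed dispatch is not supported by this userspace implementation;
 * always return 0 (dispatch failure).
 */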
taskqid_t
taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg, uint_t tqflags,
    clock_t expire_time)
{
	(void) tq, (void) func, (void) arg, (void) tqflags, (void) expire_time;
	return (0);
}

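/*
 * Returns nonzero if the entry is not currently linked into any task list.
 */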
int
taskq_empty_ent(taskq_ent_t *t)
{
	return (t->tqent_next == NULL);
}

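/*
 * Initialize a caller-provided (preallocated) task entry.
 */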
void
taskq_init_ent(taskq_ent_t *t)
{
	t->tqent_next = NULL;
	t->tqent_prev = NULL;
	t->tqent_func = NULL;
	t->tqent_arg = NULL;
	t->tqent_flags = 0;
}

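/*
 * Dispatch using a caller-preallocated task entry; this cannot fail because
 * no allocation is needed.
 */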
void
taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
    taskq_ent_t *t)
{
	ASSERT(func != NULL);

	/*
	 * Mark it as a prealloc'd task. This is important
	 * to ensure that we don't free it later.
	 */
	t->tqent_flags |= TQENT_FLAG_PREALLOC;
	/*
	 * Enqueue the task to the underlying queue.
	 */
	mutex_enter(&tq->tq_lock);

	if (flags & TQ_FRONT) {
		t->tqent_next = tq->tq_task.tqent_next;
		t->tqent_prev = &tq->tq_task;
	} else {
		t->tqent_next = &tq->tq_task;
		t->tqent_prev = tq->tq_task.tqent_prev;
	}
	t->tqent_next->tqent_prev = t;
	t->tqent_prev->tqent_next = t;
	t->tqent_func = func;
	t->tqent_arg = arg;
	cv_signal(&tq->tq_dispatch_cv);
	mutex_exit(&tq->tq_lock);
}

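/*
 * Block until every queued task has been dispatched and has completed.
 */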
void
taskq_wait(taskq_t *tq)
{
	mutex_enter(&tq->tq_lock);
	while (tq->tq_task.tqent_next != &tq->tq_task || tq->tq_active != 0)
		cv_wait(&tq->tq_wait_cv, &tq->tq_lock);
	mutex_exit(&tq->tq_lock);
}

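/*
 * Per-id waits are not tracked in userspace; both of the following simply
 * wait for the whole taskq to drain.
 */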
void
taskq_wait_id(taskq_t *tq, taskqid_t id)
{
	(void) id;
	taskq_wait(tq);
}

void
taskq_wait_outstanding(taskq_t *tq, taskqid_t id)
{
	(void) id;
	taskq_wait(tq);
}

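/*
 * Worker thread: repeatedly remove the next task from the list and run it,
 * sleeping on tq_dispatch_cv when the queue is empty. Exits once the taskq
 * is deactivated by taskq_destroy().
 */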
static __attribute__((noreturn)) void
taskq_thread(void *arg)
{
	taskq_t *tq = arg;
	taskq_ent_t *t;
	boolean_t prealloc;

	VERIFY0(pthread_setspecific(taskq_tsd, tq));

	mutex_enter(&tq->tq_lock);
	while (tq->tq_flags & TASKQ_ACTIVE) {
		if ((t = tq->tq_task.tqent_next) == &tq->tq_task) {
			if (--tq->tq_active == 0)
				cv_broadcast(&tq->tq_wait_cv);
			cv_wait(&tq->tq_dispatch_cv, &tq->tq_lock);
			tq->tq_active++;
			continue;
		}
		t->tqent_prev->tqent_next = t->tqent_next;
		t->tqent_next->tqent_prev = t->tqent_prev;
		t->tqent_next = NULL;
		t->tqent_prev = NULL;
		prealloc = t->tqent_flags & TQENT_FLAG_PREALLOC;
		mutex_exit(&tq->tq_lock);

		rw_enter(&tq->tq_threadlock, RW_READER);
		t->tqent_func(t->tqent_arg);
		rw_exit(&tq->tq_threadlock);

		mutex_enter(&tq->tq_lock);
		if (!prealloc)
			task_free(tq, t);
	}
	tq->tq_nthreads--;
	cv_broadcast(&tq->tq_wait_cv);
	mutex_exit(&tq->tq_lock);
	thread_exit();
}

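/*
 * Create a taskq with nthreads worker threads. With TASKQ_THREADS_CPU_PCT,
 * nthreads is interpreted as a percentage of the online CPUs (minimum one
 * thread). TASKQ_PREPOPULATE pre-allocates minalloc task entries.
 */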
taskq_t *
taskq_create(const char *name, int nthreads, pri_t pri,
    int minalloc, int maxalloc, uint_t flags)
{
	(void) pri;
	taskq_t *tq = kmem_zalloc(sizeof (taskq_t), KM_SLEEP);
	int t;

	if (flags & TASKQ_THREADS_CPU_PCT) {
		int pct;
		ASSERT3S(nthreads, >=, 0);
		ASSERT3S(nthreads, <=, 100);
		pct = MIN(nthreads, 100);
		pct = MAX(pct, 0);

		nthreads = (sysconf(_SC_NPROCESSORS_ONLN) * pct) / 100;
		nthreads = MAX(nthreads, 1);	/* need at least 1 thread */
	} else {
		ASSERT3S(nthreads, >=, 1);
	}

	rw_init(&tq->tq_threadlock, NULL, RW_DEFAULT, NULL);
	mutex_init(&tq->tq_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&tq->tq_dispatch_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tq->tq_wait_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tq->tq_maxalloc_cv, NULL, CV_DEFAULT, NULL);
	(void) strlcpy(tq->tq_name, name, sizeof (tq->tq_name));
	tq->tq_flags = flags | TASKQ_ACTIVE;
	tq->tq_active = nthreads;
	tq->tq_nthreads = nthreads;
	tq->tq_minalloc = minalloc;
	tq->tq_maxalloc = maxalloc;
	tq->tq_task.tqent_next = &tq->tq_task;
	tq->tq_task.tqent_prev = &tq->tq_task;
	tq->tq_threadlist = kmem_alloc(nthreads * sizeof (kthread_t *),
	    KM_SLEEP);

	if (flags & TASKQ_PREPOPULATE) {
		mutex_enter(&tq->tq_lock);
		while (minalloc-- > 0)
			task_free(tq, task_alloc(tq, KM_SLEEP));
		mutex_exit(&tq->tq_lock);
	}

	for (t = 0; t < nthreads; t++)
		VERIFY((tq->tq_threadlist[t] = thread_create_named(tq->tq_name,
		    NULL, 0, taskq_thread, tq, 0, &p0, TS_RUN, pri)) != NULL);

	return (tq);
}

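/*
 * Drain the taskq, stop all worker threads, release every cached task entry,
 * and free the taskq itself.
 */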
void
taskq_destroy(taskq_t *tq)
{
	int nthreads = tq->tq_nthreads;

	taskq_wait(tq);

	mutex_enter(&tq->tq_lock);

	tq->tq_flags &= ~TASKQ_ACTIVE;
	cv_broadcast(&tq->tq_dispatch_cv);

	while (tq->tq_nthreads != 0)
		cv_wait(&tq->tq_wait_cv, &tq->tq_lock);

	tq->tq_minalloc = 0;
	while (tq->tq_nalloc != 0) {
		ASSERT(tq->tq_freelist != NULL);
		taskq_ent_t *tqent_nexttq = tq->tq_freelist->tqent_next;
		task_free(tq, tq->tq_freelist);
		tq->tq_freelist = tqent_nexttq;
	}

	mutex_exit(&tq->tq_lock);

	kmem_free(tq->tq_threadlist, nthreads * sizeof (kthread_t *));

	rw_destroy(&tq->tq_threadlock);
	mutex_destroy(&tq->tq_lock);
	cv_destroy(&tq->tq_dispatch_cv);
	cv_destroy(&tq->tq_wait_cv);
	cv_destroy(&tq->tq_maxalloc_cv);

	kmem_free(tq, sizeof (taskq_t));
}

/*
 * Create a taskq with a specified number of pool threads. Allocate
 * and return an array of nthreads kthread_t pointers, one for each
 * thread in the pool. The array is not ordered and must be freed
 * by the caller.
 */
taskq_t *
taskq_create_synced(const char *name, int nthreads, pri_t pri,
    int minalloc, int maxalloc, uint_t flags, kthread_t ***ktpp)
{
	taskq_t *tq;
	kthread_t **kthreads = kmem_zalloc(sizeof (*kthreads) * nthreads,
	    KM_SLEEP);

	(void) pri; (void) minalloc; (void) maxalloc;

	flags &= ~(TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT | TASKQ_DC_BATCH);

	tq = taskq_create(name, nthreads, minclsyspri, nthreads, INT_MAX,
	    flags | TASKQ_PREPOPULATE);
	VERIFY(tq != NULL);
	VERIFY(tq->tq_nthreads == nthreads);

	for (int i = 0; i < nthreads; i++) {
		kthreads[i] = tq->tq_threadlist[i];
	}
	*ktpp = kthreads;
	return (tq);
}

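/*
 * Returns nonzero if t is one of tq's worker threads (always true when
 * taskq_now forces synchronous dispatch).
 */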
int
taskq_member(taskq_t *tq, kthread_t *t)
{
	int i;

	if (taskq_now)
		return (1);

	for (i = 0; i < tq->tq_nthreads; i++)
		if (tq->tq_threadlist[i] == t)
			return (1);

	return (0);
}

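/*
 * Return the taskq the current thread is servicing, or NULL if it is not a
 * taskq worker (looked up via the thread-specific data set in taskq_thread()).
 */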
taskq_t *
taskq_of_curthread(void)
{
	return (pthread_getspecific(taskq_tsd));
}

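/*
 * Task cancellation is not supported here; report that the id was not found.
 */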
int
taskq_cancel_id(taskq_t *tq, taskqid_t id)
{
	(void) tq, (void) id;
	return (ENOENT);
}

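/*
 * Set up the thread-specific-data key and create the global system_taskq
 * and system_delay_taskq.
 */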
void
system_taskq_init(void)
{
	VERIFY0(pthread_key_create(&taskq_tsd, NULL));
	system_taskq = taskq_create("system_taskq", 64, maxclsyspri, 4, 512,
	    TASKQ_DYNAMIC | TASKQ_PREPOPULATE);
	system_delay_taskq = taskq_create("delay_taskq", 4, maxclsyspri, 4,
	    512, TASKQ_DYNAMIC | TASKQ_PREPOPULATE);
}

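/*
 * Tear down the global taskqs and the thread-specific-data key.
 */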
void
system_taskq_fini(void)
{
	taskq_destroy(system_taskq);
	system_taskq = NULL; /* defensive */
	taskq_destroy(system_delay_taskq);
	system_delay_taskq = NULL;
	VERIFY0(pthread_key_delete(taskq_tsd));
}