Remove TQ_SLEEP -> KM_SLEEP mapping
When the taskq code was originally written it seemed like a good
idea to simply map TQ_SLEEP to KM_SLEEP.  Unfortunately, this
assumed that the TQ_* flags would never conflict with any of the
Linux GFP_* flags.  When adding the TQ_PUSHPAGE support in commit
cd5ca4b this invariant was accidentally broken.
Therefore, to support TQ_PUSHPAGE, which is needed for Linux, and to
prevent any further confusion, I have removed this direct mapping.
The TQ_SLEEP, TQ_NOSLEEP, and TQ_PUSHPAGE flags are no longer defined
in terms of their KM_* counterparts.  Instead, a simple mapping
function is introduced to convert TQ_* -> KM_* where needed.
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Issue #171
parent 330fe010e4
commit 9b51f21841
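For reference, the conversion described in the commit message boils down to the task_km_flags() helper added in the diff below. The following is a minimal, standalone sketch of that mapping: the TQ_* values and the helper logic are taken from the diff, while the KM_* constants are illustrative stand-ins for the real SPL definitions and the small main() is only a demonstration harness, not part of the commit.

/* Standalone sketch; KM_* values are assumed stand-ins, not the SPL defines. */
#include <stdio.h>

typedef unsigned int uint_t;

/* TQ_* dispatch flags as defined by this commit (no longer tied to KM_*) */
#define TQ_SLEEP	0x00000000
#define TQ_NOSLEEP	0x00000001
#define TQ_PUSHPAGE	0x00000002

/* Illustrative stand-ins for the SPL KM_* allocation flags */
#define KM_SLEEP	0x0000
#define KM_NOSLEEP	0x0001
#define KM_PUSHPAGE	0x0002

/* Convert TQ_* dispatch flags to KM_* allocation flags, as in the diff below */
static int
task_km_flags(uint_t flags)
{
	if (flags & TQ_NOSLEEP)
		return KM_NOSLEEP;

	if (flags & TQ_PUSHPAGE)
		return KM_PUSHPAGE;

	return KM_SLEEP;
}

int
main(void)
{
	/* Demonstration harness: print each TQ_* flag and its KM_* result */
	printf("TQ_SLEEP    -> %d\n", task_km_flags(TQ_SLEEP));
	printf("TQ_NOSLEEP  -> %d\n", task_km_flags(TQ_NOSLEEP));
	printf("TQ_PUSHPAGE -> %d\n", task_km_flags(TQ_PUSHPAGE));
	return (0);
}

Because the TQ_* values are now independent bit flags rather than aliases for KM_*/GFP_* values, TQ_PUSHPAGE can be tested on its own without colliding with the allocation flag namespace, which is the confusion the commit message describes.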
@@ -60,8 +60,9 @@ typedef struct taskq_ent {
  * KM_SLEEP/KM_NOSLEEP.  TQ_NOQUEUE/TQ_NOALLOC are set particularly
  * large so as not to conflict with already used GFP_* defines.
  */
-#define TQ_SLEEP		KM_SLEEP
-#define TQ_NOSLEEP		KM_NOSLEEP
+#define TQ_SLEEP		0x00000000
+#define TQ_NOSLEEP		0x00000001
+#define TQ_PUSHPAGE		0x00000002
 #define TQ_NOQUEUE		0x01000000
 #define TQ_NOALLOC		0x02000000
 #define TQ_NEW			0x04000000
@@ -38,6 +38,18 @@
 taskq_t *system_taskq;
 EXPORT_SYMBOL(system_taskq);
 
+static int
+task_km_flags(uint_t flags)
+{
+	if (flags & TQ_NOSLEEP)
+		return KM_NOSLEEP;
+
+	if (flags & TQ_PUSHPAGE)
+		return KM_PUSHPAGE;
+
+	return KM_SLEEP;
+}
+
 /*
  * NOTE: Must be called with tq->tq_lock held, returns a list_t which
  * is not attached to the free, work, or pending taskq lists.
@@ -50,8 +62,6 @@ task_alloc(taskq_t *tq, uint_t flags)
 	SENTRY;
 
 	ASSERT(tq);
-	ASSERT(flags & (TQ_SLEEP | TQ_NOSLEEP));	/* One set */
-	ASSERT(!((flags & TQ_SLEEP) && (flags & TQ_NOSLEEP)));	/* Not both */
 	ASSERT(spin_is_locked(&tq->tq_lock));
 retry:
 	/* Acquire taskq_ent_t's from free list if available */
@@ -92,7 +102,7 @@ retry:
 	}
 
 	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
-	t = kmem_alloc(sizeof(taskq_ent_t), flags & (TQ_SLEEP | TQ_NOSLEEP));
+	t = kmem_alloc(sizeof(taskq_ent_t), task_km_flags(flags));
 	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
 
 	if (t) {
@@ -251,14 +261,6 @@ __taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
 	ASSERT(tq);
 	ASSERT(func);
 
-	/* Solaris assumes TQ_SLEEP if not passed explicitly */
-	if (!(flags & (TQ_SLEEP | TQ_NOSLEEP)))
-		flags |= TQ_SLEEP;
-
-	if (unlikely(in_atomic() && (flags & TQ_SLEEP)))
-		PANIC("May schedule while atomic: %s/0x%08x/%d\n",
-		    current->comm, preempt_count(), current->pid);
-
 	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
 
 	/* Taskq being destroyed and all tasks drained */
@@ -554,7 +556,7 @@ __taskq_create(const char *name, int nthreads, pri_t pri,
 		nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
 	}
 
-	tq = kmem_alloc(sizeof(*tq), KM_SLEEP);
+	tq = kmem_alloc(sizeof(*tq), KM_PUSHPAGE);
 	if (tq == NULL)
 		SRETURN(NULL);
 
@@ -580,12 +582,12 @@ __taskq_create(const char *name, int nthreads, pri_t pri,
 
 	if (flags & TASKQ_PREPOPULATE)
 		for (i = 0; i < minalloc; i++)
-			task_done(tq, task_alloc(tq, TQ_SLEEP | TQ_NEW));
+			task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW));
 
 	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
 
 	for (i = 0; i < nthreads; i++) {
-		tqt = kmem_alloc(sizeof(*tqt), KM_SLEEP);
+		tqt = kmem_alloc(sizeof(*tqt), KM_PUSHPAGE);
 		INIT_LIST_HEAD(&tqt->tqt_thread_list);
 		INIT_LIST_HEAD(&tqt->tqt_active_list);
 		tqt->tqt_tq = tq;