2010-05-18 02:18:00 +04:00
|
|
|
/*****************************************************************************\
|
|
|
|
* Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
|
|
|
|
* Copyright (C) 2007 The Regents of the University of California.
|
|
|
|
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
|
|
|
|
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
|
2008-05-26 08:38:26 +04:00
|
|
|
* UCRL-CODE-235197
|
|
|
|
*
|
2010-05-18 02:18:00 +04:00
|
|
|
* This file is part of the SPL, Solaris Porting Layer.
|
|
|
|
* For details, see <http://github.com/behlendorf/spl/>.
|
|
|
|
*
|
|
|
|
* The SPL is free software; you can redistribute it and/or modify it
|
|
|
|
* under the terms of the GNU General Public License as published by the
|
|
|
|
* Free Software Foundation; either version 2 of the License, or (at your
|
|
|
|
* option) any later version.
|
2008-05-26 08:38:26 +04:00
|
|
|
*
|
2010-05-18 02:18:00 +04:00
|
|
|
* The SPL is distributed in the hope that it will be useful, but WITHOUT
|
2008-05-26 08:38:26 +04:00
|
|
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
|
|
|
* for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License along
|
2010-05-18 02:18:00 +04:00
|
|
|
* with the SPL. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
\*****************************************************************************/
|
2008-05-26 08:38:26 +04:00
|
|
|
|
2008-02-28 03:52:31 +03:00
|
|
|
#ifndef _SPL_TASKQ_H
#define _SPL_TASKQ_H

#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <sys/types.h>
#include <sys/thread.h>

/* Maximum taskq name length, not counting the terminating NUL. */
#define TASKQ_NAMELEN           31

/*
 * Flags accepted by taskq_create() and friends.  These mirror the
 * Solaris taskq interface; not every flag is necessarily honored by
 * this port -- see the implementation for details.
 */
#define TASKQ_PREPOPULATE       0x00000001      /* prepopulate the task_t pool */
#define TASKQ_CPR_SAFE          0x00000002
#define TASKQ_DYNAMIC           0x00000004
#define TASKQ_THREADS_CPU_PCT   0x00000008      /* nthreads given as a CPU %% */
#define TASKQ_DC_BATCH          0x00000010
#define TASKQ_NORECLAIM         0x00000020

/* Opaque id returned for every dispatched task; ids increase monotonically. */
typedef unsigned long taskqid_t;

/* Signature of a dispatched task function. */
typedef void (task_func_t)(void *);
|
2008-02-26 23:36:04 +03:00
|
|
|
|
2011-12-06 05:32:48 +04:00
|
|
|
typedef struct taskq_ent {
|
|
|
|
spinlock_t tqent_lock;
|
|
|
|
struct list_head tqent_list;
|
|
|
|
taskqid_t tqent_id;
|
|
|
|
task_func_t *tqent_func;
|
|
|
|
void *tqent_arg;
|
2011-12-06 22:04:51 +04:00
|
|
|
uintptr_t tqent_flags;
|
2011-12-06 05:32:48 +04:00
|
|
|
} taskq_ent_t;
|
|
|
|
|
2011-12-06 22:04:51 +04:00
|
|
|
#define TQENT_FLAG_PREALLOC 0x1
|
|
|
|
|
2008-02-26 23:36:04 +03:00
|
|
|
/*
 * Flags for taskq_dispatch. TQ_SLEEP/TQ_NOSLEEP should be same as
 * KM_SLEEP/KM_NOSLEEP.  TQ_NOQUEUE/TQ_NOALLOC are set particularly
 * large so as not to conflict with already used GFP_* defines.
 */
#define TQ_SLEEP                KM_SLEEP	/* dispatch may block */
#define TQ_NOSLEEP              KM_NOSLEEP	/* dispatch must not block */
#define TQ_NOQUEUE              0x01000000
#define TQ_NOALLOC              0x02000000	/* do not grow the task pool */
#define TQ_NEW                  0x04000000
#define TQ_FRONT                0x08000000	/* queue on the priority list */
#define TQ_ACTIVE               0x80000000
|
|
|
|
|
|
|
|
typedef struct taskq {
|
|
|
|
spinlock_t tq_lock; /* protects taskq_t */
|
2008-11-03 23:21:08 +03:00
|
|
|
unsigned long tq_lock_flags; /* interrupt state */
|
2008-04-26 02:10:47 +04:00
|
|
|
const char *tq_name; /* taskq name */
|
2011-12-06 05:32:48 +04:00
|
|
|
struct list_head tq_thread_list;/* list of all threads */
|
|
|
|
struct list_head tq_active_list;/* list of active threads */
|
2008-04-26 02:10:47 +04:00
|
|
|
int tq_nactive; /* # of active threads */
|
|
|
|
int tq_nthreads; /* # of total threads */
|
|
|
|
int tq_pri; /* priority */
|
|
|
|
int tq_minalloc; /* min task_t pool size */
|
|
|
|
int tq_maxalloc; /* max task_t pool size */
|
|
|
|
int tq_nalloc; /* cur task_t pool size */
|
|
|
|
uint_t tq_flags; /* flags */
|
|
|
|
taskqid_t tq_next_id; /* next pend/work id */
|
|
|
|
taskqid_t tq_lowest_id; /* lowest pend/work id */
|
|
|
|
struct list_head tq_free_list; /* free task_t's */
|
|
|
|
struct list_head tq_pend_list; /* pending task_t's */
|
2010-07-01 21:07:51 +04:00
|
|
|
struct list_head tq_prio_list; /* priority pending task_t's */
|
2008-04-26 02:10:47 +04:00
|
|
|
wait_queue_head_t tq_work_waitq; /* new work waitq */
|
|
|
|
wait_queue_head_t tq_wait_waitq; /* wait waitq */
|
|
|
|
} taskq_t;
|
2008-02-26 23:36:04 +03:00
|
|
|
|
2011-12-06 05:32:48 +04:00
|
|
|
typedef struct taskq_thread {
|
|
|
|
struct list_head tqt_thread_list;
|
|
|
|
struct list_head tqt_active_list;
|
|
|
|
struct task_struct *tqt_thread;
|
|
|
|
taskq_t *tqt_tq;
|
Swap taskq_ent_t with taskqid_t in taskq_thread_t
The taskq_t's active thread list is sorted based on its
tqt_ent->tqent_id field. The list is kept sorted solely by inserting
new taskq_thread_t's in their correct sorted location; no other
means is used. This means that once inserted, if a taskq_thread_t's
tqt_ent->tqent_id field changes, the list runs the risk of no
longer being sorted.
Prior to the introduction of the taskq_dispatch_prealloc() interface,
this was not a problem as a taskq_ent_t actively being serviced under
the old interface should always have a static tqent_id field. Thus,
once the taskq_thread_t is added to the taskq_t's active thread list,
the taskq_thread_t's tqt_ent->tqent_id field would remain constant.
Now, this is no longer the case. Currently, if using the
taskq_dispatch_prealloc() interface, any given taskq_ent_t actively
being serviced _may_ have its tqent_id value incremented. This happens
when the preallocated taskq_ent_t structure is recursively dispatched.
Thus, a taskq_thread_t could potentially have its tqt_ent->tqent_id
field silently modified from under its feet. If this were to happen
to a taskq_thread_t on a taskq_t's active thread list, this would
compromise the integrity of the order of the list (as the list
_may_ no longer be sorted).
To get around this, the taskq_thread_t's taskq_ent_t pointer was
replaced with its own static copy of the tqent_id. So, as a taskq_ent_t
is pulled off of the taskq_t's pending list, a static copy of its
tqent_id is made and this copy is used to sort the active thread
list. Using a static copy is key in ensuring the integrity of the
order of the active thread list. Even if the underlying taskq_ent_t
is recursively dispatched (as has its tqent_id modified), this
static copy stored inside the taskq_thread_t will remain constant.
Signed-off-by: Prakash Surya <surya1@llnl.gov>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Issue #71
2011-12-16 21:44:31 +04:00
|
|
|
taskqid_t tqt_id;
|
Store copy of tqent_flags prior to servicing task
A preallocated taskq_ent_t's tqent_flags must be checked prior to
servicing the taskq_ent_t. Once a preallocated taskq entry is serviced,
the ownership of the entry is handed back to the caller of
taskq_dispatch, thus the entry's contents can potentially be mangled.
In particular, this is a problem in the case where a preallocated taskq
entry is serviced, and the caller clears it's tqent_flags field. Thus,
when the function returns and task_done is called, it looks as though
the entry is **not** a preallocated task (when in fact it **is** a
preallocated task).
In this situation, task_done will place the preallocated taskq_ent_t
structure onto the taskq_t's free list. This is a **huge** mistake. If
the taskq_ent_t is then freed by the caller of taskq_dispatch, the
taskq_t's free list will hold a pointer to garbage data. Even worse, if
nothing has over written the freed memory before the pointer is
dereferenced, it may still look as though it points to a valid list_head
belonging to a taskq_ent_t structure.
Thus, the task entry's flags are now copied prior to servicing the task.
This copy is then checked to see if it is a preallocated task, and
determine if the entry needs to be passed down to the task_done
function.
Signed-off-by: Prakash Surya <surya1@llnl.gov>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #71
2011-12-17 02:57:31 +04:00
|
|
|
uintptr_t tqt_flags;
|
2011-12-06 05:32:48 +04:00
|
|
|
} taskq_thread_t;
|
|
|
|
|
2009-01-06 02:08:03 +03:00
|
|
|
/* Global system-wide dynamic task queue available for all consumers */
|
|
|
|
extern taskq_t *system_taskq;
|
|
|
|
|
2008-02-26 23:36:04 +03:00
|
|
|
extern taskqid_t __taskq_dispatch(taskq_t *, task_func_t, void *, uint_t);
|
2011-12-06 22:04:51 +04:00
|
|
|
extern void __taskq_dispatch_ent(taskq_t *, task_func_t, void *, uint_t, taskq_ent_t *);
|
|
|
|
extern int __taskq_empty_ent(taskq_ent_t *);
|
|
|
|
extern void __taskq_init_ent(taskq_ent_t *);
|
2008-02-26 23:36:04 +03:00
|
|
|
extern taskq_t *__taskq_create(const char *, int, pri_t, int, int, uint_t);
|
2008-03-11 05:08:57 +03:00
|
|
|
extern void __taskq_destroy(taskq_t *);
|
2009-03-16 01:13:49 +03:00
|
|
|
extern void __taskq_wait_id(taskq_t *, taskqid_t);
|
2008-03-11 05:08:57 +03:00
|
|
|
extern void __taskq_wait(taskq_t *);
|
2008-04-26 02:10:47 +04:00
|
|
|
extern int __taskq_member(taskq_t *, void *);
|
|
|
|
|
2009-01-06 02:08:03 +03:00
|
|
|
int spl_taskq_init(void);
|
|
|
|
void spl_taskq_fini(void);
|
|
|
|
|
2008-04-26 02:10:47 +04:00
|
|
|
#define taskq_member(tq, t) __taskq_member(tq, t)
|
|
|
|
#define taskq_wait_id(tq, id) __taskq_wait_id(tq, id)
|
|
|
|
#define taskq_wait(tq) __taskq_wait(tq)
|
|
|
|
#define taskq_dispatch(tq, f, p, fl) __taskq_dispatch(tq, f, p, fl)
|
2011-12-06 22:04:51 +04:00
|
|
|
#define taskq_dispatch_ent(tq, f, p, fl, t) __taskq_dispatch_ent(tq, f, p, fl, t)
|
|
|
|
#define taskq_empty_ent(t) __taskq_empty_ent(t)
|
|
|
|
#define taskq_init_ent(t) __taskq_init_ent(t)
|
2008-04-26 02:10:47 +04:00
|
|
|
#define taskq_create(n, th, p, mi, ma, fl) __taskq_create(n, th, p, mi, ma, fl)
|
2010-06-12 01:37:46 +04:00
|
|
|
#define taskq_create_proc(n, th, p, mi, ma, pr, fl) \
|
|
|
|
__taskq_create(n, th, p, mi, ma, fl)
|
|
|
|
#define taskq_create_sysdc(n, th, mi, ma, pr, dc, fl) \
|
|
|
|
__taskq_create(n, th, maxclsyspri, mi, ma, fl)
|
2008-04-26 02:10:47 +04:00
|
|
|
#define taskq_destroy(tq) __taskq_destroy(tq)
|
2008-02-26 23:36:04 +03:00
|
|
|
|
2008-02-28 03:52:31 +03:00
|
|
|
#endif /* _SPL_TASKQ_H */
|