Fix cstyle issues in spl-taskq.c and taskq.h

This patch only addresses the issues identified by the style checker.
It contains no functional changes.

Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>

Author: Brian Behlendorf
Date:   2015-12-11 16:15:50 -08:00
Parent: 066b89e685
Commit: 2c4332cf79

2 changed files with 86 additions and 78 deletions
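
The cstyle rules being enforced here reduce to a few recurring patterns in
the hunks below: return values wrapped in parentheses, a space between
sizeof and its operand, and multi-line comments that open with a bare "/*"
on its own line. A minimal illustrative sketch of those conventions (the
example_s/example_size names are hypothetical, not code from the patch):

#include <stddef.h>	/* size_t; userspace stand-in for the kernel headers */

/* Illustrative only -- none of these identifiers appear in the patch. */
struct example_s {
	int	es_field;
};

static size_t
example_size(void)
{
	/*
	 * Block comments open with a bare slash-star line and the
	 * terminator closes the comment on its own line.
	 */
	return (sizeof (struct example_s));	/* space after sizeof, parenthesized return */
}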

taskq.h

@@ -1,4 +1,4 @@
-/*****************************************************************************\
+/*
  * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
  * Copyright (C) 2007 The Regents of the University of California.
  * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -20,10 +20,10 @@
  *
  * You should have received a copy of the GNU General Public License along
  * with the SPL. If not, see <http://www.gnu.org/licenses/>.
-\*****************************************************************************/
+ */

 #ifndef _SPL_TASKQ_H
 #define _SPL_TASKQ_H

 #include <linux/module.h>
 #include <linux/gfp.h>
@@ -33,29 +33,30 @@
 #include <sys/types.h>
 #include <sys/thread.h>

 #define TASKQ_NAMELEN 31

 #define TASKQ_PREPOPULATE 0x00000001
 #define TASKQ_CPR_SAFE 0x00000002
 #define TASKQ_DYNAMIC 0x00000004
 #define TASKQ_THREADS_CPU_PCT 0x00000008
 #define TASKQ_DC_BATCH 0x00000010
 #define TASKQ_ACTIVE 0x80000000

 /*
  * Flags for taskq_dispatch. TQ_SLEEP/TQ_NOSLEEP should be same as
  * KM_SLEEP/KM_NOSLEEP. TQ_NOQUEUE/TQ_NOALLOC are set particularly
  * large so as not to conflict with already used GFP_* defines.
  */
 #define TQ_SLEEP 0x00000000
 #define TQ_NOSLEEP 0x00000001
 #define TQ_PUSHPAGE 0x00000002
 #define TQ_NOQUEUE 0x01000000
 #define TQ_NOALLOC 0x02000000
 #define TQ_NEW 0x04000000
 #define TQ_FRONT 0x08000000

-/* spin_lock(lock) and spin_lock_nested(lock,0) are equivalent,
+/*
+ * spin_lock(lock) and spin_lock_nested(lock,0) are equivalent,
  * so TQ_LOCK_DYNAMIC must not evaluate to 0
  */
 typedef enum tq_lock_role {
@@ -67,28 +68,28 @@ typedef unsigned long taskqid_t;
 typedef void (task_func_t)(void *);

 typedef struct taskq {
 	spinlock_t tq_lock; /* protects taskq_t */
 	char *tq_name; /* taskq name */
-	struct list_head tq_thread_list;/* list of all threads */
-	struct list_head tq_active_list;/* list of active threads */
+	struct list_head tq_thread_list; /* list of all threads */
+	struct list_head tq_active_list; /* list of active threads */
 	int tq_nactive; /* # of active threads */
 	int tq_nthreads; /* # of existing threads */
 	int tq_nspawn; /* # of threads being spawned */
 	int tq_maxthreads; /* # of threads maximum */
 	int tq_pri; /* priority */
 	int tq_minalloc; /* min task_t pool size */
 	int tq_maxalloc; /* max task_t pool size */
 	int tq_nalloc; /* cur task_t pool size */
 	uint_t tq_flags; /* flags */
 	taskqid_t tq_next_id; /* next pend/work id */
 	taskqid_t tq_lowest_id; /* lowest pend/work id */
 	struct list_head tq_free_list; /* free task_t's */
 	struct list_head tq_pend_list; /* pending task_t's */
 	struct list_head tq_prio_list; /* priority pending task_t's */
 	struct list_head tq_delay_list; /* delayed task_t's */
 	wait_queue_head_t tq_work_waitq; /* new work waitq */
 	wait_queue_head_t tq_wait_waitq; /* wait waitq */
-	tq_lock_role_t tq_lock_class; /* class used when taking tq_lock */
+	tq_lock_role_t tq_lock_class; /* class when taking tq_lock */
 } taskq_t;

 typedef struct taskq_ent {
@@ -103,8 +104,8 @@ typedef struct taskq_ent {
 	uintptr_t tqent_flags;
 } taskq_ent_t;

 #define TQENT_FLAG_PREALLOC 0x1
 #define TQENT_FLAG_CANCEL 0x2

 typedef struct taskq_thread {
 	struct list_head tqt_thread_list;
@@ -134,9 +135,9 @@ extern void taskq_wait(taskq_t *);
 extern int taskq_cancel_id(taskq_t *, taskqid_t);
 extern int taskq_member(taskq_t *, void *);

 #define taskq_create_proc(name, nthreads, pri, min, max, proc, flags) \
 	taskq_create(name, nthreads, pri, min, max, flags)
 #define taskq_create_sysdc(name, nthreads, min, max, proc, dc, flags) \
 	taskq_create(name, nthreads, maxclsyspri, min, max, flags)

 int spl_taskq_init(void);
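
The taskq.h hunk above re-wraps the note that spin_lock(lock) and
spin_lock_nested(lock, 0) are equivalent, which is why TQ_LOCK_DYNAMIC must
not evaluate to 0: the non-zero subclass is what tells lockdep that a nested
acquisition is intentional rather than recursive. A minimal, hypothetical
sketch of that idea (the example_* names are not part of SPL):

#include <linux/spinlock.h>

/* Subclass 0 is indistinguishable from a plain spin_lock(). */
enum example_lock_class {
	EXAMPLE_LOCK_GENERAL = 0,
	EXAMPLE_LOCK_NESTED = 1,	/* must be non-zero to be meaningful */
};

static void
example_nested(spinlock_t *outer, spinlock_t *inner)
{
	spin_lock(outer);
	/* Same lock class, different instance: annotate the nesting. */
	spin_lock_nested(inner, EXAMPLE_LOCK_NESTED);
	/* ... critical section ... */
	spin_unlock(inner);
	spin_unlock(outer);
}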

spl-taskq.c

@@ -1,4 +1,4 @@
-/*****************************************************************************\
+/*
  * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
  * Copyright (C) 2007 The Regents of the University of California.
  * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -20,9 +20,9 @@
  *
  * You should have received a copy of the GNU General Public License along
  * with the SPL. If not, see <http://www.gnu.org/licenses/>.
- *****************************************************************************
+ *
  *  Solaris Porting Layer (SPL) Task Queue Implementation.
-\*****************************************************************************/
+ */

 #include <sys/taskq.h>
 #include <sys/kmem.h>
@@ -39,12 +39,12 @@ MODULE_PARM_DESC(spl_taskq_thread_dynamic, "Allow dynamic taskq threads");
 int spl_taskq_thread_priority = 1;
 module_param(spl_taskq_thread_priority, int, 0644);
 MODULE_PARM_DESC(spl_taskq_thread_priority,
 	"Allow non-default priority for taskq threads");

 int spl_taskq_thread_sequential = 4;
 module_param(spl_taskq_thread_sequential, int, 0644);
 MODULE_PARM_DESC(spl_taskq_thread_sequential,
 	"Create new taskq threads after N sequential tasks");

 /* Global system-wide dynamic task queue available for all consumers */
 taskq_t *system_taskq;
@@ -58,12 +58,12 @@ static int
 task_km_flags(uint_t flags)
 {
 	if (flags & TQ_NOSLEEP)
-		return KM_NOSLEEP;
+		return (KM_NOSLEEP);

 	if (flags & TQ_PUSHPAGE)
-		return KM_PUSHPAGE;
+		return (KM_PUSHPAGE);

-	return KM_SLEEP;
+	return (KM_SLEEP);
 }

 /*
@@ -122,7 +122,7 @@ retry:
 	}

 	spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
-	t = kmem_alloc(sizeof(taskq_ent_t), task_km_flags(flags));
+	t = kmem_alloc(sizeof (taskq_ent_t), task_km_flags(flags));
 	spin_lock_irqsave_nested(&tq->tq_lock, *irqflags, tq->tq_lock_class);

 	if (t) {
@@ -146,7 +146,7 @@ task_free(taskq_t *tq, taskq_ent_t *t)
 	ASSERT(list_empty(&t->tqent_list));
 	ASSERT(!timer_pending(&t->tqent_timer));

-	kmem_free(t, sizeof(taskq_ent_t));
+	kmem_free(t, sizeof (taskq_ent_t));
 	tq->tq_nalloc--;
 }

@@ -653,7 +653,7 @@ EXPORT_SYMBOL(taskq_dispatch_delay);
 void
 taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
     taskq_ent_t *t)
 {
 	unsigned long irqflags;

 	ASSERT(tq);
@@ -702,7 +702,7 @@ EXPORT_SYMBOL(taskq_dispatch_ent);
 int
 taskq_empty_ent(taskq_ent_t *t)
 {
-	return list_empty(&t->tqent_list);
+	return (list_empty(&t->tqent_list));
 }
 EXPORT_SYMBOL(taskq_empty_ent);

@@ -809,7 +809,7 @@ taskq_thread_should_stop(taskq_t *tq, taskq_thread_t *tqt)
 	    (tq->tq_nactive == 0) && /* No threads are handling tasks */
 	    (tq->tq_nthreads > 1) && /* More than 1 thread is running */
 	    (!taskq_next_ent(tq)) && /* There are no pending tasks */
-	    (spl_taskq_thread_dynamic));/* Dynamic taskqs are allowed */
+	    (spl_taskq_thread_dynamic)); /* Dynamic taskqs are allowed */
 }

 static int
@@ -828,9 +828,9 @@ taskq_thread(void *args)
 	tq = tqt->tqt_tq;
 	current->flags |= PF_NOFREEZE;

 #if defined(PF_MEMALLOC_NOIO)
 	(void) memalloc_noio_save();
 #endif

 	sigfillset(&blocked);
 	sigprocmask(SIG_BLOCK, &blocked, NULL);
@@ -873,17 +873,21 @@ taskq_thread(void *args)
 		if ((t = taskq_next_ent(tq)) != NULL) {
 			list_del_init(&t->tqent_list);

-			/* In order to support recursively dispatching a
+			/*
+			 * In order to support recursively dispatching a
 			 * preallocated taskq_ent_t, tqent_id must be
-			 * stored prior to executing tqent_func. */
+			 * stored prior to executing tqent_func.
+			 */
 			tqt->tqt_id = t->tqent_id;
 			tqt->tqt_task = t;

-			/* We must store a copy of the flags prior to
+			/*
+			 * We must store a copy of the flags prior to
 			 * servicing the task (servicing a prealloc'd task
 			 * returns the ownership of the tqent back to
 			 * the caller of taskq_dispatch). Thus,
-			 * tqent_flags _may_ change within the call. */
+			 * tqent_flags _may_ change within the call.
+			 */
 			tqt->tqt_flags = t->tqent_flags;

 			taskq_insert_in_order(tq, tqt);
@@ -903,8 +907,10 @@ taskq_thread(void *args)
 			if (!(tqt->tqt_flags & TQENT_FLAG_PREALLOC))
 				task_done(tq, t);

-			/* When the current lowest outstanding taskqid is
-			 * done calculate the new lowest outstanding id */
+			/*
+			 * When the current lowest outstanding taskqid is
+			 * done calculate the new lowest outstanding id
+			 */
 			if (tq->tq_lowest_id == tqt->tqt_id) {
 				tq->tq_lowest_id = taskq_lowest_id(tq);
 				ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id);
@@ -999,18 +1005,18 @@ taskq_create(const char *name, int nthreads, pri_t pri,
 	spin_lock_init(&tq->tq_lock);
 	INIT_LIST_HEAD(&tq->tq_thread_list);
 	INIT_LIST_HEAD(&tq->tq_active_list);
 	tq->tq_name = strdup(name);
 	tq->tq_nactive = 0;
 	tq->tq_nthreads = 0;
 	tq->tq_nspawn = 0;
 	tq->tq_maxthreads = nthreads;
 	tq->tq_pri = pri;
 	tq->tq_minalloc = minalloc;
 	tq->tq_maxalloc = maxalloc;
 	tq->tq_nalloc = 0;
 	tq->tq_flags = (flags | TASKQ_ACTIVE);
 	tq->tq_next_id = 1;
 	tq->tq_lowest_id = 1;
 	INIT_LIST_HEAD(&tq->tq_free_list);
 	INIT_LIST_HEAD(&tq->tq_pend_list);
 	INIT_LIST_HEAD(&tq->tq_prio_list);
@@ -1136,8 +1142,9 @@ spl_taskq_init(void)
 		return (1);
 	}

-	/* This is used to annotate tq_lock, so
-	 * taskq_dispatch -> taskq_thread_spawn -> taskq_dispatch
+	/*
+	 * This is used to annotate tq_lock, so
+	 * taskq_dispatch -> taskq_thread_spawn -> taskq_dispatch
 	 * does not trigger a lockdep warning re: possible recursive locking
 	 */
 	dynamic_taskq->tq_lock_class = TQ_LOCK_DYNAMIC;
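
The two re-wrapped comments in taskq_thread() above explain why tqent_id and
tqent_flags are copied before tqent_func is invoked: for a preallocated
(TQENT_FLAG_PREALLOC) entry, ownership returns to the caller the moment the
function runs, and the function may re-dispatch the very same entry. A
hedged caller-side sketch of that pattern (the my_* names are hypothetical;
taskq_init_ent(), taskq_dispatch_ent(), system_taskq, and TQ_SLEEP are the
SPL interfaces declared in taskq.h):

#include <sys/taskq.h>

typedef struct my_work {
	taskq_ent_t	mw_ent;		/* embedded, caller-owned entry */
	int		mw_remaining;	/* how many more times to run */
} my_work_t;

static void
my_work_func(void *arg)
{
	my_work_t *mw = arg;

	if (--mw->mw_remaining > 0) {
		/* Re-dispatch the same preallocated entry recursively. */
		taskq_dispatch_ent(system_taskq, my_work_func, mw,
		    TQ_SLEEP, &mw->mw_ent);
	}
}

static void
my_work_start(my_work_t *mw, int count)
{
	mw->mw_remaining = count;
	taskq_init_ent(&mw->mw_ent);
	taskq_dispatch_ent(system_taskq, my_work_func, mw,
	    TQ_SLEEP, &mw->mw_ent);
}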