Fix cstyle issues in spl-taskq.c and taskq.h

This patch only addresses the issues identified by the style checker.
It contains no functional changes.

Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Brian Behlendorf 2015-12-11 16:15:50 -08:00
parent 066b89e685
commit 2c4332cf79
2 changed files with 86 additions and 78 deletions
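
For reference, the rules the style checker enforces here, and which every hunk below applies, are the usual SPL/ZFS cstyle conventions: return expressions are parenthesized, sizeof is followed by a space before its operand, preprocessor directives start in column 0, and multi-line comments open and close on their own lines. A minimal sketch of the target style (the helper functions are hypothetical, written only to illustrate the rules, not code from this patch):

#include <sys/taskq.h>
#include <sys/kmem.h>

/*
 * Comments span multiple lines with the opening and closing markers
 * on lines of their own, as cstyle requires.
 */
static int
example_km_flags(uint_t flags)
{
	if (flags & TQ_NOSLEEP)
		return (KM_NOSLEEP);	/* return value parenthesized */

	return (KM_SLEEP);
}

static void
example_alloc(void)
{
	taskq_ent_t *t;

	/* Note the space between sizeof and its operand. */
	t = kmem_alloc(sizeof (taskq_ent_t), KM_SLEEP);
	kmem_free(t, sizeof (taskq_ent_t));
}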

include/sys/taskq.h

@@ -1,4 +1,4 @@
-/*****************************************************************************\
+/*
  * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
  * Copyright (C) 2007 The Regents of the University of California.
  * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -20,7 +20,7 @@
  *
  * You should have received a copy of the GNU General Public License along
  * with the SPL. If not, see <http://www.gnu.org/licenses/>.
-\*****************************************************************************/
+ */
 
 #ifndef _SPL_TASKQ_H
 #define _SPL_TASKQ_H
@@ -55,7 +55,8 @@
 #define	TQ_NEW			0x04000000
 #define	TQ_FRONT		0x08000000
 
-/* spin_lock(lock) and spin_lock_nested(lock,0) are equivalent,
+/*
+ * spin_lock(lock) and spin_lock_nested(lock,0) are equivalent,
  * so TQ_LOCK_DYNAMIC must not evaluate to 0
  */
 typedef enum tq_lock_role {
@@ -69,8 +70,8 @@ typedef void (task_func_t)(void *);
 typedef struct taskq {
 	spinlock_t		tq_lock;	/* protects taskq_t */
 	char			*tq_name;	/* taskq name */
-	struct list_head	tq_thread_list;/* list of all threads */
-	struct list_head	tq_active_list;/* list of active threads */
+	struct list_head	tq_thread_list;	/* list of all threads */
+	struct list_head	tq_active_list;	/* list of active threads */
 	int			tq_nactive;	/* # of active threads */
 	int			tq_nthreads;	/* # of existing threads */
 	int			tq_nspawn;	/* # of threads being spawned */
@@ -88,7 +89,7 @@ typedef struct taskq {
 	struct list_head	tq_delay_list;	/* delayed task_t's */
 	wait_queue_head_t	tq_work_waitq;	/* new work waitq */
 	wait_queue_head_t	tq_wait_waitq;	/* wait waitq */
-	tq_lock_role_t		tq_lock_class;	/* class used when taking tq_lock */
+	tq_lock_role_t		tq_lock_class;	/* class when taking tq_lock */
 } taskq_t;
 
 typedef struct taskq_ent {
module/spl/spl-taskq.c

@@ -1,4 +1,4 @@
-/*****************************************************************************\
+/*
  * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
  * Copyright (C) 2007 The Regents of the University of California.
  * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -20,9 +20,9 @@
  *
  * You should have received a copy of the GNU General Public License along
  * with the SPL. If not, see <http://www.gnu.org/licenses/>.
- *****************************************************************************
+ *
  * Solaris Porting Layer (SPL) Task Queue Implementation.
-\*****************************************************************************/
+ */
 
 #include <sys/taskq.h>
 #include <sys/kmem.h>
@@ -58,12 +58,12 @@ static int
 task_km_flags(uint_t flags)
 {
 	if (flags & TQ_NOSLEEP)
-		return KM_NOSLEEP;
+		return (KM_NOSLEEP);
 
 	if (flags & TQ_PUSHPAGE)
-		return KM_PUSHPAGE;
+		return (KM_PUSHPAGE);
 
-	return KM_SLEEP;
+	return (KM_SLEEP);
 }
 
 /*
@@ -122,7 +122,7 @@ retry:
 	}
 
 	spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
-	t = kmem_alloc(sizeof(taskq_ent_t), task_km_flags(flags));
+	t = kmem_alloc(sizeof (taskq_ent_t), task_km_flags(flags));
 	spin_lock_irqsave_nested(&tq->tq_lock, *irqflags, tq->tq_lock_class);
 
 	if (t) {
@@ -146,7 +146,7 @@ task_free(taskq_t *tq, taskq_ent_t *t)
 	ASSERT(list_empty(&t->tqent_list));
 	ASSERT(!timer_pending(&t->tqent_timer));
 
-	kmem_free(t, sizeof(taskq_ent_t));
+	kmem_free(t, sizeof (taskq_ent_t));
 	tq->tq_nalloc--;
 }
 
@@ -702,7 +702,7 @@ EXPORT_SYMBOL(taskq_dispatch_ent);
 int
 taskq_empty_ent(taskq_ent_t *t)
 {
-	return list_empty(&t->tqent_list);
+	return (list_empty(&t->tqent_list));
 }
 EXPORT_SYMBOL(taskq_empty_ent);
 
@@ -809,7 +809,7 @@ taskq_thread_should_stop(taskq_t *tq, taskq_thread_t *tqt)
 	    (tq->tq_nactive == 0) &&	/* No threads are handling tasks */
 	    (tq->tq_nthreads > 1) &&	/* More than 1 thread is running */
 	    (!taskq_next_ent(tq)) &&	/* There are no pending tasks */
-	    (spl_taskq_thread_dynamic));/* Dynamic taskqs are allowed */
+	    (spl_taskq_thread_dynamic));	/* Dynamic taskqs are allowed */
 }
 
 static int
@@ -828,9 +828,9 @@ taskq_thread(void *args)
 	tq = tqt->tqt_tq;
 	current->flags |= PF_NOFREEZE;
 
-	#if defined(PF_MEMALLOC_NOIO)
+#if defined(PF_MEMALLOC_NOIO)
 	(void) memalloc_noio_save();
-	#endif
+#endif
 
 	sigfillset(&blocked);
 	sigprocmask(SIG_BLOCK, &blocked, NULL);
@@ -873,17 +873,21 @@
 		if ((t = taskq_next_ent(tq)) != NULL) {
 			list_del_init(&t->tqent_list);
 
-			/* In order to support recursively dispatching a
+			/*
+			 * In order to support recursively dispatching a
 			 * preallocated taskq_ent_t, tqent_id must be
-			 * stored prior to executing tqent_func. */
+			 * stored prior to executing tqent_func.
+			 */
 			tqt->tqt_id = t->tqent_id;
 			tqt->tqt_task = t;
 
-			/* We must store a copy of the flags prior to
+			/*
+			 * We must store a copy of the flags prior to
 			 * servicing the task (servicing a prealloc'd task
 			 * returns the ownership of the tqent back to
 			 * the caller of taskq_dispatch). Thus,
-			 * tqent_flags _may_ change within the call. */
+			 * tqent_flags _may_ change within the call.
+			 */
 			tqt->tqt_flags = t->tqent_flags;
 
 			taskq_insert_in_order(tq, tqt);
@@ -903,8 +907,10 @@ taskq_thread(void *args)
 			if (!(tqt->tqt_flags & TQENT_FLAG_PREALLOC))
 				task_done(tq, t);
 
-			/* When the current lowest outstanding taskqid is
-			 * done calculate the new lowest outstanding id */
+			/*
+			 * When the current lowest outstanding taskqid is
+			 * done calculate the new lowest outstanding id
+			 */
 			if (tq->tq_lowest_id == tqt->tqt_id) {
 				tq->tq_lowest_id = taskq_lowest_id(tq);
 				ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id);
@@ -1136,7 +1142,8 @@ spl_taskq_init(void)
 		return (1);
 	}
 
-	/* This is used to annotate tq_lock, so
+	/*
+	 * This is used to annotate tq_lock, so
 	 *   taskq_dispatch -> taskq_thread_spawn -> taskq_dispatch
 	 * does not trigger a lockdep warning re: possible recursive locking
 	 */
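
A closing note on that last hunk: the annotation it describes relies on lockdep treating different spin_lock_nested() subclasses as distinct lock classes, which is also why the header insists TQ_LOCK_DYNAMIC must not evaluate to 0 (spin_lock_nested(lock, 0) is just spin_lock(lock)). A minimal sketch of the pattern, assuming the enum values mirror tq_lock_role_t from taskq.h; the dispatch function is a simplified stand-in, not the real taskq_dispatch:

#include <sys/taskq.h>

static void
example_dispatch(taskq_t *tq)
{
	unsigned long flags;

	/*
	 * tq_lock_class is TQ_LOCK_GENERAL for an ordinary taskq and
	 * TQ_LOCK_DYNAMIC for a queue reached while spawning a dynamic
	 * thread, so lockdep sees two distinct lock classes instead of
	 * a recursive acquisition of the same one.
	 */
	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	/* ... insert the task on the appropriate list ... */
	spin_unlock_irqrestore(&tq->tq_lock, flags);
}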