Fix cstyle issues in spl-taskq.c and taskq.h

This patch only addresses the issues identified by the style checker.
It contains no functional changes.

Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
This commit is contained in:
Brian Behlendorf 2015-12-11 16:15:50 -08:00
parent 066b89e685
commit 2c4332cf79
2 changed files with 86 additions and 78 deletions

View File

@@ -1,4 +1,4 @@
/*****************************************************************************\ /*
* Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC. * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
* Copyright (C) 2007 The Regents of the University of California. * Copyright (C) 2007 The Regents of the University of California.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -20,7 +20,7 @@
* *
* You should have received a copy of the GNU General Public License along * You should have received a copy of the GNU General Public License along
* with the SPL. If not, see <http://www.gnu.org/licenses/>. * with the SPL. If not, see <http://www.gnu.org/licenses/>.
\*****************************************************************************/ */
#ifndef _SPL_TASKQ_H #ifndef _SPL_TASKQ_H
#define _SPL_TASKQ_H #define _SPL_TASKQ_H
@@ -55,7 +55,8 @@
#define TQ_NEW 0x04000000 #define TQ_NEW 0x04000000
#define TQ_FRONT 0x08000000 #define TQ_FRONT 0x08000000
/* spin_lock(lock) and spin_lock_nested(lock,0) are equivalent, /*
* spin_lock(lock) and spin_lock_nested(lock,0) are equivalent,
* so TQ_LOCK_DYNAMIC must not evaluate to 0 * so TQ_LOCK_DYNAMIC must not evaluate to 0
*/ */
typedef enum tq_lock_role { typedef enum tq_lock_role {
@@ -88,7 +89,7 @@ typedef struct taskq {
struct list_head tq_delay_list; /* delayed task_t's */ struct list_head tq_delay_list; /* delayed task_t's */
wait_queue_head_t tq_work_waitq; /* new work waitq */ wait_queue_head_t tq_work_waitq; /* new work waitq */
wait_queue_head_t tq_wait_waitq; /* wait waitq */ wait_queue_head_t tq_wait_waitq; /* wait waitq */
tq_lock_role_t tq_lock_class; /* class used when taking tq_lock */ tq_lock_role_t tq_lock_class; /* class when taking tq_lock */
} taskq_t; } taskq_t;
typedef struct taskq_ent { typedef struct taskq_ent {

View File

@@ -1,4 +1,4 @@
/*****************************************************************************\ /*
* Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC. * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
* Copyright (C) 2007 The Regents of the University of California. * Copyright (C) 2007 The Regents of the University of California.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -20,9 +20,9 @@
* *
* You should have received a copy of the GNU General Public License along * You should have received a copy of the GNU General Public License along
* with the SPL. If not, see <http://www.gnu.org/licenses/>. * with the SPL. If not, see <http://www.gnu.org/licenses/>.
***************************************************************************** *
* Solaris Porting Layer (SPL) Task Queue Implementation. * Solaris Porting Layer (SPL) Task Queue Implementation.
\*****************************************************************************/ */
#include <sys/taskq.h> #include <sys/taskq.h>
#include <sys/kmem.h> #include <sys/kmem.h>
@@ -58,12 +58,12 @@ static int
task_km_flags(uint_t flags) task_km_flags(uint_t flags)
{ {
if (flags & TQ_NOSLEEP) if (flags & TQ_NOSLEEP)
return KM_NOSLEEP; return (KM_NOSLEEP);
if (flags & TQ_PUSHPAGE) if (flags & TQ_PUSHPAGE)
return KM_PUSHPAGE; return (KM_PUSHPAGE);
return KM_SLEEP; return (KM_SLEEP);
} }
/* /*
@@ -702,7 +702,7 @@ EXPORT_SYMBOL(taskq_dispatch_ent);
int int
taskq_empty_ent(taskq_ent_t *t) taskq_empty_ent(taskq_ent_t *t)
{ {
return list_empty(&t->tqent_list); return (list_empty(&t->tqent_list));
} }
EXPORT_SYMBOL(taskq_empty_ent); EXPORT_SYMBOL(taskq_empty_ent);
@@ -873,17 +873,21 @@ taskq_thread(void *args)
if ((t = taskq_next_ent(tq)) != NULL) { if ((t = taskq_next_ent(tq)) != NULL) {
list_del_init(&t->tqent_list); list_del_init(&t->tqent_list);
/* In order to support recursively dispatching a /*
* In order to support recursively dispatching a
* preallocated taskq_ent_t, tqent_id must be * preallocated taskq_ent_t, tqent_id must be
* stored prior to executing tqent_func. */ * stored prior to executing tqent_func.
*/
tqt->tqt_id = t->tqent_id; tqt->tqt_id = t->tqent_id;
tqt->tqt_task = t; tqt->tqt_task = t;
/* We must store a copy of the flags prior to /*
* We must store a copy of the flags prior to
* servicing the task (servicing a prealloc'd task * servicing the task (servicing a prealloc'd task
* returns the ownership of the tqent back to * returns the ownership of the tqent back to
* the caller of taskq_dispatch). Thus, * the caller of taskq_dispatch). Thus,
* tqent_flags _may_ change within the call. */ * tqent_flags _may_ change within the call.
*/
tqt->tqt_flags = t->tqent_flags; tqt->tqt_flags = t->tqent_flags;
taskq_insert_in_order(tq, tqt); taskq_insert_in_order(tq, tqt);
@@ -903,8 +907,10 @@ taskq_thread(void *args)
if (!(tqt->tqt_flags & TQENT_FLAG_PREALLOC)) if (!(tqt->tqt_flags & TQENT_FLAG_PREALLOC))
task_done(tq, t); task_done(tq, t);
/* When the current lowest outstanding taskqid is /*
* done calculate the new lowest outstanding id */ * When the current lowest outstanding taskqid is
* done calculate the new lowest outstanding id
*/
if (tq->tq_lowest_id == tqt->tqt_id) { if (tq->tq_lowest_id == tqt->tqt_id) {
tq->tq_lowest_id = taskq_lowest_id(tq); tq->tq_lowest_id = taskq_lowest_id(tq);
ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id); ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id);
@@ -1136,7 +1142,8 @@ spl_taskq_init(void)
return (1); return (1);
} }
/* This is used to annotate tq_lock, so /*
* This is used to annotate tq_lock, so
* taskq_dispatch -> taskq_thread_spawn -> taskq_dispatch * taskq_dispatch -> taskq_thread_spawn -> taskq_dispatch
* does not trigger a lockdep warning re: possible recursive locking * does not trigger a lockdep warning re: possible recursive locking
*/ */