/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://github.com/behlendorf/spl/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting Layer (SPL) Task Queue Implementation.
\*****************************************************************************/

#include <sys/taskq.h>
#include <sys/kmem.h>
#include <spl-debug.h>

#ifdef SS_DEBUG_SUBSYS
#undef SS_DEBUG_SUBSYS
#endif

#define SS_DEBUG_SUBSYS SS_TASKQ

/* Global system-wide dynamic task queue available for all consumers */
taskq_t *system_taskq;
EXPORT_SYMBOL(system_taskq);

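/*
 * Usage sketch (illustrative, not part of the original source; my_func
 * and my_data are hypothetical names): any consumer may queue work on
 * the shared queue.  The double-underscore functions below are assumed
 * to be reached through the taskq_*() macro wrappers in sys/taskq.h.
 *
 *	static void my_func(void *my_data) { ... }
 *
 *	taskqid_t id = taskq_dispatch(system_taskq, my_func, my_data,
 *	    TQ_SLEEP);
 *	if (id == 0)
 *		... the dispatch failed and nothing was queued ...
 */
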
static int
task_km_flags(uint_t flags)
{
	if (flags & TQ_NOSLEEP)
		return KM_NOSLEEP;

	if (flags & TQ_PUSHPAGE)
		return KM_PUSHPAGE;

	return KM_SLEEP;
}

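/*
 * Illustrative note (a sketch, not original text): the dispatch-time
 * TQ_* flags select the kmem allocation policy used by task_alloc()
 * below.  A hypothetical caller in atomic context would pass TQ_NOSLEEP
 * so the internal allocation fails rather than sleeps:
 *
 *	id = taskq_dispatch(tq, my_atomic_func, arg, TQ_NOSLEEP);
 *	if (id == 0)
 *		... nothing was queued, handle the failure ...
 */
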
/*
 * NOTE: Must be called with tq->tq_lock held, returns a taskq_ent_t
 * which is not attached to the free, work, or pending taskq lists.
 */
static taskq_ent_t *
task_alloc(taskq_t *tq, uint_t flags)
{
	taskq_ent_t *t;
	int count = 0;
	SENTRY;

	ASSERT(tq);
	ASSERT(spin_is_locked(&tq->tq_lock));
retry:
	/* Acquire taskq_ent_t's from free list if available */
	if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
		t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

		ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

		list_del_init(&t->tqent_list);
		SRETURN(t);
	}

	/* Free list is empty and memory allocations are prohibited */
	if (flags & TQ_NOALLOC)
		SRETURN(NULL);

	/* Hit maximum taskq_ent_t pool size */
	if (tq->tq_nalloc >= tq->tq_maxalloc) {
		if (flags & TQ_NOSLEEP)
			SRETURN(NULL);

		/*
		 * Sleep periodically polling the free list for an available
		 * taskq_ent_t. Dispatching with TQ_SLEEP should always succeed
		 * but we cannot block forever waiting for a taskq_ent_t to
		 * show up in the free list, otherwise a deadlock can happen.
		 *
		 * Therefore, we need to allocate a new task even if the number
		 * of allocated tasks is above tq->tq_maxalloc, but we still
		 * end up delaying the task allocation by one second, thereby
		 * throttling the task dispatch rate.
		 */
		spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
		schedule_timeout(HZ / 100);
		spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
		if (count < 100)
			SGOTO(retry, count++);
	}

	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
	t = kmem_alloc(sizeof(taskq_ent_t), task_km_flags(flags));
	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

	if (t) {
		taskq_init_ent(t);
		tq->tq_nalloc++;
	}

	SRETURN(t);
}

/*
 * NOTE: Must be called with tq->tq_lock held, expects the taskq_ent_t
 * to already be removed from the free, work, or pending taskq lists.
 */
static void
task_free(taskq_t *tq, taskq_ent_t *t)
{
	SENTRY;

	ASSERT(tq);
	ASSERT(t);
	ASSERT(spin_is_locked(&tq->tq_lock));
	ASSERT(list_empty(&t->tqent_list));

	kmem_free(t, sizeof(taskq_ent_t));
	tq->tq_nalloc--;

	SEXIT;
}

/*
 * NOTE: Must be called with tq->tq_lock held, either destroys the
 * taskq_ent_t if too many exist or moves it to the free list for later use.
 */
static void
task_done(taskq_t *tq, taskq_ent_t *t)
{
	SENTRY;
	ASSERT(tq);
	ASSERT(t);
	ASSERT(spin_is_locked(&tq->tq_lock));

	list_del_init(&t->tqent_list);

	if (tq->tq_nalloc <= tq->tq_minalloc) {
		t->tqent_id = 0;
		t->tqent_func = NULL;
		t->tqent_arg = NULL;
		t->tqent_flags = 0;

		list_add_tail(&t->tqent_list, &tq->tq_free_list);
	} else {
		task_free(tq, t);
	}

	SEXIT;
}

/*
 * As tasks are submitted to the task queue they are assigned a
 * monotonically increasing taskqid and added to the tail of the pending
 * list.  As worker threads become available the tasks are removed from
 * the head of the pending or priority list, giving preference to the
 * priority list.  The tasks are then removed from their respective
 * list, and the taskq_thread servicing the task is added to the active
 * list, preserving the order using the serviced task's taskqid.
 * Finally, as tasks complete the taskq_thread servicing the task is
 * removed from the active list.  This means that the pending task and
 * active taskq_thread lists are always kept sorted by taskqid. Thus the
 * lowest outstanding incomplete taskqid can be determined simply by
 * checking the min taskqid for each head item on the pending, priority,
 * and active taskq_thread list. This value is stored in
 * tq->tq_lowest_id and only updated to the new lowest id when the
 * previous lowest id completes.  All taskqids lower than
 * tq->tq_lowest_id must have completed.  It is also possible larger
 * taskqid's have completed because they may be processed in parallel by
 * several worker threads.  However, this is not a problem because the
 * behavior of taskq_wait_id() is to block until all previously
 * submitted taskqid's have completed.
 *
 * XXX: Taskqid_t wrapping is not handled.  However, taskqid_t's are
 * 64-bit values so even if a taskq is processing 2^24 (16,777,216)
 * taskqid_ts per second it will still take 2^40 seconds, 34,865 years,
 * before the wrap occurs.  I can live with that for now.
 */
static int
taskq_wait_check(taskq_t *tq, taskqid_t id)
{
	int rc;

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	rc = (id < tq->tq_lowest_id);
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	SRETURN(rc);
}

void
__taskq_wait_id(taskq_t *tq, taskqid_t id)
{
	SENTRY;
	ASSERT(tq);

	wait_event(tq->tq_wait_waitq, taskq_wait_check(tq, id));

	SEXIT;
}
EXPORT_SYMBOL(__taskq_wait_id);

void
__taskq_wait(taskq_t *tq)
{
	taskqid_t id;
	SENTRY;
	ASSERT(tq);

	/* Wait for the largest outstanding taskqid */
	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	id = tq->tq_next_id - 1;
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	__taskq_wait_id(tq, id);

	SEXIT;
}
EXPORT_SYMBOL(__taskq_wait);

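/*
 * Example (sketch, assuming the taskq_wait*() wrappers from
 * sys/taskq.h; my_func and my_data are hypothetical): __taskq_wait_id()
 * blocks until every task up to and including the given id has
 * completed, while __taskq_wait() snapshots the newest id and waits for
 * it, draining everything dispatched so far:
 *
 *	taskqid_t id = taskq_dispatch(tq, my_func, my_data, TQ_SLEEP);
 *	taskq_wait_id(tq, id);	wait for this task and all earlier ids
 *	taskq_wait(tq);		wait for all currently dispatched tasks
 */
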
int
__taskq_member(taskq_t *tq, void *t)
{
	struct list_head *l;
	taskq_thread_t *tqt;
	SENTRY;

	ASSERT(tq);
	ASSERT(t);

	list_for_each(l, &tq->tq_thread_list) {
		tqt = list_entry(l, taskq_thread_t, tqt_thread_list);
		if (tqt->tqt_thread == (struct task_struct *)t)
			SRETURN(1);
	}

	SRETURN(0);
}
EXPORT_SYMBOL(__taskq_member);

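/*
 * Example (sketch, assuming the taskq_member() wrapper): this is
 * typically used to detect that the current thread is itself a worker
 * of tq, e.g. to avoid blocking on the very queue it is servicing:
 *
 *	if (taskq_member(tq, current))
 *		... must not taskq_wait(tq) here, it would deadlock ...
 */
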
taskqid_t
__taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
	taskq_ent_t *t;
	taskqid_t rc = 0;
	SENTRY;

	ASSERT(tq);
	ASSERT(func);

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TQ_ACTIVE))
		SGOTO(out, rc = 0);

	/* Do not queue the task unless there is an idle thread for it */
	ASSERT(tq->tq_nactive <= tq->tq_nthreads);
	if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads))
		SGOTO(out, rc = 0);

	if ((t = task_alloc(tq, flags)) == NULL)
		SGOTO(out, rc = 0);

	spin_lock(&t->tqent_lock);

	/* Queue to the priority list instead of the pending list */
	if (flags & TQ_FRONT)
		list_add_tail(&t->tqent_list, &tq->tq_prio_list);
	else
		list_add_tail(&t->tqent_list, &tq->tq_pend_list);

	t->tqent_id = rc = tq->tq_next_id;
	tq->tq_next_id++;
	t->tqent_func = func;
	t->tqent_arg = arg;

	ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

	spin_unlock(&t->tqent_lock);

	wake_up(&tq->tq_work_waitq);
out:
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
	SRETURN(rc);
}
EXPORT_SYMBOL(__taskq_dispatch);

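/*
 * Example (illustrative, my_urgent_func is a hypothetical name):
 * TQ_FRONT queues to the priority list so the task is serviced ahead of
 * normal pending work, while TQ_NOQUEUE makes the dispatch fail (return
 * 0) instead of queuing when no worker thread is idle:
 *
 *	id = taskq_dispatch(tq, my_urgent_func, arg, TQ_SLEEP | TQ_FRONT);
 */
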
void
__taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
   taskq_ent_t *t)
{
	SENTRY;

	ASSERT(tq);
	ASSERT(func);
	ASSERT(!(tq->tq_flags & TASKQ_DYNAMIC));

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TQ_ACTIVE)) {
		t->tqent_id = 0;
		goto out;
	}

	spin_lock(&t->tqent_lock);

	/*
	 * Mark it as a prealloc'd task.  This is important
	 * to ensure that we don't free it later.
	 */
	t->tqent_flags |= TQENT_FLAG_PREALLOC;

	/* Queue to the priority list instead of the pending list */
	if (flags & TQ_FRONT)
		list_add_tail(&t->tqent_list, &tq->tq_prio_list);
	else
		list_add_tail(&t->tqent_list, &tq->tq_pend_list);

	t->tqent_id = tq->tq_next_id;
	tq->tq_next_id++;
	t->tqent_func = func;
	t->tqent_arg = arg;

	spin_unlock(&t->tqent_lock);

	wake_up(&tq->tq_work_waitq);
out:
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
	SEXIT;
}
EXPORT_SYMBOL(__taskq_dispatch_ent);

int
__taskq_empty_ent(taskq_ent_t *t)
{
	return list_empty(&t->tqent_list);
}
EXPORT_SYMBOL(__taskq_empty_ent);

void
__taskq_init_ent(taskq_ent_t *t)
{
	spin_lock_init(&t->tqent_lock);
	INIT_LIST_HEAD(&t->tqent_list);
	t->tqent_id = 0;
	t->tqent_func = NULL;
	t->tqent_arg = NULL;
	t->tqent_flags = 0;
}
EXPORT_SYMBOL(__taskq_init_ent);

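/*
 * Usage sketch for the preallocated-entry interface (illustrative;
 * my_obj, mo_ent, and my_func are hypothetical, and the taskq_*()
 * wrapper names are assumed from sys/taskq.h): the caller embeds a
 * taskq_ent_t in its own structure, initializes it once, and can then
 * dispatch without this layer allocating anything:
 *
 *	struct my_obj {
 *		taskq_ent_t	mo_ent;
 *		...
 *	};
 *
 *	taskq_init_ent(&obj->mo_ent);
 *	taskq_dispatch_ent(tq, my_func, obj, 0, &obj->mo_ent);
 *
 * Ownership of the entry passes back to the caller once my_func runs,
 * which is why my_func may safely redispatch the same entry.
 */
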
/*
 * Returns the lowest incomplete taskqid_t.  The taskqid_t may
 * be queued on the pending list, on the priority list, or on
 * the work list currently being handled, but it is not 100%
 * complete yet.
 */
static taskqid_t
taskq_lowest_id(taskq_t *tq)
{
	taskqid_t lowest_id = tq->tq_next_id;
	taskq_ent_t *t;
	taskq_thread_t *tqt;
	SENTRY;

	ASSERT(tq);
	ASSERT(spin_is_locked(&tq->tq_lock));

	if (!list_empty(&tq->tq_pend_list)) {
		t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_prio_list)) {
		t = list_entry(tq->tq_prio_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_active_list)) {
		tqt = list_entry(tq->tq_active_list.next, taskq_thread_t,
		    tqt_active_list);
		ASSERT(tqt->tqt_id != 0);
		lowest_id = MIN(lowest_id, tqt->tqt_id);
	}

	SRETURN(lowest_id);
}

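/*
 * Worked example (illustrative, not original text): with a tq_next_id
 * of 10, a pending list headed by id 8, an empty priority list, and the
 * oldest active worker servicing id 6, taskq_lowest_id() above returns
 * MIN(10, 8, 6) = 6; every id below 6 is known to be complete.
 */
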
/*
 * Insert a task into a list keeping the list sorted by increasing taskqid.
 */
static void
taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)
{
	taskq_thread_t *w;
	struct list_head *l;

	SENTRY;
	ASSERT(tq);
	ASSERT(tqt);
	ASSERT(spin_is_locked(&tq->tq_lock));

	list_for_each_prev(l, &tq->tq_active_list) {
		w = list_entry(l, taskq_thread_t, tqt_active_list);
		if (w->tqt_id < tqt->tqt_id) {
			list_add(&tqt->tqt_active_list, l);
			break;
		}
	}
	if (l == &tq->tq_active_list)
		list_add(&tqt->tqt_active_list, &tq->tq_active_list);

	SEXIT;
}

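/*
 * Example (illustrative): if the active list holds workers with ids
 * {3, 5, 9}, inserting a worker servicing id 7 walks the list from the
 * tail and links it after 5, yielding {3, 5, 7, 9}.  The head therefore
 * always carries the lowest active id for taskq_lowest_id() above.
 */
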
										 |  |  | static int | 
					
						
							|  |  |  | taskq_thread(void *args) | 
					
						
							|  |  |  | { | 
					
						
							| 
									
										
										
										
											2012-12-07 00:57:42 +04:00
										 |  |  | 	DECLARE_WAITQUEUE(wait, current); | 
					
						
							|  |  |  | 	sigset_t blocked; | 
					
						
							| 
									
										
										
										
											2011-12-06 05:32:48 +04:00
										 |  |  | 	taskq_thread_t *tqt = args; | 
					
						
							| 
									
										
										
										
											2012-12-07 00:57:42 +04:00
										 |  |  | 	taskq_t *tq; | 
					
						
							|  |  |  | 	taskq_ent_t *t; | 
					
						
							| 
									
										
										
										
											2010-07-01 21:07:51 +04:00
										 |  |  | 	struct list_head *pend_list; | 
					
						
							| 
									
										
										
										
											2010-07-20 22:55:37 +04:00
										 |  |  | 	SENTRY; | 
					
						
							| 
									
										
										
										
											2008-04-26 02:10:47 +04:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2012-12-07 00:57:42 +04:00
										 |  |  | 	ASSERT(tqt); | 
					
						
							| 
									
										
										
										
											2011-12-06 05:32:48 +04:00
										 |  |  | 	tq = tqt->tqt_tq; | 
					
						
							| 
									
										
										
										
											2012-12-07 00:57:42 +04:00
										 |  |  | 	current->flags |= PF_NOFREEZE; | 
					
						
							| 
									
										
										
										
											2008-04-26 02:10:47 +04:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2012-12-07 00:57:42 +04:00
										 |  |  | 	sigfillset(&blocked); | 
					
						
							|  |  |  | 	sigprocmask(SIG_BLOCK, &blocked, NULL); | 
					
						
							|  |  |  | 	flush_signals(current); | 
					
						
							| 
									
										
										
										
											2008-04-26 02:10:47 +04:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2012-12-07 00:57:42 +04:00
										 |  |  | 	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags); | 
					
						
							|  |  |  | 	tq->tq_nthreads++; | 
					
						
							|  |  |  | 	wake_up(&tq->tq_wait_waitq); | 
					
						
							|  |  |  | 	set_current_state(TASK_INTERRUPTIBLE); | 
					
						
							| 
									
										
										
										
											2008-04-26 02:10:47 +04:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2012-12-07 00:57:42 +04:00
										 |  |  | 	while (!kthread_should_stop()) { | 
					
						
							| 
									
										
										
										
											2008-04-26 02:10:47 +04:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2010-07-01 21:07:51 +04:00
										 |  |  | 		if (list_empty(&tq->tq_pend_list) && | 
					
						
							|  |  |  | 		    list_empty(&tq->tq_prio_list)) { | 
					
						
							| 
									
										
										
										
											2012-01-19 22:33:19 +04:00
										 |  |  | 			add_wait_queue_exclusive(&tq->tq_work_waitq, &wait); | 
					
						
							| 
									
										
										
										
											2008-11-03 23:21:08 +03:00
										 |  |  | 			spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags); | 
					
						
							| 
									
										
										
										
											2008-04-26 02:10:47 +04:00
										 |  |  | 			schedule(); | 
					
						
							| 
									
										
										
										
											2008-11-03 23:21:08 +03:00
										 |  |  | 			spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags); | 
					
						
							| 
									
										
										
										
											2012-01-19 22:33:19 +04:00
										 |  |  | 			remove_wait_queue(&tq->tq_work_waitq, &wait); | 
					
						
							| 
									
										
										
										
											2008-04-26 02:10:47 +04:00
										 |  |  | 		} else { | 
					
						
							|  |  |  | 			__set_current_state(TASK_RUNNING); | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2010-07-01 21:07:51 +04:00
										 |  |  | 
 | 
					
						
							|  |  |  | 		if (!list_empty(&tq->tq_prio_list)) | 
					
						
							|  |  |  | 			pend_list = &tq->tq_prio_list; | 
					
						
							|  |  |  | 		else if (!list_empty(&tq->tq_pend_list)) | 
					
						
							|  |  |  | 			pend_list = &tq->tq_pend_list; | 
					
						
							|  |  |  | 		else | 
					
						
							|  |  |  | 			pend_list = NULL; | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 		if (pend_list) { | 
					
						
							| 
									
										
										
										
											2012-12-07 00:57:42 +04:00
										 |  |  | 			t = list_entry(pend_list->next,taskq_ent_t,tqent_list); | 
					
						
							|  |  |  | 			list_del_init(&t->tqent_list); | 
					
						
							| 
									
										
											  
											
												Store copy of tqent_flags prior to servicing task
A preallocated taskq_ent_t's tqent_flags must be checked prior to
servicing the taskq_ent_t. Once a preallocated taskq entry is serviced,
the ownership of the entry is handed back to the caller of
taskq_dispatch, thus the entry's contents can potentially be mangled.
In particular, this is a problem in the case where a preallocated taskq
entry is serviced, and the caller clears it's tqent_flags field. Thus,
when the function returns and task_done is called, it looks as though
the entry is **not** a preallocated task (when in fact it **is** a
preallocated task).
In this situation, task_done will place the preallocated taskq_ent_t
structure onto the taskq_t's free list. This is a **huge** mistake. If
the taskq_ent_t is then freed by the caller of taskq_dispatch, the
taskq_t's free list will hold a pointer to garbage data. Even worse, if
nothing has over written the freed memory before the pointer is
dereferenced, it may still look as though it points to a valid list_head
belonging to a taskq_ent_t structure.
Thus, the task entry's flags are now copied prior to servicing the task.
This copy is then checked to see if it is a preallocated task, and
determine if the entry needs to be passed down to the task_done
function.
Signed-off-by: Prakash Surya <surya1@llnl.gov>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #71
											
										 
											2011-12-17 02:57:31 +04:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2011-12-06 22:04:51 +04:00
										 |  |  | 			/* In order to support recursively dispatching a
 | 
					
						
							|  |  |  | 			 * preallocated taskq_ent_t, tqent_id must be | 
					
						
							|  |  |  | 			 * stored prior to executing tqent_func. */ | 
					
						
							| 
									
										
											  
											
												Swap taskq_ent_t with taskqid_t in taskq_thread_t
The taskq_t's active thread list is sorted based on its
tqt_ent->tqent_id field. The list is kept sorted solely by inserting
new taskq_thread_t's in their correct sorted location; no other
means is used. This means that once inserted, if a taskq_thread_t's
tqt_ent->tqent_id field changes, the list runs the risk of no
longer being sorted.
Prior to the introduction of the taskq_dispatch_prealloc() interface,
this was not a problem as a taskq_ent_t actively being serviced under
the old interface should always have a static tqent_id field. Thus,
once the taskq_thread_t is added to the taskq_t's active thread list,
the taskq_thread_t's tqt_ent->tqent_id field would remain constant.
Now, this is no longer the case. Currently, if using the
taskq_dispatch_prealloc() interface, any given taskq_ent_t actively
being serviced _may_ have its tqent_id value incremented. This happens
when the preallocated taskq_ent_t structure is recursively dispatched.
Thus, a taskq_thread_t could potentially have its tqt_ent->tqent_id
field silently modified from under its feet. If this were to happen
to a taskq_thread_t on a taskq_t's active thread list, this would
compromise the integrity of the order of the list (as the list
_may_ no longer be sorted).
To get around this, the taskq_thread_t's taskq_ent_t pointer was
replaced with its own static copy of the tqent_id. So, as a taskq_ent_t
is pulled off of the taskq_t's pending list, a static copy of its
tqent_id is made and this copy is used to sort the active thread
list. Using a static copy is key in ensuring the integrity of the
order of the active thread list. Even if the underlying taskq_ent_t
is recursively dispatched (as has its tqent_id modified), this
static copy stored inside the taskq_thread_t will remain constant.
Signed-off-by: Prakash Surya <surya1@llnl.gov>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Issue #71
											
										 
			/* In order to support recursively dispatching a
			 * preallocated taskq_ent_t, tqent_id must be
			 * stored prior to executing tqent_func. */
			tqt->tqt_id = t->tqent_id;

			/* We must store a copy of the flags prior to
			 * servicing the task (servicing a prealloc'd task
			 * returns the ownership of the tqent back to
			 * the caller of taskq_dispatch). Thus,
			 * tqent_flags _may_ change within the call. */
			tqt->tqt_flags = t->tqent_flags;

			taskq_insert_in_order(tq, tqt);
			tq->tq_nactive++;
			spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

			/* Perform the requested task */
			t->tqent_func(t->tqent_arg);

			spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
			tq->tq_nactive--;
			list_del_init(&tqt->tqt_active_list);
			/* For prealloc'd tasks, we don't free anything. */
			if ((tq->tq_flags & TASKQ_DYNAMIC) ||
			    !(tqt->tqt_flags & TQENT_FLAG_PREALLOC))
				task_done(tq, t);

			/* When the current lowest outstanding taskqid is
			 * done, calculate the new lowest outstanding id */
			if (tq->tq_lowest_id == tqt->tqt_id) {
				tq->tq_lowest_id = taskq_lowest_id(tq);
				ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id);
			}

			tqt->tqt_id = 0;
			tqt->tqt_flags = 0;
			wake_up_all(&tq->tq_wait_waitq);
		}

		set_current_state(TASK_INTERRUPTIBLE);

	}

	__set_current_state(TASK_RUNNING);
	tq->tq_nthreads--;
	list_del_init(&tqt->tqt_thread_list);
	kmem_free(tqt, sizeof(taskq_thread_t));

	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	SRETURN(0);
}

taskq_t *
__taskq_create(const char *name, int nthreads, pri_t pri,
    int minalloc, int maxalloc, uint_t flags)
{
	taskq_t *tq;
	taskq_thread_t *tqt;
	int rc = 0, i, j = 0;
	SENTRY;

	ASSERT(name != NULL);
	ASSERT(pri <= maxclsyspri);
	ASSERT(minalloc >= 0);
	ASSERT(maxalloc <= INT_MAX);
	ASSERT(!(flags & (TASKQ_CPR_SAFE | TASKQ_DYNAMIC))); /* Unsupported */

	/* Scale the number of threads using nthreads as a percentage */
	if (flags & TASKQ_THREADS_CPU_PCT) {
		ASSERT(nthreads <= 100);
		ASSERT(nthreads >= 0);
		nthreads = MIN(nthreads, 100);
		nthreads = MAX(nthreads, 0);
		nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
	}
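
	/* Illustrative aside (not from the original source): with 8
	 * online CPUs and nthreads == 75, the computation above yields
	 * MAX((8 * 75) / 100, 1) == 6 service threads. */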
					
						
	tq = kmem_alloc(sizeof(*tq), KM_PUSHPAGE);
	if (tq == NULL)
		SRETURN(NULL);

	spin_lock_init(&tq->tq_lock);
	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	INIT_LIST_HEAD(&tq->tq_thread_list);
	INIT_LIST_HEAD(&tq->tq_active_list);
	tq->tq_name      = name;
	tq->tq_nactive   = 0;
	tq->tq_nthreads  = 0;
	tq->tq_pri       = pri;
	tq->tq_minalloc  = minalloc;
	tq->tq_maxalloc  = maxalloc;
	tq->tq_nalloc    = 0;
	tq->tq_flags     = (flags | TQ_ACTIVE);
	tq->tq_next_id   = 1;
	tq->tq_lowest_id = 1;
	INIT_LIST_HEAD(&tq->tq_free_list);
	INIT_LIST_HEAD(&tq->tq_pend_list);
	INIT_LIST_HEAD(&tq->tq_prio_list);
	init_waitqueue_head(&tq->tq_work_waitq);
	init_waitqueue_head(&tq->tq_wait_waitq);

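	/* Note added for clarity: task_alloc() with TQ_NEW returns a
	 * freshly allocated taskq_ent_t, and passing it directly to
	 * task_done() parks it on the free list; repeating this minalloc
	 * times is what prepopulates the entry cache below. */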
	if (flags & TASKQ_PREPOPULATE)
		for (i = 0; i < minalloc; i++)
			task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW));

	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	for (i = 0; i < nthreads; i++) {
		tqt = kmem_alloc(sizeof(*tqt), KM_PUSHPAGE);
		INIT_LIST_HEAD(&tqt->tqt_thread_list);
		INIT_LIST_HEAD(&tqt->tqt_active_list);
		tqt->tqt_tq = tq;
		tqt->tqt_id = 0;

		tqt->tqt_thread = kthread_create(taskq_thread, tqt,
		    "%s/%d", name, i);
		if (tqt->tqt_thread) {
			list_add(&tqt->tqt_thread_list, &tq->tq_thread_list);
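			/* Comment added for clarity: binding is round-robin
			 * across the online CPUs, spreading a queue's
			 * workers evenly over the machine. */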
					
						
			kthread_bind(tqt->tqt_thread, i % num_online_cpus());
			set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(pri));
			wake_up_process(tqt->tqt_thread);
			j++;
		} else {
			kmem_free(tqt, sizeof(taskq_thread_t));
			rc = 1;
		}
	}

	/* Wait for all threads to be started before potential destroy */
	wait_event(tq->tq_wait_waitq, tq->tq_nthreads == j);
					
						
	if (rc) {
		__taskq_destroy(tq);
		tq = NULL;
	}

	SRETURN(tq);
}
EXPORT_SYMBOL(__taskq_create);
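
/*
 * Illustrative usage sketch (added; not part of the original source).
 * A consumer typically creates a queue, dispatches work, waits for it
 * to drain, and destroys the queue:
 *
 *	taskq_t *tq = taskq_create("example", 4, minclsyspri,
 *	    4, 512, TASKQ_PREPOPULATE);
 *	taskqid_t id = taskq_dispatch(tq, my_func, my_arg, TQ_SLEEP);
 *	if (id != 0)
 *		taskq_wait(tq);
 *	taskq_destroy(tq);
 *
 * my_func and my_arg are hypothetical names; taskq_dispatch() returns
 * 0 when the dispatch fails.
 */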
					
						

void
__taskq_destroy(taskq_t *tq)
{
	struct task_struct *thread;
	taskq_thread_t *tqt;
	taskq_ent_t *t;
	SENTRY;

	ASSERT(tq);
	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	tq->tq_flags &= ~TQ_ACTIVE;
	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	/* TQ_ACTIVE cleared prevents new tasks being added to pending */
	__taskq_wait(tq);

	spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

	/*
	 * Signal each thread to exit and block until it does.  Each thread
	 * is responsible for removing itself from the list and freeing its
	 * taskq_thread_t.  This allows idle threads to remove themselves
	 * from the taskq.  They can be recreated as needed.
	 */
	while (!list_empty(&tq->tq_thread_list)) {
		tqt = list_entry(tq->tq_thread_list.next,
				 taskq_thread_t, tqt_thread_list);
		thread = tqt->tqt_thread;
		spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
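
		/* Comment added for clarity: tq_lock is dropped before
		 * blocking in kthread_stop() because the exiting thread
		 * must retake the lock to unlink and free its
		 * taskq_thread_t (see taskq_thread() above). */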
					
						
		kthread_stop(thread);

		spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
	}

	while (!list_empty(&tq->tq_free_list)) {
		t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

		ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

		list_del_init(&t->tqent_list);
		task_free(tq, t);
	}

	ASSERT(tq->tq_nthreads == 0);
	ASSERT(tq->tq_nalloc == 0);
	ASSERT(list_empty(&tq->tq_thread_list));
	ASSERT(list_empty(&tq->tq_active_list));
	ASSERT(list_empty(&tq->tq_free_list));
	ASSERT(list_empty(&tq->tq_pend_list));
	ASSERT(list_empty(&tq->tq_prio_list));

	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

	kmem_free(tq, sizeof(taskq_t));

	SEXIT;
}
EXPORT_SYMBOL(__taskq_destroy);

int
spl_taskq_init(void)
{
	SENTRY;

	/* Solaris creates a dynamic taskq of up to 64 threads, but in a
	 * Linux environment one thread per core is usually about right */
	system_taskq = taskq_create("spl_system_taskq", num_online_cpus(),
				    minclsyspri, 4, 512, TASKQ_PREPOPULATE);
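	/* Comment added for clarity: the 4 and 512 above are minalloc
	 * and maxalloc, bounding the cached taskq_ent_t pool; with
	 * TASKQ_PREPOPULATE the first 4 entries are allocated up front. */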
					
						
	if (system_taskq == NULL)
		SRETURN(1);

	SRETURN(0);
}

void
spl_taskq_fini(void)
{
	SENTRY;
	taskq_destroy(system_taskq);
	SEXIT;
}