Prefix all SPL debug macros with 'S'

To avoid conflicts with symbols defined by dependent packages,
all debugging symbols have been prefixed with an 'S' for SPL.
Any dependent package needing to integrate with the SPL debug
infrastructure should include the spl-debug.h header and use the
'S'-prefixed macros, as sketched below.  Such packages must also
be built with DEBUG defined.
Brian Behlendorf 2010-07-20 11:55:37 -07:00
parent 55abb0929e
commit b17edc10a9
14 changed files with 532 additions and 491 deletions
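
As a rough usage sketch (not part of this commit), a dependent
package built with DEBUG defined might consume the renamed macros as
follows; example_init() and example_setup() are hypothetical names,
while SS_DEBUG_SUBSYS, SS_USER1, and the logging macros come from
spl-debug.h:

#include <spl-debug.h>

#ifdef SS_DEBUG_SUBSYS
#undef SS_DEBUG_SUBSYS
#endif
#define SS_DEBUG_SUBSYS SS_USER1

static int
example_init(void)
{
	int rc;
	SENTRY;				/* log entry into this function */

	rc = example_setup();		/* hypothetical helper */
	if (rc)
		SGOTO(out, rc);		/* log the goto and jump to out */

	SDEBUG(SD_INFO, "example subsystem ready\n");
out:
	if (rc)
		SERROR("setup failed, rc = %d\n", rc);

	SRETURN(rc);			/* log the return value and exit */
}

When debugging is disabled (NDEBUG) the same source still builds: the
logging macros collapse to no-ops and SRETURN()/SGOTO() fall back to a
plain return/goto.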


@ -26,17 +26,17 @@
* Available debug functions. These functions should be used by any
* package which needs to integrate with the SPL log infrastructure.
*
* CDEBUG() - Log debug message with specified mask.
* CDEBUG_LIMIT() - Log just 1 debug message with specified mask.
* CWARN() - Log a warning message.
* CERROR() - Log an error message.
* CEMERG() - Log an emergency error message.
* CONSOLE() - Log a generic message to the console.
* SDEBUG() - Log debug message with specified mask.
* SDEBUG_LIMIT() - Log just 1 debug message with specified mask.
* SWARN() - Log a warning message.
* SERROR() - Log an error message.
* SEMERG() - Log an emergency error message.
* SCONSOLE() - Log a generic message to the console.
*
* ENTRY - Log entry point to a function.
* EXIT - Log exit point from a function.
* RETURN(x) - Log return from a function.
* GOTO(x, y) - Log goto within a function.
* SENTRY - Log entry point to a function.
* SEXIT - Log exit point from a function.
* SRETURN(x) - Log return from a function.
* SGOTO(x, y) - Log goto within a function.
*/
#ifndef _SPL_DEBUG_INTERNAL_H
@ -44,95 +44,104 @@
#include <linux/limits.h>
#define S_UNDEFINED 0x00000001
#define S_ATOMIC 0x00000002
#define S_KOBJ 0x00000004
#define S_VNODE 0x00000008
#define S_TIME 0x00000010
#define S_RWLOCK 0x00000020
#define S_THREAD 0x00000040
#define S_CONDVAR 0x00000080
#define S_MUTEX 0x00000100
#define S_RNG 0x00000200
#define S_TASKQ 0x00000400
#define S_KMEM 0x00000800
#define S_DEBUG 0x00001000
#define S_GENERIC 0x00002000
#define S_PROC 0x00004000
#define S_MODULE 0x00008000
#define S_CRED 0x00010000
#define SS_UNDEFINED 0x00000001
#define SS_ATOMIC 0x00000002
#define SS_KOBJ 0x00000004
#define SS_VNODE 0x00000008
#define SS_TIME 0x00000010
#define SS_RWLOCK 0x00000020
#define SS_THREAD 0x00000040
#define SS_CONDVAR 0x00000080
#define SS_MUTEX 0x00000100
#define SS_RNG 0x00000200
#define SS_TASKQ 0x00000400
#define SS_KMEM 0x00000800
#define SS_DEBUG 0x00001000
#define SS_GENERIC 0x00002000
#define SS_PROC 0x00004000
#define SS_MODULE 0x00008000
#define SS_CRED 0x00010000
#define SS_KSTAT 0x00020000
#define SS_XDR 0x00040000
#define SS_USER1 0x01000000
#define SS_USER2 0x02000000
#define SS_USER3 0x04000000
#define SS_USER4 0x08000000
#define SS_USER5 0x10000000
#define SS_USER6 0x20000000
#define SS_USER7 0x40000000
#define SS_USER8 0x80000000
#define SS_DEBUG_SUBSYS SS_UNDEFINED
#define D_TRACE 0x00000001
#define D_INFO 0x00000002
#define D_WARNING 0x00000004
#define D_ERROR 0x00000008
#define D_EMERG 0x00000010
#define D_CONSOLE 0x00000020
#define D_IOCTL 0x00000040
#define D_DPRINTF 0x00000080
#define D_OTHER 0x00000100
#define D_CANTMASK (D_ERROR | D_EMERG | D_WARNING | D_CONSOLE)
#define DEBUG_SUBSYSTEM S_UNDEFINED
#define SD_TRACE 0x00000001
#define SD_INFO 0x00000002
#define SD_WARNING 0x00000004
#define SD_ERROR 0x00000008
#define SD_EMERG 0x00000010
#define SD_CONSOLE 0x00000020
#define SD_IOCTL 0x00000040
#define SD_DPRINTF 0x00000080
#define SD_OTHER 0x00000100
#define SD_CANTMASK (SD_ERROR | SD_EMERG | SD_WARNING | SD_CONSOLE)
#ifdef NDEBUG /* Debugging Disabled */
#define CDEBUG(mask, fmt, a...) ((void)0)
#define CDEBUG_LIMIT(x, y, fmt, a...) ((void)0)
#define CWARN(fmt, a...) ((void)0)
#define CERROR(fmt, a...) ((void)0)
#define CEMERG(fmt, a...) ((void)0)
#define CONSOLE(mask, fmt, a...) ((void)0)
#define SDEBUG(mask, fmt, a...) ((void)0)
#define SDEBUG_LIMIT(x, y, fmt, a...) ((void)0)
#define SWARN(fmt, a...) ((void)0)
#define SERROR(fmt, a...) ((void)0)
#define SEMERG(fmt, a...) ((void)0)
#define SCONSOLE(mask, fmt, a...) ((void)0)
#define ENTRY ((void)0)
#define EXIT ((void)0)
#define RETURN(x) return (x)
#define GOTO(x, y) { ((void)(y)); goto x; }
#define SENTRY ((void)0)
#define SEXIT ((void)0)
#define SRETURN(x) return (x)
#define SGOTO(x, y) { ((void)(y)); goto x; }
#else /* Debugging Enabled */
#define __CDEBUG(cdls, subsys, mask, format, a...) \
#define __SDEBUG(cdls, subsys, mask, format, a...) \
do { \
if (((mask) & D_CANTMASK) != 0 || \
if (((mask) & SD_CANTMASK) != 0 || \
((spl_debug_mask & (mask)) != 0 && \
(spl_debug_subsys & (subsys)) != 0)) \
spl_debug_msg(cdls, subsys, mask, __FILE__, \
__FUNCTION__, __LINE__, format, ## a); \
} while (0)
#define CDEBUG(mask, format, a...) \
__CDEBUG(NULL, DEBUG_SUBSYSTEM, mask, format, ## a)
#define SDEBUG(mask, format, a...) \
__SDEBUG(NULL, SS_DEBUG_SUBSYS, mask, format, ## a)
#define __CDEBUG_LIMIT(subsys, mask, format, a...) \
#define __SDEBUG_LIMIT(subsys, mask, format, a...) \
do { \
static spl_debug_limit_state_t cdls; \
\
__CDEBUG(&cdls, subsys, mask, format, ## a); \
__SDEBUG(&cdls, subsys, mask, format, ## a); \
} while (0)
#define CDEBUG_LIMIT(mask, format, a...) \
__CDEBUG_LIMIT(DEBUG_SUBSYSTEM, mask, format, ## a)
#define SDEBUG_LIMIT(mask, format, a...) \
__SDEBUG_LIMIT(SS_DEBUG_SUBSYS, mask, format, ## a)
#define CWARN(fmt, a...) CDEBUG_LIMIT(D_WARNING, fmt, ## a)
#define CERROR(fmt, a...) CDEBUG_LIMIT(D_ERROR, fmt, ## a)
#define CEMERG(fmt, a...) CDEBUG_LIMIT(D_EMERG, fmt, ## a)
#define CONSOLE(mask, fmt, a...) CDEBUG(D_CONSOLE | (mask), fmt, ## a)
#define SWARN(fmt, a...) SDEBUG_LIMIT(SD_WARNING, fmt, ## a)
#define SERROR(fmt, a...) SDEBUG_LIMIT(SD_ERROR, fmt, ## a)
#define SEMERG(fmt, a...) SDEBUG_LIMIT(SD_EMERG, fmt, ## a)
#define SCONSOLE(mask, fmt, a...) SDEBUG(SD_CONSOLE | (mask), fmt, ## a)
#define ENTRY CDEBUG(D_TRACE, "Process entered\n")
#define EXIT CDEBUG(D_TRACE, "Process leaving\n")
#define SENTRY SDEBUG(SD_TRACE, "Process entered\n")
#define SEXIT SDEBUG(SD_TRACE, "Process leaving\n")
#define RETURN(rc) \
#define SRETURN(rc) \
do { \
typeof(rc) RETURN__ret = (rc); \
CDEBUG(D_TRACE, "Process leaving (rc=%lu : %ld : %lx)\n", \
SDEBUG(SD_TRACE, "Process leaving (rc=%lu : %ld : %lx)\n", \
(long)RETURN__ret, (long)RETURN__ret, (long)RETURN__ret); \
return RETURN__ret; \
} while (0)
#define GOTO(label, rc) \
#define SGOTO(label, rc) \
do { \
long GOTO__ret = (long)(rc); \
CDEBUG(D_TRACE,"Process leaving via %s (rc=%lu : %ld : %lx)\n", \
SDEBUG(SD_TRACE,"Process leaving via %s (rc=%lu : %ld : %lx)\n",\
#label, (unsigned long)GOTO__ret, (signed long)GOTO__ret, \
(signed long)GOTO__ret); \
goto label; \


@ -27,18 +27,18 @@
#include <sys/condvar.h>
#include <spl-debug.h>
#ifdef DEBUG_SUBSYSTEM
#undef DEBUG_SUBSYSTEM
#ifdef SS_DEBUG_SUBSYS
#undef SS_DEBUG_SUBSYS
#endif
#define DEBUG_SUBSYSTEM S_CONDVAR
#define SS_DEBUG_SUBSYS SS_CONDVAR
void
__cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
{
int flags = KM_SLEEP;
ENTRY;
SENTRY;
ASSERT(cvp);
ASSERT(name);
ASSERT(type == CV_DEFAULT);
@ -62,14 +62,14 @@ __cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
if (cvp->cv_name)
strcpy(cvp->cv_name, name);
EXIT;
SEXIT;
}
EXPORT_SYMBOL(__cv_init);
void
__cv_destroy(kcondvar_t *cvp)
{
ENTRY;
SENTRY;
ASSERT(cvp);
ASSERT(cvp->cv_magic == CV_MAGIC);
spin_lock(&cvp->cv_lock);
@ -81,7 +81,7 @@ __cv_destroy(kcondvar_t *cvp)
spin_unlock(&cvp->cv_lock);
memset(cvp, CV_POISON, sizeof(*cvp));
EXIT;
SEXIT;
}
EXPORT_SYMBOL(__cv_destroy);
@ -89,7 +89,7 @@ static void
cv_wait_common(kcondvar_t *cvp, kmutex_t *mp, int state)
{
DEFINE_WAIT(wait);
ENTRY;
SENTRY;
ASSERT(cvp);
ASSERT(mp);
@ -116,7 +116,7 @@ cv_wait_common(kcondvar_t *cvp, kmutex_t *mp, int state)
atomic_dec(&cvp->cv_waiters);
finish_wait(&cvp->cv_event, &wait);
EXIT;
SEXIT;
}
void
@ -141,7 +141,7 @@ __cv_timedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t expire_time)
{
DEFINE_WAIT(wait);
clock_t time_left;
ENTRY;
SENTRY;
ASSERT(cvp);
ASSERT(mp);
@ -159,7 +159,7 @@ __cv_timedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t expire_time)
/* XXX - Does not handle jiffie wrap properly */
time_left = expire_time - jiffies;
if (time_left <= 0)
RETURN(-1);
SRETURN(-1);
prepare_to_wait_exclusive(&cvp->cv_event, &wait,
TASK_UNINTERRUPTIBLE);
@ -175,14 +175,14 @@ __cv_timedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t expire_time)
atomic_dec(&cvp->cv_waiters);
finish_wait(&cvp->cv_event, &wait);
RETURN(time_left > 0 ? time_left : -1);
SRETURN(time_left > 0 ? time_left : -1);
}
EXPORT_SYMBOL(__cv_timedwait);
void
__cv_signal(kcondvar_t *cvp)
{
ENTRY;
SENTRY;
ASSERT(cvp);
ASSERT(cvp->cv_magic == CV_MAGIC);
@ -193,7 +193,7 @@ __cv_signal(kcondvar_t *cvp)
if (atomic_read(&cvp->cv_waiters) > 0)
wake_up(&cvp->cv_event);
EXIT;
SEXIT;
}
EXPORT_SYMBOL(__cv_signal);
@ -202,13 +202,13 @@ __cv_broadcast(kcondvar_t *cvp)
{
ASSERT(cvp);
ASSERT(cvp->cv_magic == CV_MAGIC);
ENTRY;
SENTRY;
/* Wake_up_all() will wake up all waiters even those which
* have the WQ_FLAG_EXCLUSIVE flag set. */
if (atomic_read(&cvp->cv_waiters) > 0)
wake_up_all(&cvp->cv_event);
EXIT;
SEXIT;
}
EXPORT_SYMBOL(__cv_broadcast);


@ -41,23 +41,23 @@
#include <spl-trace.h>
#include <spl-ctl.h>
#ifdef DEBUG_SUBSYSTEM
#undef DEBUG_SUBSYSTEM
#ifdef SS_DEBUG_SUBSYS
#undef SS_DEBUG_SUBSYS
#endif
#define DEBUG_SUBSYSTEM S_DEBUG
#define SS_DEBUG_SUBSYS SS_DEBUG
unsigned long spl_debug_subsys = ~0;
EXPORT_SYMBOL(spl_debug_subsys);
module_param(spl_debug_subsys, ulong, 0644);
MODULE_PARM_DESC(spl_debug_subsys, "Subsystem debugging level mask.");
unsigned long spl_debug_mask = (D_EMERG | D_ERROR | D_WARNING | D_CONSOLE);
unsigned long spl_debug_mask = SD_CANTMASK;
EXPORT_SYMBOL(spl_debug_mask);
module_param(spl_debug_mask, ulong, 0644);
MODULE_PARM_DESC(spl_debug_mask, "Debugging level mask.");
unsigned long spl_debug_printk = D_CANTMASK;
unsigned long spl_debug_printk = SD_CANTMASK;
EXPORT_SYMBOL(spl_debug_printk);
module_param(spl_debug_printk, ulong, 0644);
MODULE_PARM_DESC(spl_debug_printk, "Console printk level mask.");
@ -120,40 +120,60 @@ spl_debug_subsys2str(int subsys)
switch (subsys) {
default:
return NULL;
case S_UNDEFINED:
case SS_UNDEFINED:
return "undefined";
case S_ATOMIC:
case SS_ATOMIC:
return "atomic";
case S_KOBJ:
case SS_KOBJ:
return "kobj";
case S_VNODE:
case SS_VNODE:
return "vnode";
case S_TIME:
case SS_TIME:
return "time";
case S_RWLOCK:
case SS_RWLOCK:
return "rwlock";
case S_THREAD:
case SS_THREAD:
return "thread";
case S_CONDVAR:
case SS_CONDVAR:
return "condvar";
case S_MUTEX:
case SS_MUTEX:
return "mutex";
case S_RNG:
case SS_RNG:
return "rng";
case S_TASKQ:
case SS_TASKQ:
return "taskq";
case S_KMEM:
case SS_KMEM:
return "kmem";
case S_DEBUG:
case SS_DEBUG:
return "debug";
case S_GENERIC:
case SS_GENERIC:
return "generic";
case S_PROC:
case SS_PROC:
return "proc";
case S_MODULE:
case SS_MODULE:
return "module";
case S_CRED:
case SS_CRED:
return "cred";
case SS_KSTAT:
return "kstat";
case SS_XDR:
return "xdr";
case SS_USER1:
return "user1";
case SS_USER2:
return "user2";
case SS_USER3:
return "user3";
case SS_USER4:
return "user4";
case SS_USER5:
return "user5";
case SS_USER6:
return "user6";
case SS_USER7:
return "user7";
case SS_USER8:
return "user8";
}
}
@ -163,23 +183,23 @@ spl_debug_dbg2str(int debug)
switch (debug) {
default:
return NULL;
case D_TRACE:
case SD_TRACE:
return "trace";
case D_INFO:
case SD_INFO:
return "info";
case D_WARNING:
case SD_WARNING:
return "warning";
case D_ERROR:
case SD_ERROR:
return "error";
case D_EMERG:
case SD_EMERG:
return "emerg";
case D_CONSOLE:
case SD_CONSOLE:
return "console";
case D_IOCTL:
case SD_IOCTL:
return "ioctl";
case D_DPRINTF:
case SD_DPRINTF:
return "dprintf";
case D_OTHER:
case SD_OTHER:
return "other";
}
}
@ -493,21 +513,21 @@ trace_print_to_console(struct spl_debug_header *hdr, int mask, const char *buf,
{
char *prefix = "SPL", *ptype = NULL;
if ((mask & D_EMERG) != 0) {
if ((mask & SD_EMERG) != 0) {
prefix = "SPLError";
ptype = KERN_EMERG;
} else if ((mask & D_ERROR) != 0) {
} else if ((mask & SD_ERROR) != 0) {
prefix = "SPLError";
ptype = KERN_ERR;
} else if ((mask & D_WARNING) != 0) {
} else if ((mask & SD_WARNING) != 0) {
prefix = "SPL";
ptype = KERN_WARNING;
} else if ((mask & (D_CONSOLE | spl_debug_printk)) != 0) {
} else if ((mask & (SD_CONSOLE | spl_debug_printk)) != 0) {
prefix = "SPL";
ptype = KERN_INFO;
}
if ((mask & D_CONSOLE) != 0) {
if ((mask & SD_CONSOLE) != 0) {
printk("%s%s: %.*s", ptype, prefix, len, buf);
} else {
printk("%s%s: %d:%d:(%s:%d:%s()) %.*s", ptype, prefix,
@ -652,10 +672,10 @@ spl_debug_msg(void *arg, int subsys, int mask, const char *file,
int remain;
if (subsys == 0)
subsys = DEBUG_SUBSYSTEM;
subsys = SS_DEBUG_SUBSYS;
if (mask == 0)
mask = D_EMERG;
mask = SD_EMERG;
if (strchr(file, '/'))
file = strrchr(file, '/') + 1;
@ -685,7 +705,7 @@ spl_debug_msg(void *arg, int subsys, int mask, const char *file,
tage = trace_get_tage(tcd, needed + known_size + 1);
if (tage == NULL) {
if (needed + known_size > PAGE_SIZE)
mask |= D_ERROR;
mask |= SD_ERROR;
trace_put_tcd(tcd);
tcd = NULL;
@ -698,7 +718,7 @@ spl_debug_msg(void *arg, int subsys, int mask, const char *file,
max_nob = PAGE_SIZE - tage->used - known_size;
if (max_nob <= 0) {
printk(KERN_EMERG "negative max_nob: %i\n", max_nob);
mask |= D_ERROR;
mask |= SD_ERROR;
trace_put_tcd(tcd);
tcd = NULL;
goto console;
@ -1069,7 +1089,7 @@ EXPORT_SYMBOL(spl_debug_dumpstack);
void spl_debug_bug(char *file, const char *func, const int line, int flags)
{
spl_debug_catastrophe = 1;
spl_debug_msg(NULL, 0, D_EMERG, file, func, line, "SPL PANIC\n");
spl_debug_msg(NULL, 0, SD_EMERG, file, func, line, "SPL PANIC\n");
if (in_interrupt())
panic("SPL PANIC in interrupt.\n");
@ -1104,9 +1124,9 @@ EXPORT_SYMBOL(spl_debug_clear_buffer);
int
spl_debug_mark_buffer(char *text)
{
CDEBUG(D_WARNING, "*************************************\n");
CDEBUG(D_WARNING, "DEBUG MARKER: %s\n", text);
CDEBUG(D_WARNING, "*************************************\n");
SDEBUG(SD_WARNING, "*************************************\n");
SDEBUG(SD_WARNING, "DEBUG MARKER: %s\n", text);
SDEBUG(SD_WARNING, "*************************************\n");
return 0;
}


@ -28,11 +28,11 @@
#include <sys/cmn_err.h>
#include <spl-debug.h>
#ifdef DEBUG_SUBSYSTEM
#undef DEBUG_SUBSYSTEM
#ifdef SS_DEBUG_SUBSYS
#undef SS_DEBUG_SUBSYS
#endif
#define DEBUG_SUBSYSTEM S_GENERIC
#define SS_DEBUG_SUBSYS SS_GENERIC
#ifndef NDEBUG
static char ce_prefix[CE_IGNORE][10] = { "", "NOTICE: ", "WARNING: ", "" };
@ -61,10 +61,10 @@ vcmn_err(int ce, const char *fmt, va_list ap)
vsnprintf(msg, MAXMSGLEN - 1, fmt, ap);
if (fmt[0] == '!')
CDEBUG(D_INFO, "%s%s%s",
SDEBUG(SD_INFO, "%s%s%s",
ce_prefix[ce], msg, ce_suffix[ce]);
else
CERROR("%s%s%s", ce_prefix[ce], msg, ce_suffix[ce]);
SERROR("%s%s%s", ce_prefix[ce], msg, ce_suffix[ce]);
}
} /* vcmn_err() */
EXPORT_SYMBOL(vcmn_err);


@ -41,11 +41,11 @@
#include <linux/proc_compat.h>
#include <spl-debug.h>
#ifdef DEBUG_SUBSYSTEM
#undef DEBUG_SUBSYSTEM
#ifdef SS_DEBUG_SUBSYS
#undef SS_DEBUG_SUBSYS
#endif
#define DEBUG_SUBSYSTEM S_GENERIC
#define SS_DEBUG_SUBSYS SS_GENERIC
char spl_version[16] = "SPL v" SPL_META_VERSION;
EXPORT_SYMBOL(spl_version);
@ -67,10 +67,10 @@ int
highbit(unsigned long i)
{
register int h = 1;
ENTRY;
SENTRY;
if (i == 0)
RETURN(0);
SRETURN(0);
#if BITS_PER_LONG == 64
if (i & 0xffffffff00000000ul) {
h += 32; i >>= 32;
@ -91,7 +91,7 @@ highbit(unsigned long i)
if (i & 0x2) {
h += 1;
}
RETURN(h);
SRETURN(h);
}
EXPORT_SYMBOL(highbit);
@ -447,39 +447,39 @@ __init spl_init(void)
return rc;
if ((rc = spl_kmem_init()))
GOTO(out1, rc);
SGOTO(out1, rc);
if ((rc = spl_mutex_init()))
GOTO(out2, rc);
SGOTO(out2, rc);
if ((rc = spl_rw_init()))
GOTO(out3, rc);
SGOTO(out3, rc);
if ((rc = spl_taskq_init()))
GOTO(out4, rc);
SGOTO(out4, rc);
if ((rc = vn_init()))
GOTO(out5, rc);
SGOTO(out5, rc);
if ((rc = proc_init()))
GOTO(out6, rc);
SGOTO(out6, rc);
if ((rc = kstat_init()))
GOTO(out7, rc);
SGOTO(out7, rc);
if ((rc = set_hostid()))
GOTO(out8, rc = -EADDRNOTAVAIL);
SGOTO(out8, rc = -EADDRNOTAVAIL);
#ifndef HAVE_KALLSYMS_LOOKUP_NAME
if ((rc = set_kallsyms_lookup_name()))
GOTO(out8, rc = -EADDRNOTAVAIL);
SGOTO(out8, rc = -EADDRNOTAVAIL);
#endif /* HAVE_KALLSYMS_LOOKUP_NAME */
if ((rc = spl_kmem_init_kallsyms_lookup()))
GOTO(out8, rc);
SGOTO(out8, rc);
printk("SPL: Loaded Solaris Porting Layer v%s\n", SPL_META_VERSION);
RETURN(rc);
SRETURN(rc);
out8:
kstat_fini();
out7:
@ -505,7 +505,7 @@ out1:
static void
spl_fini(void)
{
ENTRY;
SENTRY;
printk("SPL: Unloaded Solaris Porting Layer v%s\n", SPL_META_VERSION);
kstat_fini();


@ -27,11 +27,11 @@
#include <sys/kmem.h>
#include <spl-debug.h>
#ifdef DEBUG_SUBSYSTEM
# undef DEBUG_SUBSYSTEM
#ifdef SS_DEBUG_SUBSYS
#undef SS_DEBUG_SUBSYS
#endif
#define DEBUG_SUBSYSTEM S_KMEM
#define SS_DEBUG_SUBSYS SS_KMEM
/*
* The minimum amount of memory measured in pages to be free at all
@ -416,7 +416,7 @@ kmem_del_init(spinlock_t *lock, struct hlist_head *table, int bits,
struct hlist_node *node;
struct kmem_debug *p;
unsigned long flags;
ENTRY;
SENTRY;
spin_lock_irqsave(lock, flags);
@ -432,7 +432,7 @@ kmem_del_init(spinlock_t *lock, struct hlist_head *table, int bits,
spin_unlock_irqrestore(lock, flags);
RETURN(NULL);
SRETURN(NULL);
}
void *
@ -442,13 +442,13 @@ kmem_alloc_track(size_t size, int flags, const char *func, int line,
void *ptr = NULL;
kmem_debug_t *dptr;
unsigned long irq_flags;
ENTRY;
SENTRY;
dptr = (kmem_debug_t *) kmalloc_nofail(sizeof(kmem_debug_t),
flags & ~__GFP_ZERO);
if (dptr == NULL) {
CDEBUG_LIMIT(D_CONSOLE | D_WARNING, "debug "
SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "debug "
"kmem_alloc(%ld, 0x%x) at %s:%d failed (%lld/%llu)\n",
sizeof(kmem_debug_t), flags, func, line,
kmem_alloc_used_read(), kmem_alloc_max);
@ -456,7 +456,7 @@ kmem_alloc_track(size_t size, int flags, const char *func, int line,
/* Marked unlikely because we should never be doing this,
* we tolerate up to 2 pages but a single page is best. */
if (unlikely((size > PAGE_SIZE*2) && !(flags & KM_NODEBUG))) {
CDEBUG_LIMIT(D_CONSOLE | D_WARNING, "large "
SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "large "
"kmem_alloc(%llu, 0x%x) at %s:%d (%lld/%llu)\n",
(unsigned long long) size, flags, func, line,
kmem_alloc_used_read(), kmem_alloc_max);
@ -469,7 +469,7 @@ kmem_alloc_track(size_t size, int flags, const char *func, int line,
dptr->kd_func = kstrdup(func, flags & ~__GFP_ZERO);
if (unlikely(dptr->kd_func == NULL)) {
kfree(dptr);
CDEBUG_LIMIT(D_CONSOLE | D_WARNING,
SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
"debug kstrdup() at %s:%d failed (%lld/%llu)\n",
func, line, kmem_alloc_used_read(), kmem_alloc_max);
goto out;
@ -488,7 +488,7 @@ kmem_alloc_track(size_t size, int flags, const char *func, int line,
if (unlikely(ptr == NULL)) {
kfree(dptr->kd_func);
kfree(dptr);
CDEBUG_LIMIT(D_CONSOLE | D_WARNING, "kmem_alloc"
SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "kmem_alloc"
"(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
(unsigned long long) size, flags, func, line,
kmem_alloc_used_read(), kmem_alloc_max);
@ -512,13 +512,13 @@ kmem_alloc_track(size_t size, int flags, const char *func, int line,
list_add_tail(&dptr->kd_list, &kmem_list);
spin_unlock_irqrestore(&kmem_lock, irq_flags);
CDEBUG_LIMIT(D_INFO,
SDEBUG_LIMIT(SD_INFO,
"kmem_alloc(%llu, 0x%x) at %s:%d = %p (%lld/%llu)\n",
(unsigned long long) size, flags, func, line, ptr,
kmem_alloc_used_read(), kmem_alloc_max);
}
out:
RETURN(ptr);
SRETURN(ptr);
}
EXPORT_SYMBOL(kmem_alloc_track);
@ -526,7 +526,7 @@ void
kmem_free_track(void *ptr, size_t size)
{
kmem_debug_t *dptr;
ENTRY;
SENTRY;
ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
(unsigned long long) size);
@ -541,7 +541,7 @@ kmem_free_track(void *ptr, size_t size)
(unsigned long long) size, dptr->kd_func, dptr->kd_line);
kmem_alloc_used_sub(size);
CDEBUG_LIMIT(D_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr,
SDEBUG_LIMIT(SD_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr,
(unsigned long long) size, kmem_alloc_used_read(),
kmem_alloc_max);
@ -553,7 +553,7 @@ kmem_free_track(void *ptr, size_t size)
memset(ptr, 0x5a, size);
kfree(ptr);
EXIT;
SEXIT;
}
EXPORT_SYMBOL(kmem_free_track);
@ -563,14 +563,14 @@ vmem_alloc_track(size_t size, int flags, const char *func, int line)
void *ptr = NULL;
kmem_debug_t *dptr;
unsigned long irq_flags;
ENTRY;
SENTRY;
ASSERT(flags & KM_SLEEP);
dptr = (kmem_debug_t *) kmalloc_nofail(sizeof(kmem_debug_t),
flags & ~__GFP_ZERO);
if (dptr == NULL) {
CDEBUG_LIMIT(D_CONSOLE | D_WARNING, "debug "
SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "debug "
"vmem_alloc(%ld, 0x%x) at %s:%d failed (%lld/%llu)\n",
sizeof(kmem_debug_t), flags, func, line,
vmem_alloc_used_read(), vmem_alloc_max);
@ -581,7 +581,7 @@ vmem_alloc_track(size_t size, int flags, const char *func, int line)
dptr->kd_func = kstrdup(func, flags & ~__GFP_ZERO);
if (unlikely(dptr->kd_func == NULL)) {
kfree(dptr);
CDEBUG_LIMIT(D_CONSOLE | D_WARNING,
SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
"debug kstrdup() at %s:%d failed (%lld/%llu)\n",
func, line, vmem_alloc_used_read(), vmem_alloc_max);
goto out;
@ -593,7 +593,7 @@ vmem_alloc_track(size_t size, int flags, const char *func, int line)
if (unlikely(ptr == NULL)) {
kfree(dptr->kd_func);
kfree(dptr);
CDEBUG_LIMIT(D_CONSOLE | D_WARNING, "vmem_alloc"
SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "vmem_alloc"
"(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
(unsigned long long) size, flags, func, line,
vmem_alloc_used_read(), vmem_alloc_max);
@ -620,13 +620,13 @@ vmem_alloc_track(size_t size, int flags, const char *func, int line)
list_add_tail(&dptr->kd_list, &vmem_list);
spin_unlock_irqrestore(&vmem_lock, irq_flags);
CDEBUG_LIMIT(D_INFO,
SDEBUG_LIMIT(SD_INFO,
"vmem_alloc(%llu, 0x%x) at %s:%d = %p (%lld/%llu)\n",
(unsigned long long) size, flags, func, line,
ptr, vmem_alloc_used_read(), vmem_alloc_max);
}
out:
RETURN(ptr);
SRETURN(ptr);
}
EXPORT_SYMBOL(vmem_alloc_track);
@ -634,7 +634,7 @@ void
vmem_free_track(void *ptr, size_t size)
{
kmem_debug_t *dptr;
ENTRY;
SENTRY;
ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
(unsigned long long) size);
@ -648,7 +648,7 @@ vmem_free_track(void *ptr, size_t size)
(unsigned long long) size, dptr->kd_func, dptr->kd_line);
vmem_alloc_used_sub(size);
CDEBUG_LIMIT(D_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr,
SDEBUG_LIMIT(SD_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr,
(unsigned long long) size, vmem_alloc_used_read(),
vmem_alloc_max);
@ -660,7 +660,7 @@ vmem_free_track(void *ptr, size_t size)
memset(ptr, 0x5a, size);
vfree(ptr);
EXIT;
SEXIT;
}
EXPORT_SYMBOL(vmem_free_track);
@ -671,12 +671,12 @@ kmem_alloc_debug(size_t size, int flags, const char *func, int line,
int node_alloc, int node)
{
void *ptr;
ENTRY;
SENTRY;
/* Marked unlikely because we should never be doing this,
* we tolerate up to 2 pages but a single page is best. */
if (unlikely((size > PAGE_SIZE * 2) && !(flags & KM_NODEBUG))) {
CDEBUG(D_CONSOLE | D_WARNING,
SDEBUG(SD_CONSOLE | SD_WARNING,
"Large kmem_alloc(%llu, 0x%x) at %s:%d (%lld/%llu)\n",
(unsigned long long) size, flags, func, line,
kmem_alloc_used_read(), kmem_alloc_max);
@ -694,7 +694,7 @@ kmem_alloc_debug(size_t size, int flags, const char *func, int line,
}
if (ptr == NULL) {
CDEBUG_LIMIT(D_CONSOLE | D_WARNING,
SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
"kmem_alloc(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
(unsigned long long) size, flags, func, line,
kmem_alloc_used_read(), kmem_alloc_max);
@ -703,32 +703,32 @@ kmem_alloc_debug(size_t size, int flags, const char *func, int line,
if (unlikely(kmem_alloc_used_read() > kmem_alloc_max))
kmem_alloc_max = kmem_alloc_used_read();
CDEBUG_LIMIT(D_INFO,
SDEBUG_LIMIT(SD_INFO,
"kmem_alloc(%llu, 0x%x) at %s:%d = %p (%lld/%llu)\n",
(unsigned long long) size, flags, func, line, ptr,
kmem_alloc_used_read(), kmem_alloc_max);
}
RETURN(ptr);
SRETURN(ptr);
}
EXPORT_SYMBOL(kmem_alloc_debug);
void
kmem_free_debug(void *ptr, size_t size)
{
ENTRY;
SENTRY;
ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
(unsigned long long) size);
kmem_alloc_used_sub(size);
CDEBUG_LIMIT(D_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr,
SDEBUG_LIMIT(SD_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr,
(unsigned long long) size, kmem_alloc_used_read(),
kmem_alloc_max);
memset(ptr, 0x5a, size);
kfree(ptr);
EXIT;
SEXIT;
}
EXPORT_SYMBOL(kmem_free_debug);
@ -736,14 +736,14 @@ void *
vmem_alloc_debug(size_t size, int flags, const char *func, int line)
{
void *ptr;
ENTRY;
SENTRY;
ASSERT(flags & KM_SLEEP);
ptr = __vmalloc(size, (flags | __GFP_HIGHMEM) & ~__GFP_ZERO,
PAGE_KERNEL);
if (ptr == NULL) {
CDEBUG_LIMIT(D_CONSOLE | D_WARNING,
SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
"vmem_alloc(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
(unsigned long long) size, flags, func, line,
vmem_alloc_used_read(), vmem_alloc_max);
@ -755,32 +755,32 @@ vmem_alloc_debug(size_t size, int flags, const char *func, int line)
if (unlikely(vmem_alloc_used_read() > vmem_alloc_max))
vmem_alloc_max = vmem_alloc_used_read();
CDEBUG_LIMIT(D_INFO, "vmem_alloc(%llu, 0x%x) = %p "
SDEBUG_LIMIT(SD_INFO, "vmem_alloc(%llu, 0x%x) = %p "
"(%lld/%llu)\n", (unsigned long long) size, flags, ptr,
vmem_alloc_used_read(), vmem_alloc_max);
}
RETURN(ptr);
SRETURN(ptr);
}
EXPORT_SYMBOL(vmem_alloc_debug);
void
vmem_free_debug(void *ptr, size_t size)
{
ENTRY;
SENTRY;
ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
(unsigned long long) size);
vmem_alloc_used_sub(size);
CDEBUG_LIMIT(D_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr,
SDEBUG_LIMIT(SD_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr,
(unsigned long long) size, vmem_alloc_used_read(),
vmem_alloc_max);
memset(ptr, 0x5a, size);
vfree(ptr);
EXIT;
SEXIT;
}
EXPORT_SYMBOL(vmem_free_debug);
@ -901,7 +901,7 @@ spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
base = kv_alloc(skc, skc->skc_slab_size, flags);
if (base == NULL)
RETURN(NULL);
SRETURN(NULL);
sks = (spl_kmem_slab_t *)base;
sks->sks_magic = SKS_MAGIC;
@ -920,7 +920,7 @@ spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
if (skc->skc_flags & KMC_OFFSLAB) {
obj = kv_alloc(skc, offslab_size, flags);
if (!obj)
GOTO(out, rc = -ENOMEM);
SGOTO(out, rc = -ENOMEM);
} else {
obj = base + spl_sks_size(skc) + (i * obj_size);
}
@ -948,7 +948,7 @@ out:
sks = NULL;
}
RETURN(sks);
SRETURN(sks);
}
/*
@ -961,7 +961,7 @@ spl_slab_free(spl_kmem_slab_t *sks,
struct list_head *sks_list, struct list_head *sko_list)
{
spl_kmem_cache_t *skc;
ENTRY;
SENTRY;
ASSERT(sks->sks_magic == SKS_MAGIC);
ASSERT(sks->sks_ref == 0);
@ -982,7 +982,7 @@ spl_slab_free(spl_kmem_slab_t *sks,
list_add(&sks->sks_list, sks_list);
list_splice_init(&sks->sks_free_list, sko_list);
EXIT;
SEXIT;
}
/*
@ -1002,7 +1002,7 @@ spl_slab_reclaim(spl_kmem_cache_t *skc, int count, int flag)
LIST_HEAD(sko_list);
uint32_t size = 0;
int i = 0;
ENTRY;
SENTRY;
/*
* Move empty slabs and objects which have not been touched in
@ -1057,7 +1057,7 @@ spl_slab_reclaim(spl_kmem_cache_t *skc, int count, int flag)
cond_resched();
}
EXIT;
SEXIT;
}
/*
@ -1136,7 +1136,7 @@ spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
for (*size = PAGE_SIZE; *size <= max_size; *size *= 2) {
*objs = (*size - sks_size) / obj_size;
if (*objs >= SPL_KMEM_CACHE_OBJ_PER_SLAB)
RETURN(0);
SRETURN(0);
}
/*
@ -1147,10 +1147,10 @@ spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
*size = max_size;
*objs = (*size - sks_size) / obj_size;
if (*objs >= SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN)
RETURN(0);
SRETURN(0);
}
RETURN(-ENOSPC);
SRETURN(-ENOSPC);
}
/*
@ -1163,7 +1163,7 @@ spl_magazine_size(spl_kmem_cache_t *skc)
{
uint32_t obj_size = spl_obj_size(skc);
int size;
ENTRY;
SENTRY;
/* Per-magazine sizes below assume a 4Kib page size */
if (obj_size > (PAGE_SIZE * 256))
@ -1177,7 +1177,7 @@ spl_magazine_size(spl_kmem_cache_t *skc)
else
size = 256;
RETURN(size);
SRETURN(size);
}
/*
@ -1189,7 +1189,7 @@ spl_magazine_alloc(spl_kmem_cache_t *skc, int node)
spl_kmem_magazine_t *skm;
int size = sizeof(spl_kmem_magazine_t) +
sizeof(void *) * skc->skc_mag_size;
ENTRY;
SENTRY;
skm = kmem_alloc_node(size, KM_SLEEP, node);
if (skm) {
@ -1202,7 +1202,7 @@ spl_magazine_alloc(spl_kmem_cache_t *skc, int node)
skm->skm_age = jiffies;
}
RETURN(skm);
SRETURN(skm);
}
/*
@ -1214,12 +1214,12 @@ spl_magazine_free(spl_kmem_magazine_t *skm)
int size = sizeof(spl_kmem_magazine_t) +
sizeof(void *) * skm->skm_size;
ENTRY;
SENTRY;
ASSERT(skm->skm_magic == SKM_MAGIC);
ASSERT(skm->skm_avail == 0);
kmem_free(skm, size);
EXIT;
SEXIT;
}
/*
@ -1229,7 +1229,7 @@ static int
spl_magazine_create(spl_kmem_cache_t *skc)
{
int i;
ENTRY;
SENTRY;
skc->skc_mag_size = spl_magazine_size(skc);
skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2;
@ -1240,7 +1240,7 @@ spl_magazine_create(spl_kmem_cache_t *skc)
for (i--; i >= 0; i--)
spl_magazine_free(skc->skc_mag[i]);
RETURN(-ENOMEM);
SRETURN(-ENOMEM);
}
}
@ -1249,7 +1249,7 @@ spl_magazine_create(spl_kmem_cache_t *skc)
schedule_delayed_work_on(i, &skc->skc_mag[i]->skm_work,
skc->skc_delay / 3 * HZ);
RETURN(0);
SRETURN(0);
}
/*
@ -1260,7 +1260,7 @@ spl_magazine_destroy(spl_kmem_cache_t *skc)
{
spl_kmem_magazine_t *skm;
int i;
ENTRY;
SENTRY;
for_each_online_cpu(i) {
skm = skc->skc_mag[i];
@ -1268,7 +1268,7 @@ spl_magazine_destroy(spl_kmem_cache_t *skc)
spl_magazine_free(skm);
}
EXIT;
SEXIT;
}
/*
@ -1300,7 +1300,7 @@ spl_kmem_cache_create(char *name, size_t size, size_t align,
{
spl_kmem_cache_t *skc;
int rc, kmem_flags = KM_SLEEP;
ENTRY;
SENTRY;
ASSERTF(!(flags & KMC_NOMAGAZINE), "Bad KMC_NOMAGAZINE (%x)\n", flags);
ASSERTF(!(flags & KMC_NOHASH), "Bad KMC_NOHASH (%x)\n", flags);
@ -1321,14 +1321,14 @@ spl_kmem_cache_create(char *name, size_t size, size_t align,
skc = (spl_kmem_cache_t *)kmem_zalloc(sizeof(*skc),
kmem_flags | KM_NODEBUG);
if (skc == NULL)
RETURN(NULL);
SRETURN(NULL);
skc->skc_magic = SKC_MAGIC;
skc->skc_name_size = strlen(name) + 1;
skc->skc_name = (char *)kmem_alloc(skc->skc_name_size, kmem_flags);
if (skc->skc_name == NULL) {
kmem_free(skc, sizeof(*skc));
RETURN(NULL);
SRETURN(NULL);
}
strncpy(skc->skc_name, name, skc->skc_name_size);
@ -1375,11 +1375,11 @@ spl_kmem_cache_create(char *name, size_t size, size_t align,
rc = spl_slab_size(skc, &skc->skc_slab_objs, &skc->skc_slab_size);
if (rc)
GOTO(out, rc);
SGOTO(out, rc);
rc = spl_magazine_create(skc);
if (rc)
GOTO(out, rc);
SGOTO(out, rc);
spl_init_delayed_work(&skc->skc_work, spl_cache_age, skc);
schedule_delayed_work(&skc->skc_work, skc->skc_delay / 3 * HZ);
@ -1388,11 +1388,11 @@ spl_kmem_cache_create(char *name, size_t size, size_t align,
list_add_tail(&skc->skc_list, &spl_kmem_cache_list);
up_write(&spl_kmem_cache_sem);
RETURN(skc);
SRETURN(skc);
out:
kmem_free(skc->skc_name, skc->skc_name_size);
kmem_free(skc, sizeof(*skc));
RETURN(NULL);
SRETURN(NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_create);
@ -1404,7 +1404,7 @@ spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
{
DECLARE_WAIT_QUEUE_HEAD(wq);
int i;
ENTRY;
SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
@ -1442,7 +1442,7 @@ spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
kmem_free(skc, sizeof(*skc));
EXIT;
SEXIT;
}
EXPORT_SYMBOL(spl_kmem_cache_destroy);
@ -1495,7 +1495,7 @@ static spl_kmem_slab_t *
spl_cache_grow(spl_kmem_cache_t *skc, int flags)
{
spl_kmem_slab_t *sks;
ENTRY;
SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
local_irq_enable();
@ -1508,13 +1508,13 @@ spl_cache_grow(spl_kmem_cache_t *skc, int flags)
*/
if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
schedule();
GOTO(out, sks= NULL);
SGOTO(out, sks= NULL);
}
/* Allocate a new slab for the cache */
sks = spl_slab_alloc(skc, flags | __GFP_NORETRY | KM_NODEBUG);
if (sks == NULL)
GOTO(out, sks = NULL);
SGOTO(out, sks = NULL);
/* Link the new empty slab in to the end of skc_partial_list. */
spin_lock(&skc->skc_lock);
@ -1525,7 +1525,7 @@ spl_cache_grow(spl_kmem_cache_t *skc, int flags)
out:
local_irq_disable();
RETURN(sks);
SRETURN(sks);
}
/*
@ -1539,7 +1539,7 @@ spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
{
spl_kmem_slab_t *sks;
int rc = 0, refill;
ENTRY;
SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(skm->skm_magic == SKM_MAGIC);
@ -1554,11 +1554,11 @@ spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
sks = spl_cache_grow(skc, flags);
if (!sks)
GOTO(out, rc);
SGOTO(out, rc);
/* Rescheduled to different CPU skm is not local */
if (skm != skc->skc_mag[smp_processor_id()])
GOTO(out, rc);
SGOTO(out, rc);
/* Potentially rescheduled to the same CPU but
* allocations may have occurred from this CPU while
@ -1594,7 +1594,7 @@ spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
spin_unlock(&skc->skc_lock);
out:
/* Returns the number of entries added to cache */
RETURN(rc);
SRETURN(rc);
}
/*
@ -1605,7 +1605,7 @@ spl_cache_shrink(spl_kmem_cache_t *skc, void *obj)
{
spl_kmem_slab_t *sks = NULL;
spl_kmem_obj_t *sko = NULL;
ENTRY;
SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(spin_is_locked(&skc->skc_lock));
@ -1637,7 +1637,7 @@ spl_cache_shrink(spl_kmem_cache_t *skc, void *obj)
skc->skc_slab_alloc--;
}
EXIT;
SEXIT;
}
/*
@ -1651,7 +1651,7 @@ static int
spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
{
int i, count = MIN(flush, skm->skm_avail);
ENTRY;
SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(skm->skm_magic == SKM_MAGIC);
@ -1673,7 +1673,7 @@ spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
spin_unlock(&skc->skc_lock);
RETURN(count);
SRETURN(count);
}
/*
@ -1686,7 +1686,7 @@ spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
spl_kmem_magazine_t *skm;
unsigned long irq_flags;
void *obj = NULL;
ENTRY;
SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
@ -1712,7 +1712,7 @@ restart:
/* Per-CPU cache empty, directly allocate from
* the slab and refill the per-CPU cache. */
(void)spl_cache_refill(skc, skm, flags);
GOTO(restart, obj = NULL);
SGOTO(restart, obj = NULL);
}
local_irq_restore(irq_flags);
@ -1723,7 +1723,7 @@ restart:
prefetchw(obj);
atomic_dec(&skc->skc_ref);
RETURN(obj);
SRETURN(obj);
}
EXPORT_SYMBOL(spl_kmem_cache_alloc);
@ -1738,7 +1738,7 @@ spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
{
spl_kmem_magazine_t *skm;
unsigned long flags;
ENTRY;
SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
@ -1762,7 +1762,7 @@ spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
local_irq_restore(flags);
atomic_dec(&skc->skc_ref);
EXIT;
SEXIT;
}
EXPORT_SYMBOL(spl_kmem_cache_free);
@ -1814,14 +1814,14 @@ spl_kmem_cache_generic_shrinker(int nr_to_scan, unsigned int gfp_mask)
void
spl_kmem_cache_reap_now(spl_kmem_cache_t *skc)
{
ENTRY;
SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
/* Prevent concurrent cache reaping when contended */
if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
EXIT;
SEXIT;
return;
}
@ -1834,7 +1834,7 @@ spl_kmem_cache_reap_now(spl_kmem_cache_t *skc)
clear_bit(KMC_BIT_REAPING, &skc->skc_flags);
atomic_dec(&skc->skc_ref);
EXIT;
SEXIT;
}
EXPORT_SYMBOL(spl_kmem_cache_reap_now);
@ -1894,7 +1894,7 @@ static int
spl_kmem_init_tracking(struct list_head *list, spinlock_t *lock, int size)
{
int i;
ENTRY;
SENTRY;
spin_lock_init(lock);
INIT_LIST_HEAD(list);
@ -1902,7 +1902,7 @@ spl_kmem_init_tracking(struct list_head *list, spinlock_t *lock, int size)
for (i = 0; i < size; i++)
INIT_HLIST_HEAD(&kmem_table[i]);
RETURN(0);
SRETURN(0);
}
static void
@ -1911,7 +1911,7 @@ spl_kmem_fini_tracking(struct list_head *list, spinlock_t *lock)
unsigned long flags;
kmem_debug_t *kd;
char str[17];
ENTRY;
SENTRY;
spin_lock_irqsave(lock, flags);
if (!list_empty(list))
@ -1924,7 +1924,7 @@ spl_kmem_fini_tracking(struct list_head *list, spinlock_t *lock)
kd->kd_func, kd->kd_line);
spin_unlock_irqrestore(lock, flags);
EXIT;
SEXIT;
}
#else /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */
#define spl_kmem_init_tracking(list, lock, size)
@ -2031,7 +2031,7 @@ int
spl_kmem_init(void)
{
int rc = 0;
ENTRY;
SENTRY;
init_rwsem(&spl_kmem_cache_sem);
INIT_LIST_HEAD(&spl_kmem_cache_list);
@ -2040,7 +2040,7 @@ spl_kmem_init(void)
spl_kmem_cache_shrinker = set_shrinker(KMC_DEFAULT_SEEKS,
spl_kmem_cache_generic_shrinker);
if (spl_kmem_cache_shrinker == NULL)
RETURN(rc = -ENOMEM);
SRETURN(rc = -ENOMEM);
#else
register_shrinker(&spl_kmem_cache_shrinker);
#endif
@ -2052,7 +2052,7 @@ spl_kmem_init(void)
spl_kmem_init_tracking(&kmem_list, &kmem_lock, KMEM_TABLE_SIZE);
spl_kmem_init_tracking(&vmem_list, &vmem_lock, VMEM_TABLE_SIZE);
#endif
RETURN(rc);
SRETURN(rc);
}
void
@ -2064,20 +2064,20 @@ spl_kmem_fini(void)
* at that address to aid in debugging. Performance is not
* a serious concern here since it is module unload time. */
if (kmem_alloc_used_read() != 0)
CDEBUG_LIMIT(D_CONSOLE | D_WARNING,
SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
"kmem leaked %ld/%ld bytes\n",
kmem_alloc_used_read(), kmem_alloc_max);
if (vmem_alloc_used_read() != 0)
CDEBUG_LIMIT(D_CONSOLE | D_WARNING,
SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
"vmem leaked %ld/%ld bytes\n",
vmem_alloc_used_read(), vmem_alloc_max);
spl_kmem_fini_tracking(&kmem_list, &kmem_lock);
spl_kmem_fini_tracking(&vmem_list, &vmem_lock);
#endif /* DEBUG_KMEM */
ENTRY;
SENTRY;
#ifdef HAVE_SET_SHRINKER
remove_shrinker(spl_kmem_cache_shrinker);
@ -2085,5 +2085,5 @@ spl_kmem_fini(void)
unregister_shrinker(&spl_kmem_cache_shrinker);
#endif
EXIT;
SEXIT;
}


@ -27,11 +27,11 @@
#include <sys/kobj.h>
#include <spl-debug.h>
#ifdef DEBUG_SUBSYSTEM
#undef DEBUG_SUBSYSTEM
#ifdef SS_DEBUG_SUBSYS
#undef SS_DEBUG_SUBSYS
#endif
#define DEBUG_SUBSYSTEM S_KOBJ
#define SS_DEBUG_SUBSYS SS_KOBJ
struct _buf *
kobj_open_file(const char *name)
@ -39,39 +39,39 @@ kobj_open_file(const char *name)
struct _buf *file;
vnode_t *vp;
int rc;
ENTRY;
SENTRY;
file = kmalloc(sizeof(_buf_t), GFP_KERNEL);
if (file == NULL)
RETURN((_buf_t *)-1UL);
SRETURN((_buf_t *)-1UL);
if ((rc = vn_open(name, UIO_SYSSPACE, FREAD, 0644, &vp, 0, 0))) {
kfree(file);
RETURN((_buf_t *)-1UL);
SRETURN((_buf_t *)-1UL);
}
file->vp = vp;
RETURN(file);
SRETURN(file);
} /* kobj_open_file() */
EXPORT_SYMBOL(kobj_open_file);
void
kobj_close_file(struct _buf *file)
{
ENTRY;
SENTRY;
VOP_CLOSE(file->vp, 0, 0, 0, 0, 0);
VN_RELE(file->vp);
kfree(file);
EXIT;
SEXIT;
} /* kobj_close_file() */
EXPORT_SYMBOL(kobj_close_file);
int
kobj_read_file(struct _buf *file, char *buf, ssize_t size, offset_t off)
{
ENTRY;
RETURN(vn_rdwr(UIO_READ, file->vp, buf, size, off,
SENTRY;
SRETURN(vn_rdwr(UIO_READ, file->vp, buf, size, off,
UIO_SYSSPACE, 0, RLIM64_INFINITY, 0, NULL));
} /* kobj_read_file() */
EXPORT_SYMBOL(kobj_read_file);
@ -81,14 +81,14 @@ kobj_get_filesize(struct _buf *file, uint64_t *size)
{
vattr_t vap;
int rc;
ENTRY;
SENTRY;
rc = VOP_GETATTR(file->vp, &vap, 0, 0, NULL);
if (rc)
RETURN(rc);
SRETURN(rc);
*size = vap.va_size;
RETURN(rc);
SRETURN(rc);
} /* kobj_get_filesize() */
EXPORT_SYMBOL(kobj_get_filesize);


@ -28,6 +28,12 @@
#include <sys/kstat.h>
#include <spl-debug.h>
#ifdef SS_DEBUG_SUBSYS
#undef SS_DEBUG_SUBSYS
#endif
#define SS_DEBUG_SUBSYS SS_KSTAT
static spinlock_t kstat_lock;
static struct list_head kstat_list;
static kid_t kstat_id;
@ -221,7 +227,7 @@ static void *
kstat_seq_data_addr(kstat_t *ksp, loff_t n)
{
void *rc = NULL;
ENTRY;
SENTRY;
switch (ksp->ks_type) {
case KSTAT_TYPE_RAW:
@ -243,7 +249,7 @@ kstat_seq_data_addr(kstat_t *ksp, loff_t n)
PANIC("Undefined kstat type %d\n", ksp->ks_type);
}
RETURN(rc);
SRETURN(rc);
}
static void *
@ -252,7 +258,7 @@ kstat_seq_start(struct seq_file *f, loff_t *pos)
loff_t n = *pos;
kstat_t *ksp = (kstat_t *)f->private;
ASSERT(ksp->ks_magic == KS_MAGIC);
ENTRY;
SENTRY;
spin_lock(&ksp->ks_lock);
ksp->ks_snaptime = gethrtime();
@ -261,9 +267,9 @@ kstat_seq_start(struct seq_file *f, loff_t *pos)
kstat_seq_show_headers(f);
if (n >= ksp->ks_ndata)
RETURN(NULL);
SRETURN(NULL);
RETURN(kstat_seq_data_addr(ksp, n));
SRETURN(kstat_seq_data_addr(ksp, n));
}
static void *
@ -271,13 +277,13 @@ kstat_seq_next(struct seq_file *f, void *p, loff_t *pos)
{
kstat_t *ksp = (kstat_t *)f->private;
ASSERT(ksp->ks_magic == KS_MAGIC);
ENTRY;
SENTRY;
++*pos;
if (*pos >= ksp->ks_ndata)
RETURN(NULL);
SRETURN(NULL);
RETURN(kstat_seq_data_addr(ksp, *pos));
SRETURN(kstat_seq_data_addr(ksp, *pos));
}
static void
@ -401,7 +407,7 @@ __kstat_install(kstat_t *ksp)
struct proc_dir_entry *de_module, *de_name;
kstat_t *tmp;
int rc = 0;
ENTRY;
SENTRY;
spin_lock(&kstat_lock);
@ -409,7 +415,7 @@ __kstat_install(kstat_t *ksp)
list_for_each_entry(tmp, &kstat_list, ks_list) {
if (tmp == ksp) {
spin_unlock(&kstat_lock);
GOTO(out, rc = -EEXIST);
SGOTO(out, rc = -EEXIST);
}
}
@ -420,12 +426,12 @@ __kstat_install(kstat_t *ksp)
if (de_module == NULL) {
de_module = proc_mkdir(ksp->ks_module, proc_spl_kstat);
if (de_module == NULL)
GOTO(out, rc = -EUNATCH);
SGOTO(out, rc = -EUNATCH);
}
de_name = create_proc_entry(ksp->ks_name, 0444, de_module);
if (de_name == NULL)
GOTO(out, rc = -EUNATCH);
SGOTO(out, rc = -EUNATCH);
spin_lock(&ksp->ks_lock);
ksp->ks_proc = de_name;
@ -439,7 +445,7 @@ out:
spin_unlock(&kstat_lock);
}
EXIT;
SEXIT;
}
EXPORT_SYMBOL(__kstat_install);
@ -473,18 +479,18 @@ EXPORT_SYMBOL(__kstat_delete);
int
kstat_init(void)
{
ENTRY;
SENTRY;
spin_lock_init(&kstat_lock);
INIT_LIST_HEAD(&kstat_list);
kstat_id = 0;
RETURN(0);
SRETURN(0);
}
void
kstat_fini(void)
{
ENTRY;
SENTRY;
ASSERT(list_empty(&kstat_list));
EXIT;
SEXIT;
}


@ -27,11 +27,11 @@
#include <sys/sunddi.h>
#include <spl-debug.h>
#ifdef DEBUG_SUBSYSTEM
#undef DEBUG_SUBSYSTEM
#ifdef SS_DEBUG_SUBSYS
#undef SS_DEBUG_SUBSYS
#endif
#define DEBUG_SUBSYSTEM S_MODULE
#define SS_DEBUG_SUBSYS SS_MODULE
static spinlock_t dev_info_lock = SPIN_LOCK_UNLOCKED;
static LIST_HEAD(dev_info_list);
@ -98,7 +98,7 @@ __ddi_create_minor_node(dev_info_t *di, char *name, int spec_type,
struct cb_ops *cb_ops;
struct file_operations *fops;
int rc;
ENTRY;
SENTRY;
ASSERT(spec_type == S_IFCHR);
ASSERT(minor_num < di->di_minors);
@ -106,12 +106,12 @@ __ddi_create_minor_node(dev_info_t *di, char *name, int spec_type,
fops = kzalloc(sizeof(struct file_operations), GFP_KERNEL);
if (fops == NULL)
RETURN(DDI_FAILURE);
SRETURN(DDI_FAILURE);
cdev = cdev_alloc();
if (cdev == NULL) {
kfree(fops);
RETURN(DDI_FAILURE);
SRETURN(DDI_FAILURE);
}
cdev->ops = fops;
@ -169,11 +169,11 @@ __ddi_create_minor_node(dev_info_t *di, char *name, int spec_type,
rc = cdev_add(cdev, di->di_dev, 1);
if (rc) {
CERROR("Error adding cdev, %d\n", rc);
SERROR("Error adding cdev, %d\n", rc);
kfree(fops);
cdev_del(cdev);
mutex_exit(&di->di_lock);
RETURN(DDI_FAILURE);
SRETURN(DDI_FAILURE);
}
spin_lock(&dev_info_lock);
@ -182,7 +182,7 @@ __ddi_create_minor_node(dev_info_t *di, char *name, int spec_type,
mutex_exit(&di->di_lock);
RETURN(DDI_SUCCESS);
SRETURN(DDI_SUCCESS);
}
EXPORT_SYMBOL(__ddi_create_minor_node);
@ -202,18 +202,18 @@ __ddi_remove_minor_node_locked(dev_info_t *di, char *name)
void
__ddi_remove_minor_node(dev_info_t *di, char *name)
{
ENTRY;
SENTRY;
mutex_enter(&di->di_lock);
__ddi_remove_minor_node_locked(di, name);
mutex_exit(&di->di_lock);
EXIT;
SEXIT;
}
EXPORT_SYMBOL(__ddi_remove_minor_node);
int
ddi_quiesce_not_needed(dev_info_t *dip)
{
RETURN(DDI_SUCCESS);
SRETURN(DDI_SUCCESS);
}
EXPORT_SYMBOL(ddi_quiesce_not_needed);
@ -280,12 +280,12 @@ __mod_install(struct modlinkage *modlp)
struct modldrv *drv = modlp->ml_modldrv;
struct dev_info *di;
int rc;
ENTRY;
SENTRY;
di = dev_info_alloc(modlp->ml_major, modlp->ml_minors,
drv->drv_dev_ops);
if (di == NULL)
RETURN(ENOMEM);
SRETURN(ENOMEM);
/* XXX: Really we need to be calling devo_probe if it's available
* and then calling devo_attach for each device discovered. However
@ -294,12 +294,12 @@ __mod_install(struct modlinkage *modlp)
rc = drv->drv_dev_ops->devo_attach(di, DDI_ATTACH);
if (rc != DDI_SUCCESS) {
dev_info_free(di);
RETURN(rc);
SRETURN(rc);
}
drv->drv_dev_info = di;
RETURN(DDI_SUCCESS);
SRETURN(DDI_SUCCESS);
}
EXPORT_SYMBOL(__mod_install);
@ -333,16 +333,16 @@ __mod_remove(struct modlinkage *modlp)
struct modldrv *drv = modlp->ml_modldrv;
struct dev_info *di = drv->drv_dev_info;
int rc;
ENTRY;
SENTRY;
rc = drv->drv_dev_ops->devo_detach(di, DDI_DETACH);
if (rc != DDI_SUCCESS)
RETURN(rc);
SRETURN(rc);
dev_info_free(di);
drv->drv_dev_info = NULL;
RETURN(DDI_SUCCESS);
SRETURN(DDI_SUCCESS);
}
EXPORT_SYMBOL(__mod_remove);
@ -350,28 +350,28 @@ int
ldi_ident_from_mod(struct modlinkage *modlp, ldi_ident_t *lip)
{
ldi_ident_t li;
ENTRY;
SENTRY;
ASSERT(modlp);
ASSERT(lip);
li = kmalloc(sizeof(struct ldi_ident), GFP_KERNEL);
if (li == NULL)
RETURN(ENOMEM);
SRETURN(ENOMEM);
li->li_dev = MKDEV(modlp->ml_major, 0);
*lip = li;
RETURN(0);
SRETURN(0);
}
EXPORT_SYMBOL(ldi_ident_from_mod);
void
ldi_ident_release(ldi_ident_t lip)
{
ENTRY;
SENTRY;
ASSERT(lip);
kfree(lip);
EXIT;
SEXIT;
}
EXPORT_SYMBOL(ldi_ident_release);


@ -31,11 +31,11 @@
#include <linux/proc_compat.h>
#include <spl-debug.h>
#ifdef DEBUG_SUBSYSTEM
#undef DEBUG_SUBSYSTEM
#ifdef SS_DEBUG_SUBSYS
#undef SS_DEBUG_SUBSYS
#endif
#define DEBUG_SUBSYSTEM S_PROC
#define SS_DEBUG_SUBSYS SS_PROC
#ifdef DEBUG_KMEM
static unsigned long table_min = 0;
@ -217,21 +217,21 @@ SPL_PROC_HANDLER(proc_dobitmasks)
int is_printk = (mask == &spl_debug_printk) ? 1 : 0;
int size = 512, rc;
char *str;
ENTRY;
SENTRY;
str = kmem_alloc(size, KM_SLEEP);
if (str == NULL)
RETURN(-ENOMEM);
SRETURN(-ENOMEM);
if (write) {
rc = proc_copyin_string(str, size, buffer, *lenp);
if (rc < 0)
RETURN(rc);
SRETURN(rc);
rc = spl_debug_str2mask(mask, str, is_subsys);
/* Always print BUG/ASSERT to console, so keep this mask */
if (is_printk)
*mask |= D_EMERG;
*mask |= SD_EMERG;
*ppos += *lenp;
} else {
@ -248,19 +248,19 @@ SPL_PROC_HANDLER(proc_dobitmasks)
}
kmem_free(str, size);
RETURN(rc);
SRETURN(rc);
}
SPL_PROC_HANDLER(proc_debug_mb)
{
char str[32];
int rc, len;
ENTRY;
SENTRY;
if (write) {
rc = proc_copyin_string(str, sizeof(str), buffer, *lenp);
if (rc < 0)
RETURN(rc);
SRETURN(rc);
rc = spl_debug_set_mb(simple_strtoul(str, NULL, 0));
*ppos += *lenp;
@ -277,12 +277,12 @@ SPL_PROC_HANDLER(proc_debug_mb)
}
}
RETURN(rc);
SRETURN(rc);
}
SPL_PROC_HANDLER(proc_dump_kernel)
{
ENTRY;
SENTRY;
if (write) {
spl_debug_dumplog(0);
@ -291,19 +291,19 @@ SPL_PROC_HANDLER(proc_dump_kernel)
*lenp = 0;
}
RETURN(0);
SRETURN(0);
}
SPL_PROC_HANDLER(proc_force_bug)
{
ENTRY;
SENTRY;
if (write)
PANIC("Crashing due to forced panic\n");
else
*lenp = 0;
RETURN(0);
SRETURN(0);
}
SPL_PROC_HANDLER(proc_console_max_delay_cs)
@ -311,7 +311,7 @@ SPL_PROC_HANDLER(proc_console_max_delay_cs)
int rc, max_delay_cs;
struct ctl_table dummy = *table;
long d;
ENTRY;
SENTRY;
dummy.data = &max_delay_cs;
dummy.proc_handler = &proc_dointvec;
@ -320,14 +320,14 @@ SPL_PROC_HANDLER(proc_console_max_delay_cs)
max_delay_cs = 0;
rc = spl_proc_dointvec(&dummy,write,filp,buffer,lenp,ppos);
if (rc < 0)
RETURN(rc);
SRETURN(rc);
if (max_delay_cs <= 0)
RETURN(-EINVAL);
SRETURN(-EINVAL);
d = (max_delay_cs * HZ) / 100;
if (d == 0 || d < spl_console_min_delay)
RETURN(-EINVAL);
SRETURN(-EINVAL);
spl_console_max_delay = d;
} else {
@ -335,7 +335,7 @@ SPL_PROC_HANDLER(proc_console_max_delay_cs)
rc = spl_proc_dointvec(&dummy,write,filp,buffer,lenp,ppos);
}
RETURN(rc);
SRETURN(rc);
}
SPL_PROC_HANDLER(proc_console_min_delay_cs)
@ -343,7 +343,7 @@ SPL_PROC_HANDLER(proc_console_min_delay_cs)
int rc, min_delay_cs;
struct ctl_table dummy = *table;
long d;
ENTRY;
SENTRY;
dummy.data = &min_delay_cs;
dummy.proc_handler = &proc_dointvec;
@ -352,14 +352,14 @@ SPL_PROC_HANDLER(proc_console_min_delay_cs)
min_delay_cs = 0;
rc = spl_proc_dointvec(&dummy,write,filp,buffer,lenp,ppos);
if (rc < 0)
RETURN(rc);
SRETURN(rc);
if (min_delay_cs <= 0)
RETURN(-EINVAL);
SRETURN(-EINVAL);
d = (min_delay_cs * HZ) / 100;
if (d == 0 || d > spl_console_max_delay)
RETURN(-EINVAL);
SRETURN(-EINVAL);
spl_console_min_delay = d;
} else {
@ -367,14 +367,14 @@ SPL_PROC_HANDLER(proc_console_min_delay_cs)
rc = spl_proc_dointvec(&dummy,write,filp,buffer,lenp,ppos);
}
RETURN(rc);
SRETURN(rc);
}
SPL_PROC_HANDLER(proc_console_backoff)
{
int rc, backoff;
struct ctl_table dummy = *table;
ENTRY;
SENTRY;
dummy.data = &backoff;
dummy.proc_handler = &proc_dointvec;
@ -383,10 +383,10 @@ SPL_PROC_HANDLER(proc_console_backoff)
backoff = 0;
rc = spl_proc_dointvec(&dummy,write,filp,buffer,lenp,ppos);
if (rc < 0)
RETURN(rc);
SRETURN(rc);
if (backoff <= 0)
RETURN(-EINVAL);
SRETURN(-EINVAL);
spl_console_backoff = backoff;
} else {
@ -394,7 +394,7 @@ SPL_PROC_HANDLER(proc_console_backoff)
rc = spl_proc_dointvec(&dummy,write,filp,buffer,lenp,ppos);
}
RETURN(rc);
SRETURN(rc);
}
#ifdef DEBUG_KMEM
@ -403,7 +403,7 @@ SPL_PROC_HANDLER(proc_domemused)
int rc = 0;
unsigned long min = 0, max = ~0, val;
struct ctl_table dummy = *table;
ENTRY;
SENTRY;
dummy.data = &val;
dummy.proc_handler = &proc_dointvec;
@ -422,7 +422,7 @@ SPL_PROC_HANDLER(proc_domemused)
buffer, lenp, ppos);
}
RETURN(rc);
SRETURN(rc);
}
#endif /* DEBUG_KMEM */
@ -431,7 +431,7 @@ SPL_PROC_HANDLER(proc_dohostid)
int len, rc = 0;
int32_t val;
char *end, str[32];
ENTRY;
SENTRY;
if (write) {
/* We can't use spl_proc_doulongvec_minmax() in the write
@ -439,11 +439,11 @@ SPL_PROC_HANDLER(proc_dohostid)
* leading 0x which confuses the helper function. */
rc = proc_copyin_string(str, sizeof(str), buffer, *lenp);
if (rc < 0)
RETURN(rc);
SRETURN(rc);
val = simple_strtol(str, &end, 16);
if (str == end)
RETURN(-EINVAL);
SRETURN(-EINVAL);
spl_hostid = (long) val;
(void) snprintf(hw_serial, HW_HOSTID_LEN, "%u",
@ -463,7 +463,7 @@ SPL_PROC_HANDLER(proc_dohostid)
}
}
RETURN(rc);
SRETURN(rc);
}
#ifndef HAVE_KALLSYMS_LOOKUP_NAME
@ -471,24 +471,24 @@ SPL_PROC_HANDLER(proc_dokallsyms_lookup_name)
{
int len, rc = 0;
char *end, str[32];
ENTRY;
SENTRY;
if (write) {
/* This may only be set once at module load time */
if (spl_kallsyms_lookup_name_fn != SYMBOL_POISON)
RETURN(-EEXIST);
SRETURN(-EEXIST);
/* We can't use spl_proc_doulongvec_minmax() in the write
* case here because the address while a hex value has no
* leading 0x which confuses the helper function. */
rc = proc_copyin_string(str, sizeof(str), buffer, *lenp);
if (rc < 0)
RETURN(rc);
SRETURN(rc);
spl_kallsyms_lookup_name_fn =
(kallsyms_lookup_name_t)simple_strtoul(str, &end, 16);
if (str == end)
RETURN(-EINVAL);
SRETURN(-EINVAL);
*ppos += *lenp;
} else {
@ -505,7 +505,7 @@ SPL_PROC_HANDLER(proc_dokallsyms_lookup_name)
}
}
RETURN(rc);
SRETURN(rc);
}
#endif /* HAVE_KALLSYMS_LOOKUP_NAME */
@ -513,7 +513,7 @@ SPL_PROC_HANDLER(proc_doavailrmem)
{
int len, rc = 0;
char str[32];
ENTRY;
SENTRY;
if (write) {
*ppos += *lenp;
@ -531,14 +531,14 @@ SPL_PROC_HANDLER(proc_doavailrmem)
}
}
RETURN(rc);
SRETURN(rc);
}
SPL_PROC_HANDLER(proc_dofreemem)
{
int len, rc = 0;
char str[32];
ENTRY;
SENTRY;
if (write) {
*ppos += *lenp;
@ -555,7 +555,7 @@ SPL_PROC_HANDLER(proc_dofreemem)
}
}
RETURN(rc);
SRETURN(rc);
}
#ifdef DEBUG_KMEM
@ -605,7 +605,7 @@ slab_seq_start(struct seq_file *f, loff_t *pos)
{
struct list_head *p;
loff_t n = *pos;
ENTRY;
SENTRY;
down_read(&spl_kmem_cache_sem);
if (!n)
@ -615,20 +615,20 @@ slab_seq_start(struct seq_file *f, loff_t *pos)
while (n--) {
p = p->next;
if (p == &spl_kmem_cache_list)
RETURN(NULL);
SRETURN(NULL);
}
RETURN(list_entry(p, spl_kmem_cache_t, skc_list));
SRETURN(list_entry(p, spl_kmem_cache_t, skc_list));
}
static void *
slab_seq_next(struct seq_file *f, void *p, loff_t *pos)
{
spl_kmem_cache_t *skc = p;
ENTRY;
SENTRY;
++*pos;
RETURN((skc->skc_list.next == &spl_kmem_cache_list) ?
SRETURN((skc->skc_list.next == &spl_kmem_cache_list) ?
NULL : list_entry(skc->skc_list.next,spl_kmem_cache_t,skc_list));
}
@ -1025,33 +1025,33 @@ int
proc_init(void)
{
int rc = 0;
ENTRY;
SENTRY;
#ifdef CONFIG_SYSCTL
spl_header = spl_register_sysctl_table(spl_root, 0);
if (spl_header == NULL)
RETURN(-EUNATCH);
SRETURN(-EUNATCH);
#endif /* CONFIG_SYSCTL */
proc_spl = proc_mkdir("spl", NULL);
if (proc_spl == NULL)
GOTO(out, rc = -EUNATCH);
SGOTO(out, rc = -EUNATCH);
#ifdef DEBUG_KMEM
proc_spl_kmem = proc_mkdir("kmem", proc_spl);
if (proc_spl_kmem == NULL)
GOTO(out, rc = -EUNATCH);
SGOTO(out, rc = -EUNATCH);
proc_spl_kmem_slab = create_proc_entry("slab", 0444, proc_spl_kmem);
if (proc_spl_kmem_slab == NULL)
GOTO(out, rc = -EUNATCH);
SGOTO(out, rc = -EUNATCH);
proc_spl_kmem_slab->proc_fops = &proc_slab_operations;
#endif /* DEBUG_KMEM */
proc_spl_kstat = proc_mkdir("kstat", proc_spl);
if (proc_spl_kstat == NULL)
GOTO(out, rc = -EUNATCH);
SGOTO(out, rc = -EUNATCH);
out:
if (rc) {
remove_proc_entry("kstat", proc_spl);
@ -1065,13 +1065,13 @@ out:
#endif /* CONFIG_SYSCTL */
}
RETURN(rc);
SRETURN(rc);
}
void
proc_fini(void)
{
ENTRY;
SENTRY;
remove_proc_entry("kstat", proc_spl);
#ifdef DEBUG_KMEM
@ -1085,5 +1085,5 @@ proc_fini(void)
spl_unregister_sysctl_table(spl_header);
#endif /* CONFIG_SYSCTL */
EXIT;
SEXIT;
}


@ -28,11 +28,11 @@
#include <sys/kmem.h>
#include <spl-debug.h>
#ifdef DEBUG_SUBSYSTEM
#undef DEBUG_SUBSYSTEM
#ifdef SS_DEBUG_SUBSYS
#undef SS_DEBUG_SUBSYS
#endif
#define DEBUG_SUBSYSTEM S_TASKQ
#define SS_DEBUG_SUBSYS SS_TASKQ
/* Global system-wide dynamic task queue available for all consumers */
taskq_t *system_taskq;
@ -55,7 +55,7 @@ task_alloc(taskq_t *tq, uint_t flags)
{
spl_task_t *t;
int count = 0;
ENTRY;
SENTRY;
ASSERT(tq);
ASSERT(flags & (TQ_SLEEP | TQ_NOSLEEP)); /* One set */
@ -66,17 +66,17 @@ retry:
if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
t = list_entry(tq->tq_free_list.next, spl_task_t, t_list);
list_del_init(&t->t_list);
RETURN(t);
SRETURN(t);
}
/* Free list is empty and memory allocations are prohibited */
if (flags & TQ_NOALLOC)
RETURN(NULL);
SRETURN(NULL);
/* Hit maximum spl_task_t pool size */
if (tq->tq_nalloc >= tq->tq_maxalloc) {
if (flags & TQ_NOSLEEP)
RETURN(NULL);
SRETURN(NULL);
/* Sleep periodically polling the free list for an available
* spl_task_t. If a full second passes and we have not found
@ -86,9 +86,9 @@ retry:
schedule_timeout(HZ / 100);
spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
if (count < 100)
GOTO(retry, count++);
SGOTO(retry, count++);
RETURN(NULL);
SRETURN(NULL);
}
/* Unreachable, Neither TQ_SLEEP or TQ_NOSLEEP set */
@ -108,7 +108,7 @@ retry:
tq->tq_nalloc++;
}
RETURN(t);
SRETURN(t);
}
/*
@ -118,7 +118,7 @@ retry:
static void
task_free(taskq_t *tq, spl_task_t *t)
{
ENTRY;
SENTRY;
ASSERT(tq);
ASSERT(t);
@ -128,7 +128,7 @@ task_free(taskq_t *tq, spl_task_t *t)
kmem_free(t, sizeof(spl_task_t));
tq->tq_nalloc--;
EXIT;
SEXIT;
}
/*
@ -138,7 +138,7 @@ task_free(taskq_t *tq, spl_task_t *t)
static void
task_done(taskq_t *tq, spl_task_t *t)
{
ENTRY;
SENTRY;
ASSERT(tq);
ASSERT(t);
ASSERT(spin_is_locked(&tq->tq_lock));
@ -154,7 +154,7 @@ task_done(taskq_t *tq, spl_task_t *t)
task_free(tq, t);
}
EXIT;
SEXIT;
}
/*
@ -190,18 +190,18 @@ taskq_wait_check(taskq_t *tq, taskqid_t id)
rc = (id < tq->tq_lowest_id);
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
RETURN(rc);
SRETURN(rc);
}
void
__taskq_wait_id(taskq_t *tq, taskqid_t id)
{
ENTRY;
SENTRY;
ASSERT(tq);
wait_event(tq->tq_wait_waitq, taskq_wait_check(tq, id));
EXIT;
SEXIT;
}
EXPORT_SYMBOL(__taskq_wait_id);
@ -209,7 +209,7 @@ void
__taskq_wait(taskq_t *tq)
{
taskqid_t id;
ENTRY;
SENTRY;
ASSERT(tq);
/* Wait for the largest outstanding taskqid */
@ -219,7 +219,7 @@ __taskq_wait(taskq_t *tq)
__taskq_wait_id(tq, id);
EXIT;
SEXIT;
}
EXPORT_SYMBOL(__taskq_wait);
@ -228,16 +228,16 @@ int
__taskq_member(taskq_t *tq, void *t)
{
int i;
ENTRY;
SENTRY;
ASSERT(tq);
ASSERT(t);
for (i = 0; i < tq->tq_nthreads; i++)
if (tq->tq_threads[i] == (struct task_struct *)t)
RETURN(1);
SRETURN(1);
RETURN(0);
SRETURN(0);
}
EXPORT_SYMBOL(__taskq_member);
@ -246,7 +246,7 @@ __taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
spl_task_t *t;
taskqid_t rc = 0;
ENTRY;
SENTRY;
ASSERT(tq);
ASSERT(func);
@ -263,15 +263,15 @@ __taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
/* Taskq being destroyed and all tasks drained */
if (!(tq->tq_flags & TQ_ACTIVE))
GOTO(out, rc = 0);
SGOTO(out, rc = 0);
/* Do not queue the task unless there is idle thread for it */
ASSERT(tq->tq_nactive <= tq->tq_nthreads);
if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads))
GOTO(out, rc = 0);
SGOTO(out, rc = 0);
if ((t = task_alloc(tq, flags)) == NULL)
GOTO(out, rc = 0);
SGOTO(out, rc = 0);
spin_lock(&t->t_lock);
@ -290,7 +290,7 @@ __taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
wake_up(&tq->tq_work_waitq);
out:
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
RETURN(rc);
SRETURN(rc);
}
EXPORT_SYMBOL(__taskq_dispatch);
@ -305,7 +305,7 @@ taskq_lowest_id(taskq_t *tq)
{
taskqid_t lowest_id = tq->tq_next_id;
spl_task_t *t;
ENTRY;
SENTRY;
ASSERT(tq);
ASSERT(spin_is_locked(&tq->tq_lock));
@ -325,7 +325,7 @@ taskq_lowest_id(taskq_t *tq)
lowest_id = MIN(lowest_id, t->t_id);
}
RETURN(lowest_id);
SRETURN(lowest_id);
}
/*
@ -338,7 +338,7 @@ taskq_insert_in_order(taskq_t *tq, spl_task_t *t)
spl_task_t *w;
struct list_head *l;
ENTRY;
SENTRY;
ASSERT(tq);
ASSERT(t);
ASSERT(spin_is_locked(&tq->tq_lock));
@ -353,7 +353,7 @@ taskq_insert_in_order(taskq_t *tq, spl_task_t *t)
if (l == &tq->tq_work_list)
list_add(&t->t_list, &tq->tq_work_list);
EXIT;
SEXIT;
}
static int
@ -365,7 +365,7 @@ taskq_thread(void *args)
taskq_t *tq = args;
spl_task_t *t;
struct list_head *pend_list;
ENTRY;
SENTRY;
ASSERT(tq);
current->flags |= PF_NOFREEZE;
@ -433,7 +433,7 @@ taskq_thread(void *args)
tq->tq_nthreads--;
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
RETURN(0);
SRETURN(0);
}
taskq_t *
@ -443,7 +443,7 @@ __taskq_create(const char *name, int nthreads, pri_t pri,
taskq_t *tq;
struct task_struct *t;
int rc = 0, i, j = 0;
ENTRY;
SENTRY;
ASSERT(name != NULL);
ASSERT(pri <= maxclsyspri);
@ -462,12 +462,12 @@ __taskq_create(const char *name, int nthreads, pri_t pri,
tq = kmem_alloc(sizeof(*tq), KM_SLEEP);
if (tq == NULL)
RETURN(NULL);
SRETURN(NULL);
tq->tq_threads = kmem_alloc(nthreads * sizeof(t), KM_SLEEP);
if (tq->tq_threads == NULL) {
kmem_free(tq, sizeof(*tq));
RETURN(NULL);
SRETURN(NULL);
}
spin_lock_init(&tq->tq_lock);
@ -517,7 +517,7 @@ __taskq_create(const char *name, int nthreads, pri_t pri,
tq = NULL;
}
RETURN(tq);
SRETURN(tq);
}
EXPORT_SYMBOL(__taskq_create);
@ -526,7 +526,7 @@ __taskq_destroy(taskq_t *tq)
{
spl_task_t *t;
int i, nthreads;
ENTRY;
SENTRY;
ASSERT(tq);
spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
@ -560,29 +560,29 @@ __taskq_destroy(taskq_t *tq)
kmem_free(tq->tq_threads, nthreads * sizeof(spl_task_t *));
kmem_free(tq, sizeof(taskq_t));
EXIT;
SEXIT;
}
EXPORT_SYMBOL(__taskq_destroy);
int
spl_taskq_init(void)
{
ENTRY;
SENTRY;
/* Solaris creates a dynamic taskq of up to 64 threads; however, in
* a Linux environment one thread per core is usually about right */
system_taskq = taskq_create("spl_system_taskq", num_online_cpus(),
minclsyspri, 4, 512, TASKQ_PREPOPULATE);
if (system_taskq == NULL)
RETURN(1);
SRETURN(1);
RETURN(0);
SRETURN(0);
}
void
spl_taskq_fini(void)
{
ENTRY;
SENTRY;
taskq_destroy(system_taskq);
EXIT;
SEXIT;
}
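
The pattern the files below follow, overriding SS_DEBUG_SUBSYS after including spl-debug.h and then wrapping entry, exit and error paths, can be sketched for an out-of-tree caller; the SS_USER1 mask, function name and messages are purely illustrative.

#include <spl-debug.h>

#ifdef SS_DEBUG_SUBSYS
#undef SS_DEBUG_SUBSYS
#endif
#define SS_DEBUG_SUBSYS SS_USER1

static int
my_pkg_do_work(int arg)                          /* illustrative function */
{
        int rc = 0;
        SENTRY;

        if (arg < 0)
                SGOTO(out, rc = -EINVAL);        /* log, evaluate rc, jump */

        SDEBUG(SD_INFO, "processing arg %d\n", arg);
out:
        if (rc)
                SWARN("work failed: %d\n", rc);
        SRETURN(rc);
}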


@ -28,11 +28,11 @@
#include <sys/kmem.h>
#include <spl-debug.h>
#ifdef DEBUG_SUBSYSTEM
#undef DEBUG_SUBSYSTEM
#ifdef SS_DEBUG_SUBSYS
#undef SS_DEBUG_SUBSYS
#endif
#define DEBUG_SUBSYSTEM S_THREAD
#define SS_DEBUG_SUBSYS SS_THREAD
/*
* Thread interfaces
@ -72,8 +72,8 @@ thread_generic_wrapper(void *arg)
void
__thread_exit(void)
{
ENTRY;
EXIT;
SENTRY;
SEXIT;
complete_and_exit(NULL, 0);
/* Unreachable */
}
@ -90,7 +90,7 @@ __thread_create(caddr_t stk, size_t stksize, thread_func_t func,
thread_priv_t *tp;
struct task_struct *tsk;
char *p;
ENTRY;
SENTRY;
/* Option pp is simply ignored */
/* Variable stack size unsupported */
@ -98,7 +98,7 @@ __thread_create(caddr_t stk, size_t stksize, thread_func_t func,
tp = kmem_alloc(sizeof(thread_priv_t), KM_SLEEP);
if (tp == NULL)
RETURN(NULL);
SRETURN(NULL);
tp->tp_magic = TP_MAGIC;
tp->tp_name_size = strlen(name) + 1;
@ -106,7 +106,7 @@ __thread_create(caddr_t stk, size_t stksize, thread_func_t func,
tp->tp_name = kmem_alloc(tp->tp_name_size, KM_SLEEP);
if (tp->tp_name == NULL) {
kmem_free(tp, sizeof(thread_priv_t));
RETURN(NULL);
SRETURN(NULL);
}
strncpy(tp->tp_name, name, tp->tp_name_size);
@ -127,11 +127,11 @@ __thread_create(caddr_t stk, size_t stksize, thread_func_t func,
tsk = kthread_create(thread_generic_wrapper, (void *)tp,
"%s", tp->tp_name);
if (IS_ERR(tsk)) {
CERROR("Failed to create thread: %ld\n", PTR_ERR(tsk));
RETURN(NULL);
SERROR("Failed to create thread: %ld\n", PTR_ERR(tsk));
SRETURN(NULL);
}
wake_up_process(tsk);
RETURN((kthread_t *)tsk);
SRETURN((kthread_t *)tsk);
}
EXPORT_SYMBOL(__thread_create);
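
The control transfer macros used throughout these files are assumed to pair a trace message with the jump or return itself, roughly as sketched below. The MY_ prefixed names are illustrative stand-ins; the real definitions live in spl-debug.h and may differ in detail.

/* Illustrative expansions only, not the spl-debug.h definitions */
#define MY_SGOTO(label, rc)                                             \
do {                                                                    \
        SDEBUG(SD_TRACE, "Process leaving via %s (rc=%lu)\n",           \
            #label, (unsigned long)(rc));                               \
        goto label;                                                     \
} while (0)

#define MY_SRETURN(rc)                                                  \
do {                                                                    \
        typeof(rc) __rc = (rc);                                         \
        SDEBUG(SD_TRACE, "Process leaving (rc=%lu)\n",                  \
            (unsigned long)__rc);                                       \
        return __rc;                                                    \
} while (0)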


@ -27,11 +27,11 @@
#include <sys/vnode.h>
#include <spl-debug.h>
#ifdef DEBUG_SUBSYSTEM
#undef DEBUG_SUBSYSTEM
#ifdef SS_DEBUG_SUBSYS
#undef SS_DEBUG_SUBSYS
#endif
#define DEBUG_SUBSYSTEM S_VNODE
#define SS_DEBUG_SUBSYS SS_VNODE
vnode_t *rootdir = (vnode_t *)0xabcd1234;
EXPORT_SYMBOL(rootdir);
@ -76,7 +76,7 @@ vnode_t *
vn_alloc(int flag)
{
vnode_t *vp;
ENTRY;
SENTRY;
vp = kmem_cache_alloc(vn_cache, flag);
if (vp != NULL) {
@ -84,16 +84,16 @@ vn_alloc(int flag)
vp->v_type = 0;
}
RETURN(vp);
SRETURN(vp);
} /* vn_alloc() */
EXPORT_SYMBOL(vn_alloc);
void
vn_free(vnode_t *vp)
{
ENTRY;
SENTRY;
kmem_cache_free(vn_cache, vp);
EXIT;
SEXIT;
} /* vn_free() */
EXPORT_SYMBOL(vn_free);
@ -105,7 +105,7 @@ vn_open(const char *path, uio_seg_t seg, int flags, int mode,
struct kstat stat;
int rc, saved_umask = 0;
vnode_t *vp;
ENTRY;
SENTRY;
ASSERT(flags & (FWRITE | FREAD));
ASSERT(seg == UIO_SYSSPACE);
@ -131,18 +131,18 @@ vn_open(const char *path, uio_seg_t seg, int flags, int mode,
(void)xchg(&current->fs->umask, saved_umask);
if (IS_ERR(fp))
RETURN(-PTR_ERR(fp));
SRETURN(-PTR_ERR(fp));
rc = vfs_getattr(fp->f_vfsmnt, fp->f_dentry, &stat);
if (rc) {
filp_close(fp, 0);
RETURN(-rc);
SRETURN(-rc);
}
vp = vn_alloc(KM_SLEEP);
if (!vp) {
filp_close(fp, 0);
RETURN(ENOMEM);
SRETURN(ENOMEM);
}
mutex_enter(&vp->v_lock);
@ -151,7 +151,7 @@ vn_open(const char *path, uio_seg_t seg, int flags, int mode,
*vpp = vp;
mutex_exit(&vp->v_lock);
RETURN(0);
SRETURN(0);
} /* vn_open() */
EXPORT_SYMBOL(vn_open);
@ -161,20 +161,20 @@ vn_openat(const char *path, uio_seg_t seg, int flags, int mode,
{
char *realpath;
int len, rc;
ENTRY;
SENTRY;
ASSERT(vp == rootdir);
len = strlen(path) + 2;
realpath = kmalloc(len, GFP_KERNEL);
if (!realpath)
RETURN(ENOMEM);
SRETURN(ENOMEM);
(void)snprintf(realpath, len, "/%s", path);
rc = vn_open(realpath, seg, flags, mode, vpp, x1, x2);
kfree(realpath);
RETURN(rc);
SRETURN(rc);
} /* vn_openat() */
EXPORT_SYMBOL(vn_openat);
@ -186,7 +186,7 @@ vn_rdwr(uio_rw_t uio, vnode_t *vp, void *addr, ssize_t len, offset_t off,
mm_segment_t saved_fs;
struct file *fp;
int rc;
ENTRY;
SENTRY;
ASSERT(uio == UIO_WRITE || uio == UIO_READ);
ASSERT(vp);
@ -215,16 +215,16 @@ vn_rdwr(uio_rw_t uio, vnode_t *vp, void *addr, ssize_t len, offset_t off,
set_fs(saved_fs);
if (rc < 0)
RETURN(-rc);
SRETURN(-rc);
if (residp) {
*residp = len - rc;
} else {
if (rc != len)
RETURN(EIO);
SRETURN(EIO);
}
RETURN(0);
SRETURN(0);
} /* vn_rdwr() */
EXPORT_SYMBOL(vn_rdwr);
@ -232,7 +232,7 @@ int
vn_close(vnode_t *vp, int flags, int x1, int x2, void *x3, void *x4)
{
int rc;
ENTRY;
SENTRY;
ASSERT(vp);
ASSERT(vp->v_file);
@ -240,7 +240,7 @@ vn_close(vnode_t *vp, int flags, int x1, int x2, void *x3, void *x4)
rc = filp_close(vp->v_file, 0);
vn_free(vp);
RETURN(-rc);
SRETURN(-rc);
} /* vn_close() */
EXPORT_SYMBOL(vn_close);
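
A caller-side sketch of the vnode interfaces above, restricted to arguments visible in these hunks; the helper name and path handling are illustrative, and vn_open(), vn_getattr() and vn_close() return positive errno values on failure as the SRETURN(-rc) conversions show.

static int
my_probe_file(const char *path)                  /* illustrative helper */
{
        vnode_t *vp = NULL;
        vattr_t vap;
        int rc;
        SENTRY;

        rc = vn_open(path, UIO_SYSSPACE, FREAD, 0644, &vp, 0, 0);
        if (rc)
                SRETURN(rc);

        rc = vn_getattr(vp, &vap, 0, NULL, NULL);
        if (rc)
                SGOTO(out, rc);

        SDEBUG(SD_INFO, "%s mode 0%o\n", path, (unsigned int)vap.va_mode);
out:
        (void) vn_close(vp, FREAD, 0, 0, NULL, NULL);
        SRETURN(rc);
}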
@ -275,18 +275,18 @@ vn_remove(const char *path, uio_seg_t seg, int flags)
struct nameidata nd;
struct inode *inode = NULL;
int rc = 0;
ENTRY;
SENTRY;
ASSERT(seg == UIO_SYSSPACE);
ASSERT(flags == RMFILE);
rc = path_lookup(path, LOOKUP_PARENT, &nd);
if (rc)
GOTO(exit, rc);
SGOTO(exit, rc);
rc = -EISDIR;
if (nd.last_type != LAST_NORM)
GOTO(exit1, rc);
SGOTO(exit1, rc);
#ifdef HAVE_INODE_I_MUTEX
mutex_lock_nested(&nd.nd_dentry->d_inode->i_mutex, I_MUTEX_PARENT);
@ -298,7 +298,7 @@ vn_remove(const char *path, uio_seg_t seg, int flags)
if (!IS_ERR(dentry)) {
/* Why not before? Because we want correct rc value */
if (nd.last.name[nd.last.len])
GOTO(slashes, rc);
SGOTO(slashes, rc);
inode = dentry->d_inode;
if (inode)
@ -321,12 +321,12 @@ exit2:
exit1:
vn_path_release(&nd);
exit:
RETURN(-rc);
SRETURN(-rc);
slashes:
rc = !dentry->d_inode ? -ENOENT :
S_ISDIR(dentry->d_inode->i_mode) ? -EISDIR : -ENOTDIR;
GOTO(exit2, rc);
SGOTO(exit2, rc);
} /* vn_remove() */
EXPORT_SYMBOL(vn_remove);
@ -339,28 +339,28 @@ vn_rename(const char *oldname, const char *newname, int x1)
struct dentry *trap;
struct nameidata oldnd, newnd;
int rc = 0;
ENTRY;
SENTRY;
rc = path_lookup(oldname, LOOKUP_PARENT, &oldnd);
if (rc)
GOTO(exit, rc);
SGOTO(exit, rc);
rc = path_lookup(newname, LOOKUP_PARENT, &newnd);
if (rc)
GOTO(exit1, rc);
SGOTO(exit1, rc);
rc = -EXDEV;
if (oldnd.nd_mnt != newnd.nd_mnt)
GOTO(exit2, rc);
SGOTO(exit2, rc);
old_dir = oldnd.nd_dentry;
rc = -EBUSY;
if (oldnd.last_type != LAST_NORM)
GOTO(exit2, rc);
SGOTO(exit2, rc);
new_dir = newnd.nd_dentry;
if (newnd.last_type != LAST_NORM)
GOTO(exit2, rc);
SGOTO(exit2, rc);
trap = lock_rename(new_dir, old_dir);
@ -368,36 +368,36 @@ vn_rename(const char *oldname, const char *newname, int x1)
rc = PTR_ERR(old_dentry);
if (IS_ERR(old_dentry))
GOTO(exit3, rc);
SGOTO(exit3, rc);
/* source must exist */
rc = -ENOENT;
if (!old_dentry->d_inode)
GOTO(exit4, rc);
SGOTO(exit4, rc);
/* unless the source is a directory, trailing slashes give -ENOTDIR */
if (!S_ISDIR(old_dentry->d_inode->i_mode)) {
rc = -ENOTDIR;
if (oldnd.last.name[oldnd.last.len])
GOTO(exit4, rc);
SGOTO(exit4, rc);
if (newnd.last.name[newnd.last.len])
GOTO(exit4, rc);
SGOTO(exit4, rc);
}
/* source should not be ancestor of target */
rc = -EINVAL;
if (old_dentry == trap)
GOTO(exit4, rc);
SGOTO(exit4, rc);
new_dentry = vn_lookup_hash(&newnd);
rc = PTR_ERR(new_dentry);
if (IS_ERR(new_dentry))
GOTO(exit4, rc);
SGOTO(exit4, rc);
/* target should not be an ancestor of source */
rc = -ENOTEMPTY;
if (new_dentry == trap)
GOTO(exit5, rc);
SGOTO(exit5, rc);
#ifdef HAVE_4ARGS_VFS_RENAME
rc = vfs_rename(old_dir->d_inode, old_dentry,
@ -417,7 +417,7 @@ exit2:
exit1:
vn_path_release(&oldnd);
exit:
RETURN(-rc);
SRETURN(-rc);
}
EXPORT_SYMBOL(vn_rename);
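
As a usage sketch, replacing one file with another through these helpers could look like the following; the helper name and paths are illustrative, and both calls return positive errno values on failure.

static int
my_replace_file(const char *tmpname, const char *newname, const char *oldname)
{
        int rc;
        SENTRY;

        rc = vn_rename(tmpname, newname, 0);
        if (rc)
                SRETURN(rc);

        rc = vn_remove(oldname, UIO_SYSSPACE, RMFILE);
        SRETURN(rc);
}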
@ -427,7 +427,7 @@ vn_getattr(vnode_t *vp, vattr_t *vap, int flags, void *x3, void *x4)
struct file *fp;
struct kstat stat;
int rc;
ENTRY;
SENTRY;
ASSERT(vp);
ASSERT(vp->v_file);
@ -437,7 +437,7 @@ vn_getattr(vnode_t *vp, vattr_t *vap, int flags, void *x3, void *x4)
rc = vfs_getattr(fp->f_vfsmnt, fp->f_dentry, &stat);
if (rc)
RETURN(-rc);
SRETURN(-rc);
vap->va_type = vn_get_sol_type(stat.mode);
vap->va_mode = stat.mode;
@ -457,14 +457,14 @@ vn_getattr(vnode_t *vp, vattr_t *vap, int flags, void *x3, void *x4)
vap->va_rdev = stat.rdev;
vap->va_blocks = stat.blocks;
RETURN(0);
SRETURN(0);
}
EXPORT_SYMBOL(vn_getattr);
int vn_fsync(vnode_t *vp, int flags, void *x3, void *x4)
{
int datasync = 0;
ENTRY;
SENTRY;
ASSERT(vp);
ASSERT(vp->v_file);
@ -472,7 +472,7 @@ int vn_fsync(vnode_t *vp, int flags, void *x3, void *x4)
if (flags & FDSYNC)
datasync = 1;
RETURN(-spl_filp_fsync(vp->v_file, datasync));
SRETURN(-spl_filp_fsync(vp->v_file, datasync));
} /* vn_fsync() */
EXPORT_SYMBOL(vn_fsync);
@ -502,7 +502,7 @@ vn_getf(int fd)
file_t *fp;
vnode_t *vp;
int rc = 0;
ENTRY;
SENTRY;
/* Already open, just take an extra reference */
spin_lock(&vn_file_lock);
@ -511,7 +511,7 @@ vn_getf(int fd)
if (fp) {
atomic_inc(&fp->f_ref);
spin_unlock(&vn_file_lock);
RETURN(fp);
SRETURN(fp);
}
spin_unlock(&vn_file_lock);
@ -519,7 +519,7 @@ vn_getf(int fd)
/* File was not yet opened, create the object and set it up */
fp = kmem_cache_alloc(vn_file_cache, KM_SLEEP);
if (fp == NULL)
GOTO(out, rc);
SGOTO(out, rc);
mutex_enter(&fp->f_lock);
@ -529,14 +529,14 @@ vn_getf(int fd)
lfp = fget(fd);
if (lfp == NULL)
GOTO(out_mutex, rc);
SGOTO(out_mutex, rc);
vp = vn_alloc(KM_SLEEP);
if (vp == NULL)
GOTO(out_fget, rc);
SGOTO(out_fget, rc);
if (vfs_getattr(lfp->f_vfsmnt, lfp->f_dentry, &stat))
GOTO(out_vnode, rc);
SGOTO(out_vnode, rc);
mutex_enter(&vp->v_lock);
vp->v_type = vn_get_sol_type(stat.mode);
@ -552,7 +552,7 @@ vn_getf(int fd)
spin_unlock(&vn_file_lock);
mutex_exit(&fp->f_lock);
RETURN(fp);
SRETURN(fp);
out_vnode:
vn_free(vp);
@ -562,7 +562,7 @@ out_mutex:
mutex_exit(&fp->f_lock);
kmem_cache_free(vn_file_cache, fp);
out:
RETURN(NULL);
SRETURN(NULL);
} /* getf() */
EXPORT_SYMBOL(getf);
@ -582,7 +582,7 @@ void
vn_releasef(int fd)
{
file_t *fp;
ENTRY;
SENTRY;
spin_lock(&vn_file_lock);
fp = file_find(fd);
@ -590,7 +590,7 @@ vn_releasef(int fd)
atomic_dec(&fp->f_ref);
if (atomic_read(&fp->f_ref) > 0) {
spin_unlock(&vn_file_lock);
EXIT;
SEXIT;
return;
}
@ -599,7 +599,7 @@ vn_releasef(int fd)
}
spin_unlock(&vn_file_lock);
EXIT;
SEXIT;
return;
} /* releasef() */
EXPORT_SYMBOL(releasef);
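
A minimal sketch of the reference-counted fd lookup these exports provide, using the vn_getf()/vn_releasef() names defined above; the caller is illustrative.

static int
my_fd_example(int fd)                            /* illustrative caller */
{
        file_t *fp;
        SENTRY;

        fp = vn_getf(fd);                        /* takes a reference */
        if (fp == NULL)
                SRETURN(-EBADF);

        SDEBUG(SD_INFO, "fd %d -> file_t %p\n", fd, fp);

        vn_releasef(fd);                         /* drops the reference */
        SRETURN(0);
}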
@ -654,7 +654,7 @@ vn_set_pwd(const char *filename)
#endif /* HAVE_2ARGS_SET_FS_PWD */
mm_segment_t saved_fs;
int rc;
ENTRY;
SENTRY;
/*
* user_path_dir() and __user_walk() both expect 'filename' to be
@ -668,11 +668,11 @@ vn_set_pwd(const char *filename)
# ifdef HAVE_USER_PATH_DIR
rc = user_path_dir(filename, &path);
if (rc)
GOTO(out, rc);
SGOTO(out, rc);
rc = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_ACCESS);
if (rc)
GOTO(dput_and_out, rc);
SGOTO(dput_and_out, rc);
set_fs_pwd(current->fs, &path);
@ -682,11 +682,11 @@ dput_and_out:
rc = __user_walk(filename,
LOOKUP_FOLLOW|LOOKUP_DIRECTORY|LOOKUP_CHDIR, &nd);
if (rc)
GOTO(out, rc);
SGOTO(out, rc);
rc = vfs_permission(&nd, MAY_EXEC);
if (rc)
GOTO(dput_and_out, rc);
SGOTO(dput_and_out, rc);
set_fs_pwd(current->fs, &nd.path);
@ -697,11 +697,11 @@ dput_and_out:
rc = __user_walk(filename,
LOOKUP_FOLLOW|LOOKUP_DIRECTORY|LOOKUP_CHDIR, &nd);
if (rc)
GOTO(out, rc);
SGOTO(out, rc);
rc = vfs_permission(&nd, MAY_EXEC);
if (rc)
GOTO(dput_and_out, rc);
SGOTO(dput_and_out, rc);
set_fs_pwd(current->fs, nd.nd_mnt, nd.nd_dentry);
@ -711,7 +711,7 @@ dput_and_out:
out:
set_fs(saved_fs);
RETURN(-rc);
SRETURN(-rc);
} /* vn_set_pwd() */
EXPORT_SYMBOL(vn_set_pwd);
@ -756,7 +756,7 @@ vn_file_cache_destructor(void *buf, void *cdrarg)
int
vn_init(void)
{
ENTRY;
SENTRY;
vn_cache = kmem_cache_create("spl_vn_cache",
sizeof(struct vnode), 64,
vn_cache_constructor,
@ -768,7 +768,7 @@ vn_init(void)
vn_file_cache_constructor,
vn_file_cache_destructor,
NULL, NULL, NULL, 0);
RETURN(0);
SRETURN(0);
} /* vn_init() */
void
@ -776,7 +776,7 @@ vn_fini(void)
{
file_t *fp, *next_fp;
int leaked = 0;
ENTRY;
SENTRY;
spin_lock(&vn_file_lock);
@ -791,10 +791,10 @@ vn_fini(void)
spin_unlock(&vn_file_lock);
if (leaked > 0)
CWARN("Warning %d files leaked\n", leaked);
SWARN("Warning %d files leaked\n", leaked);
kmem_cache_destroy(vn_cache);
EXIT;
SEXIT;
return;
} /* vn_fini() */


@ -29,6 +29,12 @@
#include <rpc/xdr.h>
#include <spl-debug.h>
#ifdef SS_DEBUG_SUBSYS
#undef SS_DEBUG_SUBSYS
#endif
#define SS_DEBUG_SUBSYS SS_XDR
/*
* SPL's XDR mem implementation.
*
@ -144,7 +150,7 @@ xdrmem_create(XDR *xdrs, const caddr_t addr, const uint_t size,
xdrs->x_ops = &xdrmem_decode_ops;
break;
default:
CWARN("Invalid op value: %d\n", op);
SWARN("Invalid op value: %d\n", op);
xdrs->x_ops = NULL; /* Let the caller know we failed */
return;
}
@ -154,7 +160,7 @@ xdrmem_create(XDR *xdrs, const caddr_t addr, const uint_t size,
xdrs->x_addr_end = addr + size;
if (xdrs->x_addr_end < xdrs->x_addr) {
CWARN("Overflow while creating xdrmem: %p, %u\n", addr, size);
SWARN("Overflow while creating xdrmem: %p, %u\n", addr, size);
xdrs->x_ops = NULL;
}
}
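
A hypothetical encode path over a stack buffer, relying on the x_ops == NULL failure convention shown above; XDR_ENCODE and xdr_int() are assumed from the usual rpc/xdr.h interface and do not appear in these hunks.

#include <rpc/xdr.h>

static int
my_xdr_example(void)
{
        char buf[64];
        XDR xdrs;
        int value = 42;
        SENTRY;

        xdrmem_create(&xdrs, buf, sizeof (buf), XDR_ENCODE);
        if (xdrs.x_ops == NULL)                  /* creation failed */
                SRETURN(-EINVAL);

        if (!xdr_int(&xdrs, &value))             /* encode one int */
                SRETURN(-EFAULT);

        SRETURN(0);
}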
@ -166,7 +172,7 @@ xdrmem_control(XDR *xdrs, int req, void *info)
struct xdr_bytesrec *rec = (struct xdr_bytesrec *) info;
if (req != XDR_GET_BYTES_AVAIL) {
CWARN("Called with unknown request: %d\n", req);
SWARN("Called with unknown request: %d\n", req);
return FALSE;
}